code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Vic Fryzel <vf@google.com>'
import unittest
import atom.core
from gdata import test_data
import gdata.calendar_resource.data
import gdata.test_config as conf
class CalendarResourceEntryTest(unittest.TestCase):
  """Parsing tests for calendar resource entries and feeds.

  All XML comes from canned fixtures in gdata.test_data; no network
  access is made.
  """

  def setUp(self):
    # Parse the canned entry and feed XML into their v2 data classes.
    self.entry = atom.core.parse(test_data.CALENDAR_RESOURCE_ENTRY,
        gdata.calendar_resource.data.CalendarResourceEntry)
    self.feed = atom.core.parse(test_data.CALENDAR_RESOURCES_FEED,
        gdata.calendar_resource.data.CalendarResourceFeed)

  def testCalendarResourceEntryFromString(self):
    """Verifies every field parsed from a single resource entry."""
    self.assert_(isinstance(self.entry,
        gdata.calendar_resource.data.CalendarResourceEntry))
    self.assertEquals(self.entry.resource_id, 'CR-NYC-14-12-BR')
    self.assertEquals(self.entry.resource_common_name, 'Boardroom')
    self.assertEquals(self.entry.resource_description,
        ('This conference room is in New York City, building 14, floor 12, '
         'Boardroom'))
    self.assertEquals(self.entry.resource_type, 'CR')

  def testCalendarResourceFeedFromString(self):
    """Verifies both entries of the parsed resource feed."""
    self.assertEquals(len(self.feed.entry), 2)
    self.assert_(isinstance(self.feed,
        gdata.calendar_resource.data.CalendarResourceFeed))
    self.assert_(isinstance(self.feed.entry[0],
        gdata.calendar_resource.data.CalendarResourceEntry))
    self.assert_(isinstance(self.feed.entry[1],
        gdata.calendar_resource.data.CalendarResourceEntry))
    self.assertEquals(
        self.feed.entry[0].find_edit_link(),
        'https://apps-apis.google.com/feeds/calendar/resource/2.0/yourdomain.com/CR-NYC-14-12-BR')
    self.assertEquals(self.feed.entry[0].resource_id, 'CR-NYC-14-12-BR')
    self.assertEquals(self.feed.entry[0].resource_common_name, 'Boardroom')
    self.assertEquals(self.feed.entry[0].resource_description,
        ('This conference room is in New York City, building 14, floor 12, '
         'Boardroom'))
    self.assertEquals(self.feed.entry[0].resource_type, 'CR')
    self.assertEquals(self.feed.entry[1].resource_id,
        '(Bike)-London-43-Lobby-Bike-1')
    self.assertEquals(self.feed.entry[1].resource_common_name, 'London bike-1')
    self.assertEquals(self.feed.entry[1].resource_description,
        'Bike is in London at building 43\'s lobby.')
    self.assertEquals(self.feed.entry[1].resource_type, '(Bike)')
    # NOTE(review): the second entry's edit link includes the '/a/' path
    # segment while the first entry's does not — verify this matches the
    # fixture intentionally.
    self.assertEquals(
        self.feed.entry[1].find_edit_link(),
        'https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/(Bike)-London-43-Lobby-Bike-1')
def suite():
  """Builds the test suite containing all TestCases in this module."""
  return conf.build_suite([CalendarResourceEntryTest])


if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'Vic Fryzel <vf@google.com>'
import unittest
import gdata.client
import gdata.data
import gdata.gauth
import gdata.calendar_resource.client
import gdata.calendar_resource.data
import gdata.test_config as conf
# Register the Apps domain command line option used when running live tests.
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
class CalendarResourceClientTest(unittest.TestCase):
  """Tests for CalendarResourceClient.

  Configuration checks always run; the create/retrieve/update/delete
  cycle only runs when the 'runlive' option is 'true', otherwise it
  returns immediately.
  """

  def setUp(self):
    # Default to a dummy domain; replaced with the configured Apps domain
    # when running against live servers.
    self.client = gdata.calendar_resource.client.CalendarResourceClient(
        domain='example.com')
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.calendar_resource.client.CalendarResourceClient(
          domain=conf.options.get_value('appsdomain'))
      if conf.options.get_value('ssl') == 'true':
        self.client.ssl = True
      conf.configure_client(self.client, 'CalendarResourceClientTest',
          self.client.auth_service, True)

  def tearDown(self):
    conf.close_client(self.client)

  def testClientConfiguration(self):
    """Verifies host, API version, auth service/scopes and domain."""
    self.assertEqual('apps-apis.google.com', self.client.host)
    self.assertEqual('2.0', self.client.api_version)
    self.assertEqual('apps', self.client.auth_service)
    self.assertEqual(
        ('http://www.google.com/a/feeds/',
         'https://www.google.com/a/feeds/',
         'http://apps-apis.google.com/a/feeds/',
         'https://apps-apis.google.com/a/feeds/'), self.client.auth_scopes)
    if conf.options.get_value('runlive') == 'true':
      self.assertEqual(self.client.domain, conf.options.get_value('appsdomain'))
    else:
      self.assertEqual(self.client.domain, 'example.com')

  def testMakeResourceFeedUri(self):
    """Verifies URI construction with resource ids and query params."""
    self.assertEqual('/a/feeds/calendar/resource/2.0/%s/' % self.client.domain,
        self.client.MakeResourceFeedUri())
    self.assertEqual('/a/feeds/calendar/resource/2.0/%s/CR-NYC-14-12-BR'
        % self.client.domain,
        self.client.MakeResourceFeedUri(resource_id='CR-NYC-14-12-BR'))
    self.assertEqual('/a/feeds/calendar/resource/2.0/%s/?test=1'
        % self.client.domain,
        self.client.MakeResourceFeedUri(params={'test': 1}))
    self.assertEqual('/a/feeds/calendar/resource/2.0/%s/CR-NYC-14-12-BR?test=1'
        % self.client.domain,
        self.client.MakeResourceFeedUri(resource_id='CR-NYC-14-12-BR',
            params={'test': 1}))

  def testCreateRetrieveUpdateDelete(self):
    """Exercises the full resource lifecycle against live servers."""
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    # NOTE(review): the cache name omits 'Retrieve' compared to the method
    # name — confirm it matches any previously recorded sessions.
    conf.configure_cache(self.client, 'testCreateUpdateDelete')
    try:
      new_entry = self.client.CreateResource(
          'CR-NYC-14-12-BR', 'Boardroom',
          ('This conference room is in New York City, building 14, floor 12, '
           'Boardroom'), 'CR')
    except Exception, e:
      print e
      self.client.delete_resource('CR-NYC-14-12-BR')
      # If the test failed to run to completion
      # the resource may already exist
      new_entry = self.client.CreateResource(
          'CR-NYC-14-12-BR', 'Boardroom',
          ('This conference room is in New York City, building 14, floor 12, '
           'Boardroom'), 'CR')
    self.assert_(isinstance(new_entry,
        gdata.calendar_resource.data.CalendarResourceEntry))
    self.assertEqual(new_entry.resource_id, 'CR-NYC-14-12-BR')
    self.assertEqual(new_entry.resource_common_name, 'Boardroom')
    self.assertEqual(new_entry.resource_description,
        ('This conference room is in New York City, building 14, floor 12, '
         'Boardroom'))
    self.assertEqual(new_entry.resource_type, 'CR')
    fetched_entry = self.client.get_resource(resource_id='CR-NYC-14-12-BR')
    self.assert_(isinstance(fetched_entry,
        gdata.calendar_resource.data.CalendarResourceEntry))
    self.assertEqual(fetched_entry.resource_id, 'CR-NYC-14-12-BR')
    self.assertEqual(fetched_entry.resource_common_name, 'Boardroom')
    self.assertEqual(fetched_entry.resource_description,
        ('This conference room is in New York City, building 14, floor 12, '
         'Boardroom'))
    self.assertEqual(fetched_entry.resource_type, 'CR')
    # Mutate every field and push the update.
    new_entry.resource_id = 'CR-MTV-14-12-BR'
    new_entry.resource_common_name = 'Executive Boardroom'
    new_entry.resource_description = 'This conference room is in Mountain View'
    new_entry.resource_type = 'BR'
    updated_entry = self.client.update(new_entry)
    self.assert_(isinstance(updated_entry,
        gdata.calendar_resource.data.CalendarResourceEntry))
    self.assertEqual(updated_entry.resource_id, 'CR-MTV-14-12-BR')
    self.assertEqual(updated_entry.resource_common_name, 'Executive Boardroom')
    self.assertEqual(updated_entry.resource_description,
        'This conference room is in Mountain View')
    self.assertEqual(updated_entry.resource_type, 'BR')
    # NOTE(review): the update above changed the resource id to
    # CR-MTV-14-12-BR, yet the cleanup deletes CR-NYC-14-12-BR — verify
    # which id the server keeps after an update, or this may leak a
    # resource.
    self.client.delete_resource('CR-NYC-14-12-BR')
def suite():
  """Builds the test suite containing all TestCases in this module."""
  return conf.build_suite([CalendarResourceClientTest])


if __name__ == '__main__':
  unittest.TextTestRunner().run(suite())
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import unittest
from gdata import test_data
import gdata.blogger
import atom
class BlogEntryTest(unittest.TestCase):
  """Parsing tests for Blogger entries, post feeds and comment feeds.

  All tests parse canned XML from gdata.test_data; no network access.
  """

  def testBlogEntryFromString(self):
    """Blog name, blog id and title are extracted from a blog entry."""
    entry = gdata.blogger.BlogEntryFromString(test_data.BLOG_ENTRY)
    self.assertEquals(entry.GetBlogName(), 'blogName')
    self.assertEquals(entry.GetBlogId(), 'blogID')
    self.assertEquals(entry.title.text, 'Lizzy\'s Diary')

  def testBlogPostFeedFromString(self):
    """A posts feed parses into BlogPostFeed/BlogPostEntry objects."""
    feed = gdata.blogger.BlogPostFeedFromString(test_data.BLOG_POSTS_FEED)
    self.assertEquals(len(feed.entry), 1)
    self.assert_(isinstance(feed, gdata.blogger.BlogPostFeed))
    self.assert_(isinstance(feed.entry[0], gdata.blogger.BlogPostEntry))
    self.assertEquals(feed.entry[0].GetPostId(), 'postID')
    self.assertEquals(feed.entry[0].GetBlogId(), 'blogID')
    self.assertEquals(feed.entry[0].title.text, 'Quite disagreeable')

  def testCommentFeedFromString(self):
    """A comments feed parses, including the in-reply-to element."""
    feed = gdata.blogger.CommentFeedFromString(test_data.BLOG_COMMENTS_FEED)
    self.assertEquals(len(feed.entry), 1)
    self.assert_(isinstance(feed, gdata.blogger.CommentFeed))
    self.assert_(isinstance(feed.entry[0], gdata.blogger.CommentEntry))
    self.assertEquals(feed.entry[0].GetBlogId(), 'blogID')
    self.assertEquals(feed.entry[0].GetCommentId(), 'commentID')
    self.assertEquals(feed.entry[0].title.text, 'This is my first comment')
    self.assertEquals(feed.entry[0].in_reply_to.source,
        'http://blogName.blogspot.com/feeds/posts/default/postID')
    self.assertEquals(feed.entry[0].in_reply_to.ref,
        'tag:blogger.com,1999:blog-blogID.post-postID')
    self.assertEquals(feed.entry[0].in_reply_to.href,
        'http://blogName.blogspot.com/2007/04/first-post.html')
    self.assertEquals(feed.entry[0].in_reply_to.type, 'text/html')

  def testIdParsing(self):
    """GetBlogId handles both user-qualified and bare blog-id atom ids."""
    entry = gdata.blogger.BlogEntry()
    entry.id = atom.Id(
        text='tag:blogger.com,1999:user-146606542.blog-4023408167658848')
    self.assertEquals(entry.GetBlogId(), '4023408167658848')
    entry.id = atom.Id(text='tag:blogger.com,1999:blog-4023408167658848')
    self.assertEquals(entry.GetBlogId(), '4023408167658848')
class InReplyToTest(unittest.TestCase):
  """Round-trip (serialize then re-parse) test for InReplyTo."""

  def testToAndFromString(self):
    """An InReplyTo survives a str()/FromString round trip intact."""
    original = gdata.blogger.InReplyTo(href='http://example.com/href',
        ref='http://example.com/ref', source='http://example.com/my_post',
        type='text/html')
    round_tripped = gdata.blogger.InReplyToFromString(str(original))
    # Every attribute must come back unchanged from the XML round trip.
    for attribute in ('source', 'href', 'ref', 'type'):
      self.assertEquals(getattr(round_tripped, attribute),
                        getattr(original, attribute))
class CommentEntryTest(unittest.TestCase):
  """Round-trip (serialize then re-parse) test for CommentEntry."""

  def testToAndFromString(self):
    """Content text and in-reply-to source survive re-parsing."""
    original = gdata.blogger.CommentEntry(
        content=atom.Content(text='Nifty!'),
        in_reply_to=gdata.blogger.InReplyTo(
            source='http://example.com/my_post'))
    reparsed = gdata.blogger.CommentEntryFromString(str(original))
    self.assertEquals(reparsed.in_reply_to.source, original.in_reply_to.source)
    self.assertEquals(reparsed.content.text, original.content.text)
if __name__ == '__main__':
unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'e.bidelman (Eric Bidelman)'
import unittest
import atom
from gdata import test_data
import gdata.acl.data
import gdata.data
import gdata.docs.data
import gdata.test_config as conf
class DocsHelperTest(unittest.TestCase):
  """Tests for module-level helper functions in gdata.docs.data.

  Improvement over the original: the two expected-ValueError checks used
  a manual try/except flag pattern with unused locals (``link4``, ``e``);
  they are replaced with the idiomatic ``assertRaises``, which fails the
  test automatically if no exception is raised.
  """

  def setUp(self):
    pass

  def testMakeKindCategory(self):
    """MakeKindCategory builds a category with the docs kind scheme/term."""
    category = gdata.docs.data.MakeKindCategory('folder')
    self.assertEqual(category.label, 'folder')
    self.assertEqual(category.scheme, 'http://schemas.google.com/g/2005#kind')
    self.assertEqual(
        category.term, 'http://schemas.google.com/docs/2007#folder')
    category = gdata.docs.data.MakeKindCategory('spreadsheet')
    self.assertEqual(category.label, 'spreadsheet')
    self.assertEqual(category.scheme, 'http://schemas.google.com/g/2005#kind')
    self.assertEqual(
        category.term, 'http://schemas.google.com/docs/2007#spreadsheet')

  def testMakeContentLinkFromResourceId(self):
    """make_content_link_from_resource_id maps resource ids to export URLs."""
    link = gdata.docs.data.make_content_link_from_resource_id(
        'document%3A1234567890')
    self.assertEqual(link, '/feeds/download/documents/Export?docId=1234567890')
    link2 = gdata.docs.data.make_content_link_from_resource_id(
        'presentation%3A1234567890')
    self.assertEqual(
        link2, '/feeds/download/presentations/Export?docId=1234567890')
    link3 = gdata.docs.data.make_content_link_from_resource_id(
        'spreadsheet%3A1234567890')
    self.assertEqual(
        link3, ('https://spreadsheets.google.com/feeds/download/spreadsheets/'
                'Export?key=1234567890'))
    # An invalid resource id (no type prefix) must be rejected.
    self.assertRaises(
        ValueError, gdata.docs.data.make_content_link_from_resource_id,
        '1234567890')
    # A resource type that cannot be exported must also be rejected.
    self.assertRaises(
        ValueError, gdata.docs.data.make_content_link_from_resource_id,
        'pdf%3A1234567890')
class DocsEntryTest(unittest.TestCase):
  """Parsing tests for a version-3 document list entry."""

  def setUp(self):
    # NOTE(review): this module imports ``atom`` but not ``atom.core``
    # explicitly — relies on the atom package exposing its core submodule.
    self.entry = atom.core.parse(test_data.DOCUMENT_LIST_ENTRY_V3,
        gdata.docs.data.DocsEntry)

  def testToAndFromStringDocsEntry(self):
    """Verifies every typed sub-element of the parsed DocsEntry."""
    self.assert_(isinstance(self.entry, gdata.docs.data.DocsEntry))
    self.assertEqual(self.entry.GetDocumentType(), 'spreadsheet')
    self.assert_(isinstance(self.entry.last_viewed, gdata.docs.data.LastViewed))
    self.assertEqual(self.entry.last_viewed.text, '2009-03-05T07:48:21.493Z')
    self.assert_(
        isinstance(self.entry.last_modified_by, gdata.docs.data.LastModifiedBy))
    self.assertEqual(
        self.entry.last_modified_by.email.text, 'test.user@gmail.com')
    self.assertEqual(self.entry.last_modified_by.name.text, 'test.user')
    self.assert_(isinstance(self.entry.resource_id, gdata.docs.data.ResourceId))
    self.assertEqual(self.entry.resource_id.text,
        'spreadsheet:supercalifragilisticexpealidocious')
    self.assert_(isinstance(self.entry.writers_can_invite,
        gdata.docs.data.WritersCanInvite))
    self.assertEqual(self.entry.writers_can_invite.value, 'true')
    self.assert_(isinstance(self.entry.quota_bytes_used,
        gdata.docs.data.QuotaBytesUsed))
    self.assertEqual(self.entry.quota_bytes_used.text, '1000')
    # The entry carries two feed links: the ACL feed and the revisions feed.
    self.assertEqual(len(self.entry.feed_link), 2)
    self.assert_(isinstance(self.entry.feed_link[0], gdata.data.FeedLink))
    self.assertEqual(
        self.entry.get_acl_feed_link().href,
        ('https://docs.google.com/feeds/default/private/full/'
         'spreadsheet%3Asupercalifragilisticexpealidocious/acl'))
    self.assertEqual(
        self.entry.get_revisions_feed_link().href,
        ('https://docs.google.com/feeds/default/private/full/'
         'spreadsheet%3Asupercalifragilisticexpealidocious/revisions'))
    self.assertEqual(len(self.entry.in_folders()), 1)
    self.assertEqual(self.entry.in_folders()[0].title, 'AFolderName')
class AclTest(unittest.TestCase):
  """Parsing and construction tests for document ACL entries."""

  def setUp(self):
    # One plain ACL entry and one that carries a gAcl:withKey element.
    self.acl_entry = atom.core.parse(test_data.DOCUMENT_LIST_ACL_ENTRY,
        gdata.docs.data.Acl)
    self.acl_entry_withkey = atom.core.parse(
        test_data.DOCUMENT_LIST_ACL_WITHKEY_ENTRY, gdata.docs.data.Acl)

  def testToAndFromString(self):
    """A plain ACL entry survives a serialize/re-parse cycle."""
    self.assert_(isinstance(self.acl_entry, gdata.docs.data.Acl))
    self.assert_(isinstance(self.acl_entry.role, gdata.acl.data.AclRole))
    self.assert_(isinstance(self.acl_entry.scope, gdata.acl.data.AclScope))
    self.assertEqual(self.acl_entry.scope.value, 'user@gmail.com')
    self.assertEqual(self.acl_entry.scope.type, 'user')
    self.assertEqual(self.acl_entry.role.value, 'writer')
    acl_entry_str = str(self.acl_entry)
    new_acl_entry = atom.core.parse(acl_entry_str, gdata.docs.data.Acl)
    self.assert_(isinstance(new_acl_entry, gdata.docs.data.Acl))
    self.assert_(isinstance(new_acl_entry.role, gdata.acl.data.AclRole))
    self.assert_(isinstance(new_acl_entry.scope, gdata.acl.data.AclScope))
    self.assertEqual(new_acl_entry.scope.value, self.acl_entry.scope.value)
    self.assertEqual(new_acl_entry.scope.type, self.acl_entry.scope.type)
    self.assertEqual(new_acl_entry.role.value, self.acl_entry.role.value)

  def testToAndFromStringWithKey(self):
    """A with-key ACL entry keeps its key, nested role and scope."""
    self.assert_(isinstance(self.acl_entry_withkey, gdata.docs.data.Acl))
    # With-key entries carry the role inside the withKey element, not at
    # the top level.
    self.assert_(self.acl_entry_withkey.role is None)
    self.assert_(isinstance(self.acl_entry_withkey.with_key,
        gdata.acl.data.AclWithKey))
    self.assert_(isinstance(self.acl_entry_withkey.with_key.role,
        gdata.acl.data.AclRole))
    self.assert_(isinstance(self.acl_entry_withkey.scope,
        gdata.acl.data.AclScope))
    self.assertEqual(self.acl_entry_withkey.with_key.key, 'somekey')
    self.assertEqual(self.acl_entry_withkey.with_key.role.value, 'writer')
    self.assertEqual(self.acl_entry_withkey.scope.value, 'example.com')
    self.assertEqual(self.acl_entry_withkey.scope.type, 'domain')
    acl_entry_withkey_str = str(self.acl_entry_withkey)
    new_acl_entry_withkey = atom.core.parse(acl_entry_withkey_str,
        gdata.docs.data.Acl)
    self.assert_(isinstance(new_acl_entry_withkey, gdata.docs.data.Acl))
    self.assert_(new_acl_entry_withkey.role is None)
    self.assert_(isinstance(new_acl_entry_withkey.with_key,
        gdata.acl.data.AclWithKey))
    self.assert_(isinstance(new_acl_entry_withkey.with_key.role,
        gdata.acl.data.AclRole))
    self.assert_(isinstance(new_acl_entry_withkey.scope,
        gdata.acl.data.AclScope))
    self.assertEqual(new_acl_entry_withkey.with_key.key,
        self.acl_entry_withkey.with_key.key)
    self.assertEqual(new_acl_entry_withkey.with_key.role.value,
        self.acl_entry_withkey.with_key.role.value)
    self.assertEqual(new_acl_entry_withkey.scope.value,
        self.acl_entry_withkey.scope.value)
    self.assertEqual(new_acl_entry_withkey.scope.type,
        self.acl_entry_withkey.scope.type)

  def testCreateNewAclEntry(self):
    """Builds a v1-style DocumentListAclEntry from scratch.

    NOTE(review): this method references ``gdata.atom`` and
    ``gdata.docs``, neither of which is imported by this module (only
    ``atom`` and ``gdata.docs.data`` are) — verify this test actually
    runs, or update it to the v3 data classes.
    """
    cat = gdata.atom.Category(
        term='http://schemas.google.com/acl/2007#accessRule',
        scheme='http://schemas.google.com/g/2005#kind')
    acl_entry = gdata.docs.DocumentListAclEntry(category=[cat])
    acl_entry.scope = gdata.docs.Scope(value='user@gmail.com', type='user')
    acl_entry.role = gdata.docs.Role(value='writer')
    self.assert_(isinstance(acl_entry, gdata.docs.DocumentListAclEntry))
    self.assert_(isinstance(acl_entry.role, gdata.docs.Role))
    self.assert_(isinstance(acl_entry.scope, gdata.docs.Scope))
    self.assertEqual(acl_entry.scope.value, 'user@gmail.com')
    self.assertEqual(acl_entry.scope.type, 'user')
    self.assertEqual(acl_entry.role.value, 'writer')
class AclFeedTest(unittest.TestCase):
  """Tests parsing and re-serialization of a document ACL feed."""

  def setUp(self):
    self.feed = atom.core.parse(test_data.DOCUMENT_LIST_ACL_FEED,
        gdata.docs.data.AclFeed)

  def testToAndFromString(self):
    """Every entry is an Acl, before and after a round trip."""
    for entry in self.feed.entry:
      self.assert_(isinstance(entry, gdata.docs.data.Acl))
    reparsed = atom.core.parse(str(self.feed), gdata.docs.data.AclFeed)
    for entry in reparsed.entry:
      self.assert_(isinstance(entry, gdata.docs.data.Acl))

  def testConvertActualData(self):
    """Checks the concrete values of both ACL entries in the canned feed."""
    entries = self.feed.entry
    self.assert_(len(entries) == 2)
    # (title, role, scope type, scope value) per entry, in feed order.
    expected = [
        ('Document Permission - user@gmail.com', 'owner', 'user',
         'user@gmail.com'),
        ('Document Permission - user2@google.com', 'writer', 'domain',
         'google.com'),
    ]
    for entry, (title, role, scope_type, scope_value) in zip(entries, expected):
      self.assertEqual(entry.title.text, title)
      self.assertEqual(entry.role.value, role)
      self.assertEqual(entry.scope.type, scope_type)
      self.assertEqual(entry.scope.value, scope_value)
      self.assert_(entry.GetSelfLink() is not None)
      self.assert_(entry.GetEditLink() is not None)
class RevisionFeedTest(unittest.TestCase):
  """Parsing tests for a document revision feed."""

  def setUp(self):
    self.feed = atom.core.parse(test_data.DOCUMENT_LIST_REVISION_FEED,
        gdata.docs.data.RevisionFeed)

  def testToAndFromString(self):
    """Entries survive a serialize/re-parse cycle as Revision objects."""
    for entry in self.feed.entry:
      self.assert_(isinstance(entry, gdata.docs.data.Revision))
    feed = atom.core.parse(str(self.feed), gdata.docs.data.RevisionFeed)
    for entry in feed.entry:
      self.assert_(isinstance(entry, gdata.docs.data.Revision))

  def testConvertActualData(self):
    """Checks publish flags and the publish link of the canned revision."""
    entries = self.feed.entry
    self.assert_(len(entries) == 1)
    self.assertEqual(entries[0].title.text, 'Revision 2')
    self.assertEqual(entries[0].publish.value, 'true')
    self.assertEqual(entries[0].publish_auto.value, 'true')
    self.assertEqual(entries[0].publish_outside_domain.value, 'false')
    # Both accessors must resolve to the same publish URL.
    self.assertEqual(
        entries[0].GetPublishLink().href,
        'https://docs.google.com/View?docid=dfr4&pageview=1&hgd=1')
    self.assertEqual(
        entries[0].FindPublishLink(),
        'https://docs.google.com/View?docid=dfr4&pageview=1&hgd=1')
class DataClassSanityTest(unittest.TestCase):
  """Structural sanity check over the gdata.docs.data element classes."""

  def test_basic_element_structure(self):
    # conf.check_data_classes validates each class's element wiring.
    conf.check_data_classes(self, [
        gdata.docs.data.ResourceId, gdata.docs.data.LastModifiedBy,
        gdata.docs.data.LastViewed, gdata.docs.data.WritersCanInvite,
        gdata.docs.data.QuotaBytesUsed, gdata.docs.data.Publish,
        gdata.docs.data.PublishAuto, gdata.docs.data.PublishOutsideDomain,
        gdata.docs.data.DocsEntry, gdata.docs.data.Acl, gdata.docs.data.AclFeed,
        gdata.docs.data.DocList, gdata.docs.data.Revision,
        gdata.docs.data.RevisionFeed])
def suite():
  """Builds the test suite containing all TestCases in this module.

  Bug fix: the original list referenced the undefined name ``AclFeed``
  (a NameError at runtime — the TestCase is ``AclFeedTest``) and omitted
  ``RevisionFeedTest``; both are corrected here.
  """
  return conf.build_suite(
      [DataClassSanityTest, DocsHelperTest, DocsEntryTest, AclTest,
       AclFeedTest, RevisionFeedTest])


if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = ('api.jfisher (Jeff Fisher), '
'api.eric@google.com (Eric Bidelman)')
import getpass
import os
import re
import StringIO
import time
import unittest
import gdata.docs.service
import gdata.spreadsheet.service
# Login credentials; presumably populated by a getpass prompt elsewhere in
# this file (not visible in this chunk) — intentionally blank in source.
username = ''
password = ''
# Service clients shared by every TestCase subclass below: one for reads,
# one ('editClient') configured separately for edits, and a spreadsheets
# client for spreadsheet-specific operations.
client = gdata.docs.service.DocsService()
editClient = gdata.docs.service.DocsService()
spreadsheets = gdata.spreadsheet.service.SpreadsheetsService()
class DocumentsListServiceTest(unittest.TestCase):
  """Base class wiring the shared module-level clients into each test.

  Subclasses call DocumentsListServiceTest.setUp(self) to inherit the
  clients and the pre-built document-kind categories.
  """

  def setUp(self):
    self.client = client
    self.editClient = editClient
    # The edit client reuses the main client's auth token; the
    # 'If-Match: *' header makes updates/deletes unconditional (ignores
    # entry etags).
    self.editClient.SetClientLoginToken(client.GetClientLoginToken())
    self.editClient.additional_headers = {'If-Match': '*'}
    self.spreadsheets = spreadsheets
    # Kind categories attached to new entries to select the document type.
    self.DOCUMENT_CATEGORY = client._MakeKindCategory(
        gdata.docs.service.DOCUMENT_LABEL)
    self.SPREADSHEET_CATEGORY = client._MakeKindCategory(
        gdata.docs.service.SPREADSHEET_LABEL)
    self.PRESENTATION_CATEGORY = client._MakeKindCategory(
        gdata.docs.service.PRESENTATION_LABEL)
class DocumentListQueryTest(DocumentsListServiceTest):
  """Live read-only queries against the documents list feed."""

  def setUp(self):
    DocumentsListServiceTest.setUp(self)
    self.feed = self.client.GetDocumentListFeed()

  def testGetDocumentsListFeed(self):
    """GetDocumentListFeed and QueryDocumentListFeed agree on results."""
    self.assert_(isinstance(self.feed, gdata.docs.DocumentListFeed))
    uri = 'http://docs.google.com/feeds/documents/private/full/?max-results=1'
    # Query using GetDocumentListFeed()
    feed = self.client.GetDocumentListFeed(uri)
    self.assert_(isinstance(feed, gdata.docs.DocumentListFeed))
    self.assertEqual(len(feed.entry), 1)
    self.assertEqual(self.feed.entry[0].id.text, feed.entry[0].id.text)
    self.assertEqual(self.feed.entry[0].title.text, feed.entry[0].title.text)
    # Query using QueryDocumentListFeed()
    feed2 = self.client.QueryDocumentListFeed(uri)
    self.assertEqual(len(feed2.entry), 1)
    self.assertEqual(self.feed.entry[0].id.text, feed2.entry[0].id.text)
    self.assertEqual(self.feed.entry[0].title.text, feed2.entry[0].title.text)

  def testGetDocumentsListEntry(self):
    """Fetching an entry by self link returns the same document."""
    self_link = self.feed.entry[0].GetSelfLink().href
    entry = self.client.GetDocumentListEntry(self_link)
    self.assert_(isinstance(entry, gdata.docs.DocumentListEntry))
    self.assertEqual(self.feed.entry[0].id.text, entry.id.text)
    self.assertEqual(self.feed.entry[0].title.text, entry.title.text)
    self.assert_(self.feed.entry[0].resourceId.text is not None)
    self.assert_(self.feed.entry[0].lastModifiedBy is not None)
    self.assert_(self.feed.entry[0].lastViewed is not None)

  def testGetDocumentsListAclFeed(self):
    """The ACL feed of an owned document has scope and role populated."""
    uri = ('http://docs.google.com/feeds/documents/private/full/'
           '-/mine?max-results=1')
    feed = self.client.GetDocumentListFeed(uri)
    feed_link = feed.entry[0].GetAclLink().href
    acl_feed = self.client.GetDocumentListAclFeed(feed_link)
    self.assert_(isinstance(acl_feed, gdata.docs.DocumentListAclFeed))
    self.assert_(isinstance(acl_feed.entry[0], gdata.docs.DocumentListAclEntry))
    self.assert_(acl_feed.entry[0].scope is not None)
    self.assert_(acl_feed.entry[0].role is not None)
class DocumentListAclTest(DocumentsListServiceTest):
  """Live create/update/delete cycle for a document ACL entry."""

  def setUp(self):
    DocumentsListServiceTest.setUp(self)
    # Grab one owned document to attach the test ACL to.
    uri = ('http://docs.google.com/feeds/documents/private/full'
           '/-/mine?max-results=1')
    self.feed = self.client.GetDocumentListFeed(uri)
    self.EMAIL = 'x@example.com'
    self.SCOPE_TYPE = 'user'
    self.ROLE_VALUE = 'reader'

  def testCreateAndUpdateAndDeleteAcl(self):
    """Grants reader access, upgrades it to writer, then revokes it."""
    # Add new ACL
    scope = gdata.docs.Scope(value=self.EMAIL, type=self.SCOPE_TYPE)
    role = gdata.docs.Role(value=self.ROLE_VALUE)
    acl_entry = self.client.Post(
        gdata.docs.DocumentListAclEntry(scope=scope, role=role),
        self.feed.entry[0].GetAclLink().href,
        converter=gdata.docs.DocumentListAclEntryFromString)
    self.assert_(isinstance(acl_entry, gdata.docs.DocumentListAclEntry))
    self.assertEqual(acl_entry.scope.value, self.EMAIL)
    self.assertEqual(acl_entry.scope.type, self.SCOPE_TYPE)
    self.assertEqual(acl_entry.role.value, self.ROLE_VALUE)
    # Update the user's role
    ROLE_VALUE = 'writer'
    acl_entry.role.value = ROLE_VALUE
    updated_acl_entry = self.editClient.Put(
        acl_entry, acl_entry.GetEditLink().href,
        converter=gdata.docs.DocumentListAclEntryFromString)
    self.assertEqual(updated_acl_entry.scope.value, self.EMAIL)
    self.assertEqual(updated_acl_entry.scope.type, self.SCOPE_TYPE)
    self.assertEqual(updated_acl_entry.role.value, ROLE_VALUE)
    # Delete the ACL
    self.editClient.Delete(updated_acl_entry.GetEditLink().href)
    # Make sure entry was actually deleted
    acl_feed = self.client.GetDocumentListAclFeed(
        self.feed.entry[0].GetAclLink().href)
    for acl_entry in acl_feed.entry:
      self.assert_(acl_entry.scope.value != self.EMAIL)
class DocumentListCreateAndDeleteTest(DocumentsListServiceTest):
  """Live tests that create empty documents/folders and delete them."""

  def setUp(self):
    DocumentsListServiceTest.setUp(self)
    self.BLANK_TITLE = "blank.txt"
    self.TITLE = 'Test title'
    self.new_entry = gdata.docs.DocumentListEntry()
    self.new_entry.category.append(self.DOCUMENT_CATEGORY)

  def testCreateAndDeleteEmptyDocumentSlugHeaderTitle(self):
    """The Slug header sets the title of a new empty document."""
    created_entry = self.client.Post(self.new_entry,
        '/feeds/documents/private/full',
        extra_headers={'Slug': self.BLANK_TITLE})
    self.editClient.Delete(created_entry.GetEditLink().href)
    self.assertEqual(created_entry.title.text, self.BLANK_TITLE)
    self.assertEqual(created_entry.category[0].label, 'document')

  def testCreateAndDeleteEmptyDocumentAtomTitle(self):
    """An atom:title element sets the title of a new empty document."""
    self.new_entry.title = gdata.atom.Title(text=self.TITLE)
    created_entry = self.client.Post(self.new_entry,
        '/feeds/documents/private/full')
    self.editClient.Delete(created_entry.GetEditLink().href)
    self.assertEqual(created_entry.title.text, self.TITLE)
    self.assertEqual(created_entry.category[0].label, 'document')

  def testCreateAndDeleteEmptySpreadsheet(self):
    """A new spreadsheet comes back with 'viewed' then kind categories."""
    self.new_entry.title = gdata.atom.Title(text=self.TITLE)
    self.new_entry.category[0] = self.SPREADSHEET_CATEGORY
    created_entry = self.client.Post(self.new_entry,
        '/feeds/documents/private/full')
    self.editClient.Delete(created_entry.GetEditLink().href)
    self.assertEqual(created_entry.title.text, self.TITLE)
    self.assertEqual(created_entry.category[0].label, 'viewed')
    self.assertEqual(created_entry.category[1].label, 'spreadsheet')

  def testCreateAndDeleteEmptyPresentation(self):
    """A new presentation comes back with 'viewed' then kind categories."""
    self.new_entry.title = gdata.atom.Title(text=self.TITLE)
    self.new_entry.category[0] = self.PRESENTATION_CATEGORY
    created_entry = self.client.Post(self.new_entry,
        '/feeds/documents/private/full')
    self.editClient.Delete(created_entry.GetEditLink().href)
    self.assertEqual(created_entry.title.text, self.TITLE)
    self.assertEqual(created_entry.category[0].label, 'viewed')
    self.assertEqual(created_entry.category[1].label, 'presentation')

  def testCreateAndDeleteFolder(self):
    """CreateFolder round-trips the folder name."""
    folder_name = 'TestFolder'
    folder = self.client.CreateFolder(folder_name)
    self.assertEqual(folder.title.text, folder_name)
    self.editClient.Delete(folder.GetEditLink().href)

  def testCreateAndDeleteFolderInFolder(self):
    """A folder created inside another carries the parent-folder label."""
    DEST_FOLDER_NAME = 'TestFolder'
    dest_folder = self.client.CreateFolder(DEST_FOLDER_NAME)
    CREATED_FOLDER_NAME = 'TestFolder2'
    new_folder = self.client.CreateFolder(CREATED_FOLDER_NAME, dest_folder)
    for category in new_folder.category:
      if category.scheme.startswith(gdata.docs.service.FOLDERS_SCHEME_PREFIX):
        self.assertEqual(new_folder.category[0].label, DEST_FOLDER_NAME)
        break
    # delete the folders we created, this will also delete the child folder
    dest_folder = self.client.Get(dest_folder.GetSelfLink().href)
    self.editClient.Delete(dest_folder.GetEditLink().href)
class DocumentListMoveInAndOutOfFolderTest(DocumentsListServiceTest):
  """Live tests moving documents and folders between folders.

  Requires a local file named 'test.doc' to upload as fixture data.
  """

  def setUp(self):
    DocumentsListServiceTest.setUp(self)
    self.folder_name = 'TestFolder'
    self.folder = self.client.CreateFolder(self.folder_name)
    self.doc_title = 'TestDoc'
    self.ms = gdata.MediaSource(file_path='test.doc',
        content_type='application/msword')

  def tearDown(self):
    # Re-fetch the folder so we hold a current etag/edit link, then delete.
    folder = self.client.Get(self.folder.GetSelfLink().href)
    self.editClient.Delete(folder.GetEditLink().href)

  def testUploadDocumentToFolder(self):
    """A document uploaded into a folder carries that folder's label."""
    created_entry = self.client.Upload(self.ms, self.doc_title,
        self.folder)
    for category in created_entry.category:
      if category.scheme.startswith(gdata.docs.service.FOLDERS_SCHEME_PREFIX):
        self.assertEqual(category.label, self.folder_name)
        break
    # delete the doc we created
    created_entry = self.client.Get(created_entry.GetSelfLink().href)
    # Rebuild a top-level edit URI from the folder-scoped edit link.
    # NOTE(review): non-raw string used as a regex pattern; a raw string
    # (r'...') would be clearer, though the escapes happen to survive here.
    match = re.search('\/(document%3A[^\/]*)\/?.*?\/(.*)$',
        created_entry.GetEditLink().href)
    edit_uri = 'http://docs.google.com/feeds/documents/private/full/'
    edit_uri += '%s/%s' % (match.group(1), match.group(2))
    self.editClient.Delete(edit_uri)

  def testMoveDocumentInAndOutOfFolder(self):
    """Moving in adds the folder label; moving out removes it."""
    created_entry = self.client.Upload(self.ms, self.doc_title)
    moved_entry = self.client.MoveIntoFolder(created_entry,
        self.folder)
    for category in moved_entry.category:
      if category.scheme.startswith(gdata.docs.service.FOLDERS_SCHEME_PREFIX):
        self.assertEqual(category.label, self.folder_name)
        break
    self.editClient.MoveOutOfFolder(moved_entry)
    moved_entry = self.client.Get(moved_entry.GetSelfLink().href)
    for category in moved_entry.category:
      # NOTE(review): local name has a stray double underscore
      # ('folder__prefix'); kept byte-identical here.
      starts_with_folder__prefix = category.scheme.startswith(
          gdata.docs.service.FOLDERS_SCHEME_PREFIX)
      self.assert_(not starts_with_folder__prefix)
    created_entry = self.client.Get(created_entry.GetSelfLink().href)
    self.editClient.Delete(created_entry.GetEditLink().href)

  def testMoveFolderIntoFolder(self):
    """Moving a folder adds a category term naming the destination."""
    dest_folder_name = 'DestFolderName'
    dest_folder = self.client.CreateFolder(dest_folder_name)
    self.client.MoveIntoFolder(self.folder, dest_folder)
    self.folder = self.client.Get(self.folder.GetSelfLink().href)
    folder_was_moved = False
    for category in self.folder.category:
      if category.term == dest_folder_name:
        folder_was_moved = True
        break
    self.assert_(folder_was_moved)
    #cleanup
    dest_folder = self.client.Get(dest_folder.GetSelfLink().href)
    self.editClient.Delete(dest_folder.GetEditLink().href)
class DocumentListUploadTest(DocumentsListServiceTest):
  """Live upload/delete round-trips for each supported document kind."""

  def testUploadAndDeleteDocument(self):
    ms = gdata.MediaSource(file_path='test.doc',
                           content_type='application/msword')
    entry = self.client.Upload(ms, 'test doc')
    self.assertEqual(entry.title.text, 'test doc')
    self.assertEqual(entry.category[0].label, 'document')
    self.assert_(isinstance(entry, gdata.docs.DocumentListEntry))
    self.editClient.Delete(entry.GetEditLink().href)

  def testUploadAndDeletePresentation(self):
    ms = gdata.MediaSource(file_path='test.ppt',
                           content_type='application/vnd.ms-powerpoint')
    entry = self.client.Upload(ms, 'test preso')
    self.assertEqual(entry.title.text, 'test preso')
    self.assertEqual(entry.category[0].label, 'viewed')
    self.assertEqual(entry.category[1].label, 'presentation')
    self.assert_(isinstance(entry, gdata.docs.DocumentListEntry))
    self.editClient.Delete(entry.GetEditLink().href)

  def testUploadAndDeleteSpreadsheet(self):
    ms = gdata.MediaSource(file_path='test.csv',
                           content_type='text/csv')
    entry = self.client.Upload(ms, 'test spreadsheet')
    # assertEqual for a clearer failure message; the original used a bare
    # boolean assert here, unlike its sibling tests.
    self.assertEqual(entry.title.text, 'test spreadsheet')
    self.assertEqual(entry.category[0].label, 'viewed')
    self.assertEqual(entry.category[1].label, 'spreadsheet')
    self.assert_(isinstance(entry, gdata.docs.DocumentListEntry))
    self.editClient.Delete(entry.GetEditLink().href)
class DocumentListUpdateTest(DocumentsListServiceTest):
  """Live tests for updating a document's metadata and media content."""

  def setUp(self):
    # Create a fresh metadata-only document entry to update in each test.
    DocumentsListServiceTest.setUp(self)
    self.TITLE = 'CreatedTestDoc'
    new_entry = gdata.docs.DocumentListEntry()
    new_entry.title = gdata.atom.Title(text=self.TITLE)
    new_entry.category.append(self.DOCUMENT_CATEGORY)
    self.created_entry = self.client.Post(new_entry,
                                          '/feeds/documents/private/full')

  def tearDown(self):
    # Delete the test doc we created (re-fetch first for a current edit link).
    self_link = self.created_entry.GetSelfLink().href
    entry = self.client.GetDocumentListEntry(self_link)
    self.editClient.Delete(entry.GetEditLink().href)

  def testUpdateDocumentMetadataAndContent(self):
    title = 'UpdatedTestDoc'
    # Update metadata
    self.created_entry.title.text = title
    updated_entry = self.editClient.Put(self.created_entry,
                                        self.created_entry.GetEditLink().href)
    self.assertEqual(updated_entry.title.text, title)
    # Update document's content
    ms = gdata.MediaSource(file_path='test.doc',
                           content_type='application/msword')
    uri = updated_entry.GetEditMediaLink().href
    updated_entry = self.editClient.Put(ms, uri)
    self.assertEqual(updated_entry.title.text, title)
    # Append content to document
    data = 'data to append'
    ms = gdata.MediaSource(file_handle=StringIO.StringIO(data),
                           content_type='text/plain',
                           content_length=len(data))
    # '?append=true' appends to the existing body instead of replacing it.
    uri = updated_entry.GetEditMediaLink().href + '?append=true'
    updated_entry = self.editClient.Put(ms, uri)
class DocumentListExportTest(DocumentsListServiceTest):
def testExportDocument(self):
query = ('https://docs.google.com/feeds/documents/private/full'
'/-/document?max-results=1')
feed = self.client.QueryDocumentListFeed(query)
file_paths = ['./downloadedTest.doc', './downloadedTest.html',
'./downloadedTest.odt', './downloadedTest.pdf',
'./downloadedTest.png', './downloadedTest.rtf',
'./downloadedTest.txt', './downloadedTest.zip']
for path in file_paths:
self.client.Export(feed.entry[0], path)
self.assert_(os.path.exists(path))
self.assert_(os.path.getsize(path))
os.remove(path)
def testExportPresentation(self):
query = ('https://docs.google.com/feeds/documents/private/full'
'/-/presentation?max-results=1')
feed = self.client.QueryDocumentListFeed(query)
file_paths = ['./downloadedTest.pdf', './downloadedTest.ppt',
'./downloadedTest.swf', './downloadedTest.txt']
for path in file_paths:
self.client.Export(feed.entry[0].resourceId.text, path)
self.assert_(os.path.exists(path))
self.assert_(os.path.getsize(path))
os.remove(path)
def testExportSpreadsheet(self):
query = ('https://docs.google.com/feeds/documents/private/full'
'/-/spreadsheet?max-results=1')
feed = self.client.QueryDocumentListFeed(query)
file_paths = ['./downloadedTest.xls', './downloadedTest.csv',
'./downloadedTest.pdf', './downloadedTest.ods',
'./downloadedTest.tsv', './downloadedTest.html']
docs_token = self.client.GetClientLoginToken()
self.client.SetClientLoginToken(self.spreadsheets.GetClientLoginToken())
for path in file_paths:
self.client.Export(feed.entry[0], path)
self.assert_(os.path.exists(path))
self.assert_(os.path.getsize(path) > 0)
os.remove(path)
self.client.SetClientLoginToken(docs_token)
def testExportNonExistentDocument(self):
path = './ned.txt'
exception_raised = False
try:
self.client.Export('non_existent_doc', path)
except Exception, e: # expected
exception_raised = True
self.assert_(exception_raised)
self.assert_(not os.path.exists(path))
if __name__ == '__main__':
  # These tests run against live Google servers and mutate real data, so
  # insist on interactive credentials for a throwaway test account.
  print ('DocList API Tests\nNOTE: Please run these tests only with a test '
         'account. The tests may delete or update your data.')
  username = raw_input('Please enter your username: ')
  password = getpass.getpass()
  # Reuse any token configured earlier in the module; otherwise log both
  # service clients in with the credentials just collected.
  if client.GetClientLoginToken() is None:
    client.ClientLogin(username, password,
                       source='Document List Client Unit Tests')
  if spreadsheets.GetClientLoginToken() is None:
    spreadsheets.ClientLogin(username, password,
                             source='Document List Client Unit Tests')
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'e.bidelman (Eric Bidelman)'
import os
import time
import unittest
import gdata.client
import gdata.data
import gdata.gauth
import gdata.docs.client
import gdata.docs.data
import gdata.test_config as conf
class DocsTestCase(unittest.TestCase):
  """Base fixture: builds a DocsClient only when live tests are enabled."""

  def setUp(self):
    # Leave the client unset unless the suite was started with runlive=true.
    self.client = None
    if conf.options.get_value('runlive') != 'true':
      return
    self.client = gdata.docs.client.DocsClient()
    if conf.options.get_value('ssl') == 'true':
      self.client.ssl = True
    conf.configure_client(self.client, 'DocsTest', self.client.auth_service)

  def tearDown(self):
    conf.close_client(self.client)
class DocsFetchingDataTest(DocsTestCase):
  """Live read-only tests: doc list, single doc, ACLs and revisions."""

  def testGetDocList(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testGetDocList')
    # Query using GetDocList()
    feed = self.client.GetDocList(limit=1)
    self.assert_(isinstance(feed, gdata.docs.data.DocList))
    self.assertEqual(len(feed.entry), 1)

  def testGetDoc(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testGetDoc')
    uri = ('http://docs.google.com/feeds/default/private/full/'
           '-/spreadsheet?max-results=1')
    feed = self.client.GetDocList(uri, limit=1)
    self.assertEqual(len(feed.entry), 1)
    self.assertEqual(feed.entry[0].GetDocumentType(), 'spreadsheet')
    resource_id = feed.entry[0].resource_id.text
    entry = self.client.GetDoc(resource_id)
    self.assert_(isinstance(entry, gdata.docs.data.DocsEntry))
    # The fetched entry must carry its core identifying fields.  (The
    # original asserted title.text twice; the duplicate is removed.)
    self.assert_(entry.id.text is not None)
    self.assert_(entry.title.text is not None)
    self.assert_(entry.resource_id.text is not None)

  def testGetAclFeed(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testGetAclFeed')
    uri = ('http://docs.google.com/feeds/default/private/full/'
           '-/mine?max-results=1')
    feed = self.client.GetDocList(uri=uri)
    self.assertEqual(len(feed.entry), 1)
    acl_feed = self.client.GetAclPermissions(feed.entry[0].resource_id.text)
    self.assert_(isinstance(acl_feed, gdata.docs.data.AclFeed))
    self.assert_(isinstance(acl_feed.entry[0], gdata.docs.data.Acl))
    self.assert_(acl_feed.entry[0].scope is not None)
    self.assert_(acl_feed.entry[0].role is not None)

  def testGetRevisionFeed(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testGetRevisionFeed')
    uri = 'http://docs.google.com/feeds/default/private/full/-/document'
    feed = self.client.GetDocList(uri=uri, limit=1)
    self.assertEqual(len(feed.entry), 1)
    revision_feed = self.client.GetRevisions(feed.entry[0].resource_id.text)
    self.assert_(isinstance(revision_feed, gdata.docs.data.RevisionFeed))
    self.assert_(isinstance(revision_feed.entry[0], gdata.docs.data.Revision))
class DocsRevisionsTest(DocsTestCase):
  """Live tests for the revisions feed of documents and uploaded files."""

  def setUp(self):
    # Configures its own client/cache instead of using DocsTestCase.setUp,
    # because both fixtures here share a single recording name.
    self.client = None
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.docs.client.DocsClient()
      self.client.ssl = conf.options.get_value('ssl') == 'true'
      conf.configure_client(self.client, 'DocsTest', self.client.auth_service)
      conf.configure_cache(self.client, 'testDocsRevisions')
      try:
        self.testdoc = self.client.Create(
            gdata.docs.data.DOCUMENT_LABEL, 'My Doc')
        # Because of an etag change issue, we must sleep for a few seconds
        time.sleep(10)
      except:
        # Clean up anything partially created, then surface the error.
        self.tearDown()
        raise
      try:
        self.testdoc = self.client.GetDoc(self.testdoc.resource_id.text)
        self.testfile = self.client.Upload(
            'test.bin', 'My Binary File', content_type='application/octet-stream')
        # Because of an etag change issue, we must sleep for a few seconds
        time.sleep(10)
        self.testfile = self.client.GetDoc(self.testfile.resource_id.text)
      except:
        self.tearDown()
        raise

  def tearDown(self):
    if conf.options.get_value('runlive') == 'true':
      # Do a best effort tearDown, so pass on any exception
      try:
        self.client.Delete(self.testdoc)
      except:
        pass
      try:
        self.client.Delete(self.testfile)
      except:
        pass
      conf.close_client(self.client)

  def testArbFileRevisions(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # A freshly uploaded file starts with exactly one revision.
    revisions = self.client.GetRevisions(self.testfile.resource_id.text)
    self.assert_(isinstance(revisions, gdata.docs.data.RevisionFeed))
    self.assert_(isinstance(revisions.entry[0], gdata.docs.data.Revision))
    self.assertEqual(len(revisions.entry), 1)
    # Updating the media content should add a second revision.
    ms = gdata.data.MediaSource(
        file_path='test.bin', content_type='application/octet-stream')
    self.testfile.title.text = 'My Binary File Updated'
    self.testfile = self.client.Update(self.testfile, media_source=ms)
    self.assertEqual(self.testfile.title.text, 'My Binary File Updated')
    revisions = self.client.GetRevisions(self.testfile.resource_id.text)
    self.assert_(isinstance(revisions, gdata.docs.data.RevisionFeed))
    self.assert_(isinstance(revisions.entry[0], gdata.docs.data.Revision))
    self.assert_(isinstance(revisions.entry[1], gdata.docs.data.Revision))
    self.assertEqual(len(revisions.entry), 2)
    # Deleting one revision should bring the count back down to one.
    self.client.Delete(revisions.entry[1], force=True)
    revisions = self.client.GetRevisions(self.testfile.resource_id.text)
    self.assert_(isinstance(revisions, gdata.docs.data.RevisionFeed))
    self.assert_(isinstance(revisions.entry[0], gdata.docs.data.Revision))
    self.assertEqual(len(revisions.entry), 1)

  def testDocRevisions(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # A freshly created document starts with exactly one revision.
    revisions = self.client.GetRevisions(self.testdoc.resource_id.text)
    self.assert_(isinstance(revisions, gdata.docs.data.RevisionFeed))
    self.assert_(isinstance(revisions.entry[0], gdata.docs.data.Revision))
    self.assertEqual(len(revisions.entry), 1)
    # Updating the document content should add a second revision.
    ms = gdata.data.MediaSource(
        file_path='test.doc', content_type='application/msword')
    self.testdoc.title.text = 'My Doc Updated'
    self.testdoc = self.client.Update(self.testdoc, media_source=ms)
    revisions = self.client.GetRevisions(self.testdoc.resource_id.text)
    self.assert_(isinstance(revisions, gdata.docs.data.RevisionFeed))
    self.assert_(isinstance(revisions.entry[0], gdata.docs.data.Revision))
    self.assert_(isinstance(revisions.entry[1], gdata.docs.data.Revision))
    self.assertEqual(len(revisions.entry), 2)
class CreatingAndDeletionTest(DocsTestCase):
  """Live test covering create, move (into/out of folders) and delete."""

  def testCreateAndMoveDoc(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testCreateAndMoveDoc')
    new_folder = self.client.Create(gdata.docs.data.FOLDER_LABEL, 'My Folder')
    self.assertEqual(new_folder.title.text, 'My Folder')
    self.assertEqual(new_folder.GetDocumentType(), 'folder')
    new_doc = self.client.Create(gdata.docs.data.DOCUMENT_LABEL, 'My Doc',
                                 writers_can_invite=False)
    self.assertEqual(new_doc.GetDocumentType(), 'document')
    self.assertEqual(new_doc.title.text, 'My Doc')
    # writers_can_invite comes back serialized as the string 'false'.
    self.assertEqual(new_doc.writers_can_invite.value, 'false')
    # Move doc into folder
    new_entry = self.client.Move(new_doc, new_folder)
    self.assertEqual(len(new_entry.InFolders()), 1)
    self.assertEqual(new_entry.InFolders()[0].title, 'My Folder')
    # Create new spreadsheet inside the folder.
    new_spread = self.client.Create(
        gdata.docs.data.SPREADSHEET_LABEL, 'My Spread', folder_or_id=new_folder)
    self.assertEqual(new_spread.GetDocumentType(), 'spreadsheet')
    self.assertEqual(len(new_spread.InFolders()), 1)
    self.assertEqual(new_spread.InFolders()[0].title, 'My Folder')
    # Create new folder, and move spreadsheet into that folder too.
    new_folder2 = self.client.Create(gdata.docs.data.FOLDER_LABEL, 'My Folder2')
    self.assertEqual(new_folder2.title.text, 'My Folder2')
    self.assertEqual(new_folder2.GetDocumentType(), 'folder')
    # keep_in_folders=True leaves the spreadsheet in the first folder as well.
    moved_entry = self.client.Move(
        new_spread, new_folder2, keep_in_folders=True)
    self.assertEqual(len(moved_entry.InFolders()), 2)
    # Move spreadsheet to root level
    was_moved = self.client.Move(moved_entry)
    self.assert_(was_moved)
    spread_entry = self.client.GetDoc(moved_entry.resource_id.text)
    self.assertEqual(len(spread_entry.InFolders()), 0)
    # Clean up our mess.
    self.client.Delete(new_folder.GetEditLink().href, force=True)
    self.client.Delete(new_folder2.GetEditLink().href, force=True)
    self.client.Delete(new_doc.GetEditLink().href, force=True)
    self.client.Delete(spread_entry.GetEditLink().href, force=True)
class DocumentListUploadTest(DocsTestCase):
  """Live upload/delete tests using the v2 client."""

  def testUploadAndDeleteDocument(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testUploadAndDeleteDocument')
    ms = gdata.data.MediaSource(file_path='test.doc',
                                content_type='application/msword')
    entry = self.client.Upload(ms, 'test doc')
    self.assertEqual(entry.title.text, 'test doc')
    self.assertEqual(entry.GetDocumentType(), 'document')
    self.assert_(isinstance(entry, gdata.docs.data.DocsEntry))
    self.client.Delete(entry, force=True)

  def testUploadAndDeletePdf(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testUploadAndDeletePdf')
    # Try passing in filename instead of MediaSource object on this upload.
    entry = self.client.Upload(
        'test.pdf', 'test pdf', content_type='application/pdf')
    self.assertEqual(entry.title.text, 'test pdf')
    self.assertEqual(entry.GetDocumentType(), 'pdf')
    self.assert_(isinstance(entry, gdata.docs.data.DocsEntry))
    self.client.Delete(entry, force=True)
class DocumentListExportTest(DocsTestCase):
  """Live export/download tests using the v2 client."""

  def testExportDocument(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testExportDocument')
    uri = 'http://docs.google.com/feeds/default/private/full/-/document'
    feed = self.client.GetDocList(uri=uri, limit=1)
    # One export per target format; each must produce a non-empty file,
    # which is removed again afterwards.
    file_paths = ['./downloadedTest.doc', './downloadedTest.html',
                  './downloadedTest.odt', './downloadedTest.pdf',
                  './downloadedTest.png', './downloadedTest.rtf',
                  './downloadedTest.txt', './downloadedTest.zip']
    for path in file_paths:
      self.client.Export(feed.entry[0], path)
      self.assert_(os.path.exists(path))
      self.assert_(os.path.getsize(path))
      os.remove(path)

  def testExportNonExistentDocument(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testExportNonExistentDocument')
    path = './ned.txt'
    self.assert_(not os.path.exists(path))
    exception_raised = False
    try:
      self.client.Export('non_existent_doc', path)
    except Exception, e:  # expected
      exception_raised = True
    self.assert_(exception_raised)
    # A failed export must not leave a partial file behind.
    self.assert_(not os.path.exists(path))

  def testDownloadPdf(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testDownloadPdf')
    uri = 'http://docs.google.com/feeds/default/private/full/-/pdf'
    feed = self.client.GetDocList(uri=uri, limit=1)
    path = './downloadedTest.pdf'
    self.client.Download(feed.entry[0], path)
    self.assert_(os.path.exists(path))
    self.assert_(os.path.getsize(path))
    os.remove(path)
def suite():
  """Assemble the suite of all live Docs API test cases in this module."""
  test_classes = [
      DocsFetchingDataTest,
      CreatingAndDeletionTest,
      DocumentListUploadTest,
      DocumentListExportTest,
      DocsRevisionsTest,
  ]
  return conf.build_suite(test_classes)
if __name__ == '__main__':
  # Run the assembled suite directly when invoked as a script.
  unittest.TextTestRunner().run(suite())
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains Unit Tests for Google Profiles API.
ProfilesServiceTest: Provides methods to test feeds and manipulate items.
ProfilesQueryTest: Constructs a query object for the profiles feed.
Extends Query.
"""
__author__ = 'jtoledo (Julian Toledo)'
import getopt
import getpass
import sys
import unittest
import gdata.contacts
import gdata.contacts.service
# Credentials for the live tests; filled in by the __main__ block below
# (from command-line flags or interactive prompts) before any test runs.
email = ''
password = ''
domain = ''
# Host used when building absolute feed URIs in assertions.
server = 'www.google.com'
# Header name used to request a specific GData protocol version.
GDATA_VER_HEADER = 'GData-Version'
class ProfilesServiceTest(unittest.TestCase):
  """Tests feed URI construction and live reads/updates of the Profiles feed."""

  def setUp(self):
    # Request GData protocol version 3 via an extra header on every request.
    additional_headers = {GDATA_VER_HEADER: 3}
    self.gd_client = gdata.contacts.service.ContactsService(
        contact_list=domain, additional_headers=additional_headers)
    self.gd_client.email = email
    self.gd_client.password = password
    self.gd_client.source = 'GoogleInc-ProfilesPythonTest-1'
    self.gd_client.ProgrammaticLogin()

  def testGetFeedUriCustom(self):
    uri = self.gd_client.GetFeedUri(kind='profiles', scheme='https')
    self.assertEquals(
        'https://%s/m8/feeds/profiles/domain/%s/full' % (server, domain), uri)

  def testGetProfileFeedUriDefault(self):
    self.gd_client.contact_list = 'domain.com'
    self.assertEquals('/m8/feeds/profiles/domain/domain.com/full',
                      self.gd_client.GetFeedUri('profiles'))

  def testCleanUriNeedsCleaning(self):
    # _CleanUri strips the scheme and host, leaving a server-relative path.
    self.assertEquals('/relative/uri', self.gd_client._CleanUri(
        'http://www.google.com/relative/uri'))

  def testCleanUriDoesNotNeedCleaning(self):
    self.assertEquals('/relative/uri', self.gd_client._CleanUri(
        '/relative/uri'))

  def testGetProfilesFeed(self):
    feed = self.gd_client.GetProfilesFeed()
    self.assert_(isinstance(feed, gdata.contacts.ProfilesFeed))

  def testGetProfile(self):
    # Gets an existing entry
    feed = self.gd_client.GetProfilesFeed()
    entry = feed.entry[0]
    self.assert_(isinstance(entry, gdata.contacts.ProfileEntry))
    self.assertEquals(entry.title.text,
                      self.gd_client.GetProfile(entry.id.text).title.text)
    # Comparing _children checks the whole element tree, not just the title.
    self.assertEquals(entry._children,
                      self.gd_client.GetProfile(entry.id.text)._children)

  def testUpdateProfile(self):
    feed = self.gd_client.GetProfilesFeed()
    entry = feed.entry[1]
    # Remember the original value so the profile can be restored afterwards.
    original_occupation = entry.occupation
    entry.occupation = gdata.contacts.Occupation(text='TEST')
    updated = self.gd_client.UpdateProfile(entry.GetEditLink().href, entry)
    self.assertEquals('TEST', updated.occupation.text)
    updated.occupation = original_occupation
    self.gd_client.UpdateProfile(updated.GetEditLink().href, updated)
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'pw=', 'domain='])
except getopt.error, msg:
print ('Profiles Tests\nNOTE: Please run these tests only with a test '
'account. The tests may delete or update your data.\n'
'\nUsage: service_test.py --email=EMAIL '
'--password=PASSWORD --domain=DOMAIN\n')
sys.exit(2)
# Process options
for option, arg in opts:
if option == '--email':
email = arg
elif option == '--pw':
password = arg
elif option == '--domain':
domain = arg
while not email:
print 'NOTE: Please run these tests only with a test account.'
email = raw_input('Please enter your email: ')
while not password:
password = getpass.getpass('Please enter password: ')
if not password:
print 'Password cannot be blank.'
while not domain:
print 'NOTE: Please run these tests only with a test account.'
domain = raw_input('Please enter your Apps domain: ')
suite = unittest.makeSuite(ProfilesServiceTest)
unittest.TextTestRunner().run(suite)
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import atom.core
import atom.data
import atom.http_core
import gdata.contacts.client
import gdata.data
import gdata.test_config as conf
import unittest
# These live tests need an Apps domain and a target username in addition to
# the standard test-configuration options.
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
conf.options.register_option(conf.TARGET_USERNAME_OPTION)
class ProfileTest(unittest.TestCase):
  """Live tests for the v2 Contacts client's Profiles feed."""

  def setUp(self):
    # Default to a placeholder domain; replaced with the real Apps domain
    # (and configured auth) when the tests run live.
    self.client = gdata.contacts.client.ContactsClient(domain='example.com')
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.contacts.client.ContactsClient(
          domain=conf.options.get_value('appsdomain'))
      if conf.options.get_value('ssl') == 'true':
        self.client.ssl = True
      conf.configure_client(self.client, 'ProfileTest',
                            self.client.auth_service, True)
      # Use only the local part of the configured target account.
      self.client.username = conf.options.get_value('appsusername').split('@')[0]

  def tearDown(self):
    conf.close_client(self.client)

  def test_profiles_feed(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'test_profiles_feed')
    feed = self.client.get_profiles_feed()
    self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))

  def test_profiles_query(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    # NOTE(review): reuses the 'test_profiles_feed' recording name --
    # presumably so both tests share one recording; confirm this is intended.
    conf.configure_cache(self.client, 'test_profiles_feed')
    query = gdata.contacts.client.ProfilesQuery(max_results=1)
    feed = self.client.get_profiles_feed(q=query)
    self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))
    self.assert_(len(feed.entry) == 1)
    # Needs at least 2 profiles in the feed to test the start-key
    # query param.
    next = feed.GetNextLink()
    feed = None
    if next:
      # Retrieve the start-key query param from the next link.
      uri = atom.http_core.Uri.parse_uri(next.href)
      if 'start-key' in uri.query:
        query.start_key = uri.query['start-key']
        feed = self.client.get_profiles_feed(q=query)
        self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))
        self.assert_(len(feed.entry) == 1)
        self.assert_(feed.GetSelfLink().href == next.href)
        # Compare with a feed retrieved with the next link.
        next_feed = self.client.get_profiles_feed(uri=next.href)
        self.assert_(len(next_feed.entry) == 1)
        self.assert_(next_feed.entry[0].id.text == feed.entry[0].id.text)
def suite():
  """Build the runnable suite for this module."""
  cases = [ProfileTest]
  return conf.build_suite(cases)
if __name__ == '__main__':
  # Run the suite directly when invoked as a script.
  unittest.TextTestRunner().run(suite())
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import getpass
import re
import unittest
import urllib
import atom
import gdata.contacts.service
import gdata.test_config as conf
# The photo-update test needs a local image file to upload.
conf.options.register_option(conf.TEST_IMAGE_LOCATION_OPTION)
class ContactsServiceTest(unittest.TestCase):
  """Live tests for the Contacts service: feeds, URIs, CRUD, photos, batch."""

  def setUp(self):
    self.gd_client = gdata.contacts.service.ContactsService()
    conf.configure_service(self.gd_client, 'ContactsServiceTest', 'cp')
    self.gd_client.email = conf.options.get_value('username')

  def tearDown(self):
    conf.close_service(self.gd_client)

  def testGetContactsFeed(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_service_cache(self.gd_client, 'testGetContactsFeed')
    feed = self.gd_client.GetContactsFeed()
    self.assert_(isinstance(feed, gdata.contacts.ContactsFeed))

  def testDefaultContactList(self):
    self.assertEquals('default', self.gd_client.contact_list)

  def testCustomContactList(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_service_cache(self.gd_client, 'testCustomContactList')
    self.gd_client.contact_list = conf.options.get_value('username')
    feed = self.gd_client.GetContactsFeed()
    self.assert_(isinstance(feed, gdata.contacts.ContactsFeed))

  def testGetFeedUriDefault(self):
    self.gd_client.contact_list = 'domain.com'
    self.assertEquals('/m8/feeds/contacts/domain.com/full',
                      self.gd_client.GetFeedUri())

  def testGetFeedUriCustom(self):
    uri = self.gd_client.GetFeedUri(kind='groups',
                                    contact_list='example.com',
                                    projection='base/batch',
                                    scheme='https')
    self.assertEquals(
        'https://www.google.com/m8/feeds/groups/example.com/base/batch', uri)

  def testCreateUpdateDeleteContactAndUpdatePhoto(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_service_cache(self.gd_client, 'testCreateUpdateDeleteContactAndUpdatePhoto')
    DeleteTestContact(self.gd_client)
    # Create a new entry
    new_entry = gdata.contacts.ContactEntry()
    new_entry.title = atom.Title(text='Elizabeth Bennet')
    new_entry.content = atom.Content(text='Test Notes')
    new_entry.email.append(gdata.contacts.Email(
        rel='http://schemas.google.com/g/2005#work',
        primary='true',
        address='liz@gmail.com'))
    new_entry.phone_number.append(gdata.contacts.PhoneNumber(
        rel='http://schemas.google.com/g/2005#work', text='(206)555-1212'))
    new_entry.organization = gdata.contacts.Organization(
        org_name=gdata.contacts.OrgName(text='TestCo.'),
        rel='http://schemas.google.com/g/2005#work')
    entry = self.gd_client.CreateContact(new_entry)
    # The server must echo back every field that was sent.
    self.assertEquals(entry.title.text, new_entry.title.text)
    self.assertEquals(entry.content.text, 'Test Notes')
    self.assertEquals(len(entry.email), 1)
    self.assertEquals(entry.email[0].rel, new_entry.email[0].rel)
    self.assertEquals(entry.email[0].address, 'liz@gmail.com')
    self.assertEquals(len(entry.phone_number), 1)
    self.assertEquals(entry.phone_number[0].rel,
                      new_entry.phone_number[0].rel)
    self.assertEquals(entry.phone_number[0].text, '(206)555-1212')
    self.assertEquals(entry.organization.org_name.text, 'TestCo.')
    # Edit the entry.
    entry.phone_number[0].text = '(555)555-1212'
    updated = self.gd_client.UpdateContact(entry.GetEditLink().href, entry)
    self.assertEquals(updated.content.text, 'Test Notes')
    self.assertEquals(len(updated.phone_number), 1)
    self.assertEquals(updated.phone_number[0].rel,
                      entry.phone_number[0].rel)
    self.assertEquals(updated.phone_number[0].text, '(555)555-1212')
    # Change the contact's photo.
    updated_photo = self.gd_client.ChangePhoto(
        conf.options.get_value('imgpath'), updated,
        content_type='image/jpeg')
    # Refetch the contact so that it has the new photo link
    updated = self.gd_client.GetContact(updated.GetSelfLink().href)
    self.assert_(updated.GetPhotoLink() is not None)
    # Fetch the photo data.
    hosted_image = self.gd_client.GetPhoto(updated)
    self.assert_(hosted_image is not None)
    # Delete the entry.
    self.gd_client.DeleteContact(updated.GetEditLink().href)

  def testCreateAndDeleteContactUsingBatch(self):
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_service_cache(self.gd_client, 'testCreateAndDeleteContactUsingBatch')
    # Fixed (not random) suffix so the cached recording stays stable.
    random_contact_number = 'notRandom12'
    random_contact_title = 'Random Contact %s' % (
        random_contact_number)
    # Set contact data
    contact = gdata.contacts.ContactEntry()
    contact.title = atom.Title(text=random_contact_title)
    contact.email = gdata.contacts.Email(
        address='user%s@example.com' % random_contact_number,
        primary='true',
        rel=gdata.contacts.REL_WORK)
    contact.content = atom.Content(text='Contact created by '
                                   'gdata-python-client automated test '
                                   'suite.')
    # Form a batch request
    batch_request = gdata.contacts.ContactsFeed()
    batch_request.AddInsert(entry=contact)
    # Execute the batch request to insert the contact.
    default_batch_url = gdata.contacts.service.DEFAULT_BATCH_URL
    batch_result = self.gd_client.ExecuteBatch(batch_request,
                                               default_batch_url)
    self.assertEquals(len(batch_result.entry), 1)
    self.assertEquals(batch_result.entry[0].title.text,
                      random_contact_title)
    self.assertEquals(batch_result.entry[0].batch_operation.type,
                      gdata.BATCH_INSERT)
    # A successful batch insert reports status code '201' (Created).
    self.assertEquals(batch_result.entry[0].batch_status.code,
                      '201')
    expected_batch_url = re.compile('default').sub(
        urllib.quote(self.gd_client.email),
        gdata.contacts.service.DEFAULT_BATCH_URL)
    # BUGFIX: failUnless(href, expected) treated `expected` as the failure
    # message, so any truthy href passed; compare the two URLs for real.
    self.assertEquals(batch_result.GetBatchLink().href,
                      expected_batch_url)
    # Create a batch request to delete the newly created entry.
    batch_delete_request = gdata.contacts.ContactsFeed()
    batch_delete_request.AddDelete(entry=batch_result.entry[0])
    batch_delete_result = self.gd_client.ExecuteBatch(
        batch_delete_request,
        batch_result.GetBatchLink().href)
    self.assertEquals(len(batch_delete_result.entry), 1)
    self.assertEquals(batch_delete_result.entry[0].batch_operation.type,
                      gdata.BATCH_DELETE)
    # BUGFIX: the original re-checked the *insert* result here; a successful
    # batch delete reports status '200' (Success), not '201'.
    self.assertEquals(batch_delete_result.entry[0].batch_status.code,
                      '200')

  def testCleanUriNeedsCleaning(self):
    self.assertEquals('/relative/uri', self.gd_client._CleanUri(
        'http://www.google.com/relative/uri'))

  def testCleanUriDoesNotNeedCleaning(self):
    self.assertEquals('/relative/uri', self.gd_client._CleanUri(
        '/relative/uri'))
class ContactsQueryTest(unittest.TestCase):
  """Unit tests for ContactsQuery URI generation (no network access)."""

  def testConvertToStringDefaultFeed(self):
    q = gdata.contacts.service.ContactsQuery()
    # str() and ToUri() agree on the default feed path.
    self.assertEquals(str(q), '/m8/feeds/contacts/default/full')
    q.max_results = 10
    self.assertEquals(q.ToUri(),
                      '/m8/feeds/contacts/default/full?max-results=10')

  def testConvertToStringCustomFeed(self):
    q = gdata.contacts.service.ContactsQuery('/custom/feed/uri')
    self.assertEquals(str(q), '/custom/feed/uri')
    # max_results may also be supplied as a string.
    q.max_results = '10'
    self.assertEquals(q.ToUri(), '/custom/feed/uri?max-results=10')

  def testGroupQueryParameter(self):
    q = gdata.contacts.service.ContactsQuery()
    q.group = 'http://google.com/m8/feeds/groups/liz%40gmail.com/full/270f'
    # The group URL must be percent-encoded when embedded as a parameter.
    self.assertEquals(q.ToUri(), '/m8/feeds/contacts/default/full'
                      '?group=http%3A%2F%2Fgoogle.com%2Fm8%2Ffeeds%2Fgroups'
                      '%2Fliz%2540gmail.com%2Ffull%2F270f')
class ContactsGroupsTest(unittest.TestCase):
  """Live create/update/delete round-trip for contact groups."""

  def setUp(self):
    self.gd_client = gdata.contacts.service.ContactsService()
    conf.configure_service(self.gd_client, 'ContactsServiceTest', 'cp')

  def tearDown(self):
    conf.close_service(self.gd_client)

  def testCreateUpdateDeleteGroup(self):
    """Creates a group, renames it, then deletes it on the live service."""
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_service_cache(self.gd_client,
                                 'testCreateUpdateDeleteGroup')
    group_to_create = gdata.contacts.GroupEntry(title=atom.Title(
        text='test group py'))
    created_group = self.gd_client.CreateGroup(group_to_create)
    self.assert_(isinstance(created_group, gdata.contacts.GroupEntry))
    self.assertEquals('test group py', created_group.title.text)
    # Rename the group and push the change to the server.
    created_group.title.text = 'new group name py'
    updated_group = self.gd_client.UpdateGroup(
        created_group.GetEditLink().href, created_group)
    self.assertEquals(created_group.title.text, updated_group.title.text)
    # Clean up by removing the test group.
    self.gd_client.DeleteGroup(updated_group.GetEditLink().href)
# Utility methods.
def DeleteTestContact(client):
  """Remove every copy of the well-known test contact from client's feed."""
  contacts_feed = client.GetContactsFeed()
  for candidate in contacts_feed.entry:
    is_test_contact = (
        candidate.title.text == 'Elizabeth Bennet' and
        candidate.content.text == 'Test Notes' and
        candidate.email[0].address == 'liz@gmail.com')
    if is_test_contact:
      client.DeleteContact(candidate.GetEditLink().href)
def suite():
  """Assemble the suite of all contacts test cases in this module."""
  all_tests = [unittest.makeSuite(ContactsServiceTest, 'test'),
               unittest.makeSuite(ContactsQueryTest, 'test'),
               unittest.makeSuite(ContactsGroupsTest, 'test')]
  return unittest.TestSuite(all_tests)
if __name__ == '__main__':
print ('Contacts Tests\nNOTE: Please run these tests only with a test '
'account. The tests may delete or update your data.')
unittest.TextTestRunner().run(suite())
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import gdata.test_config as conf
import gdata.contacts.client
import atom.core
import atom.data
import gdata.data
class ContactsTest(unittest.TestCase):
  """Live (record/replay) tests for the version 2 Contacts client.

  When the 'runlive' option is not 'true', setUp leaves self.client as None
  and each test returns immediately.
  """

  def setUp(self):
    # Only build a real client when live tests are enabled.
    self.client = None
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.contacts.client.ContactsClient()
      conf.configure_client(self.client, 'ContactsTest', 'cp')

  def tearDown(self):
    conf.close_client(self.client)

  def test_create_update_delete_contact(self):
    """Creates a contact, edits it, adds it to a new group, then deletes
    both the group and the contact."""
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'test_create_update_delete_contact')
    new_contact = gdata.contacts.data.ContactEntry(
        nickname=gdata.contacts.data.NickName(text='Joe'),
        name=gdata.data.Name(
            given_name=gdata.data.GivenName(text='Joseph'),
            family_name=gdata.data.FamilyName(text='Testerson')))
    new_contact.birthday = gdata.contacts.data.Birthday(when='2009-11-11')
    new_contact.language.append(gdata.contacts.data.Language(
        label='German'))
    created = self.client.create_contact(new_contact)
    # Add another language.
    created.language.append(gdata.contacts.data.Language(
        label='French'))
    # Create a new membership group for our test contact.
    new_group = gdata.contacts.data.GroupEntry(
        title=atom.data.Title(text='a test group'))
    created_group = self.client.create_group(new_group)
    self.assert_(created_group.id.text)
    # Add the contact to the new group.
    created.group_membership_info.append(
        gdata.contacts.data.GroupMembershipInfo(href=created_group.id.text))
    # Upload the changes to the language and group membership.
    edited = self.client.update(created)
    # Delete the group and the test contact.
    self.client.delete(created_group)
    self.client.delete(edited)

  def test_low_level_create_update_delete(self):
    """Exercises raw HTTP POST/DELETE of an atom entry, including etag and
    If-Match handling."""
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'test_low_level_create_update_delete')
    entry = atom.data.Entry()
    entry.title = atom.data.Title(text='Jeff')
    entry._other_elements.append(
        gdata.data.Email(rel=gdata.data.WORK_REL, address='j.s@google.com'))
    http_request = atom.http_core.HttpRequest()
    http_request.add_body_part(entry.to_string(), 'application/atom+xml')
    posted = self.client.request('POST',
        'http://www.google.com/m8/feeds/contacts/default/full',
        desired_class=atom.data.Entry, http_request=http_request)
    self_link = None
    edit_link = None
    # Scan the returned entry's atom:link elements for self and edit URIs.
    for link in posted.get_elements('link', 'http://www.w3.org/2005/Atom'):
      if link.get_attributes('rel')[0].value == 'self':
        self_link = link.get_attributes('href')[0].value
      elif link.get_attributes('rel')[0].value == 'edit':
        edit_link = link.get_attributes('href')[0].value
    self.assert_(self_link is not None)
    self.assert_(edit_link is not None)
    etag = posted.get_attributes('etag')[0].value
    self.assert_(etag is not None)
    self.assert_(len(etag) > 0)
    # Delete the test contact.
    http_request = atom.http_core.HttpRequest()
    # If-Match makes the delete apply only to the revision we just created.
    http_request.headers['If-Match'] = etag
    self.client.request('DELETE', edit_link, http_request=http_request)
def suite():
  """Return the suite of all test cases defined in this module."""
  test_cases = [ContactsTest]
  return conf.build_suite(test_cases)
if __name__ == '__main__':
  # Run the suite directly when invoked as a script.
  runner = unittest.TextTestRunner()
  runner.run(suite())
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.lliabraa@google.com (Lane LiaBraaten)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata.calendar
import gdata.calendar.service
import gdata.service
import random
import getpass
from gdata import test_data
username = ''
password = ''
class CalendarServiceAclUnitTest(unittest.TestCase):
_aclFeedUri = "/calendar/feeds/default/acl/full"
_aclEntryUri = "%s/user:%s" % (_aclFeedUri, "user@gmail.com",)
def setUp(self):
self.cal_client = gdata.calendar.service.CalendarService()
self.cal_client.email = username
self.cal_client.password = password
self.cal_client.source = 'GCalendarClient ACL "Unit" Tests'
def tearDown(self):
# No teardown needed
pass
def _getRandomNumber(self):
"""Return a random number as a string for testing"""
r = random.Random()
r.seed()
return str(r.randint(100000,1000000))
def _generateAclEntry(self, role="owner", scope_type="user", scope_value=None):
"""Generates a ACL rule from parameters or makes a random user an owner by default"""
if (scope_type=="user" and scope_value is None):
scope_value = "user%s@gmail.com" % (self._getRandomNumber())
rule = gdata.calendar.CalendarAclEntry()
rule.title = atom.Title(text=role)
rule.scope = gdata.calendar.Scope(value=scope_value, type="user")
rule.role = gdata.calendar.Role(value="http://schemas.google.com/gCal/2005#%s" % (role))
return rule
def assertEqualAclEntry(self, expected, actual):
"""Compares the values of two ACL entries"""
self.assertEqual(expected.role.value, actual.role.value)
self.assertEqual(expected.scope.value, actual.scope.value)
self.assertEqual(expected.scope.type, actual.scope.type)
def testGetAclFeedUnauthenticated(self):
"""Fiendishly try to get an ACL feed without authenticating"""
try:
self.cal_client.GetCalendarAclFeed(self._aclFeedUri)
self.fail("Unauthenticated request should fail")
except gdata.service.RequestError, error:
self.assertEqual(error[0]['status'], 401)
self.assertEqual(error[0]['reason'], "Authorization required")
def testGetAclFeed(self):
"""Get an ACL feed"""
self.cal_client.ProgrammaticLogin()
feed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri)
self.assertNotEqual(0,len(feed.entry))
def testGetAclEntryUnauthenticated(self):
"""Fiendishly try to get an ACL entry without authenticating"""
try:
self.cal_client.GetCalendarAclEntry(self._aclEntryUri)
self.fail("Unauthenticated request should fail");
except gdata.service.RequestError, error:
self.assertEqual(error[0]['status'], 401)
self.assertEqual(error[0]['reason'], "Authorization required")
def testGetAclEntry(self):
"""Get an ACL entry"""
self.cal_client.ProgrammaticLogin()
self.cal_client.GetCalendarAclEntry(self._aclEntryUri)
def testCalendarAclFeedFromString(self):
"""Create an ACL feed from a hard-coded string"""
aclFeed = gdata.calendar.CalendarAclFeedFromString(test_data.ACL_FEED)
self.assertEqual("Elizabeth Bennet's access control list", aclFeed.title.text)
self.assertEqual(2,len(aclFeed.entry))
def testCalendarAclEntryFromString(self):
"""Create an ACL entry from a hard-coded string"""
aclEntry = gdata.calendar.CalendarAclEntryFromString(test_data.ACL_ENTRY)
self.assertEqual("owner", aclEntry.title.text)
self.assertEqual("user", aclEntry.scope.type)
self.assertEqual("liz@gmail.com", aclEntry.scope.value)
self.assertEqual("http://schemas.google.com/gCal/2005#owner", aclEntry.role.value)
def testCreateAndDeleteAclEntry(self):
"""Add an ACL rule and verify that is it returned in the ACL feed. Then delete the rule and
verify that the rule is no longer included in the ACL feed."""
# Get the current number of ACL rules
self.cal_client.ProgrammaticLogin()
aclFeed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri)
original_rule_count = len(aclFeed.entry)
# Insert entry
rule = self._generateAclEntry()
returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri)
# Verify rule was added with correct ACL values
aclFeed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri)
self.assertEqual(original_rule_count+1, len(aclFeed.entry))
self.assertEqualAclEntry(rule, returned_rule)
# Delete the event
self.cal_client.DeleteAclEntry(returned_rule.GetEditLink().href)
aclFeed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri)
self.assertEquals(original_rule_count, len(aclFeed.entry))
def testUpdateAclChangeScopeValue(self):
"""Fiendishly try to insert a test ACL rule and attempt to change the scope value (i.e. username).
Verify that an exception is thrown, then delete the test rule."""
# Insert a user-scoped owner role ot random user
aclEntry = self._generateAclEntry("owner","user");
self.cal_client.ProgrammaticLogin()
rule = self._generateAclEntry()
returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri)
# Change the scope value (i.e. what user is the owner) and update the entry
updated_rule = returned_rule
updated_rule.scope.value = "user_%s@gmail.com" % (self._getRandomNumber())
try:
returned_rule = self.cal_client.UpdateAclEntry(returned_rule.GetEditLink().href, updated_rule)
except gdata.service.RequestError, error:
self.assertEqual(error[0]['status'], 403)
self.assertEqual(error[0]['reason'], "Forbidden")
self.cal_client.DeleteAclEntry(updated_rule.GetEditLink().href)
def testUpdateAclChangeScopeType(self):
"""Fiendishly try to insert a test ACL rule and attempt to change the scope type (i.e. from 'user' to 'domain').
Verify that an exception is thrown, then delete the test rule."""
# Insert a user-scoped owner role ot random user
aclEntry = self._generateAclEntry("owner","user");
self.cal_client.ProgrammaticLogin()
rule = self._generateAclEntry()
returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri)
# Change the scope value (i.e. what user is the owner) and update the entry
updated_rule = returned_rule
updated_rule.scope.type = "domain"
try:
returned_rule = self.cal_client.UpdateAclEntry(returned_rule.GetEditLink().href, updated_rule)
except gdata.service.RequestError, error:
self.assertEqual(error[0]['status'], 403)
self.assertEqual(error[0]['reason'], "Forbidden")
self.cal_client.DeleteAclEntry(updated_rule.GetEditLink().href)
def testUpdateAclChangeRoleValue(self):
"""Insert a test ACL rule and attempt to change the scope type (i.e. from 'owner' to 'editor').
Verify that an exception is thrown, then delete the test rule."""
# Insert a user-scoped owner role ot random user
aclEntry = self._generateAclEntry("owner","user");
self.cal_client.ProgrammaticLogin()
rule = self._generateAclEntry()
returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri)
# Change the scope value (i.e. what user is the owner) and update the entry
updated_rule = returned_rule
updated_rule.role.value = "http://schemas.google.com/gCal/2005#editor"
returned_rule = self.cal_client.UpdateAclEntry(returned_rule.GetEditLink().href, updated_rule)
self.assertEqualAclEntry(updated_rule, returned_rule)
self.cal_client.DeleteAclEntry(updated_rule.GetEditLink().href)
if __name__ == '__main__':
print ('NOTE: Please run these tests only with a test account. ' +
'The tests may delete or update your data.')
username = raw_input('Please enter your username: ')
password = getpass.getpass()
unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.rboyd@google.com (Ryan Boyd)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import atom.mock_http
import gdata.calendar
import gdata.calendar.service
import random
import getpass
username = ''
password = ''
class CalendarServiceUnitTest(unittest.TestCase):
def setUp(self):
self.cal_client = gdata.calendar.service.CalendarService()
self.cal_client.email = username
self.cal_client.password = password
self.cal_client.source = 'GCalendarClient "Unit" Tests'
def tearDown(self):
# No teardown needed
pass
def testUrlScrubbing(self):
self.assertEquals(self.cal_client._RemoveStandardUrlPrefix(
'/test'), '/test')
self.assertEquals(self.cal_client._RemoveStandardUrlPrefix(
'http://www.google.com/calendar/test'), '/calendar/test')
self.assertEquals(self.cal_client._RemoveStandardUrlPrefix(
'https://www.google.com/calendar/test'),
'https://www.google.com/calendar/test')
def testPostUpdateAndDeleteSubscription(self):
"""Test posting a new subscription, updating it, deleting it"""
self.cal_client.ProgrammaticLogin()
subscription_id = 'c4o4i7m2lbamc4k26sc2vokh5g%40group.calendar.google.com'
subscription_url = '%s%s' % (
'http://www.google.com/calendar/feeds/default/allcalendars/full/',
subscription_id)
# Subscribe to Google Doodles calendar
calendar = gdata.calendar.CalendarListEntry()
calendar.id = atom.Id(text=subscription_id)
returned_calendar = self.cal_client.InsertCalendarSubscription(calendar)
self.assertEquals(subscription_url, returned_calendar.id.text)
self.assertEquals('Google Doodles', returned_calendar.title.text)
# Update subscription
calendar_to_update = self.cal_client.GetCalendarListEntry(subscription_url)
self.assertEquals('Google Doodles', calendar_to_update.title.text)
self.assertEquals('true', calendar_to_update.selected.value)
calendar_to_update.selected.value = 'false'
self.assertEquals('false', calendar_to_update.selected.value)
updated_calendar = self.cal_client.UpdateCalendar(calendar_to_update)
self.assertEquals('false', updated_calendar.selected.value)
# Delete subscription
response = self.cal_client.DeleteCalendarEntry(
returned_calendar.GetEditLink().href)
self.assertEquals(True, response)
def testPostUpdateAndDeleteCalendar(self):
"""Test posting a new calendar, updating it, deleting it"""
self.cal_client.ProgrammaticLogin()
# New calendar to create
title='Little League Schedule'
description='This calendar contains practice and game times'
time_zone='America/Los_Angeles'
hidden=False
location='Oakland'
color='#2952A3'
# Calendar object
calendar = gdata.calendar.CalendarListEntry()
calendar.title = atom.Title(text=title)
calendar.summary = atom.Summary(text=description)
calendar.where = gdata.calendar.Where(value_string=location)
calendar.color = gdata.calendar.Color(value=color)
calendar.timezone = gdata.calendar.Timezone(value=time_zone)
if hidden:
calendar.hidden = gdata.calendar.Hidden(value='true')
else:
calendar.hidden = gdata.calendar.Hidden(value='false')
# Create calendar
new_calendar = self.cal_client.InsertCalendar(new_calendar=calendar)
self.assertEquals(title, new_calendar.title.text)
self.assertEquals(description, new_calendar.summary.text)
self.assertEquals(location, new_calendar.where.value_string)
self.assertEquals(color, new_calendar.color.value)
self.assertEquals(time_zone, new_calendar.timezone.value)
if hidden:
self.assertEquals('true', new_calendar.hidden.value)
else:
self.assertEquals('false', new_calendar.hidden.value)
# Update calendar
calendar_to_update = self.cal_client.GetCalendarListEntry(
new_calendar.id.text)
updated_title = 'This is the updated title'
calendar_to_update.title.text = updated_title
updated_calendar = self.cal_client.UpdateCalendar(calendar_to_update)
self.assertEquals(updated_title, updated_calendar.title.text)
# Delete calendar
calendar_to_delete = self.cal_client.GetCalendarListEntry(
new_calendar.id.text)
self.cal_client.Delete(calendar_to_delete.GetEditLink().href)
return new_calendar
def testPostAndDeleteExtendedPropertyEvent(self):
"""Test posting a new entry with an extended property, deleting it"""
# Get random data for creating event
r = random.Random()
r.seed()
random_event_number = str(r.randint(100000,1000000))
random_event_title = 'My Random Extended Property Test Event %s' % (
random_event_number)
# Set event data
event = gdata.calendar.CalendarEventEntry()
event.author.append(atom.Author(name=atom.Name(text='GData Test user')))
event.title = atom.Title(text=random_event_title)
event.content = atom.Content(text='Picnic with some lunch')
event.extended_property.append(gdata.calendar.ExtendedProperty(
name='prop test name', value='prop test value'))
# Insert event
self.cal_client.ProgrammaticLogin()
new_event = self.cal_client.InsertEvent(event,
'/calendar/feeds/default/private/full')
self.assertEquals(event.extended_property[0].value,
new_event.extended_property[0].value)
# Delete the event
self.cal_client.DeleteEvent(new_event.GetEditLink().href)
# WARNING: Due to server-side issues, this test takes a while (~60seconds)
def testPostEntryWithCommentAndDelete(self):
"""Test posting a new entry with an extended property, deleting it"""
# Get random data for creating event
r = random.Random()
r.seed()
random_event_number = str(r.randint(100000,1000000))
random_event_title = 'My Random Comments Test Event %s' % (
random_event_number)
# Set event data
event = gdata.calendar.CalendarEventEntry()
event.author.append(atom.Author(name=atom.Name(text='GData Test user')))
event.title = atom.Title(text=random_event_title)
event.content = atom.Content(text='Picnic with some lunch')
# Insert event
self.cal_client.ProgrammaticLogin()
new_event = self.cal_client.InsertEvent(event,
'/calendar/feeds/default/private/full')
# Get comments feed
comments_url = new_event.comments.feed_link.href
comments_query = gdata.calendar.service.CalendarEventCommentQuery(comments_url)
comments_feed = self.cal_client.CalendarQuery(comments_query)
# Add comment
comments_entry = gdata.calendar.CalendarEventCommentEntry()
comments_entry.content = atom.Content(text='Comments content')
comments_entry.author.append(
atom.Author(name=atom.Name(text='GData Test user'),
email=atom.Email(text=username)))
new_comments_entry = self.cal_client.InsertEventComment(comments_entry,
comments_feed.GetPostLink().href)
# Delete the event
event_to_delete = self.cal_client.GetCalendarEventEntry(new_event.id.text)
self.cal_client.DeleteEvent(event_to_delete.GetEditLink().href)
def testPostQueryUpdateAndDeleteEvents(self):
"""Test posting a new entry, updating it, deleting it, querying for it"""
# Get random data for creating event
r = random.Random()
r.seed()
random_event_number = str(r.randint(100000,1000000))
random_event_title = 'My Random Test Event %s' % random_event_number
random_start_hour = (r.randint(1,1000000) % 23)
random_end_hour = random_start_hour + 1
non_random_start_minute = 0
non_random_end_minute = 0
random_month = (r.randint(1,1000000) % 12 + 1)
random_day_of_month = (r.randint(1,1000000) % 28 + 1)
non_random_year = 2008
start_time = '%04d-%02d-%02dT%02d:%02d:00.000-05:00' % (
non_random_year, random_month, random_day_of_month,
random_start_hour, non_random_start_minute,)
end_time = '%04d-%02d-%02dT%02d:%02d:00.000-05:00' % (
non_random_year, random_month, random_day_of_month,
random_end_hour, non_random_end_minute,)
# Set event data
event = gdata.calendar.CalendarEventEntry()
event.author.append(atom.Author(name=atom.Name(text='GData Test user')))
event.title = atom.Title(text=random_event_title)
event.content = atom.Content(text='Picnic with some lunch')
event.where.append(gdata.calendar.Where(value_string='Down by the river'))
event.when.append(gdata.calendar.When(start_time=start_time,end_time=end_time))
# Insert event
self.cal_client.ProgrammaticLogin()
new_event = self.cal_client.InsertEvent(event,
'/calendar/feeds/default/private/full')
# Ensure that atom data returned from calendar server equals atom data sent
self.assertEquals(event.title.text, new_event.title.text)
self.assertEquals(event.content.text, new_event.content.text)
# Ensure that gd:where data returned from calendar equals value sent
self.assertEquals(event.where[0].value_string,
new_event.where[0].value_string)
# Commented out as dateutil is not in this repository
# Ensure that dates returned from calendar server equals dates sent
#start_time_py = parse(event.when[0].start_time)
#start_time_py_new = parse(new_event.when[0].start_time)
#self.assertEquals(start_time_py, start_time_py_new)
#end_time_py = parse(event.when[0].end_time)
#end_time_py_new = parse(new_event.when[0].end_time)
#self.assertEquals(end_time_py, end_time_py_new)
# Update event
event_to_update = new_event
updated_title_text = event_to_update.title.text + ' - UPDATED'
event_to_update.title = atom.Title(text=updated_title_text)
updated_event = self.cal_client.UpdateEvent(
event_to_update.GetEditLink().href, event_to_update)
# Ensure that updated title was set in the updated event
self.assertEquals(event_to_update.title.text, updated_event.title.text)
# Delete the event
self.cal_client.DeleteEvent(updated_event.GetEditLink().href)
# Ensure deleted event is marked as canceled in the feed
after_delete_query = gdata.calendar.service.CalendarEventQuery()
after_delete_query.updated_min = '2007-01-01'
after_delete_query.text_query = str(random_event_number)
after_delete_query.max_results = '1'
after_delete_query_result = self.cal_client.CalendarQuery(
after_delete_query)
# Ensure feed returned at max after_delete_query.max_results events
self.assert_(
len(after_delete_query_result.entry) <= after_delete_query.max_results)
# Ensure status of returned event is canceled
self.assertEquals(after_delete_query_result.entry[0].event_status.value,
'CANCELED')
def testEventWithSyncEventAndUID(self):
"""Test posting a new entry (with syncEvent and a UID) and deleting it."""
# Get random data for creating event
r = random.Random()
r.seed()
random_event_number = str(r.randint(100000,1000000))
random_event_title = 'My Random Test Event %s' % random_event_number
random_start_hour = (r.randint(1,1000000) % 23)
random_end_hour = random_start_hour + 1
non_random_start_minute = 0
non_random_end_minute = 0
random_month = (r.randint(1,1000000) % 12 + 1)
random_day_of_month = (r.randint(1,1000000) % 28 + 1)
non_random_year = 2008
start_time = '%04d-%02d-%02dT%02d:%02d:00.000-05:00' % (
non_random_year, random_month, random_day_of_month,
random_start_hour, non_random_start_minute,)
end_time = '%04d-%02d-%02dT%02d:%02d:00.000-05:00' % (
non_random_year, random_month, random_day_of_month,
random_end_hour, non_random_end_minute,)
# create a random event ID. I'm mimicing an example from outlook here,
# the format doesn't seem to be important per the RFC except for being
# globally unique.
uid_string = ''
for i in xrange(121):
uid_string += "%X" % r.randint(0, 0xf)
# Set event data
event = gdata.calendar.CalendarEventEntry()
event.author.append(atom.Author(name=atom.Name(text='GData Test user')))
event.title = atom.Title(text=random_event_title)
event.content = atom.Content(text='Picnic with some lunch')
event.where.append(gdata.calendar.Where(value_string='Down by the river'))
event.when.append(gdata.calendar.When(
start_time=start_time,end_time=end_time))
event.sync_event = gdata.calendar.SyncEvent('true')
event.uid = gdata.calendar.UID(value=uid_string)
# Insert event
self.cal_client.ProgrammaticLogin()
new_event = self.cal_client.InsertEvent(event,
'/calendar/feeds/default/private/full')
# Inserting it a second time should fail, as it'll have the same UID
try:
bad_event = self.cal_client.InsertEvent(event,
'/calendar/feeds/default/private/full')
self.fail('Was able to insert an event with a duplicate UID')
except gdata.service.RequestError, error:
# for the current problem with redirects, just re-raise so the
# failure doesn't seem to be because of the duplicate UIDs.
status = error[0]['status']
if status == 302:
raise
# otherwise, make sure it was the right error
self.assertEquals(error[0]['status'], 409)
self.assertEquals(error[0]['reason'], 'Conflict')
# Ensure that atom data returned from calendar server equals atom data
# sent
self.assertEquals(event.title.text, new_event.title.text)
self.assertEquals(event.content.text, new_event.content.text)
# Ensure that gd:where data returned from calendar equals value sent
self.assertEquals(event.where[0].value_string,
new_event.where[0].value_string)
# Delete the event
self.cal_client.DeleteEvent(new_event.GetEditLink().href)
def testCreateAndDeleteEventUsingBatch(self):
# Get random data for creating event
r = random.Random()
r.seed()
random_event_number = str(r.randint(100000,1000000))
random_event_title = 'My Random Comments Test Event %s' % (
random_event_number)
# Set event data
event = gdata.calendar.CalendarEventEntry()
event.author.append(atom.Author(name=atom.Name(text='GData Test user')))
event.title = atom.Title(text=random_event_title)
event.content = atom.Content(text='Picnic with some lunch')
# Form a batch request
batch_request = gdata.calendar.CalendarEventFeed()
batch_request.AddInsert(entry=event)
# Execute the batch request to insert the event.
self.cal_client.ProgrammaticLogin()
batch_result = self.cal_client.ExecuteBatch(batch_request,
gdata.calendar.service.DEFAULT_BATCH_URL)
self.assertEquals(len(batch_result.entry), 1)
self.assertEquals(batch_result.entry[0].title.text, random_event_title)
self.assertEquals(batch_result.entry[0].batch_operation.type,
gdata.BATCH_INSERT)
self.assertEquals(batch_result.GetBatchLink().href,
gdata.calendar.service.DEFAULT_BATCH_URL)
# Create a batch request to delete the newly created entry.
batch_delete_request = gdata.calendar.CalendarEventFeed()
batch_delete_request.AddDelete(entry=batch_result.entry[0])
batch_delete_result = self.cal_client.ExecuteBatch(batch_delete_request,
batch_result.GetBatchLink().href)
self.assertEquals(len(batch_delete_result.entry), 1)
self.assertEquals(batch_delete_result.entry[0].batch_operation.type,
gdata.BATCH_DELETE)
def testCorrectReturnTypesForGetMethods(self):
self.cal_client.ProgrammaticLogin()
result = self.cal_client.GetCalendarEventFeed()
self.assertEquals(isinstance(result, gdata.calendar.CalendarEventFeed),
True)
def testValidHostName(self):
mock_http = atom.mock_http.MockHttpClient()
response = atom.mock_http.MockResponse(body='<entry/>', status=200,
reason='OK')
mock_http.add_response(response, 'GET',
'https://www.google.com/calendar/feeds/default/allcalendars/full')
self.cal_client.ssl = True
self.cal_client.http_client = mock_http
self.cal_client.SetAuthSubToken('foo')
self.assertEquals(str(self.cal_client.token_store.find_token(
'https://www.google.com/calendar/feeds/default/allcalendars/full')),
'AuthSub token=foo')
resp = self.cal_client.Get('/calendar/feeds/default/allcalendars/full')
self.assert_(resp is not None)
class CalendarEventQueryUnitTest(unittest.TestCase):
  """Offline validation tests for CalendarEventQuery parameters."""

  def setUp(self):
    self.query = gdata.calendar.service.CalendarEventQuery()

  def testOrderByValidatesValues(self):
    """orderby accepts known values and rejects anything else."""
    self.query.orderby = 'lastmodified'
    self.assertEquals('lastmodified', self.query.orderby)
    try:
      self.query.orderby = 'illegal input'
    except gdata.calendar.service.Error:
      # A rejected assignment must leave the previous value intact.
      self.assertEquals('lastmodified', self.query.orderby)
    else:
      self.fail()

  def testSortOrderValidatesValues(self):
    """sortorder accepts known values and rejects anything else."""
    self.query.sortorder = 'a'
    self.assertEquals('a', self.query.sortorder)
    try:
      self.query.sortorder = 'illegal input'
    except gdata.calendar.service.Error:
      # A rejected assignment must leave the previous value intact.
      self.assertEquals('a', self.query.sortorder)
    else:
      self.fail()

  def testTimezoneParameter(self):
    """ctz is stored as a query parameter and URL-escaped in the URI."""
    timezone_name = 'America/Los_Angeles'
    self.query.ctz = timezone_name
    self.assertEquals(timezone_name, self.query['ctz'])
    self.assert_(self.query.ToUri().find('America%2FLos_Angeles') > -1)
if __name__ == '__main__':
print ('Google Calendar Test\nNOTE: Please run these tests only with a '
'test account. The tests may delete or update your data.')
username = raw_input('Please enter your username: ')
password = getpass.getpass()
unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'Alexandre Vivien <alex@simplecode.fr>'
import unittest
import gdata.client
import gdata.data
import gdata.gauth
import gdata.marketplace.client
import gdata.marketplace.data
import gdata.test_config as conf
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
class LicensingClientTest(unittest.TestCase):
  """Live tests for the Google Apps Marketplace licensing client.

  The test methods contact Google servers and therefore return immediately
  unless the 'runlive' option is set to 'true'.
  """

  def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    # Register the Marketplace credentials as runner options.  'conf' is the
    # gdata.test_config module itself (see imports), so it is used here for
    # consistency with the rest of this module.
    conf.options.register(
        'appsid',
        'Enter the Application ID of your Marketplace application',
        description='The Application ID of your Marketplace application')
    conf.options.register(
        'appsconsumerkey',
        'Enter the Consumer Key of your Marketplace application',
        description='The Consumer Key of your Marketplace application')
    conf.options.register(
        'appsconsumersecret',
        'Enter the Consumer Secret of your Marketplace application',
        description='The Consumer Secret of your Marketplace application')

  def setUp(self):
    # Placeholder client; replaced by a fully configured one in live runs.
    self.client = gdata.marketplace.client.LicensingClient(
        domain='example.com')
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.marketplace.client.LicensingClient(
          domain=conf.options.get_value('appsdomain'))
      conf.configure_client(self.client, 'LicensingClientTest',
                            self.client.auth_service, True)
      # The Licensing API is accessed with 2-legged OAuth and an empty
      # requestor id.
      self.client.auth_token = gdata.gauth.TwoLeggedOAuthHmacToken(
          conf.options.get_value('appsconsumerkey'),
          conf.options.get_value('appsconsumersecret'), '')
      self.client.source = 'GData-Python-Client-Test'
      self.client.account_type = 'HOSTED'
      self.client.http_client.debug = True
      self.app_id = conf.options.get_value('appsid')

  def tearDown(self):
    conf.close_client(self.client)

  def testGetLicense(self):
    """Fetches the license entry for the configured application."""
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testGetLicense')
    fetched_feed = self.client.GetLicense(app_id=self.app_id)
    self.assertTrue(isinstance(fetched_feed,
                               gdata.marketplace.data.LicenseFeed))
    self.assertTrue(isinstance(fetched_feed.entry[0],
                               gdata.marketplace.data.LicenseEntry))
    entity = fetched_feed.entry[0].content.entity
    self.assertTrue(entity is not None)
    self.assertNotEqual(entity.id, '')
    self.assertNotEqual(entity.enabled, '')
    self.assertNotEqual(entity.customer_id, '')
    self.assertNotEqual(entity.state, '')

  def testGetLicenseNotifications(self):
    """Pages through license notifications, two entries at a time."""
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testGetLicenseNotifications')
    fetched_feed = self.client.GetLicenseNotifications(app_id=self.app_id,
                                                       max_results=2)
    self.assertTrue(isinstance(fetched_feed,
                               gdata.marketplace.data.LicenseFeed))
    self.assertEqual(len(fetched_feed.entry), 2)
    for entry in fetched_feed.entry:
      entity = entry.content.entity
      self.assertTrue(entity is not None)
      self.assertNotEqual(entity.id, '')
      self.assertNotEqual(entity.domain_name, '')
      self.assertNotEqual(entity.installer_email, '')
      self.assertNotEqual(entity.tos_acceptance_time, '')
      self.assertNotEqual(entity.last_change_time, '')
      self.assertNotEqual(entity.product_config_id, '')
      self.assertNotEqual(entity.state, '')
    # Follow the 'next' link to verify paging works.
    next_uri = fetched_feed.find_next_link()
    fetched_feed_next = self.client.GetLicenseNotifications(uri=next_uri)
    self.assertTrue(isinstance(fetched_feed_next,
                               gdata.marketplace.data.LicenseFeed))
    self.assertTrue(len(fetched_feed_next.entry) <= 2)
    for entry in fetched_feed_next.entry:
      entity = entry.content.entity
      self.assertTrue(entity is not None)
      self.assertNotEqual(entity.id, '')
      self.assertNotEqual(entity.domain_name, '')
      self.assertNotEqual(entity.installer_email, '')
      self.assertNotEqual(entity.tos_acceptance_time, '')
      self.assertNotEqual(entity.last_change_time, '')
      self.assertNotEqual(entity.product_config_id, '')
      self.assertNotEqual(entity.state, '')
def suite():
  """Build the test suite containing every test case in this module."""
  test_cases = [LicensingClientTest]
  return conf.build_suite(test_cases)
if __name__ == '__main__':
  # Run the suite with the plain text runner when invoked directly.
  unittest.TextTestRunner().run(suite())
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
import getpass
import time
import unittest
import StringIO
import gdata.photos.service
import gdata.photos
import atom
# Credentials are filled in interactively by the __main__ block below; the
# tests run live against the supplied account.
username = ''
password = ''
# Path of the image uploaded by the photo tests, relative to this test file.
test_image_location = '../../testimage.jpg'
test_image_name = 'testimage.jpg'
class PhotosServiceTest(unittest.TestCase):
  """Live tests for the Picasa Web Albums (Photos) service client.

  NOTE(review): these tests contact Google servers and mutate account data;
  they rely on the module-level username/password set by the __main__ block.
  """

  def setUp(self):
    # Initialize the client and create a new album for testing.
    self.client = gdata.photos.service.PhotosService()
    self.client.email = username
    self.client.password = password
    self.client.source = 'Photos Client Unit Tests'
    self.client.ProgrammaticLogin()
    # Give the album a unique title by appending the current time.
    self.test_album = self.client.InsertAlbum(
        'Python library test' + str(time.time()),
        'A temporary test album.')

  def testUploadGetAndDeletePhoto(self):
    """Round-trips a photo: upload, search for it, then delete it."""
    image_entry = self.client.InsertPhotoSimple(self.test_album,
        'test', 'a pretty testing picture', test_image_location)
    self.assert_(image_entry.title.text == 'test')
    results_feed = self.client.SearchUserPhotos('test')
    self.assert_(len(results_feed.entry) > 0)
    self.client.Delete(image_entry)

  def testInsertPhotoUpdateBlobAndDelete(self):
    """Builds a photo entry from scratch, replaces its image, deletes it."""
    new_entry = gdata.photos.PhotoEntry()
    new_entry.title = atom.Title(text='a_test_image')
    new_entry.summary = atom.Summary(text='Just a test.')
    # The kind category marks this Atom entry as a photo.
    new_entry.category.append(atom.Category(
        scheme='http://schemas.google.com/g/2005#kind',
        term='http://schemas.google.com/photos/2007#photo'))
    entry = self.client.InsertPhoto(self.test_album, new_entry,
        test_image_location, content_type='image/jpeg')
    self.assert_(entry.id.text)
    updated_entry = self.client.UpdatePhotoBlob(entry, test_image_location)
    # Replacing the blob creates a new version, so the edit link changes.
    self.assert_(entry.GetEditLink().href != updated_entry.GetEditLink().href)
    self.client.Delete(updated_entry)

  def tearDown(self):
    # Delete the test album.
    test_album = self.client.GetEntry(self.test_album.GetSelfLink().href)
    self.client.Delete(test_album)
if __name__ == '__main__':
  # Interactive entry point: credentials are collected from the console
  # because the tests above run live against the account.
  print ('Google Photos test\nNOTE: Please run these tests only with a test '
         'account. The tests may delete or update your data.')
  username = raw_input('Please enter your username: ')
  # getpass hides the password as it is typed.
  password = getpass.getpass()
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Content API for Shopping tests"""
__author__ = 'afshar (Ali Afshar)'
import unittest
from gdata.contentforshopping import client, data
class CFSClientTest(unittest.TestCase):
  """Unit tests for the Content for Shopping URI construction logic."""

  # Common prefix shared by every generated Content API URI.
  _PREFIX = 'https://content.googleapis.com/content/v1'

  def test_uri_missing_account_id(self):
    """Building a URI with no account id anywhere must fail."""
    shopping_client = client.ContentForShoppingClient()
    self.assertRaises(ValueError, shopping_client._create_uri,
                      account_id=None, projection=None, resource='a/b')

  def test_uri_bad_projection(self):
    """An unknown projection name must be rejected."""
    shopping_client = client.ContentForShoppingClient()
    self.assertRaises(ValueError, shopping_client._create_uri,
                      account_id='123', projection='banana', resource='a/b')

  def test_good_default_account_id(self):
    """The client-level account id is used when none is given per call."""
    shopping_client = client.ContentForShoppingClient(account_id='123')
    uri = shopping_client._create_uri(account_id=None, projection=None,
                                      resource='a/b')
    self.assertEqual(uri, self._PREFIX + '/123/a/b/generic')

  def test_override_request_account_id(self):
    """A per-call account id wins over the client-level one."""
    shopping_client = client.ContentForShoppingClient(account_id='123')
    uri = shopping_client._create_uri(account_id='321', projection=None,
                                      resource='a/b')
    self.assertEqual(uri, self._PREFIX + '/321/a/b/generic')

  def test_default_projection(self):
    """With nothing specified the 'generic' projection is used."""
    shopping_client = client.ContentForShoppingClient(account_id='123')
    uri = shopping_client._create_uri(account_id=None, projection=None,
                                      resource='a/b')
    self.assertEqual(shopping_client.cfs_projection, 'generic')
    self.assertEqual(uri, self._PREFIX + '/123/a/b/generic')

  def test_default_projection_change(self):
    """A projection set at construction time becomes the default."""
    shopping_client = client.ContentForShoppingClient(account_id='123',
                                                      projection='schema')
    uri = shopping_client._create_uri(account_id=None, projection=None,
                                      resource='a/b')
    self.assertEqual(shopping_client.cfs_projection, 'schema')
    self.assertEqual(uri, self._PREFIX + '/123/a/b/schema')

  def test_request_projection(self):
    """A per-call projection applies without changing the client default."""
    shopping_client = client.ContentForShoppingClient(account_id='123')
    uri = shopping_client._create_uri(account_id=None, projection='schema',
                                      resource='a/b')
    self.assertEqual(shopping_client.cfs_projection, 'generic')
    self.assertEqual(uri, self._PREFIX + '/123/a/b/schema')

  def test_request_resource(self):
    """The resource path sits between the account id and the projection."""
    shopping_client = client.ContentForShoppingClient(account_id='123')
    uri = shopping_client._create_uri(account_id=None, projection=None,
                                      resource='x/y/z')
    self.assertEqual(uri, self._PREFIX + '/123/x/y/z/generic')

  def test_path_single(self):
    """A single extra path segment is appended after the projection."""
    shopping_client = client.ContentForShoppingClient(account_id='123')
    uri = shopping_client._create_uri(account_id=None, projection=None,
                                      resource='r', path=['1'])
    self.assertEqual(uri, self._PREFIX + '/123/r/generic/1')

  def test_path_multiple(self):
    """Multiple extra path segments are appended in order."""
    shopping_client = client.ContentForShoppingClient(account_id='123')
    uri = shopping_client._create_uri(account_id=None, projection=None,
                                      resource='r', path=['1', '2'])
    self.assertEqual(uri, self._PREFIX + '/123/r/generic/1/2')
if __name__ == '__main__':
  # Allow running this module's tests directly from the command line.
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2008, 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import gdata.client
import gdata.gauth
import gdata.data
import atom.mock_http_core
import StringIO
class ClientLoginTest(unittest.TestCase):
def test_token_request(self):
client = gdata.client.GDClient()
client.http_client = atom.mock_http_core.SettableHttpClient(200, 'OK',
'SID=DQAAAGgA...7Zg8CTN\n'
'LSID=DQAAAGsA...lk8BBbG\n'
'Auth=DQAAAGgA...dk3fA5N', {'Content-Type': 'text/plain'})
token = client.request_client_login_token('email', 'pw', 'cp', 'test')
self.assert_(isinstance(token, gdata.gauth.ClientLoginToken))
self.assertEqual(token.token_string, 'DQAAAGgA...dk3fA5N')
# Test a server response without a ClientLogin token.`
client.http_client.set_response(200, 'OK', 'SID=12345\nLSID=34567', {})
self.assertRaises(gdata.client.ClientLoginTokenMissing,
client.request_client_login_token, 'email', 'pw', '', '')
# Test a 302 redirect from the server on a login request.
client.http_client.set_response(302, 'ignored', '', {})
# TODO: change the exception class to one in gdata.client.
self.assertRaises(gdata.client.BadAuthenticationServiceURL,
client.request_client_login_token, 'email', 'pw', '', '')
# Test a CAPTCHA challenge from the server
client.http_client.set_response(403, 'Access Forbidden',
'Url=http://www.google.com/login/captcha\n'
'Error=CaptchaRequired\n'
'CaptchaToken=DQAAAGgA...dkI1LK9\n'
# TODO: verify this sample CAPTCHA URL matches an
# actual challenge from the server.
'CaptchaUrl=Captcha?ctoken=HiteT4bVoP6-yFkHPibe7O9EqxeiI7lUSN', {})
try:
token = client.request_client_login_token('email', 'pw', '', '')
self.fail('should raise a CaptchaChallenge on a 403 with a '
'CaptchRequired error.')
except gdata.client.CaptchaChallenge, challenge:
self.assertEquals(challenge.captcha_url,
'http://www.google.com/accounts/'
'Captcha?ctoken=HiteT4bVoP6-yFkHPibe7O9EqxeiI7lUSN')
self.assertEquals(challenge.captcha_token, 'DQAAAGgA...dkI1LK9')
# Test an unexpected response, a 404 for example.
client.http_client.set_response(404, 'ignored', '', {})
self.assertRaises(gdata.client.ClientLoginFailed,
client.request_client_login_token, 'email', 'pw', '', '')
def test_client_login(self):
client = gdata.client.GDClient()
client.http_client = atom.mock_http_core.SettableHttpClient(200, 'OK',
'SID=DQAAAGgA...7Zg8CTN\n'
'LSID=DQAAAGsA...lk8BBbG\n'
'Auth=DQAAAGgA...dk3fA5N', {'Content-Type': 'text/plain'})
client.client_login('me@example.com', 'password', 'wise', 'unit test')
self.assert_(isinstance(client.auth_token, gdata.gauth.ClientLoginToken))
self.assertEqual(client.auth_token.token_string, 'DQAAAGgA...dk3fA5N')
class AuthSubTest(unittest.TestCase):
  """Tests AuthSub token capture, upgrade and revocation against mocks."""

  def test_get_and_upgrade_token(self):
    client = gdata.client.GDClient()
    client.http_client = atom.mock_http_core.SettableHttpClient(200, 'OK',
        'Token=UpgradedTokenVal\n'
        'Extra data', {'Content-Type': 'text/plain'})
    # The single-use token arrives as a query parameter on the redirect URL.
    page_url = 'http://example.com/showcalendar.html?token=CKF50YzIHxCTKMAg'
    client.auth_token = gdata.gauth.AuthSubToken.from_url(page_url)
    self.assert_(isinstance(client.auth_token, gdata.gauth.AuthSubToken))
    self.assertEqual(client.auth_token.token_string, 'CKF50YzIHxCTKMAg')
    # Upgrading without an argument replaces the client's own token.
    upgraded = client.upgrade_token()
    self.assert_(isinstance(client.auth_token, gdata.gauth.AuthSubToken))
    self.assertEqual(client.auth_token.token_string, 'UpgradedTokenVal')
    self.assertEqual(client.auth_token, upgraded)
    # Ensure passing in a token returns without modifying client's auth_token.
    client.http_client.set_response(200, 'OK', 'Token=4567', {})
    upgraded = client.upgrade_token(
        gdata.gauth.AuthSubToken.from_url('?token=1234'))
    self.assertEqual(upgraded.token_string, '4567')
    self.assertEqual(client.auth_token.token_string, 'UpgradedTokenVal')
    self.assertNotEqual(client.auth_token, upgraded)
    # Test exception cases
    client.auth_token = None
    self.assertRaises(gdata.client.UnableToUpgradeToken, client.upgrade_token,
                      None)
    self.assertRaises(gdata.client.UnableToUpgradeToken, client.upgrade_token)

  def test_revoke_token(self):
    client = gdata.client.GDClient()
    client.http_client = atom.mock_http_core.SettableHttpClient(
        200, 'OK', '', {})
    page_url = 'http://example.com/showcalendar.html?token=CKF50YzIHxCTKMAg'
    client.auth_token = gdata.gauth.AuthSubToken.from_url(page_url)
    deleted = client.revoke_token()
    self.assert_(deleted)
    # The revoke request must carry the AuthSub Authorization header.
    self.assertEqual(
        client.http_client.last_request.headers['Authorization'],
        'AuthSub token=CKF50YzIHxCTKMAg')
class OAuthTest(unittest.TestCase):
  """Walks the full three-legged OAuth 1.0 HMAC-SHA1 flow against mocks."""

  def test_hmac_flow(self):
    client = gdata.client.GDClient()
    client.http_client = atom.mock_http_core.SettableHttpClient(
        200, 'OK', 'oauth_token=ab3cd9j4ks7&oauth_token_secret=ZXhhbXBsZS',
        {})
    request_token = client.get_oauth_token(
        ['http://example.com/service'], 'http://example.net/myapp',
        'consumer', consumer_secret='secret')
    # Check that the response was correctly parsed.
    self.assertEqual(request_token.token, 'ab3cd9j4ks7')
    self.assertEqual(request_token.token_secret, 'ZXhhbXBsZS')
    self.assertEqual(request_token.auth_state, gdata.gauth.REQUEST_TOKEN)
    # Also check the Authorization header which was sent in the request.
    auth_header = client.http_client.last_request.headers['Authorization']
    self.assert_('OAuth' in auth_header)
    self.assert_(
        'oauth_callback="http%3A%2F%2Fexample.net%2Fmyapp"' in auth_header)
    self.assert_('oauth_version="1.0"' in auth_header)
    self.assert_('oauth_signature_method="HMAC-SHA1"' in auth_header)
    self.assert_('oauth_consumer_key="consumer"' in auth_header)
    # Check generation of the authorization URL.
    authorize_url = request_token.generate_authorization_url()
    self.assert_(str(authorize_url).startswith(
        'https://www.google.com/accounts/OAuthAuthorizeToken'))
    self.assert_('oauth_token=ab3cd9j4ks7' in str(authorize_url))
    # Check that the token information from the browser's URL is parsed.
    redirected_url = (
        'http://example.net/myapp?oauth_token=CKF5zz&oauth_verifier=Xhhbas')
    gdata.gauth.authorize_request_token(request_token, redirected_url)
    self.assertEqual(request_token.token, 'CKF5zz')
    self.assertEqual(request_token.verifier, 'Xhhbas')
    self.assertEqual(request_token.auth_state,
                     gdata.gauth.AUTHORIZED_REQUEST_TOKEN)
    # Check that the token upgrade response was correctly parsed.
    client.http_client.set_response(
        200, 'OK', 'oauth_token=3cd9Fj417&oauth_token_secret=Xhrh6bXBs', {})
    access_token = client.get_access_token(request_token)
    # The request token is upgraded in place and mirrored by access_token.
    self.assertEqual(request_token.token, '3cd9Fj417')
    self.assertEqual(request_token.token_secret, 'Xhrh6bXBs')
    self.assert_(request_token.verifier is None)
    self.assertEqual(request_token.auth_state, gdata.gauth.ACCESS_TOKEN)
    self.assertEqual(request_token.token, access_token.token)
    self.assertEqual(request_token.token_secret, access_token.token_secret)
    self.assert_(access_token.verifier is None)
    self.assertEqual(request_token.auth_state, access_token.auth_state)
    # Also check the Authorization header which was sent in the request.
    auth_header = client.http_client.last_request.headers['Authorization']
    self.assert_('OAuth' in auth_header)
    self.assert_('oauth_callback="' not in auth_header)
    self.assert_('oauth_version="1.0"' in auth_header)
    self.assert_('oauth_verifier="Xhhbas"' in auth_header)
    self.assert_('oauth_signature_method="HMAC-SHA1"' in auth_header)
    self.assert_('oauth_consumer_key="consumer"' in auth_header)
class RequestTest(unittest.TestCase):
def test_simple_request(self):
client = gdata.client.GDClient()
client.http_client = atom.mock_http_core.EchoHttpClient()
response = client.request('GET', 'https://example.com/test')
self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
self.assertEqual(response.getheader('Echo-Uri'), '/test')
self.assertEqual(response.getheader('Echo-Scheme'), 'https')
self.assertEqual(response.getheader('Echo-Method'), 'GET')
http_request = atom.http_core.HttpRequest(
uri=atom.http_core.Uri(scheme='http', host='example.net', port=8080),
method='POST', headers={'X': 1})
http_request.add_body_part('test', 'text/plain')
response = client.request(http_request=http_request)
self.assertEqual(response.getheader('Echo-Host'), 'example.net:8080')
# A Uri with path set to None should default to /.
self.assertEqual(response.getheader('Echo-Uri'), '/')
self.assertEqual(response.getheader('Echo-Scheme'), 'http')
self.assertEqual(response.getheader('Echo-Method'), 'POST')
self.assertEqual(response.getheader('Content-Type'), 'text/plain')
self.assertEqual(response.getheader('X'), '1')
self.assertEqual(response.read(), 'test')
# Use the same request object from above, but overwrite the request path
# by passing in a URI.
response = client.request(uri='/new/path?p=1', http_request=http_request)
self.assertEqual(response.getheader('Echo-Host'), 'example.net:8080')
self.assertEqual(response.getheader('Echo-Uri'), '/new/path?p=1')
self.assertEqual(response.getheader('Echo-Scheme'), 'http')
self.assertEqual(response.read(), 'test')
def test_gdata_version_header(self):
client = gdata.client.GDClient()
client.http_client = atom.mock_http_core.EchoHttpClient()
response = client.request('GET', 'http://example.com')
self.assertEqual(response.getheader('GData-Version'), None)
client.api_version = '2'
response = client.request('GET', 'http://example.com')
self.assertEqual(response.getheader('GData-Version'), '2')
def test_redirects(self):
client = gdata.client.GDClient()
client.http_client = atom.mock_http_core.MockHttpClient()
# Add the redirect response for the initial request.
first_request = atom.http_core.HttpRequest('http://example.com/1',
'POST')
client.http_client.add_response(first_request, 302, None,
{'Location': 'http://example.com/1?gsessionid=12'})
second_request = atom.http_core.HttpRequest(
'http://example.com/1?gsessionid=12', 'POST')
client.http_client.AddResponse(second_request, 200, 'OK', body='Done')
response = client.Request('POST', 'http://example.com/1')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
self.assertEqual(response.read(), 'Done')
redirect_loop_request = atom.http_core.HttpRequest(
'http://example.com/2?gsessionid=loop', 'PUT')
client.http_client.add_response(redirect_loop_request, 302, None,
{'Location': 'http://example.com/2?gsessionid=loop'})
try:
response = client.request(method='PUT', uri='http://example.com/2?gsessionid=loop')
self.fail('Loop URL should have redirected forever.')
except gdata.client.RedirectError, err:
self.assert_(str(err).startswith('Too many redirects from server'))
def test_lowercase_location(self):
client = gdata.client.GDClient()
client.http_client = atom.mock_http_core.MockHttpClient()
# Add the redirect response for the initial request.
first_request = atom.http_core.HttpRequest('http://example.com/1',
'POST')
# In some environments, notably App Engine, the HTTP headers which come
# back from a server will be normalized to all lowercase.
client.http_client.add_response(first_request, 302, None,
{'location': 'http://example.com/1?gsessionid=12'})
second_request = atom.http_core.HttpRequest(
'http://example.com/1?gsessionid=12', 'POST')
client.http_client.AddResponse(second_request, 200, 'OK', body='Done')
response = client.Request('POST', 'http://example.com/1')
self.assertEqual(response.status, 200)
self.assertEqual(response.reason, 'OK')
self.assertEqual(response.read(), 'Done')
def test_exercise_exceptions(self):
# TODO
pass
def test_converter_vs_desired_class(self):
def bad_converter(string):
return 1
class TestClass(atom.core.XmlElement):
_qname = '{http://www.w3.org/2005/Atom}entry'
client = gdata.client.GDClient()
client.http_client = atom.mock_http_core.EchoHttpClient()
test_entry = gdata.data.GDEntry()
result = client.post(test_entry, 'http://example.com')
self.assert_(isinstance(result, gdata.data.GDEntry))
result = client.post(test_entry, 'http://example.com', converter=bad_converter)
self.assertEquals(result, 1)
result = client.post(test_entry, 'http://example.com', desired_class=TestClass)
self.assert_(isinstance(result, TestClass))
class QueryTest(unittest.TestCase):
  """Tests gdata.client.Query URL-parameter generation."""

  def test_query_modifies_request(self):
    request = atom.http_core.HttpRequest()
    gdata.client.Query(
        text_query='foo', categories=['a', 'b']).modify_request(request)
    # categories param from above is named category in URL
    self.assertEqual(request.uri.query, {'q': 'foo', 'category': 'a,b'})

  def test_client_uses_query_modification(self):
    """If the Query is passed as an unexpected param it should apply"""
    client = gdata.client.GDClient()
    client.http_client = atom.mock_http_core.EchoHttpClient()
    query = gdata.client.Query(max_results=7)
    client.http_client = atom.mock_http_core.SettableHttpClient(
        201, 'CREATED', gdata.data.GDEntry().ToString(), {})
    # The Query object is recognized by type, not by keyword name.
    response = client.get('https://example.com/foo', a_random_param=query)
    self.assertEqual(
        client.http_client.last_request.uri.query['max-results'], '7')
class VersionConversionTest(unittest.TestCase):
  """Checks mapping of api_version strings to major XML version ints."""

  def test_use_default_version(self):
    # None falls back to the version 1 XML rules.
    self.assertEquals(gdata.client.get_xml_version(None), 1)

  def test_str_to_int_version(self):
    # Only the leading major number matters; minor parts are ignored.
    cases = (('1', 1), ('2', 2), ('2.1.2', 2), ('10.4', 10))
    for version_string, expected in cases:
      self.assertEquals(gdata.client.get_xml_version(version_string),
                        expected)
class UpdateTest(unittest.TestCase):
  """Test Update/PUT"""

  def test_update_uri_editlink(self):
    """Test that the PUT uri is grabbed from the entry's edit link"""
    client = gdata.client.GDClient()
    client.http_client = atom.mock_http_core.SettableHttpClient(
        200, 'OK', gdata.data.GDEntry().ToString(), {})
    entry = gdata.data.GDEntry()
    entry.link.append(atom.data.Link(rel='edit', href='https://example.com/edit'))
    response = client.update(entry)
    request = client.http_client.last_request
    # The request should have been sent to the entry's own edit URI.
    self.assertEqual(str(client.http_client.last_request.uri),
                     'https://example.com/edit')

  def test_update_uri(self):
    """Test that when passed, a uri overrides the entry's edit link"""
    client = gdata.client.GDClient()
    client.http_client = atom.mock_http_core.SettableHttpClient(
        200, 'OK', gdata.data.GDEntry().ToString(), {})
    entry = gdata.data.GDEntry()
    entry.link.append(atom.data.Link(rel='edit', href='https://example.com/edit'))
    response = client.update(entry, uri='https://example.com/test')
    self.assertEqual(str(client.http_client.last_request.uri),
                     'https://example.com/test')
def suite():
  """Assemble every TestCase class in this module into a single suite."""
  test_classes = (ClientLoginTest, AuthSubTest, OAuthTest, RequestTest,
                  VersionConversionTest, QueryTest, UpdateTest)
  return unittest.TestSuite(
      unittest.makeSuite(test_class, 'test') for test_class in test_classes)
if __name__ == '__main__':
  # Allow running this module's tests directly from the command line.
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
import unittest
from gdata import test_data
import gdata.photos
class AlbumFeedTest(unittest.TestCase):
  """Parses the canned ALBUM_FEED and checks album-level fields."""

  def setUp(self):
    self.album_feed = gdata.photos.AlbumFeedFromString(test_data.ALBUM_FEED)

  def testCorrectXmlParsing(self):
    feed = self.album_feed
    self.assert_(feed.id.text ==
        'http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1')
    self.assert_(feed.gphoto_id.text == '1')
    self.assert_(len(feed.entry) == 4)
    known_photo_id = ('http://picasaweb.google.com/data/entry/api/user/'
                      'sample.user/albumid/1/photoid/2')
    for album_entry in feed.entry:
      if album_entry.id.text == known_photo_id:
        # Only the known photo entry carries the 'Blue' summary.
        self.assert_(album_entry.summary.text == 'Blue')
class PhotoFeedTest(unittest.TestCase):
  """Parses ALBUM_FEED as a photo feed and checks photo-level fields."""

  def setUp(self):
    self.feed = gdata.photos.PhotoFeedFromString(test_data.ALBUM_FEED)

  def testCorrectXmlParsing(self):
    target_id = ('http://picasaweb.google.com/data/entry/api/user/'
                 'sample.user/albumid/1/photoid/2')
    for photo in self.feed.entry:
      if photo.id.text != target_id:
        continue
      self.assert_(photo.gphoto_id.text == '2')
      self.assert_(photo.albumid.text == '1')
      self.assert_(photo.exif.flash.text == 'true')
      self.assert_(photo.media.title.type == 'plain')
      self.assert_(photo.media.title.text == 'Aqua Blue.jpg')
      self.assert_(len(photo.media.thumbnail) == 3)
class AnyFeedTest(unittest.TestCase):
  """Checks that AnyFeedFromString converts entries to photo types."""

  def setUp(self):
    self.feed = gdata.photos.AnyFeedFromString(test_data.ALBUM_FEED)

  def testEntryTypeConversion(self):
    album_uri = ('http://picasaweb.google.com/data/feed/api/user/'
                 'sample.user/albumid/')
    matching = [e for e in self.feed.entry if e.id.text == album_uri]
    for converted in matching:
      self.assert_(isinstance(converted, gdata.photos.PhotoEntry))
if __name__ == '__main__':
  # Allow running this module's tests directly from the command line.
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
import unittest
from gdata import test_data
import atom
import gdata.contacts
class ContactEntryTest(unittest.TestCase):
  """Tests parsing and generation of a single contact entry."""

  def setUp(self):
    self.entry = gdata.contacts.ContactEntryFromString(test_data.NEW_CONTACT)

  def testParsingTestEntry(self):
    """Verifies every field parsed from the canned NEW_CONTACT XML."""
    self.assertEquals(self.entry.title.text, 'Fitzgerald')
    self.assertEquals(len(self.entry.email), 2)
    for email in self.entry.email:
      if email.rel == 'http://schemas.google.com/g/2005#work':
        self.assertEquals(email.address, 'liz@gmail.com')
      elif email.rel == 'http://schemas.google.com/g/2005#home':
        self.assertEquals(email.address, 'liz@example.org')
    self.assertEquals(len(self.entry.phone_number), 3)
    self.assertEquals(len(self.entry.postal_address), 1)
    self.assertEquals(self.entry.postal_address[0].primary, 'true')
    self.assertEquals(self.entry.postal_address[0].text,
        '1600 Amphitheatre Pkwy Mountain View')
    self.assertEquals(len(self.entry.im), 1)
    self.assertEquals(len(self.entry.group_membership_info), 1)
    self.assertEquals(self.entry.group_membership_info[0].href,
        'http://google.com/m8/feeds/groups/liz%40gmail.com/base/270f')
    self.assertEquals(self.entry.group_membership_info[0].deleted, 'false')
    self.assertEquals(len(self.entry.extended_property), 2)
    self.assertEquals(self.entry.extended_property[0].name, 'pet')
    self.assertEquals(self.entry.extended_property[0].value, 'hamster')
    self.assertEquals(self.entry.extended_property[1].name, 'cousine')
    # The second extended property stores arbitrary XML rather than a value.
    self.assertEquals(
        self.entry.extended_property[1].GetXmlBlobExtensionElement().tag,
        'italian')

  def testToAndFromString(self):
    """Round-trips the entry through XML and re-checks every field."""
    copied_entry = gdata.contacts.ContactEntryFromString(str(self.entry))
    self.assertEquals(copied_entry.title.text, 'Fitzgerald')
    self.assertEquals(len(copied_entry.email), 2)
    for email in copied_entry.email:
      if email.rel == 'http://schemas.google.com/g/2005#work':
        self.assertEquals(email.address, 'liz@gmail.com')
      elif email.rel == 'http://schemas.google.com/g/2005#home':
        self.assertEquals(email.address, 'liz@example.org')
    self.assertEquals(len(copied_entry.phone_number), 3)
    self.assertEquals(len(copied_entry.postal_address), 1)
    self.assertEquals(copied_entry.postal_address[0].primary, 'true')
    self.assertEquals(copied_entry.postal_address[0].text,
        '1600 Amphitheatre Pkwy Mountain View')
    self.assertEquals(len(copied_entry.im), 1)
    self.assertEquals(len(copied_entry.group_membership_info), 1)
    self.assertEquals(copied_entry.group_membership_info[0].href,
        'http://google.com/m8/feeds/groups/liz%40gmail.com/base/270f')
    self.assertEquals(copied_entry.group_membership_info[0].deleted, 'false')
    self.assertEquals(len(copied_entry.extended_property), 2)
    self.assertEquals(copied_entry.extended_property[0].name, 'pet')
    self.assertEquals(copied_entry.extended_property[0].value, 'hamster')
    self.assertEquals(copied_entry.extended_property[1].name, 'cousine')
    self.assertEquals(
        copied_entry.extended_property[1].GetXmlBlobExtensionElement().tag,
        'italian')

  def testCreateContactFromScratch(self):
    """Builds a ContactEntry in code and verifies its XML round-trips."""
    # Create a new entry
    new_entry = gdata.contacts.ContactEntry()
    new_entry.title = atom.Title(text='Elizabeth Bennet')
    new_entry.content = atom.Content(text='Test Notes')
    new_entry.email.append(gdata.contacts.Email(
        rel='http://schemas.google.com/g/2005#work',
        address='liz@gmail.com'))
    new_entry.phone_number.append(gdata.contacts.PhoneNumber(
        rel='http://schemas.google.com/g/2005#work', text='(206)555-1212'))
    new_entry.organization = gdata.contacts.Organization(
        org_name=gdata.contacts.OrgName(text='TestCo.'))
    new_entry.extended_property.append(gdata.ExtendedProperty(name='test',
        value='1234'))
    new_entry.birthday = gdata.contacts.Birthday(when='2009-7-23')
    # Extended properties may also carry an arbitrary XML blob.
    sports_property = gdata.ExtendedProperty(name='sports')
    sports_property.SetXmlBlob('<dance><salsa/><ballroom_dancing/></dance>')
    new_entry.extended_property.append(sports_property)
    # Generate and parse the XML for the new entry.
    entry_copy = gdata.contacts.ContactEntryFromString(str(new_entry))
    self.assertEquals(entry_copy.title.text, new_entry.title.text)
    self.assertEquals(entry_copy.content.text, 'Test Notes')
    self.assertEquals(len(entry_copy.email), 1)
    self.assertEquals(entry_copy.email[0].rel, new_entry.email[0].rel)
    self.assertEquals(entry_copy.email[0].address, 'liz@gmail.com')
    self.assertEquals(len(entry_copy.phone_number), 1)
    self.assertEquals(entry_copy.phone_number[0].rel,
        new_entry.phone_number[0].rel)
    self.assertEquals(entry_copy.birthday.when, '2009-7-23')
    self.assertEquals(entry_copy.phone_number[0].text, '(206)555-1212')
    self.assertEquals(entry_copy.organization.org_name.text, 'TestCo.')
    self.assertEquals(len(entry_copy.extended_property), 2)
    self.assertEquals(entry_copy.extended_property[0].name, 'test')
    self.assertEquals(entry_copy.extended_property[0].value, '1234')
class ContactsFeedTest(unittest.TestCase):
  """Tests parsing and round-tripping of the canned contacts feed."""

  def setUp(self):
    self.feed = gdata.contacts.ContactsFeedFromString(test_data.CONTACTS_FEED)

  def testParsingTestFeed(self):
    feed = self.feed
    self.assertEquals(feed.id.text,
        'http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base')
    self.assertEquals(feed.title.text, 'Contacts')
    self.assertEquals(feed.total_results.text, '1')
    self.assertEquals(len(feed.entry), 1)
    contact = feed.entry[0]
    self.assert_(isinstance(contact, gdata.contacts.ContactEntry))
    self.assertEquals(contact.GetPhotoLink().href,
        'http://google.com/m8/feeds/photos/media/liz%40gmail.com/c9012de')
    self.assertEquals(contact.GetPhotoEditLink().href,
        'http://www.google.com/m8/feeds/photos/media/liz%40gmail.com/'
        'c9012de/photo4524')

  def testToAndFromString(self):
    """Serializing then re-parsing the feed must preserve its fields."""
    reparsed_feed = gdata.contacts.ContactsFeedFromString(str(self.feed))
    self.assertEquals(reparsed_feed.id.text,
        'http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base')
    self.assertEquals(reparsed_feed.title.text, 'Contacts')
    self.assertEquals(reparsed_feed.total_results.text, '1')
    self.assertEquals(len(reparsed_feed.entry), 1)
    self.assert_(
        isinstance(reparsed_feed.entry[0], gdata.contacts.ContactEntry))
class GroupsFeedTest(unittest.TestCase):
  """Parses the canned CONTACT_GROUPS_FEED test data."""

  def setUp(self):
    self.feed = gdata.contacts.GroupsFeedFromString(
        test_data.CONTACT_GROUPS_FEED)

  def testParsingGroupsFeed(self):
    """Checks feed id, title, result count and the entry's type."""
    feed = self.feed
    self.assertEquals(feed.id.text, 'jo@gmail.com')
    self.assertEquals(feed.title.text, 'Jo\'s Contact Groups')
    self.assertEquals(feed.total_results.text, '3')
    self.assertEquals(len(feed.entry), 1)
    self.assert_(isinstance(feed.entry[0], gdata.contacts.GroupEntry))
class GroupEntryTest(unittest.TestCase):
  """Parses the canned CONTACT_GROUP_ENTRY test data."""

  def setUp(self):
    self.entry = gdata.contacts.GroupEntryFromString(
        test_data.CONTACT_GROUP_ENTRY)

  def testParsingTestEntry(self):
    """Checks the group title and its single extended-property XML blob."""
    self.assertEquals(self.entry.title.text, 'Salsa group')
    self.assertEquals(len(self.entry.extended_property), 1)
    prop = self.entry.extended_property[0]
    self.assertEquals(prop.name, 'more info about the group')
    blob = prop.GetXmlBlobExtensionElement()
    self.assertEquals(blob.namespace, atom.ATOM_NAMESPACE)
    self.assertEquals(blob.tag, 'info')
    self.assertEquals(blob.text, 'Very nice people.')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2008, 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'e.bidelman@google.com (Eric Bidelman)'
import os
import unittest
import atom.data
import gdata.client
import gdata.data
import gdata.gauth
import gdata.docs.client
import gdata.docs.data
import gdata.test_config as conf
# Interactive/command-line options consumed by the live upload tests below.
TEST_FILE_LOCATION_OPTION = conf.Option(
    'file',
    'Please enter the full path to a test file to upload',
    # Bug fix: the original description read "...uploaded to DocList which."
    # -- a dangling clause left over from an edit.
    description=('This test file will be uploaded to DocList. An example '
                 'file can be found in tests/gdata_tests/docs/test.doc'))
CONTENT_TYPE_OPTION = conf.Option(
    'contenttype',
    'Please enter the mimetype of the file',
    description='The content type should match that of the upload file.')
conf.options.register_option(TEST_FILE_LOCATION_OPTION)
conf.options.register_option(CONTENT_TYPE_OPTION)
class ResumableUploadTestCase(unittest.TestCase):
  """Live tests for gdata.client.ResumableUploader against the DocList API.

  The tests only do real work when the 'runlive' option is 'true';
  otherwise setUp leaves self.client as None and each test returns
  immediately.
  """

  def setUp(self):
    self.client = None
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.docs.client.DocsClient(source='ResumableUploadTest')
      if conf.options.get_value('ssl') == 'true':
        self.client.ssl = True
      # Bug fix: open in binary mode -- the file is uploaded byte-for-byte
      # (and sized with os.path.getsize), so text-mode newline translation
      # would corrupt the upload on Windows.
      self.f = open(conf.options.get_value('file'), 'rb')
      self.content_type = conf.options.get_value('contenttype')
      conf.configure_client(
          self.client, 'ResumableUploadTest', self.client.auth_service)

  def tearDown(self):
    conf.close_client(self.client)
    # Bug fix: self.f only exists when setUp ran in live mode; closing it
    # unconditionally raised AttributeError for non-live runs.
    if self.client is not None:
      self.f.close()

  def testUploadEntireDocumentAndUpdate(self):
    """Uploads the whole file in one session and verifies the new entry."""
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testUploadDocument')
    uploader = gdata.client.ResumableUploader(
        self.client, self.f, self.content_type, os.path.getsize(self.f.name),
        chunk_size=20000,  # 20000 bytes.
        desired_class=gdata.docs.data.DocsEntry)
    e = gdata.docs.data.DocsEntry(
        title=atom.data.Title(text='MyResumableTitleEntireFile'))
    e.category.append(gdata.docs.data.make_kind_category('document'))
    e.writers_can_invite = gdata.docs.data.WritersCanInvite(value='false')
    entry = uploader.UploadFile(
        '/feeds/upload/create-session/default/private/full', entry=e)
    # Verify upload has really completed.
    self.assertEqual(uploader.QueryUploadStatus(), True)
    self.assert_(isinstance(entry, gdata.docs.data.DocsEntry))
    self.assertEqual(entry.title.text, 'MyResumableTitleEntireFile')
    self.assertEqual(entry.GetDocumentType(), 'document')
    self.assertEqual(entry.writers_can_invite.value, 'false')
    self.assertEqual(int(entry.quota_bytes_used.text), 0)
    self.client.Delete(entry, force=True)

  def testUploadDocumentInChunks(self):
    """Drives the uploader manually, one chunk at a time."""
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'testUploadDocumentInChunks')
    uploader = gdata.client.ResumableUploader(
        self.client, self.f, self.content_type, os.path.getsize(self.f.name),
        desired_class=gdata.docs.data.DocsEntry)
    uploader._InitSession(
        '/feeds/upload/create-session/default/private/full',
        headers={'Slug': 'MyManualChunksNoAtomTitle'})
    # UploadChunk returns None until the final chunk completes the upload.
    start_byte = 0
    entry = None
    while not entry:
      entry = uploader.UploadChunk(
          start_byte, uploader.file_handle.read(uploader.chunk_size))
      start_byte += uploader.chunk_size
    # Verify upload has really completed.
    self.assertEqual(uploader.QueryUploadStatus(), True)
    self.assert_(isinstance(entry, gdata.docs.data.DocsEntry))
    self.assertEqual(entry.title.text, 'MyManualChunksNoAtomTitle')
    self.assertEqual(entry.GetDocumentType(), 'document')
    self.client.Delete(entry, force=True)
def suite():
  # Build this module's test suite via the shared gdata test configuration.
  return conf.build_suite([ResumableUploadTestCase])
# Run the suite with a plain text runner when invoked directly.
if __name__ == '__main__':
  unittest.TextTestRunner().run(suite())
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import gdata.test_config as conf
import gdata.analytics.client
import gdata.apps.emailsettings.client
import gdata.blogger.client
import gdata.spreadsheets.client
import gdata.calendar_resource.client
import gdata.contacts.client
import gdata.docs.client
import gdata.maps.client
import gdata.projecthosting.client
import gdata.sites.client
class ClientSmokeTest(unittest.TestCase):
  """Smoke test: every service client class must pass the shared auth checks."""

  def test_check_auth_client_classes(self):
    # One client class per GData service covered by this smoke test.
    client_classes = (
        gdata.analytics.client.AnalyticsClient,
        gdata.apps.emailsettings.client.EmailSettingsClient,
        gdata.blogger.client.BloggerClient,
        gdata.spreadsheets.client.SpreadsheetsClient,
        gdata.calendar_resource.client.CalendarResourceClient,
        gdata.contacts.client.ContactsClient,
        gdata.docs.client.DocsClient,
        gdata.maps.client.MapsClient,
        gdata.projecthosting.client.ProjectHostingClient,
        gdata.sites.client.SitesClient,
    )
    conf.check_clients_with_auth(self, client_classes)
def suite():
  # Build this module's test suite via the shared gdata test configuration.
  return conf.build_suite([ClientSmokeTest])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'samuel.cyprian@gmail.com (Samuel Cyprian)'
import unittest
from gdata import oauth, test_config
# HTTP method and OAuth protocol version shared by the tests below.
HTTP_METHOD_POST = 'POST'
VERSION = '1.0'
class OauthUtilsTest(unittest.TestCase):
  """Tests for the module-level helper functions in gdata.oauth."""

  def test_build_authenticate_header(self):
    # The realm defaults to the empty string.
    self.assertEqual(oauth.build_authenticate_header(),
                     {'WWW-Authenticate': 'OAuth realm=""'})
    self.assertEqual(oauth.build_authenticate_header('foo'),
                     {'WWW-Authenticate': 'OAuth realm="foo"'})

  def test_escape(self):
    # Characters with special percent-encoding behavior.
    self.assertEqual(oauth.escape('~'), '~')
    self.assertEqual(oauth.escape('/'), '%2F')
    self.assertEqual(oauth.escape('+'), '%2B')
    self.assertEqual(oauth.escape(' '), '%20')
    # Non-ASCII input is UTF-8 encoded before percent-escaping.
    self.assertEqual(oauth.escape('Peter Strömberg'),
                     'Peter%20Str%C3%B6mberg')

  def test_generate_timestamp(self):
    self.assertTrue(oauth.generate_timestamp() > 0)
    # Bug fix: the original used `type(...) is type(0)`; isinstance is the
    # correct type check.
    self.assertTrue(isinstance(oauth.generate_timestamp(), int))

  def test_generate_nonce(self):
    DEFAULT_NONCE_LENGTH = 8
    # Bug fix: the original compared ints with `is`, which only works by
    # accident via CPython small-int caching; compare values with ==.
    self.assertEqual(len(oauth.generate_nonce()), DEFAULT_NONCE_LENGTH)
    self.assertTrue(isinstance(oauth.generate_nonce(), str))
class OAuthConsumerTest(unittest.TestCase):
  """Checks that OAuthConsumer stores its key and secret verbatim."""

  def setUp(self):
    self.key = 'key'
    self.secret = 'secret'
    self.consumer = oauth.OAuthConsumer(self.key, self.secret)

  def test_OAuthConsumer_attr_key(self):
    self.assertEqual(self.key, self.consumer.key)

  def test_OAuthConsumer_attr_secret(self):
    self.assertEqual(self.secret, self.consumer.secret)
class OAuthTokenTest(unittest.TestCase):
  """Tests OAuthToken attribute storage and its URL-encoded serialization."""

  def setUp(self):
    self.key = 'key'
    self.secret = 'secret'
    self.token = oauth.OAuthToken(self.key, self.secret)

  def test_OAuthToken_attr_key(self):
    self.assertEqual(self.key, self.token.key)

  def test_OAuthToken_attr_secret(self):
    self.assertEqual(self.secret, self.token.secret)

  def test_to_string(self):
    """to_string() percent-encodes both key and secret."""
    self.assertEqual(self.token.to_string(),
                     'oauth_token_secret=secret&oauth_token=key')
    special = oauth.OAuthToken('+', '%')
    self.assertEqual(special.to_string(),
                     'oauth_token_secret=%25&oauth_token=%2B')

  def test_from_string(self):
    """from_string() decodes a serialized token, including escapes."""
    parsed = oauth.OAuthToken.from_string(
        'oauth_token_secret=secret&oauth_token=key')
    self.assertEqual(parsed.key, 'key')
    self.assertEqual(parsed.secret, 'secret')
    parsed = oauth.OAuthToken.from_string(
        'oauth_token_secret=%25&oauth_token=%2B')
    self.assertEqual(parsed.key, '+')
    self.assertEqual(parsed.secret, '%')

  def test___str__(self):
    """str() produces the same serialization as to_string()."""
    self.assertEqual(str(self.token),
                     'oauth_token_secret=secret&oauth_token=key')
    special = oauth.OAuthToken('+', '%')
    self.assertEqual(str(special), 'oauth_token_secret=%25&oauth_token=%2B')
class OAuthParameters(object):
  """Names of the standard OAuth 1.0 protocol parameters used by the tests."""
  CONSUMER_KEY = 'oauth_consumer_key'
  TOKEN = 'oauth_token'
  SIGNATURE_METHOD = 'oauth_signature_method'
  SIGNATURE = 'oauth_signature'
  TIMESTAMP = 'oauth_timestamp'
  NONCE = 'oauth_nonce'
  VERSION = 'oauth_version'
  CALLBACK = 'oauth_callback'
  # NOTE: CALLBACK is not part of ALL_PARAMETERS -- presumably because it
  # only appears on token-callback requests; confirm before relying on it.
  ALL_PARAMETERS = (CONSUMER_KEY,
                    TOKEN,
                    SIGNATURE_METHOD,
                    SIGNATURE,
                    TIMESTAMP,
                    NONCE,
                    VERSION)
class OAuthTest(unittest.TestCase):
  """Shared fixture: a consumer/token pair and a target URL for OAuth tests."""
  def setUp(self):
    # Fixed consumer and token credentials reused by all subclasses.
    self.consumer = oauth.OAuthConsumer('a56b5ff0a637ab283d1d8e32ced37a9c',
                                        '9a3248210c84b264b56b98c0b872bc8a')
    self.token = oauth.OAuthToken('5b2cafbf20b11bace53b29e37d8a673d',
                                  '3f71254637df2002d8819458ae4f6c51')
    self.http_url = 'http://dev.alicehub.com/server/api/newsfeed/update/'
    self.http_method = HTTP_METHOD_POST
class OAuthRequestTest(OAuthTest):
  """Tests oauth.OAuthRequest construction, signing, normalization and the
  various serialization formats (header, POST data, URL)."""

  def setUp(self):
    super(OAuthRequestTest, self).setUp()
    self.signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
    self.non_oauth_param_message = 'message'
    self.non_oauth_param_context_id = 'context_id'
    # A fully-populated parameter set with a pre-computed signature plus two
    # non-oauth application parameters.
    self.parameters = {OAuthParameters.CONSUMER_KEY: self.consumer.key,
                       OAuthParameters.TOKEN: self.token.key,
                       OAuthParameters.SIGNATURE_METHOD: 'HMAC-SHA1',
                       OAuthParameters.SIGNATURE:
                           '947ysBZiMn6FGZ11AW06Ioco4mo=',
                       OAuthParameters.TIMESTAMP: '1278573584',
                       OAuthParameters.NONCE: '1770704051',
                       OAuthParameters.VERSION: VERSION,
                       self.non_oauth_param_message: 'hey',
                       self.non_oauth_param_context_id: '',}
    oauth_params_string = """
    oauth_nonce="1770704051",
    oauth_timestamp="1278573584",
    oauth_consumer_key="a56b5ff0a637ab283d1d8e32ced37a9c",
    oauth_signature_method="HMAC-SHA1",
    oauth_version="1.0",
    oauth_token="5b2cafbf20b11bace53b29e37d8a673d",
    oauth_signature="947ysBZiMn6FGZ11AW06Ioco4mo%3D"
    """
    self.oauth_header_with_realm = {'Authorization': """OAuth
    realm="http://example.com", %s """ % oauth_params_string}
    self.oauth_header_without_realm = {'Authorization': 'OAuth %s'
                                       % oauth_params_string}
    self.additional_param = 'foo'
    self.additional_value = 'bar'
    self.oauth_request = oauth.OAuthRequest(self.http_method,
                                            self.http_url,
                                            self.parameters)

  def test_set_parameter(self):
    """set_parameter stores a value retrievable via get_parameter."""
    self.oauth_request.set_parameter(self.additional_param,
                                     self.additional_value)
    self.assertEqual(self.oauth_request.get_parameter(self.additional_param),
                     self.additional_value)

  def test_get_parameter(self):
    """get_parameter raises OAuthError for unknown parameter names."""
    self.assertRaises(oauth.OAuthError,
                      self.oauth_request.get_parameter,
                      self.additional_param)
    self.oauth_request.set_parameter(self.additional_param,
                                     self.additional_value)
    self.assertEqual(self.oauth_request.get_parameter(self.additional_param),
                     self.additional_value)

  def test__get_timestamp_nonce(self):
    self.assertEqual(self.oauth_request._get_timestamp_nonce(),
                     (self.parameters[OAuthParameters.TIMESTAMP],
                      self.parameters[OAuthParameters.NONCE]))

  def test_get_nonoauth_parameters(self):
    """Only parameters without the oauth_ prefix are returned."""
    non_oauth_params = self.oauth_request.get_nonoauth_parameters()
    self.assertTrue(non_oauth_params.has_key(self.non_oauth_param_message))
    self.assertFalse(non_oauth_params.has_key(OAuthParameters.CONSUMER_KEY))

  def test_to_header(self):
    """The Authorization header carries all oauth parameters, with or
    without a realm."""
    realm = 'google'
    header_without_realm = self.oauth_request.to_header()\
                                             .get('Authorization')
    header_with_realm = self.oauth_request.to_header(realm)\
                                          .get('Authorization')
    # Bug fix: str.find returns an index, so the bare truthiness check
    # passed for "not found" (-1) and failed for a match at position 0.
    self.assertTrue(header_with_realm.find(realm) > -1)
    for k in OAuthParameters.ALL_PARAMETERS:
      self.assertTrue(header_without_realm.find(k) > -1)
      self.assertTrue(header_with_realm.find(k) > -1)

  def check_for_params_in_string(self, params, s):
    """Asserts every escaped key and value of params appears in s."""
    for k, v in params.iteritems():
      self.assertTrue(s.find(oauth.escape(k)) > -1)
      self.assertTrue(s.find(oauth.escape(v)) > -1)

  def test_to_postdata(self):
    post_data = self.oauth_request.to_postdata()
    self.check_for_params_in_string(self.parameters, post_data)

  def test_to_url(self):
    """to_url() appends all parameters as a query string."""
    GET_url = self.oauth_request.to_url()
    self.assertTrue(GET_url\
                    .find(self.oauth_request.get_normalized_http_url()) > -1)
    self.assertTrue(GET_url.find('?') > -1)
    self.check_for_params_in_string(self.parameters, GET_url)

  def test_get_normalized_parameters(self):
    """Normalization sorts escaped key/value pairs and drops the signature.

    Note: the library removes oauth_signature from the request's live
    parameter dict as a side effect, which the second assertion pins down.
    """
    _params = self.parameters.copy()
    normalized_params = self.oauth_request.get_normalized_parameters()
    self.assertFalse(normalized_params\
                     .find(OAuthParameters.SIGNATURE + '=') > -1)
    self.assertTrue(self.parameters.get(OAuthParameters.SIGNATURE) is None)
    key_values = [tuple(kv.split('=')) for kv in normalized_params.split('&')]
    del _params[OAuthParameters.SIGNATURE]
    expected_key_values = _params.items()
    expected_key_values.sort()
    for k, v in expected_key_values:
      # Bug fix: the original compared the two index() results with `is`,
      # which relies on CPython small-int caching; use value equality.
      self.assertEqual(
          expected_key_values.index((k, v)),
          key_values.index((oauth.escape(k), oauth.escape(v))))

  def test_get_normalized_http_method(self):
    """The HTTP method is normalized to upper case."""
    lower_case_http_method = HTTP_METHOD_POST.lower()
    self.oauth_request.http_method = lower_case_http_method
    self.assertEqual(self.oauth_request.get_normalized_http_method(),
                     lower_case_http_method.upper())

  def test_get_normalized_http_url(self):
    """Scheme/host are lower-cased; default ports and queries are dropped,
    non-default ports are kept."""
    url1 = 'HTTP://Example.com:80/resource?id=123'
    expected_url1 = "http://example.com/resource"
    self.oauth_request.http_url = url1
    self.assertEqual(self.oauth_request.get_normalized_http_url(),
                     expected_url1)
    url2 = 'HTTPS://Example.com:443/resource?id=123'
    expected_url2 = "https://example.com/resource"
    self.oauth_request.http_url = url2
    self.assertEqual(self.oauth_request.get_normalized_http_url(),
                     expected_url2)
    url3 = 'HTTP://Example.com:8080/resource?id=123'
    expected_url3 = "http://example.com:8080/resource"
    self.oauth_request.http_url = url3
    self.assertEqual(self.oauth_request.get_normalized_http_url(),
                     expected_url3)

  def test_sign_request(self):
    """Re-signing the request reproduces the known-good signature."""
    expected_signature = self.oauth_request.parameters\
                             .get(OAuthParameters.SIGNATURE)
    del self.oauth_request.parameters[OAuthParameters.SIGNATURE]
    self.oauth_request.sign_request(self.signature_method,
                                    self.consumer,
                                    self.token)
    self.assertEqual(self.oauth_request.parameters\
                         .get(OAuthParameters.SIGNATURE), expected_signature)

  def test_build_signature(self):
    expected_signature = self.oauth_request.parameters\
                             .get(OAuthParameters.SIGNATURE)
    self.assertEqual(self.oauth_request.build_signature(self.signature_method,
                                                        self.consumer,
                                                        self.token),
                     expected_signature)

  def test_from_request(self):
    """Parsing an Authorization header plus a POST body reproduces the
    reference request."""
    request = oauth.OAuthRequest.from_request(self.http_method, self.http_url,
                                              self.oauth_header_with_realm,
                                              {},
                                              "message=hey&context_id=")
    self.assertEqual(request.__dict__, self.oauth_request.__dict__)
    self.assertTrue(isinstance(request, oauth.OAuthRequest))

  def test_from_consumer_and_token(self):
    request = oauth.OAuthRequest.from_consumer_and_token(self.consumer,
                                                         self.token,
                                                         self.http_method,
                                                         self.http_url)
    self.assertTrue(isinstance(request, oauth.OAuthRequest))

  def test_from_token_and_callback(self):
    callback = 'http://example.com'
    request = oauth.OAuthRequest.from_token_and_callback(self.token,
                                                         callback,
                                                         self.http_method,
                                                         self.http_url)
    self.assertTrue(isinstance(request, oauth.OAuthRequest))
    self.assertEqual(request.get_parameter(OAuthParameters.CALLBACK), callback)

  def test__split_header(self):
    """_split_header extracts only the oauth_* parameters (realm dropped)."""
    del self.parameters[self.non_oauth_param_message]
    del self.parameters[self.non_oauth_param_context_id]
    self.assertEqual(oauth.OAuthRequest._split_header(self\
        .oauth_header_with_realm['Authorization']), self.parameters)
    self.assertEqual(oauth.OAuthRequest._split_header(self\
        .oauth_header_without_realm['Authorization']), self.parameters)

  def test_split_url_string(self):
    """Query strings are decoded into a dict, including empty values."""
    qs = "a=1&c=hi%20there&empty="
    expected_result = {'a': '1',
                       'c': 'hi there',
                       'empty': ''}
    self.assertEqual(oauth.OAuthRequest._split_url_string(qs), expected_result)
class OAuthServerTest(OAuthTest):
  """End-to-end tests of oauth.OAuthServer's token issuing, authorization
  and request verification, backed by MockOAuthDataStore (defined below)."""
  def setUp(self):
    super(OAuthServerTest, self).setUp()
    self.signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
    self.data_store = MockOAuthDataStore()
    self.user = MockUser('Foo Bar')
    self.request_token_url = "http://example.com/oauth/request_token"
    self.access_token_url = "http://example.com/oauth/access_token"
    self.oauth_server = oauth.OAuthServer(self.data_store,
        {self.signature_method.get_name():self.signature_method})
  def _prepare_request(self, request, token = None):
    # Helper: attach signature method, fresh nonce and timestamp, then sign
    # the request with the fixture consumer (and optional token).
    request.set_parameter(OAuthParameters.SIGNATURE_METHOD,
                          self.signature_method.get_name())
    request.set_parameter(OAuthParameters.NONCE, oauth.generate_nonce())
    request.set_parameter(OAuthParameters.TIMESTAMP,
                          oauth.generate_timestamp())
    request.sign_request(self.signature_method, self.consumer, token)
  def _get_token(self, request):
    # Helper: sign the request and ask the server for a request token.
    self._prepare_request(request)
    return self.oauth_server.fetch_request_token(request)
  def _get_authorized_token(self, request):
    # Helper: fetch a request token and authorize it for the mock user.
    req_token = self._get_token(request)
    return self.oauth_server.authorize_token(req_token, self.user)
  def test_set_data_store(self):
    """set_data_store replaces the server's backing store."""
    self.oauth_server.data_store = None
    self.assertTrue(self.oauth_server.data_store is None)
    self.oauth_server.set_data_store(self.data_store)
    self.assertTrue(self.oauth_server.data_store is not None)
    self.assertTrue(isinstance(self.oauth_server.data_store,
                               oauth.OAuthDataStore))
  def test_get_data_store(self):
    self.assertEqual(self.oauth_server.data_store, self.data_store)
  def test_add_signature_method(self):
    """Added signature methods are registered under their own name."""
    signature_method = oauth.OAuthSignatureMethod_PLAINTEXT()
    self.oauth_server.add_signature_method(signature_method)
    self.assertTrue(isinstance(self.oauth_server.signature_methods\
                                   .get(signature_method.get_name()),
                               oauth.OAuthSignatureMethod_PLAINTEXT))
  def test_fetch_request_token(self):
    """Fetching again with the same consumer returns the same token."""
    initial_request = oauth.OAuthRequest.from_consumer_and_token(
        self.consumer,
        http_method=self.http_method,
        http_url=self.request_token_url
    )
    req_token_1 = self._get_token(initial_request)
    authorization_request = oauth.OAuthRequest.from_consumer_and_token(
        self.consumer,
        req_token_1,
        http_method=self.http_method,
        http_url=self.http_url
    )
    req_token_2 = self._get_token(authorization_request)
    self.assertEqual(req_token_1.key, req_token_2.key)
    self.assertEqual(req_token_1.secret, req_token_2.secret)
  def _get_token_for_authorization(self):
    # Helper: run the full request-token flow and return an authorized token.
    request = oauth.OAuthRequest.from_consumer_and_token(
        self.consumer,
        http_method=self.http_method,
        http_url=self.request_token_url
    )
    request_token = self._get_token(request)
    authorization_request = oauth.OAuthRequest.from_consumer_and_token(
        self.consumer,
        request_token,
        http_method=self.http_method,
        http_url=self.http_url
    )
    return self._get_authorized_token(authorization_request)
  def test_authorize_token(self):
    authorized_token = self._get_token_for_authorization()
    self.assertTrue(authorized_token is not None)
  def _get_access_token_request(self, authorized_token):
    # Helper: build and sign an access-token exchange request.
    access_token_request = oauth.OAuthRequest.from_consumer_and_token(
        self.consumer,
        authorized_token,
        http_method=self.http_method,
        http_url=self.access_token_url
    )
    self._prepare_request(access_token_request, authorized_token)
    return access_token_request
  def test_fetch_access_token(self):
    """An authorized request token can be exchanged exactly once."""
    authorized_token = self._get_token_for_authorization()
    access_token_request = self._get_access_token_request(authorized_token)
    access_token = self.oauth_server.fetch_access_token(access_token_request)
    self.assertTrue(access_token is not None)
    self.assertNotEqual(str(authorized_token), str(access_token))
    # Try to fetch access_token with used request token
    self.assertRaises(oauth.OAuthError, self.oauth_server.fetch_access_token,
                      access_token_request)
  def test_verify_request(self):
    """A request signed with a valid access token verifies and yields back
    the consumer, token and application parameters."""
    authorized_token = self._get_token_for_authorization()
    access_token_request = self._get_access_token_request(authorized_token)
    access_token = self.oauth_server.fetch_access_token(access_token_request)
    param1 = 'p1'
    value1 = 'v1'
    api_request = oauth.OAuthRequest.from_consumer_and_token(
        self.consumer,
        access_token,
        http_method=self.http_method,
        http_url=self.http_url,
        parameters={param1:value1}
    )
    self._prepare_request(api_request, access_token)
    result = self.oauth_server.verify_request(api_request)
    self.assertTrue(result is not None)
    consumer, token, parameters = result
    self.assertEqual(parameters.get(param1), value1)
  def test_get_callback(self):
    """get_callback returns the request's oauth_callback parameter."""
    request = oauth.OAuthRequest.from_consumer_and_token(
        self.consumer,
        None,
        http_method=self.http_method,
        http_url=self.http_url
    )
    self._prepare_request(request)
    cb_url = 'http://example.com/cb'
    request.set_parameter(OAuthParameters.CALLBACK, cb_url)
    self.assertEqual(self.oauth_server.get_callback(request), cb_url)
  def test_build_authenticate_header(self):
    self.assertEqual(oauth.build_authenticate_header(), {'WWW-Authenticate':
                                                         'OAuth realm=""'})
    self.assertEqual(oauth.build_authenticate_header('foo'),
                     {'WWW-Authenticate': 'OAuth realm="foo"'})
class OAuthClientTest(OAuthTest):
  """Tests the OAuthClient accessor methods."""

  def setUp(self):
    super(OAuthClientTest, self).setUp()
    self.oauth_client = oauth.OAuthClient(self.consumer, self.token)

  def test_get_consumer(self):
    returned = self.oauth_client.get_consumer()
    self.assertTrue(isinstance(returned, oauth.OAuthConsumer))
    self.assertEqual(returned.__dict__, self.consumer.__dict__)

  def test_get_token(self):
    returned = self.oauth_client.get_token()
    self.assertTrue(isinstance(returned, oauth.OAuthToken))
    self.assertEqual(returned.__dict__, self.token.__dict__)
# Token types recognized by the mock OAuthDataStore's lookup_token below.
TOKEN_TYPE_REQUEST = 'request'
TOKEN_TYPE_ACCESS = 'access'
class MockOAuthDataStore(oauth.OAuthDataStore):
  """In-memory OAuthDataStore used by the server tests above.

  Tracks a single known consumer, per-type token tables and the most
  recently seen nonce.
  """

  def __init__(self):
    self.consumer = oauth.OAuthConsumer('a56b5ff0a637ab283d1d8e32ced37a9c',
                                        '9a3248210c84b264b56b98c0b872bc8a')
    self.consumer_db = {self.consumer.key: self.consumer}
    # Both token tables map token key -> (token, consumer, authorized_user).
    self.request_token_db = {}
    self.access_token_db = {}
    self.nonce = None

  def lookup_consumer(self, key):
    return self.consumer_db.get(key)

  def lookup_token(self, oauth_consumer, token_type, token_field):
    """Returns the token only if it belongs to the requesting consumer."""
    if token_type == TOKEN_TYPE_REQUEST:
      data = self.request_token_db.get(token_field)
    elif token_type == TOKEN_TYPE_ACCESS:
      data = self.access_token_db.get(token_field)
    else:
      data = None
    if data:
      token, consumer, _user = data
      if consumer.key == oauth_consumer.key:
        return token
    return None

  def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
    # Remembers only the single most recent nonce, which is enough for the
    # back-to-back duplicate detection exercised by these tests.
    is_used = self.nonce == nonce
    self.nonce = nonce
    return is_used

  def fetch_request_token(self, oauth_consumer):
    token = oauth.OAuthToken("5b2cafbf20b11bace53b29e37d8a673dRT",
                             "3f71254637df2002d8819458ae4f6c51RT")
    self.request_token_db[token.key] = (token, oauth_consumer, None)
    return token

  def fetch_access_token(self, oauth_consumer, oauth_token):
    """Exchanges a request token for an access token, consuming it."""
    data = self.request_token_db.get(oauth_token.key)
    if not data:
      return None
    # Request tokens are single-use: remove before issuing the access token.
    del self.request_token_db[oauth_token.key]
    _request_token, consumer, authenticated_user = data
    access_token = oauth.OAuthToken("5b2cafbf20b11bace53b29e37d8a673dAT",
                                    "3f71254637df2002d8819458ae4f6c51AT")
    self.access_token_db[access_token.key] = (access_token,
                                              consumer,
                                              authenticated_user)
    return access_token

  def authorize_request_token(self, oauth_token, user):
    """Marks a not-yet-authorized request token as owned by user."""
    data = self.request_token_db.get(oauth_token.key)
    # Bug fix (idiom): compare against None with `is`, not `==`.
    if data and data[2] is None:
      request_token, consumer, _unused = data
      self.request_token_db[request_token.key] = (request_token,
                                                  consumer,
                                                  user)
      return request_token
    return None
# Minimal stand-in for an authenticated user object.
class MockUser(object):
  """Trivial user record; the server tests only ever read .name."""
  def __init__(self, name):
    self.name = name
def suite():
  # Aggregate all OAuth test cases via the shared test configuration.
  return test_config.build_suite([OauthUtilsTest,
                                  OAuthConsumerTest,
                                  OAuthTokenTest,
                                  OAuthRequestTest,
                                  OAuthServerTest,
                                  OAuthClientTest])
if __name__ == '__main__':
unittest.main() | Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import sys
import gdata.sample_util
class SettingsUtilTest(unittest.TestCase):
  """Exercises preference lookup in gdata.sample_util.SettingsUtil."""

  def setUp(self):
    self.settings = gdata.sample_util.SettingsUtil()

  def test_get_param(self):
    """Missing keys yield None; stored prefs are returned directly."""
    self.assertTrue(self.settings.get_param('missing', ask=False) is None)
    self.settings.prefs['x'] = 'something'
    self.assertEqual(self.settings.get_param('x'), 'something')

  def test_get_param_from_command_line_arg(self):
    """Values may come from --name=value or from --name value argv forms."""
    # --name=value form: found on argv, not cached in prefs.
    self.assertTrue('x' not in self.settings.prefs)
    self.assertTrue(self.settings.get_param('x', ask=False) is None)
    sys.argv.append('--x=something')
    self.assertEqual(self.settings.get_param('x'), 'something')
    self.assertTrue('x' not in self.settings.prefs)
    # --name value form with reuse=True: the value is cached in prefs.
    self.assertTrue('y' not in self.settings.prefs)
    self.assertTrue(self.settings.get_param('y', ask=False) is None)
    sys.argv.append('--y')
    sys.argv.append('other')
    self.assertEqual(self.settings.get_param('y', reuse=True), 'other')
    self.assertEqual(self.settings.prefs['y'], 'other')
def suite():
  # Bug fix: this module never imports gdata.test_config, so the original
  # reference to the bare name `conf` raised NameError when suite() ran.
  import gdata.test_config as conf
  return conf.build_suite([SettingsUtilTest])
# NOTE(review): suite() above is unused here; unittest.main() discovers the
# tests itself when the module is run directly.
if __name__ == '__main__':
  unittest.main()
| Python |
'''
Created on 21.05.2010
'''
__author__ = 'seriyPS (Sergey Prokhorov)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata.base.service
import gdata.service
import atom.service
import gdata.base
import atom
TST_XML="""<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gm='http://base.google.com/ns-metadata/1.0'
xmlns:g='http://base.google.com/ns/1.0'
xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/base/feeds/snippets/3804629571624093811</id>
<published>2010-03-28T06:55:40.000Z</published>
<updated>2010-05-23T06:46:27.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'/>
<title type='text'>Cables to Go Ultimate iPod Companion Kit</title>
<content type='html'>Ramp up your portable music experience and enjoyment with the Cables to Go Ultimate iPod Companion Kit. This bundle includes all the connections and accessories necessary to enjoy your iPod-compatible device on your TV or home stereo. Cables to Go ...</content>
<link rel='alternate' type='text/html' href='http://www.hsn.com/redirect.aspx?pfid=1072528&sz=6&sf=EC0210&ac=GPT&cm_mmc=Shopping%20Engine-_-Froogle-_-Electronics-_-5948880&CAWELAID=491871036'/>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/3804629571624093811'/>
<author>
<name>HSN</name>
</author>
<g:condition type='text'>new</g:condition>
<g:product_type type='text'>Electronics>MP3 Players>iPod Accessories</g:product_type>
<g:image_link type='url'>http://dyn-images.hsn.com/is/image/HomeShoppingNetwork/5948880w?$pd500$</g:image_link>
<g:upc type='text'>757120355120</g:upc>
<g:item_language type='text'>EN</g:item_language>
<g:id type='text'>5948880</g:id>
<g:shipping type='shipping'><g:price type='floatUnit'>7.95 usd</g:price>
<g:country type="text">US</g:country>
</g:shipping>
<g:price type='floatUnit'>64.95 usd</g:price>
<g:target_country type='text'>US</g:target_country>
<g:expiration_date type='dateTime'>2010-06-21T00:00:00Z</g:expiration_date>
<g:brand type='text'>Cables to Go</g:brand>
<g:customer_id type='int'>8717</g:customer_id>
<g:item_type type='text'>Products</g:item_type>
</entry>
"""
class Test(unittest.TestCase):
  """Tests nested item-attribute handling on gdata.base.GBaseItem."""

  def testConstruct(self):
    """Builds an item with a nested image_link/thumb attribute and verifies
    the nesting survives an XML round trip."""
    thumb_url = ('http://base.googlehosted.com/base_media?q=http%3A%2F%2F'
                 'example.com%2FEOS%2F1AEOS01008.jpg')
    item = gdata.base.GBaseItem()
    item.title = atom.Title(text='Olds Cutlass Supreme Oxygen O2 Sensor')
    item.link.append(atom.Link(rel='alternate', link_type='text/html',
                               href='http://www.example.com/123456jsh9'))
    item.item_type = gdata.base.ItemType(text='Products')
    item.AddItemAttribute(name='price', value_type='floatUnit',
                          value='41.94 usd')
    item.AddItemAttribute(name='id', value_type='text',
                          value='1AEOS01008-415676-XXX')
    item.AddItemAttribute(name='quantity', value_type='int', value='5')
    # Nest a 'thumb' sub-attribute inside the 'image_link' attribute.
    attr = item.AddItemAttribute(name='image_link', value_type='url',
                                 value=None)
    attr.AddItemAttribute(name='thumb', value=thumb_url, value_type='url')
    image_attr = item.GetItemAttributes("image_link")[0]
    self.assert_(isinstance(image_attr, gdata.base.ItemAttributeContainer))
    self.assert_(isinstance(image_attr.item_attributes[0],
                            gdata.base.ItemAttributeContainer))
    self.assert_(isinstance(image_attr.item_attributes[0],
                            gdata.base.ItemAttribute))
    self.assert_(image_attr.item_attributes[0].type == 'url')
    self.assert_(image_attr.item_attributes[0].text == thumb_url)
    self.assert_(len(image_attr.item_attributes) == 1)
    # Round trip through the XML serialization.
    new_item = gdata.base.GBaseItemFromString(item.ToString())
    # Bug fix: the original re-read the attribute from `item`, so the
    # parsed `new_item` was never actually inspected.
    image_attr = new_item.GetItemAttributes("image_link")[0]
    self.assert_(isinstance(image_attr.item_attributes[0],
                            gdata.base.ItemAttributeContainer))
    self.assert_(image_attr.item_attributes[0].type == 'url')
    self.assert_(image_attr.item_attributes[0].text == thumb_url)
    self.assert_(len(image_attr.item_attributes) == 1)

  def testFromXML(self):
    """Parses TST_XML and checks the nested g:shipping attribute
    (price + country sub-attributes)."""
    item = gdata.base.GBaseItemFromString(TST_XML)
    attr = item.GetItemAttributes("shipping")[0]
    self.assert_(isinstance(attr, gdata.base.ItemAttributeContainer))
    self.assert_(isinstance(attr.item_attributes[0],
                            gdata.base.ItemAttributeContainer))
    self.assert_(isinstance(attr.item_attributes[0],
                            gdata.base.ItemAttribute))
    self.assert_(attr.item_attributes[0].type == 'floatUnit')
    self.assert_(len(attr.item_attributes) == 2)
if __name__ == "__main__":
unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import unittest
import getpass
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata.base.service
import gdata.service
import atom.service
import gdata.base
import atom
from gdata import test_data
# Login credentials for the live service; populated by the interactive
# prompt in the __main__ block at the bottom of this module.
username = ''
password = ''
class GBaseServiceUnitTest(unittest.TestCase):
  """Live-service tests for gdata.base.service.GBaseService.

  These tests talk to the real Google Base service and expect the
  module-level username/password globals to be populated first.
  """

  def setUp(self):
    # Build a fresh client per test, configured with the module credentials.
    client = gdata.base.service.GBaseService()
    client.email = username
    client.password = password
    client.source = 'BaseClient "Unit" Tests'
    client.api_key = ('ABQIAAAAoLioN3buSs9KqIIq9VmkFxT2yXp_ZAY8_ufC'
                      '3CFXhHIE1NvwkxRK8C1Q8OWhsWA2AIKv-cVKlVrNhQ')
    self.gd_client = client

  def tearDown(self):
    # Nothing to clean up; each test manages its own server-side state.
    pass

  def _Login(self):
    # Shared login step: authenticate and verify no captcha was demanded.
    try:
      self.gd_client.ProgrammaticLogin()
      self.assertTrue(self.gd_client.GetClientLoginToken() is not None)
      self.assertTrue(self.gd_client.captcha_token is None)
      self.assertTrue(self.gd_client.captcha_url is None)
    except gdata.service.CaptchaRequired:
      self.fail('Required Captcha')

  def testProperties(self):
    """Get/set round-trips on the client's credential properties."""
    sample_email = 'Test Email'
    sample_password = 'Passwd'
    sample_key = 'my API key'
    self.gd_client.email = sample_email
    self.assertEqual(self.gd_client.email, sample_email)
    self.gd_client.password = sample_password
    self.assertEqual(self.gd_client.password, sample_password)
    self.gd_client.api_key = sample_key
    self.assertEqual(self.gd_client.api_key, sample_key)
    self.gd_client.api_key = None
    self.assertTrue(self.gd_client.api_key is None)

  def testQuery(self):
    """An unauthenticated snippets query should parse into an atom.Feed."""
    snippet_query = gdata.base.service.BaseQuery(feed='/base/feeds/snippets')
    snippet_query['max-results'] = '25'
    snippet_query.bq = 'digital camera [item type: products]'
    result = self.gd_client.Query(snippet_query.ToUri())
    self.assertTrue(isinstance(result, atom.Feed))
    # A second, credentialed client should also be able to query.
    service = gdata.base.service.GBaseService(username, password)
    query = gdata.base.service.BaseQuery()
    query.feed = '/base/feeds/snippets'
    query.bq = 'digital camera'
    feed = service.Query(query.ToUri())

  def testQueryWithConverter(self):
    """A converter callback should control the parsed result type."""
    snippet_query = gdata.base.service.BaseQuery(feed='/base/feeds/snippets')
    snippet_query['max-results'] = '1'
    snippet_query.bq = 'digital camera [item type: products]'
    result = self.gd_client.Query(
        snippet_query.ToUri(), converter=gdata.base.GBaseSnippetFeedFromString)
    self.assertTrue(isinstance(result, gdata.base.GBaseSnippetFeed))

  def testCorrectReturnTypes(self):
    """Each typed query helper should return its matching feed class."""
    query = gdata.base.service.BaseQuery()
    query.feed = '/base/feeds/snippets'
    query.bq = 'digital camera'
    result = self.gd_client.QuerySnippetsFeed(query.ToUri())
    self.assertTrue(isinstance(result, gdata.base.GBaseSnippetFeed))
    query.feed = '/base/feeds/attributes'
    result = self.gd_client.QueryAttributesFeed(query.ToUri())
    self.assertTrue(isinstance(result, gdata.base.GBaseAttributesFeed))
    query = gdata.base.service.BaseQuery()
    query.feed = '/base/feeds/itemtypes/en_US'
    result = self.gd_client.QueryItemTypesFeed(query.ToUri())
    self.assertTrue(isinstance(result, gdata.base.GBaseItemTypesFeed))
    query = gdata.base.service.BaseQuery()
    query.feed = '/base/feeds/locales'
    result = self.gd_client.QueryLocalesFeed(query.ToUri())
    self.assertTrue(isinstance(result, gdata.base.GBaseLocalesFeed))

  def testInsertItemUpdateItemAndDeleteItem(self):
    """Exercises the full insert / update / delete item lifecycle."""
    self._Login()
    proposed_item = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY)
    result = self.gd_client.InsertItem(proposed_item)
    item_id = result.id.text
    self.assertTrue(result.id.text != None)
    updated_item = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY)
    updated_item.label[0].text = 'Test Item'
    result = self.gd_client.UpdateItem(item_id, updated_item)
    # Updating a bogus item id must raise a RequestError.
    try:
      result = self.gd_client.UpdateItem(item_id + '2', updated_item)
      self.fail()
    except gdata.service.RequestError:
      pass
    result = self.gd_client.DeleteItem(item_id)
    self.assertTrue(result)
    # Deleting again must also fail: the item is already gone.
    try:
      result = self.gd_client.DeleteItem(item_id)
      self.fail()
    except gdata.service.RequestError:
      pass

  def testInsertItemUpdateItemAndDeleteItemWithConverter(self):
    """Same lifecycle as above, parsing responses as plain atom.Entry."""
    self._Login()
    proposed_item = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY)
    result = self.gd_client.InsertItem(proposed_item,
                                       converter=atom.EntryFromString)
    self.assertTrue(isinstance(result, atom.Entry))
    self.assertFalse(isinstance(result, gdata.base.GBaseItem))
    item_id = result.id.text
    self.assertTrue(result.id.text != None)
    updated_item = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY)
    updated_item.label[0].text = 'Test Item'
    result = self.gd_client.UpdateItem(item_id, updated_item,
                                       converter=atom.EntryFromString)
    self.assertTrue(isinstance(result, atom.Entry))
    self.assertFalse(isinstance(result, gdata.base.GBaseItem))
    result = self.gd_client.DeleteItem(item_id)
    self.assertEqual(result, True)

  def testMakeBatchRequests(self):
    """Batch-inserts two items, verifies both, then batch-deletes them."""
    self._Login()
    batch_feed = gdata.base.GBaseItemFeed(atom_id=atom.Id(text='test batch'))
    first_entry = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY)
    first_entry.title.text = 'first batch request item'
    second_entry = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY)
    second_entry.title.text = 'second batch request item'
    batch_feed.AddInsert(first_entry)
    batch_feed.AddInsert(second_entry)
    result_feed = self.gd_client.ExecuteBatch(batch_feed)
    self.assertEqual(result_feed.entry[0].batch_status.code, '201')
    self.assertEqual(result_feed.entry[0].batch_status.reason, 'Created')
    self.assertEqual(result_feed.entry[0].title.text,
                     'first batch request item')
    self.assertEqual(result_feed.entry[0].item_type.text, 'products')
    self.assertEqual(result_feed.entry[1].batch_status.code, '201')
    self.assertEqual(result_feed.entry[1].batch_status.reason, 'Created')
    self.assertEqual(result_feed.entry[1].title.text,
                     'second batch request item')
    # Now delete the newly created items in a second batch request.
    batch_feed = gdata.base.GBaseItemFeed(
        atom_id=atom.Id(text='test deletions'))
    batch_feed.AddDelete(entry=result_feed.entry[0])
    batch_feed.AddDelete(entry=result_feed.entry[1])
    self.assertEqual(batch_feed.entry[0].batch_operation.type,
                     gdata.BATCH_DELETE)
    self.assertEqual(batch_feed.entry[1].batch_operation.type,
                     gdata.BATCH_DELETE)
    result_feed = self.gd_client.ExecuteBatch(batch_feed)
    self.assertEqual(result_feed.entry[0].batch_status.code, '200')
    self.assertEqual(result_feed.entry[0].batch_status.reason, 'Success')
    self.assertEqual(result_feed.entry[0].title.text,
                     'first batch request item')
    self.assertEqual(result_feed.entry[1].batch_status.code, '200')
    self.assertEqual(result_feed.entry[1].batch_status.reason, 'Success')
    self.assertEqual(result_feed.entry[1].title.text,
                     'second batch request item')
# These tests hit the live Google Base service and can mutate account data,
# so warn the operator and collect credentials interactively before running.
if __name__ == '__main__':
  print ('Google Base Tests\nNOTE: Please run these tests only with a test '
      'account. The tests may delete or update your data.')
  username = raw_input('Please enter your username: ')
  password = getpass.getpass()
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.eric@google.com (Eric Bidelman)'
import unittest
from gdata import test_data
import gdata.health
import gdata.health.service
class ProfileEntryTest(unittest.TestCase):
  """Round-trip and CCR accessor tests for gdata.health.ProfileEntry."""

  def setUp(self):
    self.profile_entry = gdata.health.ProfileEntryFromString(
        test_data.HEALTH_PROFILE_ENTRY_DIGEST)

  def _RoundTrip(self):
    # Serialize the parsed entry to XML and parse it back again.
    return gdata.health.ProfileEntryFromString(str(self.profile_entry))

  def testToAndFromStringWithData(self):
    entry = self._RoundTrip()
    self.assertTrue(isinstance(entry, gdata.health.ProfileEntry))
    self.assertTrue(isinstance(entry.ccr, gdata.health.Ccr))
    self.assertEqual(len(entry.ccr.GetMedications()), 3)
    self.assertEqual(len(entry.ccr.GetImmunizations()), 1)
    self.assertEqual(len(entry.ccr.GetAlerts()), 2)
    self.assertEqual(len(entry.ccr.GetResults()), 1)
    self.assertEqual(len(entry.ccr.GetProblems()), 2)
    self.assertEqual(len(entry.ccr.GetProcedures()), 2)

  def testGetResultsTextFromCcr(self):
    entry = self._RoundTrip()
    first_result = entry.ccr.GetResults()[0].FindChildren('Test')[0]
    description = first_result.FindChildren('Description')[0]
    self.assertEqual(description.FindChildren('Text')[0].text,
                     'Acetaldehyde - Blood')

  def testGetMedicationNameFromCcr(self):
    entry = self._RoundTrip()
    product = entry.ccr.GetMedications()[1].FindChildren('Product')[0]
    name_node = product.FindChildren('ProductName')[0].FindChildren('Text')[0]
    self.assertEqual(name_node.text, 'A-Fil')

  def testGetProblemCodeValueFromCcr(self):
    entry = self._RoundTrip()
    description = entry.ccr.GetProblems()[1].FindChildren('Description')[0]
    value_node = description.FindChildren('Code')[0].FindChildren('Value')[0]
    self.assertEqual(value_node.text, '136.9')

  def testGetGetImmunizationActorIdFromCcr(self):
    entry = self._RoundTrip()
    source = entry.ccr.GetImmunizations()[0].FindChildren('Source')[0]
    actor_id = source.FindChildren('Actor')[0].FindChildren('ActorID')[0]
    self.assertEqual(actor_id.text, 'user@gmail.com')

  def testGetGetProceduresNameFromCcr(self):
    entry = self._RoundTrip()
    description = entry.ccr.GetProcedures()[1].FindChildren('Description')[0]
    self.assertEqual(description.FindChildren('Text')[0].text,
                     'Abdominoplasty')

  def testGetAlertsFromCcr(self):
    entry = self._RoundTrip()
    alert_type = entry.ccr.GetAlerts()[0].FindChildren('Type')[0]
    self.assertEqual(alert_type.FindChildren('Text')[0].text, 'Allergy')
class ProfileListEntryTest(unittest.TestCase):
  """Parsing and accessor tests for a profile-list entry."""

  def setUp(self):
    self.entry = gdata.health.ProfileListEntryFromString(
        test_data.HEALTH_PROFILE_LIST_ENTRY)

  def testToAndFromString(self):
    self.assertTrue(isinstance(self.entry, gdata.health.ProfileListEntry))
    self.assertEqual('vndCn5sdfwdEIY', self.entry.GetProfileId())
    self.assertEqual('profile name', self.entry.GetProfileName())
class ProfileFeedTest(unittest.TestCase):
  """Parsing and round-trip tests for a full health profile feed."""

  def setUp(self):
    self.feed = gdata.health.ProfileFeedFromString(
        test_data.HEALTH_PROFILE_FEED)

  def testToAndFromString(self):
    self.assertTrue(len(self.feed.entry) == 15)
    for entry in self.feed.entry:
      self.assertTrue(isinstance(entry, gdata.health.ProfileEntry))
    # Serialize and reparse; every entry should survive the round trip.
    reparsed = gdata.health.ProfileFeedFromString(str(self.feed))
    for entry in reparsed.entry:
      self.assertTrue(isinstance(entry, gdata.health.ProfileEntry))

  def testConvertActualData(self):
    # Every entry in the digest feed should carry a CCR payload.
    for entry in self.feed.entry:
      self.assertTrue(entry.ccr is not None)
class HealthProfileQueryTest(unittest.TestCase):
  """URI-construction tests for HealthProfileQuery."""

  def testHealthQueryToString(self):
    query = gdata.health.service.HealthProfileQuery()
    self.assertEqual('/health/feeds/profile/default', query.ToUri())
    query = gdata.health.service.HealthProfileQuery(feed='feeds/profile')
    self.assertEqual('/health/feeds/profile/default', query.ToUri())
    query = gdata.health.service.HealthProfileQuery(categories=['medication'])
    self.assertEqual('/health/feeds/profile/default/-/medication',
                     query.ToUri())
    query = gdata.health.service.HealthProfileQuery(projection='ui',
                                                    profile_id='12345')
    self.assertEqual('/health/feeds/profile/ui/12345', query.ToUri())
    # Category values are URL-escaped: '|' becomes %7C.
    query = gdata.health.service.HealthProfileQuery()
    query.categories.append('medication|condition')
    self.assertEqual('/health/feeds/profile/default/-/medication%7Ccondition',
                     query.ToUri())

  def testH9QueryToString(self):
    # The H9 sandbox service swaps the /health prefix for /h9.
    query = gdata.health.service.HealthProfileQuery(service='h9')
    self.assertEqual('/h9/feeds/profile/default', query.ToUri())
    query = gdata.health.service.HealthProfileQuery(
        service='h9', feed='feeds/profile', projection='ui',
        profile_id='12345')
    self.assertEqual('/h9/feeds/profile/ui/12345', query.ToUri())

  def testDigestParam(self):
    query = gdata.health.service.HealthProfileQuery(params={'digest': 'true'})
    self.assertEqual('/health/feeds/profile/default?digest=true',
                     query.ToUri())
    query.profile_id = '12345'
    query.projection = 'ui'
    self.assertEqual('/health/feeds/profile/ui/12345?digest=true',
                     query.ToUri())
class HealthProfileListQueryTest(unittest.TestCase):
  """URI-construction tests for HealthProfileListQuery."""

  def testHealthProfileListQueryToString(self):
    # Every constructor variant below should yield the same list URI.
    expected = '/health/feeds/profile/list'
    self.assertEqual(
        gdata.health.service.HealthProfileListQuery().ToUri(), expected)
    self.assertEqual(
        gdata.health.service.HealthProfileListQuery(
            service='health').ToUri(), expected)
    self.assertEqual(
        gdata.health.service.HealthProfileListQuery(
            feed='feeds/profile/list').ToUri(), expected)
    self.assertEqual(
        gdata.health.service.HealthProfileListQuery(
            service='health', feed='feeds/profile/list').ToUri(), expected)

  def testH9ProfileListQueryToString(self):
    expected = '/h9/feeds/profile/list'
    self.assertEqual(
        gdata.health.service.HealthProfileListQuery(service='h9').ToUri(),
        expected)
    self.assertEqual(
        gdata.health.service.HealthProfileListQuery(
            service='h9', feed='feeds/profile/list').ToUri(), expected)
# Run the Google Health data and query tests when executed as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import gdata.test_config as conf
import gdata.data
import gdata.acl.data
import gdata.analytics.data
import gdata.dublincore.data
import gdata.books.data
import gdata.calendar.data
import gdata.geo.data
import gdata.finance.data
import gdata.notebook.data
import gdata.media.data
import gdata.youtube.data
import gdata.webmastertools.data
import gdata.contacts.data
import gdata.opensearch.data
class DataSmokeTest(unittest.TestCase):
  """Smoke test covering every v2 GData data class in one pass.

  conf.check_data_classes exercises each class listed below, catching
  wiring mistakes in the data modules without any live service calls.
  """

  def test_check_all_data_classes(self):
    # One entry per data class across all the gdata.*.data modules
    # imported at the top of this file.  When a new data class is added
    # to any of those modules, it should be appended here as well.
    conf.check_data_classes(self, (
        gdata.data.TotalResults, gdata.data.StartIndex,
        gdata.data.ItemsPerPage, gdata.data.ExtendedProperty,
        gdata.data.GDEntry, gdata.data.GDFeed, gdata.data.BatchId,
        gdata.data.BatchOperation, gdata.data.BatchStatus,
        gdata.data.BatchEntry, gdata.data.BatchInterrupted,
        gdata.data.BatchFeed, gdata.data.EntryLink, gdata.data.FeedLink,
        gdata.data.AdditionalName, gdata.data.Comments, gdata.data.Country,
        gdata.data.Email, gdata.data.FamilyName, gdata.data.Im,
        gdata.data.GivenName, gdata.data.NamePrefix, gdata.data.NameSuffix,
        gdata.data.FullName, gdata.data.Name, gdata.data.OrgDepartment,
        gdata.data.OrgName, gdata.data.OrgSymbol, gdata.data.OrgTitle,
        gdata.data.Organization, gdata.data.When, gdata.data.Who,
        gdata.data.OriginalEvent, gdata.data.PhoneNumber,
        gdata.data.PostalAddress, gdata.data.Rating, gdata.data.Recurrence,
        gdata.data.RecurrenceException, gdata.data.Reminder,
        gdata.data.Agent, gdata.data.HouseName, gdata.data.Street,
        gdata.data.PoBox, gdata.data.Neighborhood, gdata.data.City,
        gdata.data.Subregion, gdata.data.Region, gdata.data.Postcode,
        gdata.data.Country, gdata.data.FormattedAddress,
        gdata.data.StructuredPostalAddress, gdata.data.Where,
        gdata.data.AttendeeType, gdata.data.AttendeeStatus,
        gdata.data.Deleted, gdata.data.Money,
        gdata.acl.data.AclRole, gdata.acl.data.AclScope,
        gdata.acl.data.AclWithKey,
        gdata.acl.data.AclEntry, gdata.acl.data.AclFeed,
        gdata.analytics.data.Dimension,
        gdata.analytics.data.EndDate,
        gdata.analytics.data.Metric,
        gdata.analytics.data.Aggregates,
        gdata.analytics.data.DataEntry,
        gdata.analytics.data.Property,
        gdata.analytics.data.StartDate,
        gdata.analytics.data.TableId,
        gdata.analytics.data.AccountEntry,
        gdata.analytics.data.TableName,
        gdata.analytics.data.DataSource,
        gdata.analytics.data.AccountFeed,
        gdata.analytics.data.DataFeed,
        gdata.dublincore.data.Creator,
        gdata.dublincore.data.Date,
        gdata.dublincore.data.Description,
        gdata.dublincore.data.Format,
        gdata.dublincore.data.Identifier,
        gdata.dublincore.data.Language,
        gdata.dublincore.data.Publisher,
        gdata.dublincore.data.Rights,
        gdata.dublincore.data.Subject,
        gdata.dublincore.data.Title,
        gdata.books.data.CollectionEntry,
        gdata.books.data.CollectionFeed,
        gdata.books.data.Embeddability,
        gdata.books.data.OpenAccess,
        gdata.books.data.Review,
        gdata.books.data.Viewability,
        gdata.books.data.VolumeEntry,
        gdata.books.data.VolumeFeed,
        gdata.calendar.data.AccessLevelProperty,
        gdata.calendar.data.AllowGSync2Property,
        gdata.calendar.data.AllowGSyncProperty,
        gdata.calendar.data.AnyoneCanAddSelfProperty,
        gdata.calendar.data.CalendarAclRole,
        gdata.calendar.data.CalendarCommentEntry,
        gdata.calendar.data.CalendarCommentFeed,
        gdata.calendar.data.CalendarComments,
        gdata.calendar.data.CalendarExtendedProperty,
        gdata.calendar.data.CalendarWhere,
        gdata.calendar.data.ColorProperty,
        gdata.calendar.data.GuestsCanInviteOthersProperty,
        gdata.calendar.data.GuestsCanModifyProperty,
        gdata.calendar.data.GuestsCanSeeGuestsProperty,
        gdata.calendar.data.HiddenProperty,
        gdata.calendar.data.IcalUIDProperty,
        gdata.calendar.data.OverrideNameProperty,
        gdata.calendar.data.PrivateCopyProperty,
        gdata.calendar.data.QuickAddProperty,
        gdata.calendar.data.ResourceProperty,
        gdata.calendar.data.EventWho,
        gdata.calendar.data.SelectedProperty,
        gdata.calendar.data.SendAclNotificationsProperty,
        gdata.calendar.data.CalendarAclEntry,
        gdata.calendar.data.CalendarAclFeed,
        gdata.calendar.data.SendEventNotificationsProperty,
        gdata.calendar.data.SequenceNumberProperty,
        gdata.calendar.data.CalendarRecurrenceExceptionEntry,
        gdata.calendar.data.CalendarRecurrenceException,
        gdata.calendar.data.SettingsProperty,
        gdata.calendar.data.SettingsEntry,
        gdata.calendar.data.CalendarSettingsFeed,
        gdata.calendar.data.SuppressReplyNotificationsProperty,
        gdata.calendar.data.SyncEventProperty,
        gdata.calendar.data.CalendarEventEntry,
        gdata.calendar.data.TimeZoneProperty,
        gdata.calendar.data.TimesCleanedProperty,
        gdata.calendar.data.CalendarEntry,
        gdata.calendar.data.CalendarEventFeed,
        gdata.calendar.data.CalendarFeed,
        gdata.calendar.data.WebContentGadgetPref,
        gdata.calendar.data.WebContent,
        gdata.finance.data.Commission,
        gdata.finance.data.CostBasis,
        gdata.finance.data.DaysGain,
        gdata.finance.data.Gain,
        gdata.finance.data.MarketValue,
        gdata.finance.data.PortfolioData,
        gdata.finance.data.PortfolioEntry,
        gdata.finance.data.PortfolioFeed,
        gdata.finance.data.PositionData,
        gdata.finance.data.Price,
        gdata.finance.data.Symbol,
        gdata.finance.data.PositionEntry,
        gdata.finance.data.PositionFeed,
        gdata.finance.data.TransactionData,
        gdata.finance.data.TransactionEntry,
        gdata.finance.data.TransactionFeed,
        gdata.notebook.data.ComesAfter,
        gdata.notebook.data.NoteEntry,
        gdata.notebook.data.NotebookFeed,
        gdata.notebook.data.NotebookListEntry,
        gdata.notebook.data.NotebookListFeed,
        gdata.youtube.data.ComplaintEntry,
        gdata.youtube.data.ComplaintFeed,
        gdata.youtube.data.RatingEntry,
        gdata.youtube.data.RatingFeed,
        gdata.youtube.data.YouTubeMediaContent,
        gdata.youtube.data.YtAge,
        gdata.youtube.data.YtBooks,
        gdata.youtube.data.YtCompany,
        gdata.youtube.data.YtDescription,
        gdata.youtube.data.YtDuration,
        gdata.youtube.data.YtFirstName,
        gdata.youtube.data.YtGender,
        gdata.youtube.data.YtHobbies,
        gdata.youtube.data.YtHometown,
        gdata.youtube.data.YtLastName,
        gdata.youtube.data.YtLocation,
        gdata.youtube.data.YtMovies,
        gdata.youtube.data.YtMusic,
        gdata.youtube.data.YtNoEmbed,
        gdata.youtube.data.YtOccupation,
        gdata.youtube.data.YtPlaylistId,
        gdata.youtube.data.YtPosition,
        gdata.youtube.data.YtPrivate,
        gdata.youtube.data.YtQueryString,
        gdata.youtube.data.YtRacy,
        gdata.youtube.data.YtRecorded,
        gdata.youtube.data.YtRelationship,
        gdata.youtube.data.YtSchool,
        gdata.youtube.data.YtStatistics,
        gdata.youtube.data.YtStatus,
        gdata.youtube.data.YtUserProfileStatistics,
        gdata.youtube.data.YtUsername,
        gdata.youtube.data.FriendEntry,
        gdata.youtube.data.FriendFeed,
        gdata.youtube.data.YtVideoStatistics,
        gdata.youtube.data.ChannelEntry,
        gdata.youtube.data.ChannelFeed,
        gdata.youtube.data.FavoriteEntry,
        gdata.youtube.data.FavoriteFeed,
        gdata.youtube.data.YouTubeMediaCredit,
        gdata.youtube.data.YouTubeMediaRating,
        gdata.youtube.data.YtAboutMe,
        gdata.youtube.data.UserProfileEntry,
        gdata.youtube.data.UserProfileFeed,
        gdata.youtube.data.YtAspectRatio,
        gdata.youtube.data.YtBasePublicationState,
        gdata.youtube.data.YtPublicationState,
        gdata.youtube.data.YouTubeAppControl,
        gdata.youtube.data.YtCaptionPublicationState,
        gdata.youtube.data.YouTubeCaptionAppControl,
        gdata.youtube.data.CaptionTrackEntry,
        gdata.youtube.data.CaptionTrackFeed,
        gdata.youtube.data.YtCountHint,
        gdata.youtube.data.PlaylistLinkEntry,
        gdata.youtube.data.PlaylistLinkFeed,
        gdata.youtube.data.YtModerationStatus,
        gdata.youtube.data.YtPlaylistTitle,
        gdata.youtube.data.SubscriptionEntry,
        gdata.youtube.data.SubscriptionFeed,
        gdata.youtube.data.YtSpam,
        gdata.youtube.data.CommentEntry,
        gdata.youtube.data.CommentFeed,
        gdata.youtube.data.YtUploaded,
        gdata.youtube.data.YtVideoId,
        gdata.youtube.data.YouTubeMediaGroup,
        gdata.youtube.data.VideoEntryBase,
        gdata.youtube.data.PlaylistEntry,
        gdata.youtube.data.PlaylistFeed,
        gdata.youtube.data.VideoEntry,
        gdata.youtube.data.VideoFeed,
        gdata.youtube.data.VideoMessageEntry,
        gdata.youtube.data.VideoMessageFeed,
        gdata.youtube.data.UserEventEntry,
        gdata.youtube.data.UserEventFeed,
        gdata.youtube.data.VideoModerationEntry,
        gdata.youtube.data.VideoModerationFeed,
        gdata.media.data.MediaCategory,
        gdata.media.data.MediaCopyright,
        gdata.media.data.MediaCredit,
        gdata.media.data.MediaDescription,
        gdata.media.data.MediaHash,
        gdata.media.data.MediaKeywords,
        gdata.media.data.MediaPlayer,
        gdata.media.data.MediaRating,
        gdata.media.data.MediaRestriction,
        gdata.media.data.MediaText,
        gdata.media.data.MediaThumbnail,
        gdata.media.data.MediaTitle,
        gdata.media.data.MediaContent,
        gdata.media.data.MediaGroup,
        gdata.webmastertools.data.CrawlIssueCrawlType,
        gdata.webmastertools.data.CrawlIssueDateDetected,
        gdata.webmastertools.data.CrawlIssueDetail,
        gdata.webmastertools.data.CrawlIssueIssueType,
        gdata.webmastertools.data.CrawlIssueLinkedFromUrl,
        gdata.webmastertools.data.CrawlIssueUrl,
        gdata.webmastertools.data.CrawlIssueEntry,
        gdata.webmastertools.data.CrawlIssuesFeed,
        gdata.webmastertools.data.Indexed,
        gdata.webmastertools.data.Keyword,
        gdata.webmastertools.data.KeywordEntry,
        gdata.webmastertools.data.KeywordsFeed,
        gdata.webmastertools.data.LastCrawled,
        gdata.webmastertools.data.MessageBody,
        gdata.webmastertools.data.MessageDate,
        gdata.webmastertools.data.MessageLanguage,
        gdata.webmastertools.data.MessageRead,
        gdata.webmastertools.data.MessageSubject,
        gdata.webmastertools.data.SiteId,
        gdata.webmastertools.data.MessageEntry,
        gdata.webmastertools.data.MessagesFeed,
        gdata.webmastertools.data.SitemapEntry,
        gdata.webmastertools.data.SitemapMobileMarkupLanguage,
        gdata.webmastertools.data.SitemapMobile,
        gdata.webmastertools.data.SitemapNewsPublicationLabel,
        gdata.webmastertools.data.SitemapNews,
        gdata.webmastertools.data.SitemapType,
        gdata.webmastertools.data.SitemapUrlCount,
        gdata.webmastertools.data.SitemapsFeed,
        gdata.webmastertools.data.VerificationMethod,
        gdata.webmastertools.data.Verified,
        gdata.webmastertools.data.SiteEntry,
        gdata.webmastertools.data.SitesFeed,
        gdata.contacts.data.BillingInformation,
        gdata.contacts.data.Birthday,
        gdata.contacts.data.CalendarLink,
        gdata.contacts.data.DirectoryServer,
        gdata.contacts.data.Event,
        gdata.contacts.data.ExternalId,
        gdata.contacts.data.Gender,
        gdata.contacts.data.Hobby,
        gdata.contacts.data.Initials,
        gdata.contacts.data.Jot,
        gdata.contacts.data.Language,
        gdata.contacts.data.MaidenName,
        gdata.contacts.data.Mileage,
        gdata.contacts.data.NickName,
        gdata.contacts.data.Occupation,
        gdata.contacts.data.Priority,
        gdata.contacts.data.Relation,
        gdata.contacts.data.Sensitivity,
        gdata.contacts.data.UserDefinedField,
        gdata.contacts.data.Website,
        gdata.contacts.data.HouseName,
        gdata.contacts.data.Street,
        gdata.contacts.data.POBox,
        gdata.contacts.data.Neighborhood,
        gdata.contacts.data.City,
        gdata.contacts.data.SubRegion,
        gdata.contacts.data.Region,
        gdata.contacts.data.PostalCode,
        gdata.contacts.data.Country,
        gdata.contacts.data.PersonEntry,
        gdata.contacts.data.Deleted,
        gdata.contacts.data.GroupMembershipInfo,
        gdata.contacts.data.ContactEntry,
        gdata.contacts.data.ContactsFeed,
        gdata.contacts.data.SystemGroup,
        gdata.contacts.data.GroupEntry,
        gdata.contacts.data.GroupsFeed,
        gdata.contacts.data.ProfileEntry,
        gdata.contacts.data.ProfilesFeed,
        gdata.opensearch.data.ItemsPerPage,
        gdata.opensearch.data.StartIndex,
        gdata.opensearch.data.TotalResults,
        ))
def suite():
  """Return a test suite covering every test case in this module."""
  case_classes = [DataSmokeTest]
  return conf.build_suite(case_classes)
# Run the data-class smoke test when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
import unittest
import gdata.codesearch
import gdata.test_data
class CodeSearchDataTest(unittest.TestCase):
  """Verifies XML-to-object conversion for a code search feed."""

  def setUp(self):
    self.feed = gdata.codesearch.CodesearchFeedFromString(
        gdata.test_data.CODE_SEARCH_FEED)

  def testCorrectXmlConversion(self):
    self.assertTrue(
        self.feed.id.text ==
        'http://www.google.com/codesearch/feeds/search?q=malloc')
    self.assertTrue(len(self.feed.entry) == 10)
    # The one entry with this id carries known match/file/package data.
    target_id = ('http://www.google.com/codesearch?hl=en&q=+ma'
        'lloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&sa=N&ct=rx&cd=1'
        '&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoco'
        'nf-2.60/autoconf.html-002&cs_p=http://www.gnu.org&cs_f=softwa'
        're/autoconf/manual/autoconf-2.60/autoconf.html-002#first')
    for entry in self.feed.entry:
      if entry.id.text != target_id:
        continue
      self.assertTrue(len(entry.match) == 4)
      for match in entry.match:
        if match.line_number == '4':
          self.assertTrue(match.type == 'text/html')
      self.assertTrue(entry.file.name ==
          'software/autoconf/manual/autoconf-2.60/autoconf.html-002')
      self.assertTrue(entry.package.name == 'http://www.gnu.org')
      self.assertTrue(entry.package.uri == 'http://www.gnu.org')
# Run the code search data tests when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright Google 2007-2008, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import getpass
import gdata.spreadsheet.text_db
import gdata.spreadsheet.service
__author__ = 'api.jscudder (Jeffrey Scudder)'
# Credentials for the live spreadsheets service; must be filled in before
# running these tests (they create and delete real spreadsheets).
username = ''
password = ''
class FactoryTest(unittest.TestCase):
  """Tests DatabaseClient credential handling and database lifecycle."""

  def setUp(self):
    self.client = gdata.spreadsheet.text_db.DatabaseClient()

  def testBadCredentials(self):
    # Bogus credentials must raise the text_db module's Error.
    try:
      self.client.SetCredentials('foo', 'bar')
      self.fail()
    except gdata.spreadsheet.text_db.Error:
      pass

  def testCreateGetAndDeleteDatabase(self):
    title = 'google_spreadsheets_db unit test 1'
    self.client.SetCredentials(username, password)
    database = self.client.CreateDatabase(title)
    # Give the server a moment, then look the database up by name.
    time.sleep(5)
    found = self.client.GetDatabases(name=title)
    self.assertTrue(len(found) >= 1)
    if len(found) >= 1:
      self.assertTrue(found[0].entry.title.text == title)
    # Look the database up again using its spreadsheet key.
    found = self.client.GetDatabases(spreadsheet_key=database.spreadsheet_key)
    self.assertTrue(len(found) == 1)
    self.assertTrue(found[0].entry.title.text == title)
    # Remove the test spreadsheet.
    time.sleep(10)
    database.Delete()
class DatabaseTest(unittest.TestCase):
  """Tests table creation and lookup within a scratch database."""

  def setUp(self):
    client = gdata.spreadsheet.text_db.DatabaseClient(username, password)
    self.db = client.CreateDatabase('google_spreadsheets_db unit test 2')

  def tearDown(self):
    # Pause to avoid rate limiting, then remove the scratch database.
    time.sleep(10)
    self.db.Delete()

  def testCreateGetAndDeleteTable(self):
    table = self.db.CreateTable('test1', ['1', '2', '3'])
    # Look up the new table using its worksheet id.
    table_list = self.db.GetTables(worksheet_id=table.worksheet_id)
    self.assert_(len(table_list) == 1)
    # Bug fix: the original called self.assert_(x, 'test1'), which treats
    # 'test1' as the failure *message* and never compares the title.
    self.assertEqual(table_list[0].entry.title.text, 'test1')
    # Look up the table using its name.
    table_list = self.db.GetTables(name='test1')
    self.assert_(len(table_list) == 1)
    self.assertEqual(table_list[0].entry.title.text, 'test1')
    # Delete the table.
    table.Delete()
class TableTest(unittest.TestCase):
def setUp(self):
client = gdata.spreadsheet.text_db.DatabaseClient(username, password)
self.db = client.CreateDatabase('google_spreadsheets_db unit test 3')
self.table = self.db.CreateTable('test1', ['a','b','c_d','a', 'd:e'])
  def tearDown(self):
    # Pause to avoid rate limiting, then delete the scratch database.
    time.sleep(10)
    self.db.Delete()
def testCreateGetAndDeleteRecord(self):
new_record = self.table.AddRecord({'a':'test1', 'b':'test2', 'cd':'test3', 'a_2':'test4', 'de':'test5'})
# Test getting record by line number.
record = self.table.GetRecord(row_number=1)
self.assert_(record is not None)
self.assert_(record.content['a'] == 'test1')
self.assert_(record.content['b'] == 'test2')
self.assert_(record.content['cd'] == 'test3')
self.assert_(record.content['a_2'] == 'test4')
# Test getting record using the id.
record_list = self.table.GetRecord(row_id=new_record.row_id)
self.assert_(record is not None)
# Delete the record.
time.sleep(10)
new_record.Delete()
def testPushPullSyncing(self):
# Get two copies of the same row.
first_copy = self.table.AddRecord({'a':'1', 'b':'2', 'cd':'3', 'a_2':'4', 'de':'5'})
second_copy = self.table.GetRecord(first_copy.row_id)
# Make changes in the first copy
first_copy.content['a'] = '7'
first_copy.content['b'] = '9'
# Try to get the changes before they've been committed
second_copy.Pull()
self.assert_(second_copy.content['a'] == '1')
self.assert_(second_copy.content['b'] == '2')
# Commit the changes, the content should now be different
first_copy.Push()
second_copy.Pull()
self.assert_(second_copy.content['a'] == '7')
self.assert_(second_copy.content['b'] == '9')
# Make changes to the second copy, push, then try to push changes from
# the first copy.
first_copy.content['a'] = '10'
second_copy.content['a'] = '15'
first_copy.Push()
try:
second_copy.Push()
# The second update should raise and exception due to a 409 conflict.
self.fail()
except gdata.spreadsheet.service.RequestError:
pass
except Exception, error:
#TODO: Why won't the except RequestError catch this?
pass
def testFindRecords(self):
# Add lots of test records:
self.table.AddRecord({'a':'1', 'b':'2', 'cd':'3', 'a_2':'4', 'de':'5'})
self.table.AddRecord({'a':'hi', 'b':'2', 'cd':'20', 'a_2':'4', 'de':'5'})
self.table.AddRecord({'a':'2', 'b':'2', 'cd':'3'})
self.table.AddRecord({'a':'2', 'b':'2', 'cd':'15', 'de':'7'})
self.table.AddRecord({'a':'hi hi hi', 'b':'2', 'cd':'15', 'de':'7'})
self.table.AddRecord({'a':'"5"', 'b':'5', 'cd':'15', 'de':'7'})
self.table.AddRecord({'a':'5', 'b':'5', 'cd':'15', 'de':'7'})
time.sleep(10)
matches = self.table.FindRecords('a == 1')
self.assert_(len(matches) == 1)
self.assert_(matches[0].content['a'] == '1')
self.assert_(matches[0].content['b'] == '2')
matches = self.table.FindRecords('a > 1 && cd < 20')
self.assert_(len(matches) == 4)
matches = self.table.FindRecords('cd < de')
self.assert_(len(matches) == 7)
matches = self.table.FindRecords('a == b')
self.assert_(len(matches) == 0)
matches = self.table.FindRecords('a == 5')
self.assert_(len(matches) == 1)
def testIterateResultSet(self):
# Populate the table with test data.
self.table.AddRecord({'a':'1', 'b':'2', 'cd':'3', 'a_2':'4', 'de':'5'})
self.table.AddRecord({'a':'hi', 'b':'2', 'cd':'20', 'a_2':'4', 'de':'5'})
self.table.AddRecord({'a':'2', 'b':'2', 'cd':'3'})
self.table.AddRecord({'a':'2', 'b':'2', 'cd':'15', 'de':'7'})
self.table.AddRecord({'a':'hi hi hi', 'b':'2', 'cd':'15', 'de':'7'})
self.table.AddRecord({'a':'"5"', 'b':'5', 'cd':'15', 'de':'7'})
self.table.AddRecord({'a':'5', 'b':'5', 'cd':'15', 'de':'7'})
# Get the first two rows.
records = self.table.GetRecords(1, 2)
self.assert_(len(records) == 2)
self.assert_(records[0].content['a'] == '1')
self.assert_(records[1].content['a'] == 'hi')
# Then get the next two rows.
next_records = records.GetNext()
self.assert_(len(next_records) == 2)
self.assert_(next_records[0].content['a'] == '2')
self.assert_(next_records[0].content['cd'] == '3')
self.assert_(next_records[1].content['cd'] == '15')
self.assert_(next_records[1].content['de'] == '7')
def testLookupFieldsOnPreexistingTable(self):
existing_table = self.db.GetTables(name='test1')[0]
existing_table.LookupFields()
self.assertEquals(existing_table.fields, ['a', 'b', 'cd', 'a_2', 'de'])
if __name__ == '__main__':
  # Prompt only for credentials that were not already supplied.
  username = username or raw_input('Spreadsheets API | Text DB Tests\n'
                                   'Please enter your username: ')
  password = password or getpass.getpass()
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata.spreadsheet.service
import gdata.service
import atom.service
import gdata.spreadsheet
import atom
import getpass
# Credentials and sheet identifiers; filled in interactively by the
# __main__ block below before the live-service tests run.
username = ''
password = ''
ss_key = ''  # spreadsheet key
ws_key = ''  # worksheet key (usually 'od6' — see the __main__ prompt)
class DocumentQueryTest(unittest.TestCase):
  """Offline checks of DocumentQuery parameter storage and URI output."""

  def setUp(self):
    self.query = gdata.spreadsheet.service.DocumentQuery()

  def testTitle(self):
    # The query behaves like a dict; parameters round-trip into the URI.
    q = self.query
    q['title'] = 'my title'
    self.assertEquals('my title', q['title'])
    self.assertEquals('?title=my+title', q.ToUri())

  def testTitleExact(self):
    q = self.query
    q['title-exact'] = 'true'
    self.assertEquals('true', q['title-exact'])
    self.assertEquals('?title-exact=true', q.ToUri())
class CellQueryTest(unittest.TestCase):
  """Offline checks of CellQuery parameters and the URIs they produce."""

  def setUp(self):
    self.query = gdata.spreadsheet.service.CellQuery()

  def _store_and_check(self, param, value, expected_uri):
    # Set a single parameter, then verify both the round-trip read and
    # the URI the query renders.
    self.query[param] = value
    self.assertEquals(value, self.query[param])
    self.assertEquals(expected_uri, self.query.ToUri())

  def testMinRow(self):
    self._store_and_check('min-row', '1', '?min-row=1')

  def testMaxRow(self):
    self._store_and_check('max-row', '100', '?max-row=100')

  def testMinCol(self):
    self._store_and_check('min-col', '2', '?min-col=2')

  def testMaxCol(self):
    self._store_and_check('max-col', '20', '?max-col=20')

  def testRange(self):
    # ':' must be percent-encoded in the URI.
    self._store_and_check('range', 'A1:B4', '?range=A1%3AB4')

  def testReturnEmpty(self):
    self._store_and_check('return-empty', 'false', '?return-empty=false')
class ListQueryTest(unittest.TestCase):
  """Offline checks of ListQuery parameters and their URI encoding."""

  def setUp(self):
    self.query = gdata.spreadsheet.service.ListQuery()

  def _store_and_check(self, param, value, expected_uri):
    # Set one parameter, then verify the round-trip and the encoded URI.
    self.query[param] = value
    self.assertEquals(value, self.query[param])
    self.assertEquals(expected_uri, self.query.ToUri())

  def testSpreadsheetQuery(self):
    # '=' and '&' inside the structured query must be percent-encoded.
    self._store_and_check('sq', 'first=john&last=smith',
                          '?sq=first%3Djohn%26last%3Dsmith')

  def testOrderByQuery(self):
    self._store_and_check('orderby', 'column:first',
                          '?orderby=column%3Afirst')

  def testReverseQuery(self):
    self._store_and_check('reverse', 'true', '?reverse=true')
class SpreadsheetsServiceTest(unittest.TestCase):
  """Live tests for SpreadsheetsService feed retrieval and CRUD.

  Uses the module-level username/password/ss_key/ws_key collected by the
  __main__ block; every test talks to the real Spreadsheets API and the
  row/cell tests assume the sheet layout described in __main__.
  """

  def setUp(self):
    # Spreadsheet and worksheet keys entered by the user at startup.
    self.key = ss_key
    self.worksheet = ws_key
    self.gd_client = gdata.spreadsheet.service.SpreadsheetsService()
    self.gd_client.email = username
    self.gd_client.password = password
    self.gd_client.source = 'SpreadsheetsClient "Unit" Tests'
    self.gd_client.ProgrammaticLogin()

  def testGetSpreadsheetsFeed(self):
    # With a key supplied, the call yields a single spreadsheet entry.
    entry = self.gd_client.GetSpreadsheetsFeed(self.key)
    self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsSpreadsheet))

  def testGetWorksheetsFeed(self):
    feed = self.gd_client.GetWorksheetsFeed(self.key)
    self.assert_(isinstance(feed, gdata.spreadsheet.SpreadsheetsWorksheetsFeed))
    # Adding the worksheet id narrows the result to one worksheet entry.
    entry = self.gd_client.GetWorksheetsFeed(self.key, self.worksheet)
    self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsWorksheet))

  def testGetCellsFeed(self):
    feed = self.gd_client.GetCellsFeed(self.key)
    self.assert_(isinstance(feed, gdata.spreadsheet.SpreadsheetsCellsFeed))
    # A specific cell address returns a single cell entry.
    entry = self.gd_client.GetCellsFeed(self.key, cell='R5C1')
    self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsCell))

  def testGetListFeed(self):
    feed = self.gd_client.GetListFeed(self.key)
    self.assert_(isinstance(feed, gdata.spreadsheet.SpreadsheetsListFeed))
    # NOTE(review): 'cpzh4' looks like a fixed row id in the test sheet —
    # verify it exists in the target spreadsheet.
    entry = self.gd_client.GetListFeed(self.key, row_id='cpzh4')
    self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsList))

  def testUpdateCell(self):
    # Clear the cell, then write a new value into the same cell.
    self.gd_client.UpdateCell(row='5', col='1', inputValue='', key=self.key)
    self.gd_client.UpdateCell(row='5', col='1', inputValue='newer data',
        key=self.key)

  def testBatchUpdateCell(self):
    cell_feed = self.gd_client.GetCellsFeed(key=self.key)
    edit_cell = cell_feed.entry[0]
    # Expected original content of the first cell (see __main__ sheet data).
    old_cell_value = 'a1'
    # Create a batch request to change the contents of a cell.
    batch_feed = gdata.spreadsheet.SpreadsheetsCellsFeed()
    edit_cell.cell.inputValue = 'New Value'
    batch_feed.AddUpdate(edit_cell)
    result = self.gd_client.ExecuteBatch(batch_feed,
        url=cell_feed.GetBatchLink().href)
    self.assertEquals(len(result.entry), 1)
    self.assertEquals(result.entry[0].cell.inputValue, 'New Value')
    # Make a second batch request to change the cell's value back.
    edit_cell = result.entry[0]
    edit_cell.cell.inputValue = old_cell_value
    batch_feed = gdata.spreadsheet.SpreadsheetsCellsFeed()
    batch_feed.AddUpdate(edit_cell)
    restored = self.gd_client.ExecuteBatch(batch_feed,
        url=cell_feed.GetBatchLink().href)
    self.assertEquals(len(restored.entry), 1)
    self.assertEquals(restored.entry[0].cell.inputValue, old_cell_value)

  def testInsertUpdateRow(self):
    # Insert a row, update one column in place, then delete the row.
    entry = self.gd_client.InsertRow({'a1':'new', 'b1':'row', 'c1':'was',
        'd1':'here'}, self.key)
    entry = self.gd_client.UpdateRow(entry, {'a1':'newer',
        'b1':entry.custom['b1'].text, 'c1':entry.custom['c1'].text,
        'd1':entry.custom['d1'].text})
    self.gd_client.DeleteRow(entry)

  def testWorksheetCRUD(self):
    # Add a new worksheet.
    new_worksheet = self.gd_client.AddWorksheet('worksheet_title_test_12', '2',
        3, self.key)
    self.assertEquals(new_worksheet.col_count.text, '3')
    self.assertEquals(new_worksheet.row_count.text, '2')
    self.assertEquals(new_worksheet.title.text, 'worksheet_title_test_12')
    # Change the dimensions and title of the new worksheet.
    new_worksheet.col_count.text = '1'
    new_worksheet.title.text = 'edited worksheet test12'
    edited_worksheet = self.gd_client.UpdateWorksheet(new_worksheet)
    self.assertEquals(edited_worksheet.col_count.text, '1')
    self.assertEquals(edited_worksheet.row_count.text, '2')
    self.assertEquals(edited_worksheet.title.text, 'edited worksheet test12')
    # Delete the new worksheet.
    result = self.gd_client.DeleteWorksheet(edited_worksheet)
    self.assertEquals(result, True)
if __name__ == '__main__':
  # These tests hit the live Spreadsheets API and mutate sheet contents,
  # so warn the user and collect credentials and keys interactively
  # before handing control to the unittest runner.
  print ('Spreadsheet Tests\nNOTE: Please run these tests only with a test '
         'account. The tests may delete or update your data.')
  print ('These tests must be run on a sheet with this data:\n'
         'a1,b1,c1,d1\n'
         '1,2,3,4')
  username = raw_input('Please enter your username: ')
  password = getpass.getpass()
  ss_key = raw_input('Please enter your spreadsheet key: ')
  ws_key = raw_input('Please enter your worksheet key (usually od6): ')
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import unittest
import getpass
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata.service
import gdata
import gdata.auth
import atom
import atom.service
import atom.token_store
import gdata.base
import os.path
from gdata import test_data
import atom.mock_http
import atom.mock_http_core
# Account credentials; collected at runtime by the __main__ block below.
username = ''
password = ''
# Image used by the media upload tests; the path is relative to the
# test's working directory, so run from the directory above testimage.jpg.
test_image_location = '../testimage.jpg'
test_image_name = 'testimage.jpg'
class GDataServiceMediaUnitTest(unittest.TestCase):
  """Live tests for GDataService media upload/update/delete (Picasa Web).

  setUp logs in and creates a scratch album; tearDown deletes it again.
  Requires network access, valid credentials, and ../testimage.jpg.
  """

  def setUp(self):
    self.gd_client = gdata.service.GDataService()
    self.gd_client.email = username
    self.gd_client.password = password
    self.gd_client.service = 'lh2'
    self.gd_client.source = 'GDataService Media "Unit" Tests'
    try:
      self.gd_client.ProgrammaticLogin()
    except gdata.service.CaptchaRequired:
      self.fail('Required Captcha')
    except gdata.service.BadAuthentication:
      self.fail('Bad Authentication')
    except gdata.service.Error:
      self.fail('Login Error')
    # create a test album
    gd_entry = gdata.GDataEntry()
    gd_entry.title = atom.Title(text='GData Test Album')
    gd_entry.category.append(atom.Category(
        scheme='http://schemas.google.com/g/2005#kind',
        term='http://schemas.google.com/photos/2007#album'))
    self.album_entry = self.gd_client.Post(gd_entry,
        'http://picasaweb.google.com/data/feed/api/user/' + username)

  def tearDown(self):
    # Re-fetch the album so the edit link used for deletion is current.
    album_entry = self.gd_client.Get(self.album_entry.id.text)
    self.gd_client.Delete(album_entry.GetEditLink().href)

  def testSourceGeneratesUserAgentHeader(self):
    self.gd_client.source = 'GoogleInc-ServiceUnitTest-1'
    self.assert_(self.gd_client.additional_headers['User-Agent'].startswith(
        'GoogleInc-ServiceUnitTest-1 GData-Python'))

  def testMedia1(self):
    # Create media-only
    ms = gdata.MediaSource()
    ms.setFile(test_image_location, 'image/jpeg')
    media_entry = self.gd_client.Post(None,
        self.album_entry.GetFeedLink().href, media_source = ms)
    self.assert_(media_entry is not None)
    self.assert_(isinstance(media_entry, gdata.GDataEntry))
    self.assert_(media_entry.IsMedia())
    # Update media & metadata
    ms = gdata.MediaSource()
    ms.setFile(test_image_location, 'image/jpeg')
    media_entry.summary = atom.Summary(text='Test Image')
    media_entry2 = self.gd_client.Put(media_entry,
        media_entry.GetEditLink().href, media_source = ms)
    self.assert_(media_entry2 is not None)
    self.assert_(isinstance(media_entry2, gdata.GDataEntry))
    self.assert_(media_entry2.IsMedia())
    self.assert_(media_entry2.summary.text == 'Test Image')
    # Read media binary
    imageSource = self.gd_client.GetMedia(media_entry2.GetMediaURL())
    self.assert_(isinstance(imageSource, gdata.MediaSource))
    self.assert_(imageSource.content_type == 'image/jpeg')
    self.assert_(imageSource.content_length)
    imageData = imageSource.file_handle.read()
    self.assert_(imageData)
    # Delete entry
    response = self.gd_client.Delete(media_entry2.GetEditLink().href)
    self.assert_(response)

  def testMedia2(self):
    # Create media & metadata
    ms = gdata.MediaSource()
    ms.setFile(test_image_location, 'image/jpeg')
    new_media_entry = gdata.GDataEntry()
    new_media_entry.title = atom.Title(text='testimage1.jpg')
    new_media_entry.summary = atom.Summary(text='Test Image')
    new_media_entry.category.append(atom.Category(scheme =
        'http://schemas.google.com/g/2005#kind', term =
        'http://schemas.google.com/photos/2007#photo'))
    media_entry = self.gd_client.Post(new_media_entry,
        self.album_entry.GetFeedLink().href, media_source = ms)
    self.assert_(media_entry is not None)
    self.assert_(isinstance(media_entry, gdata.GDataEntry))
    self.assert_(media_entry.IsMedia())
    self.assert_(media_entry.summary.text == 'Test Image')
    # Update media only
    ms = gdata.MediaSource()
    ms.setFile(test_image_location, 'image/jpeg')
    media_entry = self.gd_client.Put(None, media_entry.GetEditMediaLink().href,
        media_source = ms)
    self.assert_(media_entry is not None)
    self.assert_(isinstance(media_entry, gdata.GDataEntry))
    self.assert_(media_entry.IsMedia())
    # Delete entry
    response = self.gd_client.Delete(media_entry.GetEditLink().href)
    self.assert_(response)

  def testMediaConstructorDefaults(self):
    ms = gdata.MediaSource()
    ms.setFile(test_image_location, 'image/jpeg')
    self.assert_(ms is not None)
    self.assert_(isinstance(ms, gdata.MediaSource))
    self.assertEquals(ms.file_name, test_image_name)
    self.assertEquals(ms.content_type, 'image/jpeg')

  def testMediaConstructorWithFilePath(self):
    ms = gdata.MediaSource(file_path=test_image_location,
        content_type='image/jpeg')
    self.assert_(ms is not None)
    self.assert_(isinstance(ms, gdata.MediaSource))
    self.assertEquals(ms.file_name, test_image_name)
    self.assertEquals(ms.content_type, 'image/jpeg')

  def testMediaConstructorWithFileHandle(self):
    # Bug fix: open the JPEG in binary mode — text mode ('r') corrupts
    # the bytes on platforms that translate line endings (e.g. Windows).
    fh = open(test_image_location, 'rb')
    # Renamed from 'len', which shadowed the builtin.
    file_size = os.path.getsize(test_image_location)
    ms = gdata.MediaSource(fh, 'image/jpeg', file_size,
        file_name=test_image_location)
    self.assert_(ms is not None)
    self.assert_(isinstance(ms, gdata.MediaSource))
    self.assertEquals(ms.file_name, test_image_location)
    self.assertEquals(ms.content_type, 'image/jpeg')
class GDataServiceUnitTest(unittest.TestCase):
def setUp(self):
self.gd_client = gdata.service.GDataService()
self.gd_client.email = username
self.gd_client.password = password
self.gd_client.service = 'gbase'
self.gd_client.source = 'GDataClient "Unit" Tests'
def testProperties(self):
email_string = 'Test Email'
password_string = 'Passwd'
self.gd_client.email = email_string
self.assertEquals(self.gd_client.email, email_string)
self.gd_client.password = password_string
self.assertEquals(self.gd_client.password, password_string)
def testCorrectLogin(self):
try:
self.gd_client.ProgrammaticLogin()
self.assert_(isinstance(
self.gd_client.token_store.find_token(
'http://base.google.com/base/feeds/'),
gdata.auth.ClientLoginToken))
self.assert_(self.gd_client.captcha_token is None)
self.assert_(self.gd_client.captcha_url is None)
except gdata.service.CaptchaRequired:
self.fail('Required Captcha')
def testDefaultHttpClient(self):
self.assert_(isinstance(self.gd_client.http_client,
atom.http.HttpClient))
def testGet(self):
try:
self.gd_client.ProgrammaticLogin()
except gdata.service.CaptchaRequired:
self.fail('Required Captcha')
except gdata.service.BadAuthentication:
self.fail('Bad Authentication')
except gdata.service.Error:
self.fail('Login Error')
self.gd_client.additional_headers = {'X-Google-Key':
'ABQIAAAAoLioN3buSs9KqIIq9V' +
'mkFxT2yXp_ZAY8_ufC3CFXhHIE' +
'1NvwkxRK8C1Q8OWhsWA2AIKv-c' +
'VKlVrNhQ'}
self.gd_client.server = 'base.google.com'
result = self.gd_client.Get('/base/feeds/snippets?bq=digital+camera')
self.assert_(result is not None)
self.assert_(isinstance(result, atom.Feed))
def testGetWithAuthentication(self):
try:
self.gd_client.ProgrammaticLogin()
except gdata.service.CaptchaRequired:
self.fail('Required Captcha')
except gdata.service.BadAuthentication:
self.fail('Bad Authentication')
except gdata.service.Error:
self.fail('Login Error')
self.gd_client.additional_headers = {'X-Google-Key':
'ABQIAAAAoLioN3buSs9KqIIq9V' +
'mkFxT2yXp_ZAY8_ufC3CFXhHIE' +
'1NvwkxRK8C1Q8OWhsWA2AIKv-c' +
'VKlVrNhQ'}
self.gd_client.server = 'base.google.com'
result = self.gd_client.Get('/base/feeds/items?bq=digital+camera')
self.assert_(result is not None)
self.assert_(isinstance(result, atom.Feed))
def testGetEntry(self):
try:
self.gd_client.ProgrammaticLogin()
except gdata.service.CaptchaRequired:
self.fail('Required Captcha')
except gdata.service.BadAuthentication:
self.fail('Bad Authentication')
except gdata.service.Error:
self.fail('Login Error')
self.gd_client.server = 'base.google.com'
try:
result = self.gd_client.GetEntry('/base/feeds/items?bq=digital+camera')
self.fail(
'Result from server in GetEntry should have raised an exception')
except gdata.service.UnexpectedReturnType:
pass
def testGetFeed(self):
try:
self.gd_client.ProgrammaticLogin()
except gdata.service.CaptchaRequired:
self.fail('Required Captcha')
except gdata.service.BadAuthentication:
self.fail('Bad Authentication')
except gdata.service.Error:
self.fail('Login Error')
self.gd_client.server = 'base.google.com'
result = self.gd_client.GetFeed('/base/feeds/items?bq=digital+camera')
self.assert_(result is not None)
self.assert_(isinstance(result, atom.Feed))
def testGetWithResponseTransformer(self):
# Query Google Base and interpret the results as a GBaseSnippetFeed.
feed = self.gd_client.Get(
'http://www.google.com/base/feeds/snippets?bq=digital+camera',
converter=gdata.base.GBaseSnippetFeedFromString)
self.assertEquals(isinstance(feed, gdata.base.GBaseSnippetFeed), True)
def testPostPutAndDelete(self):
try:
self.gd_client.ProgrammaticLogin()
except gdata.service.CaptchaRequired:
self.fail('Required Captcha')
except gdata.service.BadAuthentication:
self.fail('Bad Authentication')
except gdata.service.Error:
self.fail('Login Error')
self.gd_client.additional_headers = {'X-Google-Key':
'ABQIAAAAoLioN3buSs9KqIIq9V' +
'mkFxT2yXp_ZAY8_ufC3CFXhHIE' +
'1NvwkxRK8C1Q8OWhsWA2AIKv-c' +
'VKlVrNhQ'}
self.gd_client.server = 'base.google.com'
# Insert a new item
response = self.gd_client.Post(test_data.TEST_BASE_ENTRY,
'/base/feeds/items')
self.assert_(response is not None)
self.assert_(isinstance(response, atom.Entry))
self.assert_(response.category[0].term == 'products')
# Find the item id of the created item
item_id = response.id.text.lstrip(
'http://www.google.com/base/feeds/items/')
self.assert_(item_id is not None)
updated_xml = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY)
# Change one of the labels in the item
updated_xml.label[2].text = 'beach ball'
# Update the item
response = self.gd_client.Put(updated_xml,
'/base/feeds/items/%s' % item_id)
self.assert_(response is not None)
new_base_item = gdata.base.GBaseItemFromString(str(response))
self.assert_(isinstance(new_base_item, atom.Entry))
# Delete the item the test just created.
response = self.gd_client.Delete('/base/feeds/items/%s' % item_id)
self.assert_(response)
def testPostPutAndDeleteWithConverters(self):
try:
self.gd_client.ProgrammaticLogin()
except gdata.service.CaptchaRequired:
self.fail('Required Captcha')
except gdata.service.BadAuthentication:
self.fail('Bad Authentication')
except gdata.service.Error:
self.fail('Login Error')
self.gd_client.additional_headers = {'X-Google-Key':
'ABQIAAAAoLioN3buSs9KqIIq9V' +
'mkFxT2yXp_ZAY8_ufC3CFXhHIE' +
'1NvwkxRK8C1Q8OWhsWA2AIKv-c' +
'VKlVrNhQ'}
self.gd_client.server = 'base.google.com'
# Insert a new item
response = self.gd_client.Post(test_data.TEST_BASE_ENTRY,
'/base/feeds/items', converter=gdata.base.GBaseItemFromString)
self.assert_(response is not None)
self.assert_(isinstance(response, atom.Entry))
self.assert_(isinstance(response, gdata.base.GBaseItem))
self.assert_(response.category[0].term == 'products')
updated_xml = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY)
# Change one of the labels in the item
updated_xml.label[2].text = 'beach ball'
# Update the item
response = self.gd_client.Put(updated_xml,
response.id.text,
converter=gdata.base.GBaseItemFromString)
self.assertEquals(response is not None, True)
self.assertEquals(isinstance(response, gdata.base.GBaseItem), True)
# Delete the item the test just created.
response = self.gd_client.Delete(response.id.text)
self.assert_(response)
def testCaptchaUrlGeneration(self):
# Populate the mock server with a pairing for a ClientLogin request to a
# CAPTCHA challenge.
mock_client = atom.mock_http.MockHttpClient()
captcha_response = atom.mock_http.MockResponse(
body="""Url=http://www.google.com/login/captcha
Error=CaptchaRequired
CaptchaToken=DQAAAGgAdkI1LK9
CaptchaUrl=Captcha?ctoken=HiteT4b0Bk5Xg18_AcVoP6-yFkHPibe7O9EqxeiI7lUSN
""", status=403, reason='Access Forbidden')
mock_client.add_response(captcha_response, 'POST',
'https://www.google.com/accounts/ClientLogin')
# Set the exising client's handler so that it will make requests to the
# mock service instead of the real server.
self.gd_client.http_client = mock_client
try:
self.gd_client.ProgrammaticLogin()
self.fail('Login attempt should have caused a CAPTCHA challenge.')
except gdata.service.CaptchaRequired, error:
self.assertEquals(self.gd_client.captcha_url,
('https://www.google.com/accounts/Captcha?ctoken=HiteT4b0Bk5Xg18_'
'AcVoP6-yFkHPibe7O9EqxeiI7lUSN'))
class DeleteWithUrlParamsTest(unittest.TestCase):
  """Offline tests that Delete forwards URL parameters (and gsessionid)."""

  def setUp(self):
    self.gd_client = gdata.service.GDataService()
    # Echo client: records the outgoing request and always answers 200 OK.
    self.gd_client.http_client.v2_http_client = (
        atom.mock_http_core.SettableHttpClient(200, 'OK', '', {}))

  def testDeleteWithUrlParams(self):
    headers = {'TestHeader': '123'}
    url_params = {'urlParam1': 'a', 'urlParam2': 'test'}
    self.assert_(self.gd_client.Delete('http://example.com/test', headers,
                                       url_params))
    sent = self.gd_client.http_client.v2_http_client.last_request
    self.assertEqual('example.com', sent.uri.host)
    self.assertEqual('/test', sent.uri.path)
    self.assertEqual({'urlParam1': 'a', 'urlParam2': 'test'}, sent.uri.query)

  def testDeleteWithSessionId(self):
    self.gd_client._SetSessionId('test_session_id')
    headers = {'TestHeader': '123'}
    url_params = {'urlParam1': 'a', 'urlParam2': 'test'}
    self.assert_(self.gd_client.Delete('http://example.com/test', headers,
                                       url_params))
    sent = self.gd_client.http_client.v2_http_client.last_request
    self.assertEqual('example.com', sent.uri.host)
    self.assertEqual('/test', sent.uri.path)
    # The session id must be merged into the outgoing query parameters.
    self.assertEqual({'urlParam1': 'a', 'urlParam2': 'test',
                      'gsessionid': 'test_session_id'}, sent.uri.query)
class QueryTest(unittest.TestCase):
  """Offline tests of gdata.service.Query dict behavior and URI building."""

  def setUp(self):
    self.query = gdata.service.Query()

  def testQueryShouldBehaveLikeDict(self):
    # Missing keys raise KeyError, assignment round-trips.
    try:
      self.query['zap']
      self.fail()
    except KeyError:
      pass
    self.query['zap'] = 'x'
    self.assert_(self.query['zap'] == 'x')

  def testConstructorShouldRejectBadInputs(self):
    # Fixed typo in the test name (was 'testContructor...'); unittest
    # discovery still picks it up via the 'test' prefix.
    # A non-mapping params value must not populate any keys.
    test_q = gdata.service.Query(params=[1,2,3,4])
    self.assert_(len(test_q.keys()) == 0)

  def testTextQueryProperty(self):
    # text_query is an alias for the 'q' parameter.
    self.assert_(self.query.text_query is None)
    self.query['q'] = 'test1'
    self.assert_(self.query.text_query == 'test1')
    self.query.text_query = 'test2'
    self.assert_(self.query.text_query == 'test2')

  def testOrderByQueryProperty(self):
    # orderby is an alias for the 'orderby' parameter.
    self.assert_(self.query.orderby is None)
    self.query['orderby'] = 'updated'
    self.assert_(self.query.orderby == 'updated')
    self.query.orderby = 'starttime'
    self.assert_(self.query.orderby == 'starttime')

  def testQueryShouldProduceExampleUris(self):
    self.query.feed = '/base/feeds/snippets'
    self.query.text_query = 'This is a test'
    self.assert_(self.query.ToUri() == '/base/feeds/snippets?q=This+is+a+test')

  def testCategoriesFormattedCorrectly(self):
    self.query.feed = '/x'
    self.query.categories.append('Fritz')
    self.query.categories.append('Laurie')
    self.assert_(self.query.ToUri() == '/x/-/Fritz/Laurie')
    # The query's feed should not have been changed
    self.assert_(self.query.feed == '/x')
    self.assert_(self.query.ToUri() == '/x/-/Fritz/Laurie')

  def testCategoryQueriesShouldEscapeOrSymbols(self):
    self.query.feed = '/x'
    self.query.categories.append('Fritz|Laurie')
    self.assert_(self.query.ToUri() == '/x/-/Fritz%7CLaurie')

  def testTypeCoercionOnIntParams(self):
    # Integer assignments are stored as strings for URI building.
    self.query.feed = '/x'
    self.query.max_results = 10
    self.query.start_index = 5
    self.assert_(isinstance(self.query.max_results, str))
    self.assert_(isinstance(self.query.start_index, str))
    self.assertEquals(self.query['max-results'], '10')
    self.assertEquals(self.query['start-index'], '5')

  def testPassInCategoryListToConstructor(self):
    query = gdata.service.Query(feed='/feed/sample', categories=['foo', 'bar',
        'eggs|spam'])
    url = query.ToUri()
    self.assert_(url.find('/foo') > -1)
    self.assert_(url.find('/bar') > -1)
    self.assert_(url.find('/eggs%7Cspam') > -1)
class GetNextPageInFeedTest(unittest.TestCase):
  """Live test that GetNext fetches a distinct second page of results."""

  def setUp(self):
    self.gd_client = gdata.service.GDataService()

  def testGetNextPage(self):
    first_page = self.gd_client.Get(
        'http://www.google.com/base/feeds/snippets?max-results=2',
        converter=gdata.base.GBaseSnippetFeedFromString)
    self.assert_(len(first_page.entry) > 0)
    first_id = first_page.entry[0].id.text
    second_page = self.gd_client.GetNext(first_page)
    self.assert_(len(second_page.entry) > 0)
    second_id = second_page.entry[0].id.text
    # Paging forward must yield different entries but the same feed type.
    self.assert_(first_id != second_id)
    self.assert_(second_page.__class__ == first_page.__class__)
class ScopeLookupTest(unittest.TestCase):
  """Checks lookup_scopes for a known service, None, and an unknown name."""

  def testLookupScopes(self):
    # Known service maps to its registered ClientLogin scopes.
    self.assertEquals(gdata.service.lookup_scopes('cl'),
                      gdata.service.CLIENT_LOGIN_SCOPES['cl'])
    # None and unrecognized service names both resolve to None.
    self.assert_(gdata.service.lookup_scopes(None) is None)
    self.assert_(gdata.service.lookup_scopes('UNKNOWN_SERVICE') is None)
class TokenLookupTest(unittest.TestCase):
  """Offline tests of ClientLogin token storage and scope-based lookup."""

  def setUp(self):
    self.client = gdata.service.GDataService()

  def testSetAndGetClientLoginTokenWithNoService(self):
    # With no service configured, auth_token stays None while the token
    # is still findable in the store under SCOPE_ALL.
    self.assert_(self.client.auth_token is None)
    self.client.SetClientLoginToken('foo')
    self.assert_(self.client.auth_token is None)
    stored = self.client.token_store.find_token(atom.token_store.SCOPE_ALL)
    self.assert_(stored is not None)
    self.assertEquals('foo', self.client.GetClientLoginToken())
    # A second set replaces the previous token.
    self.client.SetClientLoginToken('foo2')
    self.assertEquals('foo2', self.client.GetClientLoginToken())

  def testSetAndGetClientLoginTokenWithService(self):
    self.client.service = 'cp'
    self.client.SetClientLoginToken('bar')
    self.assertEquals('bar', self.client.GetClientLoginToken())
    # Changing the service should cause the token to no longer be found.
    self.client.service = 'gbase'
    self.client.current_token = None
    self.assert_(self.client.GetClientLoginToken() is None)

  def testSetAndGetClientLoginTokenWithScopes(self):
    combined_scopes = (gdata.service.CLIENT_LOGIN_SCOPES['cl'] +
                       gdata.service.CLIENT_LOGIN_SCOPES['gbase'])
    self.client.SetClientLoginToken('baz', scopes=combined_scopes)
    self.client.current_token = None
    # No service selected, so nothing matches the scoped token.
    self.assert_(self.client.GetClientLoginToken() is None)
    self.client.service = 'cl'
    self.assertEquals('baz', self.client.GetClientLoginToken())
    self.client.service = 'gbase'
    self.assertEquals('baz', self.client.GetClientLoginToken())
    # 'wise' was not among the token's scopes, so lookup fails.
    self.client.service = 'wise'
    self.assert_(self.client.GetClientLoginToken() is None)

  def testLookupUsingTokenStore(self):
    combined_scopes = (gdata.service.CLIENT_LOGIN_SCOPES['cl'] +
                       gdata.service.CLIENT_LOGIN_SCOPES['gbase'])
    self.client.SetClientLoginToken('baz', scopes=combined_scopes)
    found = self.client.token_store.find_token(
        'http://www.google.com/calendar/feeds/foo')
    self.assertEquals('baz', found.get_token_string())
    self.assertEquals(gdata.auth.PROGRAMMATIC_AUTH_LABEL + 'baz',
                      found.auth_header)
    # A URL outside the token's scopes must not resolve to this token.
    not_matched = self.client.token_store.find_token(
        'http://www.google.com/calendar/')
    self.assert_(isinstance(not_matched, gdata.auth.ClientLoginToken) == False)
    found = self.client.token_store.find_token(
        'http://www.google.com/base/feeds/snippets')
    self.assertEquals('baz', found.get_token_string())
if __name__ == '__main__':
  # These tests post to and delete from live Google services, so warn
  # the user and collect credentials interactively before running.
  print ('GData Service Media Unit Tests\nNOTE: Please run these tests only '
         'with a test account. The tests may delete or update your data.')
  username = raw_input('Please enter your username: ')
  password = getpass.getpass()
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jhartmann@gmail.com (Jochen Hartmann)'
import unittest
from gdata import test_data
import gdata.youtube
import gdata.youtube.service
import atom
# ElementTree-style ('{namespace}tag') qualified-name template for the
# YouTube GData XML namespace.
YOUTUBE_TEMPLATE = '{http://gdata.youtube.com/schemas/2007}%s'
# Fully qualified name for the 'format' element in that namespace.
YT_FORMAT = YOUTUBE_TEMPLATE % ('format')
class VideoEntryTest(unittest.TestCase):
  """Verifies XML parsing of entries in a canned YouTube video feed.

  Every expected value below is the literal field value from the
  test_data.YOUTUBE_VIDEO_FEED fixture.
  """

  def setUp(self):
    # Parse the canned top_rated feed once per test method.
    self.video_feed = gdata.youtube.YouTubeVideoFeedFromString(
        test_data.YOUTUBE_VIDEO_FEED)

  def testCorrectXmlParsing(self):
    """Checks atom, media, rating and statistics fields of a known entry."""
    self.assertEquals(self.video_feed.id.text,
        'http://gdata.youtube.com/feeds/api/standardfeeds/top_rated')
    self.assertEquals(len(self.video_feed.entry), 2)
    for entry in self.video_feed.entry:
      # Only the entry with this known video id is checked in detail.
      if (entry.id.text ==
          'http://gdata.youtube.com/feeds/api/videos/C71ypXYGho8'):
        self.assertEquals(entry.published.text, '2008-03-20T10:17:27.000-07:00')
        self.assertEquals(entry.updated.text, '2008-05-14T04:26:37.000-07:00')
        # Atom categories: three keyword tags plus the kind category.
        self.assertEquals(entry.category[0].scheme,
            'http://gdata.youtube.com/schemas/2007/keywords.cat')
        self.assertEquals(entry.category[0].term, 'karyn')
        self.assertEquals(entry.category[1].scheme,
            'http://gdata.youtube.com/schemas/2007/keywords.cat')
        self.assertEquals(entry.category[1].term, 'garcia')
        self.assertEquals(entry.category[2].scheme,
            'http://gdata.youtube.com/schemas/2007/keywords.cat')
        self.assertEquals(entry.category[2].term, 'me')
        self.assertEquals(entry.category[3].scheme,
            'http://schemas.google.com/g/2005#kind')
        self.assertEquals(entry.category[3].term,
            'http://gdata.youtube.com/schemas/2007#video')
        self.assertEquals(entry.title.text,
            'Me odeio por te amar - KARYN GARCIA')
        self.assertEquals(entry.content.text, 'http://www.karyngarcia.com.br')
        # Atom links: alternate (watch page), related feed, and self.
        self.assertEquals(entry.link[0].rel, 'alternate')
        self.assertEquals(entry.link[0].href,
            'http://www.youtube.com/watch?v=C71ypXYGho8')
        self.assertEquals(entry.link[1].rel,
            'http://gdata.youtube.com/schemas/2007#video.related')
        self.assertEquals(entry.link[1].href,
            'http://gdata.youtube.com/feeds/api/videos/C71ypXYGho8/related')
        self.assertEquals(entry.link[2].rel, 'self')
        self.assertEquals(entry.link[2].href,
            ('http://gdata.youtube.com/feeds/api/standardfeeds'
             '/top_rated/C71ypXYGho8'))
        self.assertEquals(entry.author[0].name.text, 'TvKarynGarcia')
        self.assertEquals(entry.author[0].uri.text,
            'http://gdata.youtube.com/feeds/api/users/tvkaryngarcia')
        # media:group children.
        self.assertEquals(entry.media.title.text,
            'Me odeio por te amar - KARYN GARCIA')
        self.assertEquals(entry.media.description.text,
            'http://www.karyngarcia.com.br')
        self.assertEquals(entry.media.keywords.text,
            'amar, boyfriend, garcia, karyn, me, odeio, por, te')
        self.assertEquals(entry.media.duration.seconds, '203')
        self.assertEquals(entry.media.category[0].label, 'Music')
        self.assertEquals(entry.media.category[0].scheme,
            'http://gdata.youtube.com/schemas/2007/categories.cat')
        self.assertEquals(entry.media.category[0].text, 'Music')
        self.assertEquals(entry.media.category[1].label, 'test111')
        self.assertEquals(entry.media.category[1].scheme,
            'http://gdata.youtube.com/schemas/2007/developertags.cat')
        self.assertEquals(entry.media.category[1].text, 'test111')
        self.assertEquals(entry.media.category[2].label, 'test222')
        self.assertEquals(entry.media.category[2].scheme,
            'http://gdata.youtube.com/schemas/2007/developertags.cat')
        self.assertEquals(entry.media.category[2].text, 'test222')
        # media:content[0] is the default Flash stream (yt:format 5).
        self.assertEquals(entry.media.content[0].url,
            'http://www.youtube.com/v/C71ypXYGho8')
        self.assertEquals(entry.media.content[0].type,
            'application/x-shockwave-flash')
        self.assertEquals(entry.media.content[0].medium, 'video')
        self.assertEquals(
            entry.media.content[0].extension_attributes['isDefault'], 'true')
        self.assertEquals(
            entry.media.content[0].extension_attributes['expression'], 'full')
        self.assertEquals(
            entry.media.content[0].extension_attributes['duration'], '203')
        self.assertEquals(
            entry.media.content[0].extension_attributes[YT_FORMAT], '5')
        # media:content[1] and [2] are mobile RTSP 3GP streams (formats 1, 6).
        self.assertEquals(entry.media.content[1].url,
            ('rtsp://rtsp2.youtube.com/ChoLENy73wIaEQmPhgZ2pXK9CxMYDSANFEgGDA'
             '==/0/0/0/video.3gp'))
        self.assertEquals(entry.media.content[1].type, 'video/3gpp')
        self.assertEquals(entry.media.content[1].medium, 'video')
        self.assertEquals(
            entry.media.content[1].extension_attributes['expression'], 'full')
        self.assertEquals(
            entry.media.content[1].extension_attributes['duration'], '203')
        self.assertEquals(
            entry.media.content[1].extension_attributes[YT_FORMAT], '1')
        self.assertEquals(entry.media.content[2].url,
            ('rtsp://rtsp2.youtube.com/ChoLENy73wIaEQmPhgZ2pXK9CxMYESARFEgGDA=='
             '/0/0/0/video.3gp'))
        self.assertEquals(entry.media.content[2].type, 'video/3gpp')
        self.assertEquals(entry.media.content[2].medium, 'video')
        self.assertEquals(
            entry.media.content[2].extension_attributes['expression'], 'full')
        self.assertEquals(
            entry.media.content[2].extension_attributes['duration'], '203')
        self.assertEquals(
            entry.media.content[2].extension_attributes[YT_FORMAT], '6')
        self.assertEquals(entry.media.player.url,
            'http://www.youtube.com/watch?v=C71ypXYGho8')
        # Four thumbnails: three 130x97 time-offset frames plus one 320x240.
        self.assertEquals(entry.media.thumbnail[0].url,
            'http://img.youtube.com/vi/C71ypXYGho8/2.jpg')
        self.assertEquals(entry.media.thumbnail[0].height, '97')
        self.assertEquals(entry.media.thumbnail[0].width, '130')
        self.assertEquals(entry.media.thumbnail[0].extension_attributes['time'],
            '00:01:41.500')
        self.assertEquals(entry.media.thumbnail[1].url,
            'http://img.youtube.com/vi/C71ypXYGho8/1.jpg')
        self.assertEquals(entry.media.thumbnail[1].height, '97')
        self.assertEquals(entry.media.thumbnail[1].width, '130')
        self.assertEquals(entry.media.thumbnail[1].extension_attributes['time'],
            '00:00:50.750')
        self.assertEquals(entry.media.thumbnail[2].url,
            'http://img.youtube.com/vi/C71ypXYGho8/3.jpg')
        self.assertEquals(entry.media.thumbnail[2].height, '97')
        self.assertEquals(entry.media.thumbnail[2].width, '130')
        self.assertEquals(entry.media.thumbnail[2].extension_attributes['time'],
            '00:02:32.250')
        self.assertEquals(entry.media.thumbnail[3].url,
            'http://img.youtube.com/vi/C71ypXYGho8/0.jpg')
        self.assertEquals(entry.media.thumbnail[3].height, '240')
        self.assertEquals(entry.media.thumbnail[3].width, '320')
        self.assertEquals(entry.media.thumbnail[3].extension_attributes['time'],
            '00:01:41.500')
        # yt:statistics, gd:rating and gd:comments extensions.
        self.assertEquals(entry.statistics.view_count, '138864')
        self.assertEquals(entry.statistics.favorite_count, '2474')
        self.assertEquals(entry.rating.min, '1')
        self.assertEquals(entry.rating.max, '5')
        self.assertEquals(entry.rating.num_raters, '4626')
        self.assertEquals(entry.rating.average, '4.95')
        self.assertEquals(entry.comments.feed_link[0].href,
            ('http://gdata.youtube.com/feeds/api/videos/'
             'C71ypXYGho8/comments'))
        self.assertEquals(entry.comments.feed_link[0].count_hint, '27')
        # Convenience accessors on YouTubeVideoEntry.
        self.assertEquals(entry.GetSwfUrl(),
            'http://www.youtube.com/v/C71ypXYGho8')
        self.assertEquals(entry.GetYouTubeCategoryAsString(), 'Music')
class VideoEntryPrivateTest(unittest.TestCase):
  """Verifies that a private video entry parses with its private flag set."""

  def setUp(self):
    self.entry = gdata.youtube.YouTubeVideoEntryFromString(
        test_data.YOUTUBE_ENTRY_PRIVATE)

  def testCorrectXmlParsing(self):
    """The parsed object is a YouTubeVideoEntry and is marked private."""
    is_video_entry = isinstance(self.entry, gdata.youtube.YouTubeVideoEntry)
    self.assert_(is_video_entry)
    self.assert_(self.entry.media.private)
class VideoFeedTest(unittest.TestCase):
  """Verifies feed-level XML parsing of the canned YouTube video feed."""

  def setUp(self):
    # Same fixture as VideoEntryTest, but checked at the feed level.
    self.feed = gdata.youtube.YouTubeVideoFeedFromString(
        test_data.YOUTUBE_VIDEO_FEED)

  def testCorrectXmlParsing(self):
    """Checks id, generator, author, category, links and paging fields."""
    self.assertEquals(self.feed.id.text,
        'http://gdata.youtube.com/feeds/api/standardfeeds/top_rated')
    self.assertEquals(self.feed.generator.text, 'YouTube data API')
    self.assertEquals(self.feed.generator.uri, 'http://gdata.youtube.com/')
    self.assertEquals(len(self.feed.author), 1)
    self.assertEquals(self.feed.author[0].name.text, 'YouTube')
    self.assertEquals(len(self.feed.category), 1)
    self.assertEquals(self.feed.category[0].scheme,
        'http://schemas.google.com/g/2005#kind')
    self.assertEquals(self.feed.category[0].term,
        'http://gdata.youtube.com/schemas/2007#video')
    self.assertEquals(self.feed.items_per_page.text, '25')
    # Four links: alternate, feed, self, and next (paging).
    self.assertEquals(len(self.feed.link), 4)
    self.assertEquals(self.feed.link[0].href,
        'http://www.youtube.com/browse?s=tr')
    self.assertEquals(self.feed.link[0].rel, 'alternate')
    self.assertEquals(self.feed.link[1].href,
        'http://gdata.youtube.com/feeds/api/standardfeeds/top_rated')
    self.assertEquals(self.feed.link[1].rel,
        'http://schemas.google.com/g/2005#feed')
    self.assertEquals(self.feed.link[2].href,
        ('http://gdata.youtube.com/feeds/api/standardfeeds/top_rated?'
         'start-index=1&max-results=25'))
    self.assertEquals(self.feed.link[2].rel, 'self')
    self.assertEquals(self.feed.link[3].href,
        ('http://gdata.youtube.com/feeds/api/standardfeeds/top_rated?'
         'start-index=26&max-results=25'))
    self.assertEquals(self.feed.link[3].rel, 'next')
    self.assertEquals(self.feed.start_index.text, '1')
    self.assertEquals(self.feed.title.text, 'Top Rated')
    self.assertEquals(self.feed.total_results.text, '100')
    self.assertEquals(self.feed.updated.text, '2008-05-14T02:24:07.000-07:00')
    self.assertEquals(len(self.feed.entry), 2)
class YouTubePlaylistFeedTest(unittest.TestCase):
  """Checks feed-level parsing of the canned playlist-link feed."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubePlaylistFeedFromString(
        test_data.YOUTUBE_PLAYLIST_FEED)

  def testCorrectXmlParsing(self):
    """The fixture feed has one entry and the playlistLink kind category."""
    kind_category = self.feed.category[0]
    self.assertEquals(len(self.feed.entry), 1)
    self.assertEquals(kind_category.scheme,
                      'http://schemas.google.com/g/2005#kind')
    self.assertEquals(kind_category.term,
                      'http://gdata.youtube.com/schemas/2007#playlistLink')
class YouTubePlaylistEntryTest(unittest.TestCase):
  """Checks entry-level parsing of the canned playlist-link feed."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubePlaylistFeedFromString(
        test_data.YOUTUBE_PLAYLIST_FEED)

  def testCorrectXmlParsing(self):
    """Each entry has the playlistLink kind, description and playlist link."""
    for entry in self.feed.entry:
      self.assertEquals(entry.category[0].scheme,
          'http://schemas.google.com/g/2005#kind')
      self.assertEquals(entry.category[0].term,
          'http://gdata.youtube.com/schemas/2007#playlistLink')
      self.assertEquals(entry.description.text,
          'My new playlist Description')
      # gd:feedLink points to the feed of videos in this playlist.
      self.assertEquals(entry.feed_link[0].href,
          'http://gdata.youtube.com/feeds/playlists/8BCDD04DE8F771B2')
      self.assertEquals(entry.feed_link[0].rel,
          'http://gdata.youtube.com/schemas/2007#playlist')
class YouTubePlaylistVideoFeedTest(unittest.TestCase):
  """Checks feed-level parsing of the canned playlist video feed."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubePlaylistVideoFeedFromString(
        test_data.YOUTUBE_PLAYLIST_VIDEO_FEED)

  def testCorrectXmlParsing(self):
    """The feed has one entry, the playlist kind, and two tag categories."""
    self.assertEquals(len(self.feed.entry), 1)
    self.assertEquals(self.feed.category[0].scheme,
        'http://schemas.google.com/g/2005#kind')
    self.assertEquals(self.feed.category[0].term,
        'http://gdata.youtube.com/schemas/2007#playlist')
    self.assertEquals(self.feed.category[1].scheme,
        'http://gdata.youtube.com/schemas/2007/tags.cat')
    self.assertEquals(self.feed.category[1].term, 'videos')
    self.assertEquals(self.feed.category[2].scheme,
        'http://gdata.youtube.com/schemas/2007/tags.cat')
    self.assertEquals(self.feed.category[2].term, 'python')
class YouTubePlaylistVideoEntryTest(unittest.TestCase):
  """Checks entry-level parsing of the canned playlist video feed."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubePlaylistVideoFeedFromString(
        test_data.YOUTUBE_PLAYLIST_VIDEO_FEED)

  def testCorrectXmlParsing(self):
    """The single entry carries a yt:position of 1."""
    entries = self.feed.entry
    self.assertEquals(len(entries), 1)
    for playlist_video_entry in entries:
      self.assertEquals(playlist_video_entry.position.text, '1')
class YouTubeVideoCommentFeedTest(unittest.TestCase):
  """Checks feed-level parsing of the canned video comments feed."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubeVideoCommentFeedFromString(
        test_data.YOUTUBE_COMMENT_FEED)

  def testCorrectXmlParsing(self):
    """The feed has the comment kind, four links, and three entries."""
    self.assertEquals(len(self.feed.category), 1)
    self.assertEquals(self.feed.category[0].scheme,
        'http://schemas.google.com/g/2005#kind')
    self.assertEquals(self.feed.category[0].term,
        'http://gdata.youtube.com/schemas/2007#comment')
    # Four links: related (the video), alternate, feed, and self.
    self.assertEquals(len(self.feed.link), 4)
    self.assertEquals(self.feed.link[0].rel, 'related')
    self.assertEquals(self.feed.link[0].href,
        'http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU')
    self.assertEquals(self.feed.link[1].rel, 'alternate')
    self.assertEquals(self.feed.link[1].href,
        'http://www.youtube.com/watch?v=2Idhz9ef5oU')
    self.assertEquals(self.feed.link[2].rel,
        'http://schemas.google.com/g/2005#feed')
    self.assertEquals(self.feed.link[2].href,
        'http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments')
    self.assertEquals(self.feed.link[3].rel, 'self')
    self.assertEquals(self.feed.link[3].href,
        ('http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments?'
         'start-index=1&max-results=25'))
    self.assertEquals(len(self.feed.entry), 3)
class YouTubeVideoCommentEntryTest(unittest.TestCase):
  """Checks entry-level parsing of the canned video comments feed."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubeVideoCommentFeedFromString(
        test_data.YOUTUBE_COMMENT_FEED)

  def testCorrectXmlParsing(self):
    """Entries are YouTubeVideoCommentEntry; one known entry is verified."""
    self.assertEquals(len(self.feed.entry), 3)
    self.assert_(isinstance(self.feed.entry[0],
        gdata.youtube.YouTubeVideoCommentEntry))
    for entry in self.feed.entry:
      # Only the comment with this known id is checked in detail.
      if (entry.id.text ==
          ('http://gdata.youtube.com/feeds/videos/'
           '2Idhz9ef5oU/comments/91F809A3DE2EB81B')):
        self.assertEquals(entry.category[0].scheme,
            'http://schemas.google.com/g/2005#kind')
        self.assertEquals(entry.category[0].term,
            'http://gdata.youtube.com/schemas/2007#comment')
        self.assertEquals(entry.link[0].href,
            'http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU')
        self.assertEquals(entry.link[0].rel, 'related')
        self.assertEquals(entry.content.text, 'test66')
class YouTubeVideoSubscriptionFeedTest(unittest.TestCase):
  """Checks feed-level parsing of the canned subscriptions feed."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubeSubscriptionFeedFromString(
        test_data.YOUTUBE_SUBSCRIPTION_FEED)

  def testCorrectXmlParsing(self):
    """The feed has the subscription kind, four links, and one entry."""
    self.assertEquals(len(self.feed.category), 1)
    self.assertEquals(self.feed.category[0].scheme,
        'http://schemas.google.com/g/2005#kind')
    self.assertEquals(self.feed.category[0].term,
        'http://gdata.youtube.com/schemas/2007#subscription')
    # Four links: related (the user), alternate, feed, and self.
    self.assertEquals(len(self.feed.link), 4)
    self.assertEquals(self.feed.link[0].rel, 'related')
    self.assertEquals(self.feed.link[0].href,
        'http://gdata.youtube.com/feeds/users/andyland74')
    self.assertEquals(self.feed.link[1].rel, 'alternate')
    self.assertEquals(self.feed.link[1].href,
        'http://www.youtube.com/profile_subscriptions?user=andyland74')
    self.assertEquals(self.feed.link[2].rel,
        'http://schemas.google.com/g/2005#feed')
    self.assertEquals(self.feed.link[2].href,
        'http://gdata.youtube.com/feeds/users/andyland74/subscriptions')
    self.assertEquals(self.feed.link[3].rel, 'self')
    self.assertEquals(self.feed.link[3].href,
        ('http://gdata.youtube.com/feeds/users/andyland74/subscriptions?'
         'start-index=1&max-results=25'))
    self.assertEquals(len(self.feed.entry), 1)
class YouTubeVideoSubscriptionEntryTest(unittest.TestCase):
  """Checks entry-level parsing of the canned subscriptions feed."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubeSubscriptionFeedFromString(
        test_data.YOUTUBE_SUBSCRIPTION_FEED)

  def testCorrectXmlParsing(self):
    """Each entry is a 'channel' subscription to user NBC's uploads."""
    for entry in self.feed.entry:
      # Two categories: subscription type ('channel') plus the kind.
      self.assertEquals(len(entry.category), 2)
      self.assertEquals(entry.category[0].scheme,
          'http://gdata.youtube.com/schemas/2007/subscriptiontypes.cat')
      self.assertEquals(entry.category[0].term, 'channel')
      self.assertEquals(entry.category[1].scheme,
          'http://schemas.google.com/g/2005#kind')
      self.assertEquals(entry.category[1].term,
          'http://gdata.youtube.com/schemas/2007#subscription')
      # Three links: related, alternate, and self.
      self.assertEquals(len(entry.link), 3)
      self.assertEquals(entry.link[0].href,
          'http://gdata.youtube.com/feeds/users/andyland74')
      self.assertEquals(entry.link[0].rel, 'related')
      self.assertEquals(entry.link[1].href,
          'http://www.youtube.com/profile_videos?user=NBC')
      self.assertEquals(entry.link[1].rel, 'alternate')
      self.assertEquals(entry.link[2].href,
          ('http://gdata.youtube.com/feeds/users/andyland74/subscriptions/'
           'd411759045e2ad8c'))
      self.assertEquals(entry.link[2].rel, 'self')
      # gd:feedLink points to the subscribed-to user's uploads feed.
      self.assertEquals(len(entry.feed_link), 1)
      self.assertEquals(entry.feed_link[0].href,
          'http://gdata.youtube.com/feeds/api/users/nbc/uploads')
      self.assertEquals(entry.feed_link[0].rel,
          'http://gdata.youtube.com/schemas/2007#user.uploads')
      self.assertEquals(entry.username.text, 'NBC')
class YouTubeVideoResponseFeedTest(unittest.TestCase):
  """Checks feed-level parsing of the canned video responses feed."""

  def setUp(self):
    # Video responses parse as a regular video feed.
    self.feed = gdata.youtube.YouTubeVideoFeedFromString(
        test_data.YOUTUBE_VIDEO_RESPONSE_FEED)

  def testCorrectXmlParsing(self):
    """The feed has the video kind, four links, and one entry."""
    self.assertEquals(len(self.feed.category), 1)
    self.assertEquals(self.feed.category[0].scheme,
        'http://schemas.google.com/g/2005#kind')
    self.assertEquals(self.feed.category[0].term,
        'http://gdata.youtube.com/schemas/2007#video')
    # Four links: related (the original video), alternate, feed, and self.
    self.assertEquals(len(self.feed.link), 4)
    self.assertEquals(self.feed.link[0].href,
        'http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY')
    self.assertEquals(self.feed.link[0].rel, 'related')
    self.assertEquals(self.feed.link[1].href,
        'http://www.youtube.com/video_response_view_all?v=2c3q9K4cHzY')
    self.assertEquals(self.feed.link[1].rel, 'alternate')
    self.assertEquals(self.feed.link[2].href,
        'http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses')
    self.assertEquals(self.feed.link[2].rel,
        'http://schemas.google.com/g/2005#feed')
    self.assertEquals(self.feed.link[3].href,
        ('http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses?'
         'start-index=1&max-results=25'))
    self.assertEquals(self.feed.link[3].rel, 'self')
    self.assertEquals(len(self.feed.entry), 1)
class YouTubeVideoResponseEntryTest(unittest.TestCase):
  """Checks that video response entries parse into YouTubeVideoEntry objects."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubeVideoFeedFromString(
        test_data.YOUTUBE_VIDEO_RESPONSE_FEED)

  def testCorrectXmlParsing(self):
    """Every entry in the response feed is a YouTubeVideoEntry."""
    for response_entry in self.feed.entry:
      self.assert_(
          isinstance(response_entry, gdata.youtube.YouTubeVideoEntry))
class YouTubeContactFeed(unittest.TestCase):
  """Checks feed-level parsing of the canned contacts (friend) feed."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubeContactFeedFromString(
        test_data.YOUTUBE_CONTACTS_FEED)

  def testCorrectXmlParsing(self):
    """The fixture has two entries and the friend kind category."""
    kind_category = self.feed.category[0]
    self.assertEquals(len(self.feed.entry), 2)
    self.assertEquals(kind_category.scheme,
                      'http://schemas.google.com/g/2005#kind')
    self.assertEquals(kind_category.term,
                      'http://gdata.youtube.com/schemas/2007#friend')
class YouTubeContactEntry(unittest.TestCase):
  """Checks entry-level parsing of the canned contacts feed."""

  def setUp(self):
    self.feed = gdata.youtube.YouTubeContactFeedFromString(
        test_data.YOUTUBE_CONTACTS_FEED)

  def testCorrectXmlParsing(self):
    """The known pending contact has the expected username and status."""
    target_id = ('http://gdata.youtube.com/feeds/users/'
                 'apitestjhartmann/contacts/testjfisher')
    for contact_entry in self.feed.entry:
      if contact_entry.id.text == target_id:
        self.assertEquals(contact_entry.username.text, 'testjfisher')
        self.assertEquals(contact_entry.status.text, 'pending')
class YouTubeUserEntry(unittest.TestCase):
  """Checks parsing of the canned user profile entry.

  Note: the parsed object is a single profile entry; the attribute name
  self.feed is historical.
  """

  def setUp(self):
    self.feed = gdata.youtube.YouTubeUserEntryFromString(
        test_data.YOUTUBE_PROFILE)

  def testCorrectXmlParsing(self):
    """Checks author, profile fields, feed links, statistics and thumbnail."""
    self.assertEquals(self.feed.author[0].name.text, 'andyland74')
    self.assertEquals(self.feed.books.text, 'Catch-22')
    self.assertEquals(self.feed.category[0].scheme,
        'http://gdata.youtube.com/schemas/2007/channeltypes.cat')
    self.assertEquals(self.feed.category[0].term, 'Standard')
    self.assertEquals(self.feed.category[1].scheme,
        'http://schemas.google.com/g/2005#kind')
    self.assertEquals(self.feed.category[1].term,
        'http://gdata.youtube.com/schemas/2007#userProfile')
    self.assertEquals(self.feed.company.text, 'Google')
    self.assertEquals(self.feed.gender.text, 'm')
    self.assertEquals(self.feed.hobbies.text, 'Testing YouTube APIs')
    self.assertEquals(self.feed.hometown.text, 'Somewhere')
    # Six gd:feedLink children: favorites, contacts, inbox, playlists,
    # subscriptions, and uploads.
    self.assertEquals(len(self.feed.feed_link), 6)
    self.assertEquals(self.feed.feed_link[0].count_hint, '4')
    self.assertEquals(self.feed.feed_link[0].href,
        'http://gdata.youtube.com/feeds/users/andyland74/favorites')
    self.assertEquals(self.feed.feed_link[0].rel,
        'http://gdata.youtube.com/schemas/2007#user.favorites')
    self.assertEquals(self.feed.feed_link[1].count_hint, '1')
    self.assertEquals(self.feed.feed_link[1].href,
        'http://gdata.youtube.com/feeds/users/andyland74/contacts')
    self.assertEquals(self.feed.feed_link[1].rel,
        'http://gdata.youtube.com/schemas/2007#user.contacts')
    self.assertEquals(self.feed.feed_link[2].count_hint, '0')
    self.assertEquals(self.feed.feed_link[2].href,
        'http://gdata.youtube.com/feeds/users/andyland74/inbox')
    self.assertEquals(self.feed.feed_link[2].rel,
        'http://gdata.youtube.com/schemas/2007#user.inbox')
    # The playlists feed link carries no countHint attribute in the fixture.
    self.assertEquals(self.feed.feed_link[3].count_hint, None)
    self.assertEquals(self.feed.feed_link[3].href,
        'http://gdata.youtube.com/feeds/users/andyland74/playlists')
    self.assertEquals(self.feed.feed_link[3].rel,
        'http://gdata.youtube.com/schemas/2007#user.playlists')
    self.assertEquals(self.feed.feed_link[4].count_hint, '4')
    self.assertEquals(self.feed.feed_link[4].href,
        'http://gdata.youtube.com/feeds/users/andyland74/subscriptions')
    self.assertEquals(self.feed.feed_link[4].rel,
        'http://gdata.youtube.com/schemas/2007#user.subscriptions')
    self.assertEquals(self.feed.feed_link[5].count_hint, '1')
    self.assertEquals(self.feed.feed_link[5].href,
        'http://gdata.youtube.com/feeds/users/andyland74/uploads')
    self.assertEquals(self.feed.feed_link[5].rel,
        'http://gdata.youtube.com/schemas/2007#user.uploads')
    self.assertEquals(self.feed.first_name.text, 'andy')
    self.assertEquals(self.feed.last_name.text, 'example')
    self.assertEquals(self.feed.link[0].href,
        'http://www.youtube.com/profile?user=andyland74')
    self.assertEquals(self.feed.link[0].rel, 'alternate')
    self.assertEquals(self.feed.link[1].href,
        'http://gdata.youtube.com/feeds/users/andyland74')
    self.assertEquals(self.feed.link[1].rel, 'self')
    self.assertEquals(self.feed.location.text, 'US')
    self.assertEquals(self.feed.movies.text, 'Aqua Teen Hungerforce')
    self.assertEquals(self.feed.music.text, 'Elliott Smith')
    self.assertEquals(self.feed.occupation.text, 'Technical Writer')
    self.assertEquals(self.feed.published.text, '2006-10-16T00:09:45.000-07:00')
    self.assertEquals(self.feed.school.text, 'University of North Carolina')
    # yt:statistics on the profile.
    self.assertEquals(self.feed.statistics.last_web_access,
        '2008-02-25T16:03:38.000-08:00')
    self.assertEquals(self.feed.statistics.subscriber_count, '1')
    self.assertEquals(self.feed.statistics.video_watch_count, '21')
    self.assertEquals(self.feed.statistics.view_count, '9')
    self.assertEquals(self.feed.thumbnail.url,
        'http://i.ytimg.com/vi/YFbSxcdOL-w/default.jpg')
    self.assertEquals(self.feed.title.text, 'andyland74 Channel')
    self.assertEquals(self.feed.updated.text, '2008-02-26T11:48:21.000-08:00')
    self.assertEquals(self.feed.username.text, 'andyland74')
# Run all YouTube data model tests when executed directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'j.s@google.com (Jeff Scudder)'
import os
import unittest
import gdata.client
import atom.http_core
import atom.mock_http_core
import atom.core
import gdata.data
import gdata.core
# TODO: switch to using v2 atom data once it is available.
import atom
import gdata.test_config as conf
# Live Blogger tests need a target blog; register the blog id command-line
# option with the shared test configuration.
conf.options.register_option(conf.BLOG_ID_OPTION)
class BloggerTest(unittest.TestCase):
  """Live create/update/delete tests against the Blogger API.

  Tests are no-ops unless the shared config option runlive is 'true';
  recorded HTTP sessions may be replayed via conf.configure_cache.
  """

  def setUp(self):
    self.client = None
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.client.GDClient()
      conf.configure_client(self.client, 'BloggerTest', 'blogger')

  def tearDown(self):
    conf.close_client(self.client)

  def test_create_update_delete(self):
    """Posts a v1 Atom entry, edits its title, then deletes it."""
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'test_create_update_delete')
    blog_post = atom.Entry(
        title=atom.Title(text='test from python BloggerTest'),
        content=atom.Content(text='This is only a test.'))
    http_request = atom.http_core.HttpRequest()
    http_request.add_body_part(str(blog_post), 'application/atom+xml')

    def entry_from_string_wrapper(response):
      # Sanity-check the response headers before parsing the body.
      self.assert_(response.getheader('content-type') is not None)
      self.assert_(response.getheader('gdata-version') is not None)
      return atom.EntryFromString(response.read())

    entry = self.client.request('POST',
        'http://www.blogger.com/feeds/%s/posts/default' % (
            conf.options.get_value('blogid')),
        converter=entry_from_string_wrapper, http_request=http_request)
    self.assertEqual(entry.title.text, 'test from python BloggerTest')
    self.assertEqual(entry.content.text, 'This is only a test.')

    # Edit the test entry.
    edit_link = None
    for link in entry.link:
      # Find the edit link for this entry.
      if link.rel == 'edit':
        edit_link = link.href
    # Fail with a clear message here rather than issuing a PUT to None
    # (mirrors the checks in test_use_version_two below).
    self.assert_(edit_link is not None)
    entry.title.text = 'Edited'
    http_request = atom.http_core.HttpRequest()
    http_request.add_body_part(str(entry), 'application/atom+xml')
    edited_entry = self.client.request('PUT', edit_link,
        converter=entry_from_string_wrapper, http_request=http_request)
    self.assertEqual(edited_entry.title.text, 'Edited')
    self.assertEqual(edited_entry.content.text, entry.content.text)

    # Delete the test entry from the blog.
    edit_link = None
    for link in edited_entry.link:
      if link.rel == 'edit':
        edit_link = link.href
    self.assert_(edit_link is not None)
    response = self.client.request('DELETE', edit_link)
    self.assertEqual(response.status, 200)

  def test_use_version_two(self):
    """Posts a blog entry built from raw XmlElements using API version 2."""
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_cache(self.client, 'test_use_version_two')
    # Use version 2 of the Blogger API.
    self.client.api_version = '2'
    # Create a v2 blog post entry to post on the blog.
    entry = create_element('entry')
    entry._other_elements.append(
        create_element('title', text='Marriage!',
            attributes={'type': 'text'}))
    entry._other_elements.append(
        create_element('content', attributes={'type': 'text'},
            text='Mr. Darcy has proposed marriage to me!'))
    entry._other_elements.append(
        create_element('category',
            attributes={'scheme': TAG, 'term': 'marriage'}))
    entry._other_elements.append(
        create_element('category',
            attributes={'scheme': TAG, 'term': 'Mr. Darcy'}))
    http_request = atom.http_core.HttpRequest()
    http_request.add_body_part(entry.to_string(), 'application/atom+xml')
    posted = self.client.request('POST',
        'http://www.blogger.com/feeds/%s/posts/default' % (
            conf.options.get_value('blogid')),
        converter=element_from_string, http_request=http_request)
    # Verify that the blog post content is correct.
    self.assertEqual(posted.get_elements('title', ATOM)[0].text, 'Marriage!')
    # TODO: uncomment once server bug is fixed.
    #self.assertEqual(posted.get_elements('content', ATOM)[0].text,
    #                 'Mr. Darcy has proposed marriage to me!')
    # Both tag categories must come back on the posted entry.
    found_tags = [False, False]
    categories = posted.get_elements('category', ATOM)
    self.assertEqual(len(categories), 2)
    for category in categories:
      if category.get_attributes('term')[0].value == 'marriage':
        found_tags[0] = True
      elif category.get_attributes('term')[0].value == 'Mr. Darcy':
        found_tags[1] = True
    self.assert_(found_tags[0])
    self.assert_(found_tags[1])
    # Find the blog post on the blog.
    self_link = None
    edit_link = None
    for link in posted.get_elements('link', ATOM):
      if link.get_attributes('rel')[0].value == 'self':
        self_link = link.get_attributes('href')[0].value
      elif link.get_attributes('rel')[0].value == 'edit':
        edit_link = link.get_attributes('href')[0].value
    self.assert_(self_link is not None)
    self.assert_(edit_link is not None)
    queried = self.client.request('GET', self_link,
        converter=element_from_string)
    # TODO: add additional asserts to check content and etags.
    # Test queries using ETags.
    entry = self.client.get_entry(self_link)
    self.assert_(entry.etag is not None)
    self.assertRaises(gdata.client.NotModified, self.client.get_entry,
        self_link, etag=entry.etag)
    # Delete the test blog post.
    self.client.request('DELETE', edit_link)
class ContactsTest(unittest.TestCase):
  """Live Contacts API CRUD test using raw requests (runlive only)."""

  def setUp(self):
    self.client = None
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.client.GDClient()
      conf.configure_client(self.client, 'ContactsTest', 'cp')

  def tearDown(self):
    conf.close_client(self.client)

  # NOTE(review): original comment read 'Run this test and profiles fails';
  # presumably this test fails for accounts with Google Profiles enabled --
  # confirm before relying on it.
  def test_crud_version_two(self):
    """Creates a contact from raw v2 XML, then deletes it using its ETag."""
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_cache(self.client, 'test_crud_version_two')
    self.client.api_version = '2'
    entry = create_element('entry')
    entry._other_elements.append(
        create_element('title', ATOM, 'Jeff', {'type': 'text'}))
    entry._other_elements.append(
        create_element('email', GD,
            attributes={'address': 'j.s@google.com', 'rel': WORK_REL}))
    http_request = atom.http_core.HttpRequest()
    http_request.add_body_part(entry.to_string(), 'application/atom+xml')
    posted = self.client.request('POST',
        'http://www.google.com/m8/feeds/contacts/default/full',
        converter=element_from_string, http_request=http_request)
    # Locate the self and edit links on the created contact.
    self_link = None
    edit_link = None
    for link in posted.get_elements('link', ATOM):
      if link.get_attributes('rel')[0].value == 'self':
        self_link = link.get_attributes('href')[0].value
      elif link.get_attributes('rel')[0].value == 'edit':
        edit_link = link.get_attributes('href')[0].value
    self.assert_(self_link is not None)
    self.assert_(edit_link is not None)
    # API version 2 requires the entry's ETag in If-Match to delete it.
    etag = posted.get_attributes('etag')[0].value
    self.assert_(etag is not None)
    self.assert_(len(etag) > 0)
    # Delete the test contact.
    http_request = atom.http_core.HttpRequest()
    http_request.headers['If-Match'] = etag
    self.client.request('DELETE', edit_link, http_request=http_request)
class VersionTwoClientContactsTest(unittest.TestCase):
  """Live Contacts CRUD tests using the v2 client helpers (post/update/delete)."""

  def setUp(self):
    self.client = None
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.client.GDClient()
      self.client.api_version = '2'
      conf.configure_client(self.client, 'VersionTwoClientContactsTest', 'cp')
    # Remember the ambient proxy setting so tearDown can restore it after
    # notest_crud_over_https_proxy mutates it.
    self.old_proxy = os.environ.get('https_proxy')

  def tearDown(self):
    # Restore https_proxy exactly as it was. Compare against None (not
    # truthiness) so an originally-empty value ('') is restored, not deleted.
    if self.old_proxy is not None:
      os.environ['https_proxy'] = self.old_proxy
    elif 'https_proxy' in os.environ:
      del os.environ['https_proxy']
    conf.close_client(self.client)

  def test_version_two_client(self):
    """Creates, updates, and deletes a contact via the high-level client."""
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_cache(self.client, 'test_version_two_client')
    entry = gdata.data.GDEntry()
    entry._other_elements.append(
        create_element('title', ATOM, 'Test', {'type': 'text'}))
    entry._other_elements.append(
        create_element('email', GD,
            attributes={'address': 'test@example.com', 'rel': WORK_REL}))
    # Create the test contact.
    posted = self.client.post(entry,
        'https://www.google.com/m8/feeds/contacts/default/full')
    self.assert_(isinstance(posted, gdata.data.GDEntry))
    self.assertEqual(posted.get_elements('title')[0].text, 'Test')
    self.assertEqual(posted.get_elements('email')[0].get_attributes(
        'address')[0].value, 'test@example.com')
    # Rename the contact and push the edit.
    posted.get_elements('title')[0].text = 'Doug'
    edited = self.client.update(posted)
    self.assert_(isinstance(edited, gdata.data.GDEntry))
    self.assertEqual(edited.get_elements('title')[0].text, 'Doug')
    self.assertEqual(edited.get_elements('email')[0].get_attributes(
        'address')[0].value, 'test@example.com')
    # Delete the test contact.
    self.client.delete(edited)

  def notest_crud_over_https_proxy(self):
    """Runs the CRUD test through an HTTPS proxy; disabled ('notest') prefix."""
    import urllib
    PROXY_ADDR = '98.192.125.23'
    try:
      response = urllib.urlopen('http://' + PROXY_ADDR)
    except IOError:
      return
    # Only bother running the test if the proxy is up.
    if response.getcode() == 200:
      os.environ['https_proxy'] = PROXY_ADDR
      # Perform the CRUD test above, this time over a proxy.
      self.test_version_two_client()
class JsoncRequestTest(unittest.TestCase):
  """Fetches a live YouTube search feed in JSONC format (requires network)."""

  def setUp(self):
    self.client = gdata.client.GDClient()

  def test_get_jsonc(self):
    """A JSONC search result contains at least one item."""
    feed_url = ('http://gdata.youtube.com/feeds/api/videos'
                '?q=surfing&v=2&alt=jsonc')
    jsonc = self.client.get_feed(
        feed_url, converter=gdata.core.parse_json_file)
    self.assertTrue(len(jsonc.data.items) > 0)
# Utility methods and constants shared by the test classes above.
# The Atom XML namespace.
ATOM = 'http://www.w3.org/2005/Atom'
# URL used as the scheme for a blog post tag.
TAG = 'http://www.blogger.com/atom/ns#'
# Namespace for Google Data API elements.
GD = 'http://schemas.google.com/g/2005'
# rel value marking a work (as opposed to home) email address.
WORK_REL = 'http://schemas.google.com/g/2005#work'
def create_element(tag, namespace=ATOM, text=None, attributes=None):
  """Builds a bare XmlElement with the given namespaced tag.

  Args:
    tag: Local name of the element.
    namespace: XML namespace URI; defaults to the Atom namespace.
    text: Optional text content for the element.
    attributes: Optional dict of attribute name/value pairs; copied so the
        caller's dict is not shared with the element.

  Returns:
    An atom.core.XmlElement with its qualified name set.
  """
  qualified_name = '{%s}%s' % (namespace, tag)
  element = atom.core.XmlElement()
  element._qname = qualified_name
  if text is not None:
    element.text = text
  if attributes is not None:
    element._other_attributes = attributes.copy()
  return element
def element_from_string(response):
  """Converter: parses an HTTP response body into a generic XmlElement."""
  return atom.core.xml_element_from_string(response.read(),
      atom.core.XmlElement)
def suite():
  """Builds the module's test suite, honoring gdata.test_config options."""
  return conf.build_suite([BloggerTest, ContactsTest,
                           VersionTwoClientContactsTest,
                           JsoncRequestTest])
# Run through TextTestRunner so the conf-assembled suite is used.
if __name__ == '__main__':
  unittest.TextTestRunner().run(suite())
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit Tests for Google Analytics API query objects.
AnalyticsClientTest: Tests making live requests to Google Analytics API.
"""
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
import unittest
from gdata.analytics import client
class DataExportQueryTest(unittest.TestCase):
  """Tests making Data Export API Queries."""

  def testAccountFeed(self):
    """Account Feed query URLs must be built correctly."""
    base = 'https://www.google.com/analytics/feeds/accounts/default'
    # No parameters.
    self.assertEqual(str(client.AccountFeedQuery()), base)
    # Parameters supplied to the constructor.
    query = client.AccountFeedQuery({'max-results': 50})
    self.assertEqual(str(query), base + '?max-results=50')
    # Parameters set after construction.
    query = client.AccountFeedQuery()
    query.query['max-results'] = 100
    self.assertEqual(str(query), base + '?max-results=100')

  def testDataFeed(self):
    """Data Feed query URLs must be built and percent-encoded correctly."""
    base = 'https://www.google.com/analytics/feeds/data'
    self.assertEqual(str(client.DataFeedQuery()), base)
    # The ':' in the table id must be escaped as %3A.
    query = client.DataFeedQuery({'ids': 'ga:1234'})
    self.assertEqual(str(query), base + '?ids=ga%3A1234')
    query = client.DataFeedQuery()
    query.query['ids'] = 'ga:1234'
    self.assertEqual(str(query), base + '?ids=ga%3A1234')
class ManagementQueryTest(unittest.TestCase):
  """Tests making Management API queries."""

  def setUp(self):
    # Common prefix for every Management API URL under test.
    self.base_url = 'https://www.google.com/analytics/feeds/datasources/ga'

  def testAccountFeedQuery(self):
    """Tests Account Feed queries."""
    queryTest1 = client.AccountQuery()
    self.assertEquals(str(queryTest1),
        '%s/accounts' % self.base_url)
    queryTest2 = client.AccountQuery({'max-results': 50})
    self.assertEquals(str(queryTest2),
        '%s/accounts?max-results=50' % self.base_url)

  def testWebPropertyFeedQuery(self):
    """Tests Web Property Feed queries."""
    queryTest1 = client.WebPropertyQuery()
    self.assertEquals(str(queryTest1),
        '%s/accounts/~all/webproperties' % self.base_url)
    queryTest2 = client.WebPropertyQuery('123')
    self.assertEquals(str(queryTest2),
        '%s/accounts/123/webproperties' % self.base_url)
    queryTest3 = client.WebPropertyQuery('123', {'max-results': 100})
    self.assertEquals(str(queryTest3),
        '%s/accounts/123/webproperties?max-results=100' % self.base_url)

  def testProfileFeedQuery(self):
    """Tests Profile Feed queries."""
    queryTest1 = client.ProfileQuery()
    self.assertEquals(str(queryTest1),
        '%s/accounts/~all/webproperties/~all/profiles' % self.base_url)
    queryTest2 = client.ProfileQuery('123', 'UA-123-1')
    self.assertEquals(str(queryTest2),
        '%s/accounts/123/webproperties/UA-123-1/profiles' % self.base_url)
    queryTest3 = client.ProfileQuery('123', 'UA-123-1',
        {'max-results': 100})
    self.assertEquals(str(queryTest3),
        '%s/accounts/123/webproperties/UA-123-1/profiles?max-results=100'
        % self.base_url)
    # Attribute-based construction must yield the same URL.
    queryTest4 = client.ProfileQuery()
    queryTest4.acct_id = '123'
    queryTest4.web_prop_id = 'UA-123-1'
    queryTest4.query['max-results'] = 100
    self.assertEquals(str(queryTest4),
        '%s/accounts/123/webproperties/UA-123-1/profiles?max-results=100'
        % self.base_url)

  def testGoalFeedQuery(self):
    """Tests Goal Feed queries."""
    queryTest1 = client.GoalQuery()
    self.assertEquals(str(queryTest1),
        '%s/accounts/~all/webproperties/~all/profiles/~all/goals'
        % self.base_url)
    queryTest2 = client.GoalQuery('123', 'UA-123-1', '555')
    self.assertEquals(str(queryTest2),
        '%s/accounts/123/webproperties/UA-123-1/profiles/555/goals'
        % self.base_url)
    queryTest3 = client.GoalQuery('123', 'UA-123-1', '555',
        {'max-results': 100})
    self.assertEquals(str(queryTest3),
        '%s/accounts/123/webproperties/UA-123-1/profiles/555/goals'
        '?max-results=100' % self.base_url)
    queryTest4 = client.GoalQuery()
    queryTest4.acct_id = '123'
    queryTest4.web_prop_id = 'UA-123-1'
    queryTest4.profile_id = '555'
    queryTest4.query['max-results'] = 100
    # BUG FIX: this assertion previously re-checked str(queryTest3), so the
    # attribute-based queryTest4 construction above was never verified.
    self.assertEquals(str(queryTest4),
        '%s/accounts/123/webproperties/UA-123-1/profiles/555/goals'
        '?max-results=100' % self.base_url)

  def testAdvSegQuery(self):
    """Tests Advanced Segment Feed queries."""
    queryTest1 = client.AdvSegQuery()
    self.assertEquals(str(queryTest1),
        '%s/segments' % self.base_url)
    queryTest2 = client.AdvSegQuery({'max-results': 100})
    self.assertEquals(str(queryTest2),
        '%s/segments?max-results=100' % self.base_url)
def suite():
  """Builds the suite containing all query tests in this file.

  BUG FIX: this module never imports gdata.test_config, so referencing a
  module-level `conf` raised NameError whenever suite() was called. Import
  it locally here (module-level imports are out of this block's scope).
  """
  import gdata.test_config as conf
  return conf.build_suite([DataExportQueryTest,
                           ManagementQueryTest])
# unittest.main() discovers the TestCase classes in this module directly,
# so suite() is not used on this code path.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit Tests for Google Analytics Data Export API and Management APIs.
Although the Data Export API and Management API conceptually operate on
different parts of Google Analytics, the APIs share some code so they
are released in the same module.
AccountFeedTest: All unit tests for AccountFeed class.
DataFeedTest: All unit tests for DataFeed class.
ManagementFeedAccountTest: Unit tests for ManagementFeed class.
ManagementFeedGoalTest: Unit tests for ManagementFeed class.
ManagementFeedAdvSegTest: Unit tests for ManagementFeed class.
"""
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
import unittest
from gdata import test_data
import gdata.analytics.data
import atom.core
import gdata.test_config as conf
class AccountFeedTest(unittest.TestCase):
  """Unit test for all custom elements in the Account Feed."""

  def setUp(self):
    """Parses the canned account-feed XML into an AccountFeed object."""
    self.feed = atom.core.parse(test_data.ANALYTICS_ACCOUNT_FEED,
                                gdata.analytics.data.AccountFeed)

  def testSegment(self):
    """Checks the first advanced segment on the feed."""
    first_segment = self.feed.segment[0]
    self.assertEqual(first_segment.id, 'gaid::-11')
    self.assertEqual(first_segment.name, 'Visits from iPhones')

  def testSegmentDefinition(self):
    """Checks the segment's definition expression."""
    self.assertEqual(self.feed.segment[0].definition.text,
                     'ga:operatingSystem==iPhone')

  def testEntryTableId(self):
    """Checks the table id on the first entry."""
    self.assertEqual(self.feed.entry[0].table_id.text, 'ga:1174')

  def testEntryProperty(self):
    """Checks every property name/value pair on the first entry, in order."""
    expected = [
        ('ga:accountId', '30481'),
        ('ga:accountName', 'Google Store'),
        ('ga:profileId', '1174'),
        ('ga:webPropertyId', 'UA-30481-1'),
        ('ga:currency', 'USD'),
        ('ga:timezone', 'America/Los_Angeles'),
    ]
    properties = self.feed.entry[0].property
    for index, (name, value) in enumerate(expected):
      self.assertEqual(properties[index].name, name)
      self.assertEqual(properties[index].value, value)

  def testEntryGetProperty(self):
    """Checks property lookup by name via GetProperty."""
    entry = self.feed.entry[0]
    for name, value in [
        ('ga:accountId', '30481'),
        ('ga:accountName', 'Google Store'),
        ('ga:profileId', '1174'),
        ('ga:webPropertyId', 'UA-30481-1'),
        ('ga:currency', 'USD'),
        ('ga:timezone', 'America/Los_Angeles')]:
      self.assertEqual(entry.GetProperty(name).value, value)

  def testGoal(self):
    """Checks the attributes of the first goal on the first entry."""
    first_goal = self.feed.entry[0].goal[0]
    self.assertEqual(first_goal.number, '1')
    self.assertEqual(first_goal.name, 'Completing Order')
    self.assertEqual(first_goal.value, '10.0')
    self.assertEqual(first_goal.active, 'true')

  def testDestination(self):
    """Checks the destination element of the first goal."""
    dest = self.feed.entry[0].goal[0].destination
    self.assertEqual(dest.expression, '/purchaseComplete.html')
    self.assertEqual(dest.case_sensitive, 'false')
    self.assertEqual(dest.match_type, 'regex')
    self.assertEqual(dest.step1_required, 'false')

  def testStep(self):
    """Checks the first funnel step of the destination."""
    first_step = self.feed.entry[0].goal[0].destination.step[0]
    self.assertEqual(first_step.number, '1')
    self.assertEqual(first_step.name, 'View Product Categories')
    self.assertEqual(first_step.path, '/Apps|Accessories|Fun|Kid\+s|Office')

  def testEngagemet(self):
    """Checks the engagement element of the second goal."""
    engagement = self.feed.entry[0].goal[1].engagement
    self.assertEqual(engagement.type, 'timeOnSite')
    self.assertEqual(engagement.comparison, '>')
    self.assertEqual(engagement.threshold_value, '300')

  def testCustomVariable(self):
    """Checks the first custom variable on the first entry."""
    variable = self.feed.entry[0].custom_variable[0]
    self.assertEqual(variable.index, '1')
    self.assertEqual(variable.name, 'My Custom Variable')
    self.assertEqual(variable.scope, '3')
class DataFeedTest(unittest.TestCase):
  """Unit test for all custom elements in the Data Feed."""

  def setUp(self):
    """Parses the canned data-feed XML into a DataFeed object."""
    self.feed = atom.core.parse(test_data.ANALYTICS_DATA_FEED,
                                gdata.analytics.data.DataFeed)

  def testDataFeed(self):
    """Checks the feed-level date-range elements."""
    self.assertEqual(self.feed.start_date.text, '2008-10-01')
    self.assertEqual(self.feed.end_date.text, '2008-10-31')

  def testAggregates(self):
    """The aggregates element must be present on the feed."""
    self.assertTrue(self.feed.aggregates is not None)

  def testContainsSampledData(self):
    """Checks the sampled-data flag both as raw text and via the helper."""
    self.assertEqual(self.feed.contains_sampled_data.text, 'true')
    self.assertTrue(self.feed.HasSampledData())

  def _checkAggregateVisits(self, metric):
    """Asserts that *metric* is the expected aggregate ga:visits metric."""
    self.assertEqual(metric.confidence_interval, '0.0')
    self.assertEqual(metric.name, 'ga:visits')
    self.assertEqual(metric.type, 'integer')
    self.assertEqual(metric.value, '136540')

  def testAggregatesElements(self):
    """Checks aggregate metric access by index and by name."""
    self._checkAggregateVisits(self.feed.aggregates.metric[0])
    self._checkAggregateVisits(self.feed.aggregates.GetMetric('ga:visits'))

  def testDataSource(self):
    """The first data source must be present."""
    self.assertTrue(self.feed.data_source[0] is not None)

  def testDataSourceTableId(self):
    """Checks the table id on the data source."""
    self.assertEqual(self.feed.data_source[0].table_id.text, 'ga:1174')

  def testDataSourceTableName(self):
    """Checks the table name on the data source."""
    self.assertEqual(self.feed.data_source[0].table_name.text,
                     'www.googlestore.com')

  def testDataSourceProperty(self):
    """Checks the data source properties by position."""
    expected = [('ga:profileId', '1174'),
                ('ga:webPropertyId', 'UA-30481-1'),
                ('ga:accountName', 'Google Store')]
    properties = self.feed.data_source[0].property
    for index, (name, value) in enumerate(expected):
      self.assertEqual(properties[index].name, name)
      self.assertEqual(properties[index].value, value)

  def testDataSourceGetProperty(self):
    """Checks data-source property lookup by name."""
    source = self.feed.data_source[0]
    self.assertEqual(source.GetProperty('ga:profileId').value, '1174')
    self.assertEqual(source.GetProperty('ga:webPropertyId').value,
                     'UA-30481-1')
    self.assertEqual(source.GetProperty('ga:accountName').value,
                     'Google Store')

  def testSegment(self):
    """Checks the segment applied to the feed."""
    self.assertEqual(self.feed.segment.id, 'gaid::-11')
    self.assertEqual(self.feed.segment.name, 'Visits from iPhones')

  def testSegmentDefinition(self):
    """Checks the segment's definition expression."""
    self.assertEqual(self.feed.segment.definition.text,
                     'ga:operatingSystem==iPhone')

  def testEntryDimension(self):
    """Checks the first dimension on the first entry."""
    dimension = self.feed.entry[0].dimension[0]
    self.assertEqual(dimension.name, 'ga:source')
    self.assertEqual(dimension.value, 'blogger.com')

  def testEntryGetDimension(self):
    """Checks dimension lookup by name, including a miss returning None."""
    dimension = self.feed.entry[0].GetDimension('ga:source')
    self.assertEqual(dimension.name, 'ga:source')
    self.assertEqual(dimension.value, 'blogger.com')
    self.assertEqual(self.feed.entry[0].GetDimension('foo'), None)

  def _checkEntryVisits(self, metric):
    """Asserts that *metric* is the expected entry-level ga:visits metric."""
    self.assertEqual(metric.confidence_interval, '0.0')
    self.assertEqual(metric.name, 'ga:visits')
    self.assertEqual(metric.type, 'integer')
    self.assertEqual(metric.value, '68140')

  def testEntryMetric(self):
    """Checks the first metric on the first entry."""
    self._checkEntryVisits(self.feed.entry[0].metric[0])

  def testEntryGetMetric(self):
    """Checks metric lookup by name, including a miss returning None."""
    self._checkEntryVisits(self.feed.entry[0].GetMetric('ga:visits'))
    self.assertEqual(self.feed.entry[0].GetMetric('foo'), None)

  def testEntryGetObject(self):
    """Checks GetObject, which resolves dimensions and metrics alike."""
    entry = self.feed.entry[0]
    dimension = entry.GetObject('ga:source')
    self.assertEqual(dimension.name, 'ga:source')
    self.assertEqual(dimension.value, 'blogger.com')
    metric = entry.GetObject('ga:visits')
    self.assertEqual(metric.name, 'ga:visits')
    self.assertEqual(metric.value, '68140')
    self.assertEqual(metric.type, 'integer')
    self.assertEqual(metric.confidence_interval, '0.0')
    self.assertEqual(entry.GetObject('foo'), None)
class ManagementFeedProfileTest(unittest.TestCase):
  """Unit test for all property elements in Google Analytics Management Feed.

  Since the Account, Web Property and Profile feed all have the same
  structure and XML elements, this single test case covers all three feeds.
  """

  def setUp(self):
    """Parses the canned profile-feed XML into a ManagementFeed object."""
    self.feed = atom.core.parse(test_data.ANALYTICS_MGMT_PROFILE_FEED,
                                gdata.analytics.data.ManagementFeed)

  def testFeedKindAttribute(self):
    """Checks the kind attribute on the feed."""
    self.assertEqual(self.feed.kind, 'analytics#profiles')

  def testEntryKindAttribute(self):
    """Checks the kind attribute on the first entry."""
    self.assertEqual(self.feed.entry[0].kind, 'analytics#profile')

  def testEntryProperty(self):
    """Checks the first property on the entry by position."""
    first_property = self.feed.entry[0].property[0]
    self.assertEqual(first_property.name, 'ga:accountId')
    self.assertEqual(first_property.value, '30481')

  def testEntryGetProperty(self):
    """Checks property lookup by name on the entry."""
    self.assertEqual(
        self.feed.entry[0].GetProperty('ga:accountId').value, '30481')

  def testGetParentLinks(self):
    """Checks GetParentLinks and the single parent link's attributes."""
    parent_links = self.feed.entry[0].GetParentLinks()
    self.assertEqual(len(parent_links), 1)
    link = parent_links[0]
    self.assertEqual(link.rel, 'http://schemas.google.com/ga/2009#parent')
    self.assertEqual(link.type, 'application/atom+xml')
    self.assertEqual(link.href,
        'https://www.google.com/analytics/feeds/datasources'
        '/ga/accounts/30481/webproperties/UA-30481-1')
    self.assertEqual(link.target_kind, 'analytics#webproperty')

  def testGetChildLinks(self):
    """Checks GetChildLinks returns exactly one well-formed child link."""
    child_links = self.feed.entry[0].GetChildLinks()
    self.assertEqual(len(child_links), 1)
    self.ChildLinkTestHelper(child_links[0])

  def testGetChildLink(self):
    """Checks child-link lookup by target kind, including a miss."""
    self.ChildLinkTestHelper(
        self.feed.entry[0].GetChildLink('analytics#goals'))
    self.assertEqual(self.feed.entry[0].GetChildLink('foo_bar'), None)

  def ChildLinkTestHelper(self, child_link):
    """Common assertions for the entry's goals child link."""
    self.assertEqual(child_link.rel,
                     'http://schemas.google.com/ga/2009#child')
    self.assertEqual(child_link.type, 'application/atom+xml')
    self.assertEqual(child_link.href,
        'https://www.google.com/analytics/feeds/datasources'
        '/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals')
    self.assertEqual(child_link.target_kind, 'analytics#goals')
class ManagementFeedGoalTest(unittest.TestCase):
  """Unit test for all Goal elements in Management Feed."""

  def setUp(self):
    """Parses the canned goal-feed XML into a ManagementFeed object."""
    self.feed = atom.core.parse(test_data.ANALYTICS_MGMT_GOAL_FEED,
                                gdata.analytics.data.ManagementFeed)

  def testEntryGoal(self):
    """Checks the goal attributes on the first entry."""
    goal = self.feed.entry[0].goal
    self.assertEqual(goal.number, '1')
    self.assertEqual(goal.name, 'Completing Order')
    self.assertEqual(goal.value, '10.0')
    self.assertEqual(goal.active, 'true')

  def testGoalDestination(self):
    """Checks the destination element of the first entry's goal."""
    dest = self.feed.entry[0].goal.destination
    self.assertEqual(dest.expression, '/purchaseComplete.html')
    self.assertEqual(dest.case_sensitive, 'false')
    self.assertEqual(dest.match_type, 'regex')
    self.assertEqual(dest.step1_required, 'false')

  def testGoalDestinationStep(self):
    """Checks the first funnel step of the destination."""
    step = self.feed.entry[0].goal.destination.step[0]
    self.assertEqual(step.number, '1')
    self.assertEqual(step.name, 'View Product Categories')
    self.assertEqual(step.path, '/Apps|Accessories')

  def testGoalEngagemet(self):
    """Checks the engagement element on the second entry's goal."""
    engagement = self.feed.entry[1].goal.engagement
    self.assertEqual(engagement.type, 'timeOnSite')
    self.assertEqual(engagement.comparison, '>')
    self.assertEqual(engagement.threshold_value, '300')
class ManagementFeedAdvSegTest(unittest.TestCase):
  """Unit test for all Advanced Segment elements in Management Feed."""

  def setUp(self):
    """Parses the canned advanced-segment XML into a ManagementFeed."""
    self.feed = atom.core.parse(test_data.ANALYTICS_MGMT_ADV_SEGMENT_FEED,
                                gdata.analytics.data.ManagementFeed)

  def testEntrySegment(self):
    """Checks the segment on the first entry."""
    segment = self.feed.entry[0].segment
    self.assertEqual(segment.id, 'gaid::0')
    self.assertEqual(segment.name, 'Sources Form Google')

  def testSegmentDefinition(self):
    """Checks the segment's definition expression."""
    self.assertEqual(self.feed.entry[0].segment.definition.text,
                     'ga:source=~^\Qgoogle\E')
def suite():
  """Test Account Feed, Data Feed and Management API Feeds."""
  test_cases = [AccountFeedTest,
                DataFeedTest,
                ManagementFeedProfileTest,
                ManagementFeedGoalTest,
                ManagementFeedAdvSegTest]
  return conf.build_suite(test_cases)
# unittest.main() discovers the TestCase classes in this module directly,
# so suite() is not used on this code path.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional Tests for Google Analytics Account Feed and Data Feed.
AnalyticsClientTest: Tests making live requests to Google Analytics API.
"""
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
import unittest
import gdata.client
import gdata.data
import gdata.gauth
import gdata.analytics.client
import gdata.test_config as conf
# Register the Google Analytics table-id option so live tests can read it
# from the test configuration (see conf.options.get_value('table_id') below).
conf.options.register_option(conf.GA_TABLE_ID)
class AnalyticsClientTest(unittest.TestCase):
  """Tests creating an Account Feed query and making a request to the
  Google Analytics Account Feed."""

  def setUp(self):
    """Creates and configures an AnalyticsClient when running live."""
    self.client = None
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.analytics.client.AnalyticsClient()
      self.client.http_client.debug = True
      conf.configure_client(
          self.client, 'AnalyticsClientTest', self.client.auth_service)

  def testAccountFeed(self):
    """Tests if the Account Feed exists."""
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_cache(self.client, 'testAccountFeed')
    account_query = gdata.analytics.client.AccountFeedQuery(
        {'max-results': '1'})
    feed = self.client.GetAccountFeed(account_query)
    self.assertTrue(feed.entry is not None)
    entry = feed.entry[0]
    # Every standard account property must be resolvable by name.
    for name in ('ga:accountId', 'ga:accountName', 'ga:profileId',
                 'ga:webPropertyId', 'ga:currency', 'ga:timezone'):
      self.assertEqual(entry.GetProperty(name).name, name)

  def testDataFeed(self):
    """Tests if the Data Feed exists."""
    start_date = '2008-10-01'
    end_date = '2008-10-02'
    metrics = 'ga:visits'
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_cache(self.client, 'testDataFeed')
    data_query = gdata.analytics.client.DataFeedQuery({
        'ids': conf.options.get_value('table_id'),
        'start-date': start_date,
        'end-date': end_date,
        'metrics': metrics,
        'max-results': '1'})
    feed = self.client.GetDataFeed(data_query)
    self.assertTrue(feed.entry is not None)
    self.assertEqual(feed.start_date.text, start_date)
    self.assertEqual(feed.end_date.text, end_date)
    self.assertEqual(feed.entry[0].GetMetric(metrics).name, metrics)

  def testManagementFeed(self):
    """Tests of the Management Feed exists."""
    if not conf.options.get_value('runlive') == 'true':
      return
    conf.configure_cache(self.client, 'testManagementFeed')
    feed = self.client.GetManagementFeed(
        gdata.analytics.client.AccountQuery())
    self.assertTrue(feed.entry is not None)

  def tearDown(self):
    """Closes client connection."""
    conf.close_client(self.client)
def suite():
  """Builds the suite containing the live Analytics client tests."""
  cases = [AnalyticsClientTest]
  return conf.build_suite(cases)
# Run the explicitly-built suite so conf.build_suite's filtering applies.
if __name__ == '__main__':
  unittest.TextTestRunner().run(suite())
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Yu-Jie Lin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'livibetter (Yu-Jie Lin)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata
from gdata import test_data
import gdata.webmastertools as webmastertools
class IndexedTest(unittest.TestCase):
  """Round-trip serialization test for the Indexed element."""

  def setUp(self):
    self.indexed = webmastertools.Indexed()

  def testToAndFromString(self):
    """Serializing and re-parsing must preserve the text value."""
    self.indexed.text = 'true'
    self.assertEqual(self.indexed.text, 'true')
    parsed = webmastertools.IndexedFromString(self.indexed.ToString())
    self.assertEqual(self.indexed.text, parsed.text)
class CrawledTest(unittest.TestCase):
  """Round-trip serialization test for the Crawled element."""

  def setUp(self):
    self.crawled = webmastertools.Crawled()

  def testToAndFromString(self):
    """Serializing and re-parsing must preserve the text value."""
    self.crawled.text = 'true'
    self.assertEqual(self.crawled.text, 'true')
    parsed = webmastertools.CrawledFromString(self.crawled.ToString())
    self.assertEqual(self.crawled.text, parsed.text)
class GeoLocationTest(unittest.TestCase):
  """Round-trip serialization test for the GeoLocation element."""

  def setUp(self):
    self.geolocation = webmastertools.GeoLocation()

  def testToAndFromString(self):
    """Serializing and re-parsing must preserve the text value."""
    self.geolocation.text = 'US'
    self.assertEqual(self.geolocation.text, 'US')
    parsed = webmastertools.GeoLocationFromString(
        self.geolocation.ToString())
    self.assertEqual(self.geolocation.text, parsed.text)
class PreferredDomainTest(unittest.TestCase):
  """Round-trip serialization test for the PreferredDomain element."""

  def setUp(self):
    self.preferred_domain = webmastertools.PreferredDomain()

  def testToAndFromString(self):
    """Serializing and re-parsing must preserve the text value."""
    self.preferred_domain.text = 'none'
    self.assertEqual(self.preferred_domain.text, 'none')
    parsed = webmastertools.PreferredDomainFromString(
        self.preferred_domain.ToString())
    self.assertEqual(self.preferred_domain.text, parsed.text)
class CrawlRateTest(unittest.TestCase):
  """Round-trip serialization test for the CrawlRate element."""

  def setUp(self):
    self.crawl_rate = webmastertools.CrawlRate()

  def testToAndFromString(self):
    """Serializing and re-parsing must preserve the text value."""
    self.crawl_rate.text = 'normal'
    self.assertEqual(self.crawl_rate.text, 'normal')
    parsed = webmastertools.CrawlRateFromString(
        self.crawl_rate.ToString())
    self.assertEqual(self.crawl_rate.text, parsed.text)
class EnhancedImageSearchTest(unittest.TestCase):
  """Round-trip serialization test for the EnhancedImageSearch element."""

  def setUp(self):
    self.enhanced_image_search = webmastertools.EnhancedImageSearch()

  def testToAndFromString(self):
    """Serializing and re-parsing must preserve the text value."""
    self.enhanced_image_search.text = 'true'
    self.assertEqual(self.enhanced_image_search.text, 'true')
    parsed = webmastertools.EnhancedImageSearchFromString(
        self.enhanced_image_search.ToString())
    self.assertEqual(self.enhanced_image_search.text, parsed.text)
class VerifiedTest(unittest.TestCase):
  """Round-trip serialization test for the Verified element."""

  def setUp(self):
    self.verified = webmastertools.Verified()

  def testToAndFromString(self):
    """Serializing and re-parsing must preserve the text value."""
    self.verified.text = 'true'
    self.assertEqual(self.verified.text, 'true')
    parsed = webmastertools.VerifiedFromString(self.verified.ToString())
    self.assertEqual(self.verified.text, parsed.text)
class VerificationMethodMetaTest(unittest.TestCase):
  """Round-trip test for the VerificationMethodMeta element."""

  def setUp(self):
    self.meta = webmastertools.VerificationMethodMeta()

  def testToAndFromString(self):
    """Both the name and content attributes must survive a round trip."""
    self.meta.name = 'verify-vf1'
    self.meta.content = 'a2Ai'
    self.assertEqual(self.meta.name, 'verify-vf1')
    self.assertEqual(self.meta.content, 'a2Ai')
    parsed = webmastertools.VerificationMethodMetaFromString(
        self.meta.ToString())
    self.assertEqual(self.meta.name, parsed.name)
    self.assertEqual(self.meta.content, parsed.content)
class VerificationMethodTest(unittest.TestCase):
  """Tests for the VerificationMethod element."""

  def setUp(self):
    pass

  def testMetaTagToAndFromString(self):
    """A metatag method with a meta child must survive a round trip."""
    self.method = webmastertools.VerificationMethod()
    self.method.type = 'metatag'
    self.method.in_use = 'false'
    self.assertEqual(self.method.type, 'metatag')
    self.assertEqual(self.method.in_use, 'false')
    self.method.meta = webmastertools.VerificationMethodMeta(
        name='verify-vf1', content='a2Ai')
    self.assertEqual(self.method.meta.name, 'verify-vf1')
    self.assertEqual(self.method.meta.content, 'a2Ai')
    parsed = webmastertools.VerificationMethodFromString(
        self.method.ToString())
    self.assertEqual(self.method.type, parsed.type)
    self.assertEqual(self.method.in_use, parsed.in_use)
    self.assertEqual(self.method.meta.name, parsed.meta.name)
    self.assertEqual(self.method.meta.content, parsed.meta.content)
    # The constructor must honor a keyword type and default it to None.
    method = webmastertools.VerificationMethod(type='xyz')
    self.assertEqual(method.type, 'xyz')
    method = webmastertools.VerificationMethod()
    self.assertTrue(method.type is None)

  def testHtmlPageToAndFromString(self):
    """An htmlpage method carries its file name as text, with no meta."""
    self.method = webmastertools.VerificationMethod()
    self.method.type = 'htmlpage'
    self.method.in_use = 'false'
    self.method.text = '456456-google.html'
    self.assertEqual(self.method.type, 'htmlpage')
    self.assertEqual(self.method.in_use, 'false')
    self.assertEqual(self.method.text, '456456-google.html')
    self.assertTrue(self.method.meta is None)
    parsed = webmastertools.VerificationMethodFromString(
        self.method.ToString())
    self.assertEqual(self.method.type, parsed.type)
    self.assertEqual(self.method.in_use, parsed.in_use)
    self.assertEqual(self.method.text, parsed.text)
    self.assertTrue(self.method.meta is None)

  def testConvertActualData(self):
    """Parses the canned sites feed and checks both verification methods."""
    feed = webmastertools.SitesFeedFromString(test_data.SITES_FEED)
    self.assertEqual(len(feed.entry[0].verification_method), 2)
    seen = 0  # Bitmask: 1 == metatag seen, 2 == htmlpage seen.
    for method in feed.entry[0].verification_method:
      self.assertTrue(isinstance(method, webmastertools.VerificationMethod))
      if method.type == 'metatag':
        self.assertEqual(method.in_use, 'false')
        self.assertTrue(method.text is None)
        self.assertEqual(method.meta.name, 'verify-v1')
        self.assertEqual(method.meta.content, 'a2Ai')
        seen |= 1
      elif method.type == 'htmlpage':
        self.assertEqual(method.in_use, 'false')
        self.assertEqual(method.text, '456456-google.html')
        seen |= 2
      else:
        self.fail('Wrong Verification Method: %s' % method.type)
    self.assertEqual(seen, 2 ** 2 - 1,
        'Should only have two Verification Methods, metatag and htmlpage')
class MarkupLanguageTest(unittest.TestCase):
  """Round-trip serialization test for the MarkupLanguage element."""

  def setUp(self):
    self.markup_language = webmastertools.MarkupLanguage()

  def testToAndFromString(self):
    """Serializing and re-parsing must preserve the text value."""
    self.markup_language.text = 'HTML'
    self.assertEqual(self.markup_language.text, 'HTML')
    parsed = webmastertools.MarkupLanguageFromString(
        self.markup_language.ToString())
    self.assertEqual(self.markup_language.text, parsed.text)
class SitemapMobileTest(unittest.TestCase):
  """Tests for the SitemapMobile element."""

  def setUp(self):
    self.sitemap_mobile = webmastertools.SitemapMobile()

  def testToAndFromString(self):
    """Appended markup languages must survive a round trip."""
    self.sitemap_mobile.markup_language.append(
        webmastertools.MarkupLanguage(text='HTML'))
    self.assertTrue(self.sitemap_mobile.text is None)
    self.assertEqual(self.sitemap_mobile.markup_language[0].text, 'HTML')
    parsed = webmastertools.SitemapMobileFromString(
        self.sitemap_mobile.ToString())
    self.assertTrue(parsed.text is None)
    self.assertEqual(self.sitemap_mobile.markup_language[0].text,
                     parsed.markup_language[0].text)

  def testConvertActualData(self):
    """Parses the canned sitemaps feed and checks the markup languages."""
    feed = webmastertools.SitemapsFeedFromString(test_data.SITEMAPS_FEED)
    self.assertEqual(feed.sitemap_mobile.text.strip(), '')
    self.assertEqual(len(feed.sitemap_mobile.markup_language), 2)
    seen = 0  # Bitmask: 1 == HTML seen, 2 == WAP seen.
    for markup_language in feed.sitemap_mobile.markup_language:
      self.assertTrue(
          isinstance(markup_language, webmastertools.MarkupLanguage))
      if markup_language.text == "HTML":
        seen |= 1
      elif markup_language.text == "WAP":
        seen |= 2
      else:
        self.fail('Unexpected markup language: %s' % markup_language.text)
    self.assertEqual(seen, 2 ** 2 - 1,
                     "Something is wrong with markup language")
class SitemapMobileMarkupLanguageTest(unittest.TestCase):
  """Round-trip test for the SitemapMobileMarkupLanguage element."""

  def setUp(self):
    self.sitemap_mobile_markup_language = (
        webmastertools.SitemapMobileMarkupLanguage())

  def testToAndFromString(self):
    """Serializing and re-parsing must preserve the text value."""
    self.sitemap_mobile_markup_language.text = 'HTML'
    self.assertEqual(self.sitemap_mobile_markup_language.text, 'HTML')
    parsed = webmastertools.SitemapMobileMarkupLanguageFromString(
        self.sitemap_mobile_markup_language.ToString())
    self.assertEqual(self.sitemap_mobile_markup_language.text, parsed.text)
class PublicationLabelTest(unittest.TestCase):
  """Round-trip serialization test for the PublicationLabel element."""

  def setUp(self):
    self.publication_label = webmastertools.PublicationLabel()

  def testToAndFromString(self):
    """Serializing and re-parsing must preserve the text value."""
    self.publication_label.text = 'Value1'
    self.assertEqual(self.publication_label.text, 'Value1')
    parsed = webmastertools.PublicationLabelFromString(
        self.publication_label.ToString())
    self.assertEqual(self.publication_label.text, parsed.text)
class SitemapNewsTest(unittest.TestCase):
  """Tests for the SitemapNews element."""

  def setUp(self):
    self.sitemap_news = webmastertools.SitemapNews()

  def testToAndFromString(self):
    """Appended publication labels must survive a round trip."""
    self.sitemap_news.publication_label.append(webmastertools.PublicationLabel(
        text = 'Value1'))
    self.assert_(self.sitemap_news.text is None)
    self.assert_(self.sitemap_news.publication_label[0].text == 'Value1')
    new_sitemap_news = webmastertools.SitemapNewsFromString(
        self.sitemap_news.ToString())
    self.assert_(new_sitemap_news.text is None)
    self.assert_(self.sitemap_news.publication_label[0].text ==
                 new_sitemap_news.publication_label[0].text)

  def testConvertActualData(self):
    """Parses the canned sitemaps feed and checks all publication labels."""
    feed = webmastertools.SitemapsFeedFromString(test_data.SITEMAPS_FEED)
    self.assert_(len(feed.sitemap_news.publication_label) == 3)
    check = 0
    for publication_label in feed.sitemap_news.publication_label:
      if publication_label.text == "Value1":
        check = check | 1
      elif publication_label.text == "Value2":
        check = check | 2
      elif publication_label.text == "Value3":
        check = check | 4
      else:
        # BUG FIX: this previously referenced the undefined name
        # 'markup_language' (copied from SitemapMobileTest), so an
        # unexpected label raised NameError instead of a test failure.
        self.fail('Unexpected publication label: %s' % publication_label.text)
    self.assert_(check == 2 ** 3 - 1,
                 'Something is wrong with publication label')
class SitemapNewsPublicationLabelTest(unittest.TestCase):
  """Round-trip serialization tests for SitemapNewsPublicationLabel."""

  def setUp(self):
    self.publication_label = webmastertools.SitemapNewsPublicationLabel()

  def testToAndFromString(self):
    # Serialize and re-parse; the label text must be preserved.
    self.publication_label.text = 'LabelValue'
    self.assert_(self.publication_label.text == 'LabelValue')
    reparsed = webmastertools.SitemapNewsPublicationLabelFromString(
        self.publication_label.ToString())
    self.assert_(self.publication_label.text == reparsed.text)
class SitemapLastDownloadedTest(unittest.TestCase):
  """Round-trip serialization tests for SitemapLastDownloaded."""

  TIMESTAMP = '2006-11-18T19:27:32.543Z'

  def setUp(self):
    self.last_downloaded = webmastertools.SitemapLastDownloaded()

  def testToAndFromString(self):
    # Serialize and re-parse; the timestamp text must be preserved.
    self.last_downloaded.text = self.TIMESTAMP
    self.assert_(self.last_downloaded.text == self.TIMESTAMP)
    reparsed = webmastertools.SitemapLastDownloadedFromString(
        self.last_downloaded.ToString())
    self.assert_(self.last_downloaded.text == reparsed.text)
class SitemapTypeTest(unittest.TestCase):
  """Round-trip serialization tests for SitemapType."""

  def setUp(self):
    self.sitemap_type = webmastertools.SitemapType()

  def testToAndFromString(self):
    # Serialize and re-parse; the type string must be preserved.
    self.sitemap_type.text = 'WEB'
    self.assert_(self.sitemap_type.text == 'WEB')
    reparsed = webmastertools.SitemapTypeFromString(
        self.sitemap_type.ToString())
    self.assert_(self.sitemap_type.text == reparsed.text)
class SitemapStatusTest(unittest.TestCase):
  """Round-trip serialization tests for SitemapStatus."""

  def setUp(self):
    self.sitemap_status = webmastertools.SitemapStatus()

  def testToAndFromString(self):
    # Serialize and re-parse; the status string must be preserved.
    self.sitemap_status.text = 'Pending'
    self.assert_(self.sitemap_status.text == 'Pending')
    reparsed = webmastertools.SitemapStatusFromString(
        self.sitemap_status.ToString())
    self.assert_(self.sitemap_status.text == reparsed.text)
class SitemapUrlCountTest(unittest.TestCase):
  """Round-trip serialization tests for SitemapUrlCount."""

  def setUp(self):
    self.url_count = webmastertools.SitemapUrlCount()

  def testToAndFromString(self):
    # Serialize and re-parse; the count text must be preserved.
    self.url_count.text = '0'
    self.assert_(self.url_count.text == '0')
    reparsed = webmastertools.SitemapUrlCountFromString(
        self.url_count.ToString())
    self.assert_(self.url_count.text == reparsed.text)
class SitesEntryTest(unittest.TestCase):
  """Tests construction, round-trip serialization, and parsing of
  SitesEntry."""

  # Attribute name -> expected .text value shared by all three checks.
  EXPECTED = {
      'indexed': 'true',
      'crawled': '2008-09-14T08:59:28.000',
      'geolocation': 'US',
      'preferred_domain': 'none',
      'crawl_rate': 'normal',
      'enhanced_image_search': 'true',
      'verified': 'false',
  }

  def _check_entry(self, entry):
    # Verify every expected attribute value on the given entry.
    for attribute, value in self.EXPECTED.items():
      self.assert_(getattr(entry, attribute).text == value)

  def testToAndFromString(self):
    entry = webmastertools.SitesEntry(
        indexed=webmastertools.Indexed(text='true'),
        crawled=webmastertools.Crawled(text='2008-09-14T08:59:28.000'),
        geolocation=webmastertools.GeoLocation(text='US'),
        preferred_domain=webmastertools.PreferredDomain(text='none'),
        crawl_rate=webmastertools.CrawlRate(text='normal'),
        enhanced_image_search=webmastertools.EnhancedImageSearch(text='true'),
        verified=webmastertools.Verified(text='false'),
    )
    self._check_entry(entry)
    # The entry must survive a serialize/parse round trip unchanged.
    self._check_entry(webmastertools.SitesEntryFromString(entry.ToString()))

  def testConvertActualData(self):
    feed = webmastertools.SitesFeedFromString(test_data.SITES_FEED)
    self.assert_(len(feed.entry) == 1)
    entry = feed.entry[0]
    self.assert_(isinstance(entry, webmastertools.SitesEntry))
    self._check_entry(entry)
class SitesFeedTest(unittest.TestCase):
  """Tests parsing and round-trip serialization of the sites feed."""

  def setUp(self):
    self.feed = gdata.webmastertools.SitesFeedFromString(test_data.SITES_FEED)

  def testToAndFromString(self):
    # Both the parsed feed and its serialize/re-parse round trip must
    # contain exactly one SitesEntry.
    for feed in (self.feed,
                 webmastertools.SitesFeedFromString(self.feed.ToString())):
      self.assert_(len(feed.entry) == 1)
      for entry in feed.entry:
        self.assert_(isinstance(entry, webmastertools.SitesEntry))
class SitemapsEntryTest(unittest.TestCase):
  """Tests construction, round-trip serialization, and parsing of
  SitemapsEntry."""

  def testRegularToAndFromString(self):
    entry = webmastertools.SitemapsEntry(
        sitemap_type=webmastertools.SitemapType(text='WEB'),
        sitemap_status=webmastertools.SitemapStatus(text='Pending'),
        sitemap_last_downloaded=webmastertools.SitemapLastDownloaded(
            text='2006-11-18T19:27:32.543Z'),
        sitemap_url_count=webmastertools.SitemapUrlCount(text='102'),
    )
    self.assert_(entry.sitemap_type.text == 'WEB')
    self.assert_(entry.sitemap_status.text == 'Pending')
    self.assert_(entry.sitemap_last_downloaded.text ==
        '2006-11-18T19:27:32.543Z')
    self.assert_(entry.sitemap_url_count.text == '102')
    # The entry must survive a serialize/parse round trip unchanged.
    new_entry = webmastertools.SitemapsEntryFromString(entry.ToString())
    self.assert_(new_entry.sitemap_type.text == 'WEB')
    self.assert_(new_entry.sitemap_status.text == 'Pending')
    self.assert_(new_entry.sitemap_last_downloaded.text ==
        '2006-11-18T19:27:32.543Z')
    self.assert_(new_entry.sitemap_url_count.text == '102')

  def testConvertActualData(self):
    feed = gdata.webmastertools.SitemapsFeedFromString(test_data.SITEMAPS_FEED)
    self.assert_(len(feed.entry) == 3)
    for entry in feed.entry:
      # Bug fix: these checks previously passed the expected class as the
      # second argument to assert_(), which is the failure *message* — so
      # they only verified truthiness. They are now real isinstance checks,
      # matching the style used elsewhere in this module.
      self.assert_(isinstance(entry, webmastertools.SitemapsEntry))
      self.assert_(isinstance(entry.sitemap_status,
                              webmastertools.SitemapStatus))
      self.assert_(isinstance(entry.sitemap_last_downloaded,
                              webmastertools.SitemapLastDownloaded))
      self.assert_(isinstance(entry.sitemap_url_count,
                              webmastertools.SitemapUrlCount))
      self.assert_(entry.sitemap_status.text == 'StatusValue')
      self.assert_(entry.sitemap_last_downloaded.text ==
          '2006-11-18T19:27:32.543Z')
      self.assert_(entry.sitemap_url_count.text == '102')
      # Each of the three entries carries exactly one type-specific child.
      if entry.id.text == 'http://www.example.com/sitemap-index.xml':
        self.assert_(isinstance(entry.sitemap_type,
                                webmastertools.SitemapType))
        self.assert_(entry.sitemap_type.text == 'WEB')
        self.assert_(entry.sitemap_mobile_markup_language is None)
        self.assert_(entry.sitemap_news_publication_label is None)
      elif entry.id.text == 'http://www.example.com/mobile/sitemap-index.xml':
        self.assert_(isinstance(entry.sitemap_mobile_markup_language,
                                webmastertools.SitemapMobileMarkupLanguage))
        self.assert_(entry.sitemap_mobile_markup_language.text == 'HTML')
        self.assert_(entry.sitemap_type is None)
        self.assert_(entry.sitemap_news_publication_label is None)
      elif entry.id.text == 'http://www.example.com/news/sitemap-index.xml':
        self.assert_(isinstance(entry.sitemap_news_publication_label,
                                webmastertools.SitemapNewsPublicationLabel))
        self.assert_(entry.sitemap_news_publication_label.text == 'LabelValue')
        self.assert_(entry.sitemap_type is None)
        self.assert_(entry.sitemap_mobile_markup_language is None)
class SitemapsFeedTest(unittest.TestCase):
  """Tests parsing and round-trip serialization of the sitemaps feed."""

  def setUp(self):
    self.feed = gdata.webmastertools.SitemapsFeedFromString(
        test_data.SITEMAPS_FEED)

  def testToAndFromString(self):
    # Both the parsed feed and its serialize/re-parse round trip must
    # contain three SitemapsEntry elements.
    for feed in (self.feed,
                 webmastertools.SitemapsFeedFromString(self.feed.ToString())):
      self.assert_(len(feed.entry) == 3)
      for entry in feed.entry:
        self.assert_(isinstance(entry, webmastertools.SitemapsEntry))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
__author__ = "James Sams <sams.james@gmail.com>"
import unittest
from gdata import test_data
import gdata.books
import atom
class BookEntryTest(unittest.TestCase):
    """Tests parsing of Book Search volume entries and feeds into
    gdata.books types."""

    def testBookEntryFromString(self):
        # Parse a canned volume entry and verify each mapped Dublin Core
        # and Book Search field.
        entry = gdata.books.Book.FromString(test_data.BOOK_ENTRY)
        self.assert_(isinstance(entry, gdata.books.Book))
        self.assertEquals([x.text for x in entry.creator], ['John Rawls'])
        self.assertEquals(entry.date.text, '1999')
        self.assertEquals(entry.format.text, '538 pages')
        self.assertEquals([x.text for x in entry.identifier],
            ['b7GZr5Btp30C', 'ISBN:0198250541', 'ISBN:9780198250548'])
        self.assertEquals([x.text for x in entry.publisher],
            ['Oxford University Press'])
        self.assertEquals(entry.subject, None)
        self.assertEquals([x.text for x in entry.dc_title],
            ['A theory of justice'])
        self.assertEquals(entry.viewability.value,
            'http://schemas.google.com/books/2008#view_partial')
        self.assertEquals(entry.embeddability.value,
            'http://schemas.google.com/books/2008#embeddable')
        self.assertEquals(entry.review, None)
        # The aggregate rating exposes min/max/average but no single value.
        self.assertEquals([getattr(entry.rating, x) for x in
            ("min", "max", "average", "value")], ['1', '5', '4.00', None])
        self.assertEquals(entry.GetThumbnailLink().href,
            'http://bks0.books.google.com/books?id=b7GZr5Btp30C&printsec=frontcover&img=1&zoom=5&sig=ACfU3U121bWZsbjBfVwVRSK2o982jJTd1w&source=gbs_gdata')
        self.assertEquals(entry.GetInfoLink().href,
            'http://books.google.com/books?id=b7GZr5Btp30C&ie=ISO-8859-1&source=gbs_gdata')
        self.assertEquals(entry.GetPreviewLink(), None)
        self.assertEquals(entry.GetAnnotationLink().href,
            'http://www.google.com/books/feeds/users/me/volumes')
        self.assertEquals(entry.get_google_id(), 'b7GZr5Btp30C')

    def testBookFeedFromString(self):
        # The canned feed contains exactly one Book entry.
        feed = gdata.books.BookFeed.FromString(test_data.BOOK_FEED)
        self.assert_(isinstance(feed, gdata.books.BookFeed))
        self.assertEquals( len(feed.entry), 1)
        self.assert_(isinstance(feed.entry[0], gdata.books.Book))

    def testBookEntryToDict(self):
        # Multiple dc:title values are joined with spaces in the dict form.
        book = gdata.books.Book()
        book.dc_title.append(gdata.books.Title(text='a'))
        book.dc_title.append(gdata.books.Title(text='b'))
        book.dc_title.append(gdata.books.Title(text='c'))
        self.assertEqual(book.to_dict()['title'], 'a b c')
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import gdata.spreadsheets.data
import gdata.test_config as conf
import atom.core
SPREADSHEET = """<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='"BxAUSQUJRCp7ImBq"'>
<id>http://spreadsheets.google.com/feeds/spreadsheets/private/full/key</id>
<updated>2006-11-17T18:24:18.231Z</updated>
<title type="text">Groceries R Us</title>
<content type="text">Groceries R Us</content>
<link rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/spreadsheets/private/full/key"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
</entry>"""
WORKSHEETS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='W/"D0cERnk-eip7ImA9WBBXGEg."'>
<id>http://spreadsheets.google.com/feeds/worksheets/key/private/full</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Groceries R Us</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<link
rel="http://schemas.google.com/g/2005#post" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/key/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1</openSearch:itemsPerPage>
<entry gd:etag='"YDwqeyI."'>
<id>http://spreadsheets.google.com/feeds/worksheets/0/private/full/1</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Sheet1</title>
<content type="text">Sheet1</content>
<link rel="http://schemas.google.com/spreadsheets/2006#listfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/0/1/private/full"/>
<link rel="http://schemas.google.com/spreadsheets/2006#cellsfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/0/1/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/0/private/full/1"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/.../0/.../1/version"/>
<gs:rowCount>100</gs:rowCount>
<gs:colCount>20</gs:colCount>
</entry>
</feed>"""
NEW_WORKSHEET = """<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<title>Expenses</title>
<gs:rowCount>50</gs:rowCount>
<gs:colCount>10</gs:colCount>
</entry>"""
EDIT_WORKSHEET = """<entry>
<id>
http://spreadsheets.google.com/feeds/worksheets/k/private/full/w
</id>
<updated>2007-07-30T18:51:30.666Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#worksheet"/>
<title type="text">Income</title>
<content type="text">Expenses</content>
<link rel="http://schemas.google.com/spreadsheets/2006#listfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full"/>
<link rel="http://schemas.google.com/spreadsheets/2006#cellsfeed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/cells/k/w/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/worksheets/k/private/full/w"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/.../k/private/full/w/v"/>
<gs:rowCount>45</gs:rowCount>
<gs:colCount>15</gs:colCount>
</entry>"""
NEW_TABLE = """<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<title type='text'>Table 1</title>
<summary type='text'>This is a list of all who have registered to vote and
whether or not they qualify to vote.</summary>
<gs:worksheet name='Sheet1' />
<gs:header row='1' />
<gs:data numRows='0' startRow='2'>
<gs:column index='B' name='Birthday' />
<gs:column index='C' name='Age' />
<gs:column index='A' name='Name' />
<gs:column index='D' name='CanVote' />
</gs:data>
</entry>"""
TABLES_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='W/"DEQHQn84fCt7ImA9WxJTGEU."'>
<id>
http://spreadsheets.google.com/feeds/key/tables</id>
<updated>2009-04-28T02:38:53.134Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#table' />
<title>Sample table and record feed</title>
<link rel='alternate' type='text/html'
href='http://spreadsheets.google.com/ccc?key=key' />
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables' />
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables' />
<author>
<name>Liz</name>
<email>liz@gmail.com</email>
</author>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry gd:etag='"HBcUVgtWASt7ImBq"'>
<id>
http://spreadsheets.google.com/feeds/key/tables/0</id>
<updated>2009-04-28T01:20:32.707Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">
2009-04-28T01:20:32.707Z</app:edited>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#table' />
<title>Table 1</title>
<summary>This is a list of all who have registered to vote and
whether or not they qualify to vote.</summary>
<content type='application/atom+xml;type=feed'
src='http://spreadsheets.google.com/feeds/key/records/0' />
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables/0' />
<link rel='edit' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables/0' />
<gs:worksheet name='Sheet1' />
<gs:header row='1' />
<gs:data insertionMode='overwrite' numRows='2' startRow='2'>
<gs:column index='B' name='Birthday' />
<gs:column index='C' name='Age' />
<gs:column index='A' name='Name' />
<gs:column index='D' name='CanVote' />
</gs:data>
</entry>
<entry gd:etag='"HBcUVgdCGyt7ImBq"'>
<id>
http://spreadsheets.google.com/feeds/key/tables/1</id>
<updated>2009-04-28T01:20:38.313Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">
2009-04-28T01:20:38.313Z</app:edited>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#table' />
<title>Table 2</title>
<summary>List of detailed information about each voter.</summary>
<content type='application/atom+xml;type=feed'
src='http://spreadsheets.google.com/feeds/key/records/1' />
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables/1' />
<link rel='edit' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/tables/1' />
<gs:worksheet name='Sheet1' />
<gs:header row='30' />
<gs:data insertionMode='overwrite' numRows='10' startRow='34'>
<gs:column index='C' name='Last' />
<gs:column index='B' name='First' />
<gs:column index='D' name='DOB' />
<gs:column index='E' name='Driver License?' />
</gs:data>
</entry>
</feed>"""
NEW_RECORD = """<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gs="http://schemas.google.com/spreadsheets/2006">
<title>Darcy</title>
<gs:field name='Birthday'>2/10/1785</gs:field>
<gs:field name='Age'>28</gs:field>
<gs:field name='Name'>Darcy</gs:field>
<gs:field name='CanVote'>No</gs:field>
</entry>"""
RECORDS_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:gs="http://schemas.google.com/spreadsheets/2006"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='W/"DEQHQn84fCt7ImA9WxJTGEU."'>
<id>http://spreadsheets.google.com/feeds/key/records/0</id>
<updated>2009-04-28T02:38:53.134Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#record' />
<title>Table 1</title>
<link rel='alternate' type='text/html'
href='http://spreadsheets.google.com/pub?key=key' />
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0' />
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0' />
<author>
<name>Liz</name>
<email>liz@gmail.com</email>
</author>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry gd:etag='"UB8DTlJAKSt7ImA-WkUT"'>
<id>
http://spreadsheets.google.com/feeds/key/records/0/cn6ca</id>
<updated>2009-04-28T02:38:53.134Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">
2009-04-28T02:38:53.134Z</app:edited>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#record' />
<title>Darcy</title>
<content>Birthday: 2/10/1785, Age: 28, Name: Darcy,
CanVote: No</content>
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0/cn6ca' />
<link rel='edit' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0/cn6ca' />
<gs:field index='B' name='Birthday'>2/10/1785</gs:field>
<gs:field index='C' name='Age'>28</gs:field>
<gs:field index='A' name='Name'>Darcy</gs:field>
<gs:field index='D' name='CanVote'>No</gs:field>
</entry>
<entry gd:etag='"UVBFUEcNRCt7ImA9DU8."'>
<id>
http://spreadsheets.google.com/feeds/key/records/0/cokwr</id>
<updated>2009-04-28T02:38:53.134Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">
2009-04-28T02:38:53.134Z</app:edited>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/spreadsheets/2006#record' />
<title>Jane</title>
<content>Birthday: 1/6/1791, Age: 22, Name: Jane,
CanVote: Yes</content>
<link rel='self' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0/cokwr' />
<link rel='edit' type='application/atom+xml'
href='http://spreadsheets.google.com/feeds/key/records/0/cokwr' />
<gs:field index='B' name='Birthday'>1/6/1791</gs:field>
<gs:field index='C' name='Age'>22</gs:field>
<gs:field index='A' name='Name'>Jane</gs:field>
<gs:field index='D' name='CanVote'>Yes</gs:field>
</entry>
</feed>"""
LIST_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:gsx="http://schemas.google.com/spreadsheets/2006/extended"
xmlns:gd="http://schemas.google.com/g/2005"
gd:etag='W/"D0cERnk-eip7ImA9WBBXGEg."'>
<id>
http://spreadsheets.google.com/feeds/list/key/worksheetId/private/full
</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<title type="text">Sheet1</title>
<link rel="alternate" type="text/html"
href="http://spreadsheets.google.com/ccc?key=key"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full"/>
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full"/>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full"/>
<author>
<name>Fitzwilliam Darcy</name>
<email>fitz@gmail.com</email>
</author>
<openSearch:totalResults>8</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>8</openSearch:itemsPerPage>
<entry gd:etag='"S0wCTlpIIip7ImA0X0QI"'>
<id>http://spreadsheets.google.com/feeds/list/k/w/private/full/r</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#list"/>
<title type="text">Bingley</title>
<content type="text">Hours: 10, Items: 2, IPM: 0.0033</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r/v"/>
<gsx:name>Bingley</gsx:name>
<gsx:hours>10</gsx:hours>
<gsx:items>2</gsx:items>
<gsx:ipm>0.0033</gsx:ipm>
</entry>
<entry gd:etag='"AxQDSXxjfyp7ImA0ChJVSBI."'>
<id>
http://spreadsheets.google.com/feeds/list/k/w/private/full/rowId
</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#list"/>
<title type="text">Charlotte</title>
<content type="text">Hours: 60, Items: 18000, IPM: 5</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r/v"/>
<gsx:name>Charlotte</gsx:name>
<gsx:hours>60</gsx:hours>
<gsx:items>18000</gsx:items>
<gsx:ipm>5</gsx:ipm>
</entry>
</feed>"""
NEW_ROW = """<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gsx="http://schemas.google.com/spreadsheets/2006/extended">
<gsx:hours>1</gsx:hours>
<gsx:ipm>1</gsx:ipm>
<gsx:items>60</gsx:items>
<gsx:name>Elizabeth Bennet</gsx:name>
</entry>"""
UPDATED_ROW = """<entry gd:etag='"S0wCTlpIIip7ImA0X0QI"'
xmlns="http://www.w3.org/2005/Atom"
xmlns:gd="http://schemas.google.com/g/2005"
xmlns:gsx="http://schemas.google.com/spreadsheets/2006/extended">
<id>http://spreadsheets.google.com/feeds/list/k/w/private/full/rowId</id>
<updated>2006-11-17T18:23:45.173Z</updated>
<category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#list"/>
<title type="text">Bingley</title>
<content type="text">Hours: 10, Items: 2, IPM: 0.0033</content>
<link rel="self" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r"/>
<link rel="edit" type="application/atom+xml"
href="http://spreadsheets.google.com/feeds/list/k/w/private/full/r/v"/>
<gsx:name>Bingley</gsx:name>
<gsx:hours>20</gsx:hours>
<gsx:items>4</gsx:items>
<gsx:ipm>0.0033</gsx:ipm>
</entry>"""
# A cells feed with three cell entries.
# Fix: the #post link was previously missing its href attribute and its
# closing "/>", so the following <link opened inside an unterminated tag,
# making the fixture malformed XML. The href line restores well-formedness.
CELLS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
    xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
    xmlns:gs="http://schemas.google.com/spreadsheets/2006"
    xmlns:gd="http://schemas.google.com/g/2005"
    gd:etag='W/"D0cERnk-eip7ImA9WBBXGEg."'>
  <id>
    http://spreadsheets.google.com/feeds/cells/key/worksheetId/private/full
  </id>
  <updated>2006-11-17T18:27:32.543Z</updated>
  <title type="text">Sheet1</title>
  <link rel="alternate" type="text/html"
      href="http://spreadsheets.google.com/ccc?key=key"/>
  <link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
      href="http://spreadsheets.google.com/feeds/cells/k/w/private/full"/>
  <link rel="http://schemas.google.com/g/2005#post"
      type="application/atom+xml"
      href="http://spreadsheets.google.com/feeds/cells/k/w/private/full"/>
  <link rel="http://schemas.google.com/g/2005#batch"
      type="application/atom+xml"
      href="http://spreadsheets.google.com/feeds/cells/k/w/private/full/batch"/>
  <link rel="self" type="application/atom+xml"
      href="http://spreadsheets.google.com/feeds/cells/k/w/private/full"/>
  <author>
    <name>Fitzwilliam Darcy</name>
    <email>fitz@gmail.com</email>
  </author>
  <openSearch:startIndex>1</openSearch:startIndex>
  <openSearch:itemsPerPage>36</openSearch:itemsPerPage>
  <gs:rowCount>100</gs:rowCount>
  <gs:colCount>20</gs:colCount>
  <entry gd:etag='"ImA9D1APFyp7"'>
    <id>
      http://spreadsheets.google.com/feeds/cells/k/w/private/full/R1C1
    </id>
    <updated>2006-11-17T18:27:32.543Z</updated>
    <category scheme="http://schemas.google.com/spreadsheets/2006"
        term="http://schemas.google.com/spreadsheets/2006#cell"/>
    <title type="text">A1</title>
    <content type="text">Name</content>
    <link rel="self" type="application/atom+xml"
        href="http://spreadsheets.google.com/feeds/cells/k/w/pr/full/R1C1"/>
    <link rel="edit" type="application/atom+xml"
        href="http://spreadsheets.google.com/./cells/k/w/pr/full/R1C1/bgvjf"/>
    <gs:cell row="1" col="1" inputValue="Name">Name</gs:cell>
  </entry>
  <entry gd:etag='"YD0PS1YXByp7Ig.."'>
    <id>
      http://spreadsheets.google.com/feeds/cells/k/w/private/full/R1C2
    </id>
    <updated>2006-11-17T18:27:32.543Z</updated>
    <category scheme="http://schemas.google.com/spreadsheets/2006"
        term="http://schemas.google.com/spreadsheets/2006#cell"/>
    <title type="text">B1</title>
    <content type="text">Hours</content>
    <link rel="self" type="application/atom+xml"
        href="http://spreadsheets.google.com/feeds/cells/k/w/pr/full/R1C2"/>
    <link rel="edit" type="application/atom+xml"
        href="http://spreadsheets.google.com/./cells/k/w/pr/full/R1C2/1pn567"/>
    <gs:cell row="1" col="2" inputValue="Hours">Hours</gs:cell>
  </entry>
  <entry gd:etag='"ImB5CBYSRCp7"'>
    <id>
      http://spreadsheets.google.com/feeds/cells/k/w/private/full/R9C4
    </id>
    <updated>2006-11-17T18:27:32.543Z</updated>
    <category scheme="http://schemas.google.com/spreadsheets/2006"
        term="http://schemas.google.com/spreadsheets/2006#cell"/>
    <title type="text">D9</title>
    <content type="text">5</content>
    <link rel="self" type="application/atom+xml"
        href="http://spreadsheets.google.com/feeds/cells/k/w/pr/full/R9C4"/>
    <link rel="edit" type="application/atom+xml"
        href="http://spreadsheets.google.com/./cells/k/w/pr/full/R9C4/srevc"/>
    <gs:cell row="9" col="4"
        inputValue="=FLOOR(R[0]C[-1]/(R[0]C[-2]*60),.0001)"
        numericValue="5.0">5</gs:cell>
  </entry>
</feed>"""
# A batch update feed for two cells.
# Fix: the batch id elements previously carried a stray double quote in
# both the opening and closing tags (<batch:id">...</batch:id">), which
# is not well-formed XML.
BATCH_CELLS = """<feed xmlns="http://www.w3.org/2005/Atom"
    xmlns:batch="http://schemas.google.com/gdata/batch"
    xmlns:gs="http://schemas.google.com/spreadsheets/2006">
  <id>
    http://spreadsheets.google.com/feeds/cells/key/worksheetId/private/full
  </id>
  <entry>
    <batch:id>A1</batch:id>
    <batch:operation type="update"/>
    <id>
      http://spreadsheets.google.com/feeds/cells/k/w/private/full/cellId
    </id>
    <link rel="edit" type="application/atom+xml"
        href="http://spreadsheets/google.com/./cells/k/w/pr/full/R2C4/v"/>
    <gs:cell row="2" col="4" inputValue="newData"/>
  </entry>
  <entry>
    <batch:id>A2</batch:id>
    <batch:operation type="update"/>
    <title type="text">A2</title>
    <id>
      http://spreadsheets.google.com/feeds/cells/k/w/private/full/cellId
    </id>
    <link rel="edit" type="application/atom+xml"
        href="http://spreadsheets/google.com/feeds/cells/k/w/pr/full/R2C5/v"/>
    <gs:cell row="2" col="5" inputValue="moreInfo"/>
  </entry>
</feed>"""
class SpreadsheetEntryTest(unittest.TestCase):
  """Checks that a spreadsheet entry parses into the expected fields."""

  SELF_URL = ('http://spreadsheets.google.com/feeds/spreadsheets'
              '/private/full/key')
  WORKSHEETS_URL = ('http://spreadsheets.google.com/feeds/worksheets'
                    '/key/private/full')

  def setUp(self):
    self.spreadsheet = atom.core.parse(
        SPREADSHEET, gdata.spreadsheets.data.Spreadsheet)

  def test_check_parsing(self):
    sheet = self.spreadsheet
    self.assertEqual(sheet.etag, '"BxAUSQUJRCp7ImBq"')
    self.assertEqual(sheet.id.text, self.SELF_URL)
    self.assertEqual(sheet.updated.text, '2006-11-17T18:24:18.231Z')
    self.assertEqual(sheet.find_worksheets_feed(), self.WORKSHEETS_URL)
    self.assertEqual(sheet.find_self_link(), self.SELF_URL)
class ListEntryTest(unittest.TestCase):
  """Tests reading and writing gsx: column values on list-feed rows."""

  def test_get_and_set_column_value(self):
    row = atom.core.parse(NEW_ROW, gdata.spreadsheets.data.ListEntry)
    row.set_value('hours', '3')
    row.set_value('name', 'Lizzy')
    expected = {'hours': '3', 'ipm': '1', 'items': '60', 'name': 'Lizzy'}
    for column, value in expected.items():
      self.assertEqual(row.get_value(column), value)
    # A column that was never set reads as None until a value is stored.
    self.assertEqual(row.get_value('x'), None)
    row.set_value('x', 'Test')
    self.assertEqual(row.get_value('x'), 'Test')
    # The serialized XML must carry every column tag and value...
    row_xml = str(row)
    expected['x'] = 'Test'
    for column, value in expected.items():
      self.assert_(row_xml.find(':%s' % column) > -1)
      self.assert_(row_xml.find('>%s</' % value) > -1)
    # ...and nothing that was never set.
    self.assertEqual(row_xml.find(':zzz'), -1)
    self.assertEqual(row_xml.find('>foo</'), -1)

  def test_check_parsing(self):
    # A freshly parsed row exposes exactly the columns present in its XML.
    row = atom.core.parse(NEW_ROW, gdata.spreadsheets.data.ListEntry)
    for column, value in (('hours', '1'), ('ipm', '1'), ('items', '60'),
                          ('name', 'Elizabeth Bennet'), ('none', None)):
      self.assertEqual(row.get_value(column), value)
    # An updated row reflects the new cell values plus its entry metadata.
    row = atom.core.parse(UPDATED_ROW, gdata.spreadsheets.data.ListEntry)
    for column, value in (('hours', '20'), ('ipm', '0.0033'), ('items', '4'),
                          ('name', 'Bingley'), ('x', None)):
      self.assertEqual(row.get_value(column), value)
    self.assertEqual(
        row.id.text, 'http://spreadsheets.google.com/feeds/list'
        '/k/w/private/full/rowId')
    self.assertEqual(row.updated.text, '2006-11-17T18:23:45.173Z')
    self.assertEqual(row.content.text, 'Hours: 10, Items: 2, IPM: 0.0033')
class RecordEntryTest(unittest.TestCase):
  """Covers index- and name-based field lookup on record entries."""

  def setUp(self):
    self.records = atom.core.parse(
        RECORDS_FEED, gdata.spreadsheets.data.RecordsFeed)

  def test_get_by_index(self):
    first, second = self.records.entry[0], self.records.entry[1]
    birthday_field = first.field[0]
    self.assertEqual(birthday_field.index, 'B')
    self.assertEqual(birthday_field.name, 'Birthday')
    self.assertEqual(birthday_field.text, '2/10/1785')
    self.assertEqual(first.value_for_index('B'), '2/10/1785')
    # Looking up a column the record does not have raises FieldMissing.
    self.assertRaises(gdata.spreadsheets.data.FieldMissing,
                      first.ValueForIndex, 'E')
    self.assertEqual(second.value_for_index('D'), 'Yes')

  def test_get_by_name(self):
    first, second = self.records.entry[0], self.records.entry[1]
    self.assertEqual(first.ValueForName('Birthday'), '2/10/1785')
    self.assertRaises(gdata.spreadsheets.data.FieldMissing,
                      first.value_for_name, 'Foo')
    self.assertEqual(second.value_for_name('Age'), '22')
class DataClassSanityTest(unittest.TestCase):
  """Structural sanity check over the spreadsheets data classes."""
  def test_basic_element_structure(self):
    # Delegates to the shared helper in gdata.test_config; keep this list
    # in sync with the public classes exported by gdata.spreadsheets.data.
    conf.check_data_classes(self, [
        gdata.spreadsheets.data.Cell, gdata.spreadsheets.data.ColCount,
        gdata.spreadsheets.data.Field, gdata.spreadsheets.data.Column,
        gdata.spreadsheets.data.Data, gdata.spreadsheets.data.Header,
        gdata.spreadsheets.data.RowCount, gdata.spreadsheets.data.Worksheet,
        gdata.spreadsheets.data.Spreadsheet,
        gdata.spreadsheets.data.SpreadsheetsFeed,
        gdata.spreadsheets.data.WorksheetEntry,
        gdata.spreadsheets.data.WorksheetsFeed,
        gdata.spreadsheets.data.Table,
        gdata.spreadsheets.data.TablesFeed,
        gdata.spreadsheets.data.Record,
        gdata.spreadsheets.data.RecordsFeed,
        gdata.spreadsheets.data.ListRow,
        gdata.spreadsheets.data.ListEntry,
        gdata.spreadsheets.data.ListsFeed,
        gdata.spreadsheets.data.CellEntry,
        gdata.spreadsheets.data.CellsFeed])
def suite():
  """Build the suite of all spreadsheet data model tests in this module."""
  test_cases = [SpreadsheetEntryTest, DataClassSanityTest,
                ListEntryTest, RecordEntryTest]
  return conf.build_suite(test_cases)
if __name__ == '__main__':
  # Discover and run every TestCase defined in this module.
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import gdata.spreadsheets.client
import gdata.gauth
import gdata.client
import atom.http_core
import atom.mock_http_core
import atom.core
import gdata.data
import gdata.test_config as conf
# Register the option naming the spreadsheet to test against; the live
# tests below read it via conf.options.get_value('spreadsheetid').
conf.options.register_option(conf.SPREADSHEET_ID_OPTION)
class SpreadsheetsClientTest(unittest.TestCase):
  """Live-server tests for gdata.spreadsheets.client.SpreadsheetsClient.

  Each test returns immediately unless the 'runlive' option is 'true';
  when live, responses may be replayed from a configured cache.
  """
  def setUp(self):
    # self.client stays None unless live tests are enabled, so tearDown
    # (via conf.close_client) must tolerate None.
    self.client = None
    if conf.options.get_value('runlive') == 'true':
      self.client = gdata.spreadsheets.client.SpreadsheetsClient()
      conf.configure_client(self.client, 'SpreadsheetsClientTest', 'wise')
  def tearDown(self):
    conf.close_client(self.client)
  def test_create_update_delete_worksheet(self):
    """Adds a worksheet, checks the feed count changes, then deletes it."""
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(self.client, 'test_create_update_delete_worksheet')
    spreadsheet_id = conf.options.get_value('spreadsheetid')
    original_worksheets = self.client.get_worksheets(spreadsheet_id)
    self.assert_(isinstance(original_worksheets,
                            gdata.spreadsheets.data.WorksheetsFeed))
    worksheet_count = int(original_worksheets.total_results.text)
    # Add a new worksheet to the spreadsheet.
    created = self.client.add_worksheet(
        spreadsheet_id, 'a test worksheet', 4, 8)
    self.assert_(isinstance(created,
                            gdata.spreadsheets.data.WorksheetEntry))
    self.assertEqual(created.title.text, 'a test worksheet')
    self.assertEqual(created.row_count.text, '4')
    self.assertEqual(created.col_count.text, '8')
    # There should now be one more worksheet in this spreadsheet.
    updated_worksheets = self.client.get_worksheets(spreadsheet_id)
    new_worksheet_count = int(updated_worksheets.total_results.text)
    self.assertEqual(worksheet_count + 1, new_worksheet_count)
    # Delete our test worksheet.
    self.client.delete(created)
    # We should be back to the original number of worksheets.
    updated_worksheets = self.client.get_worksheets(spreadsheet_id)
    new_worksheet_count = int(updated_worksheets.total_results.text)
    self.assertEqual(worksheet_count, new_worksheet_count)
  def test_create_update_delete_table_and_records(self):
    """Creates a table in a scratch worksheet, adds records, cleans up."""
    if not conf.options.get_value('runlive') == 'true':
      return
    # Either load the recording or prepare to make a live request.
    conf.configure_cache(
        self.client, 'test_create_update_delete_table_and_records')
    spreadsheet_id = conf.options.get_value('spreadsheetid')
    tables = self.client.get_tables(spreadsheet_id)
    test_worksheet = self.client.add_worksheet(
        spreadsheet_id, 'worksheet x', rows=30, cols=3)
    self.assert_(isinstance(tables, gdata.spreadsheets.data.TablesFeed))
    initial_count = tables.total_results.text
    created_table = self.client.add_table(
        spreadsheet_id, 'Test Table', 'This table is for testing',
        'worksheet x', header_row=5, num_rows=10, start_row=8,
        insertion_mode=None,
        column_headers={'B': 'Food', 'C': 'Drink', 'A': 'Price'})
    # Re-get the list of tables and make sure there are more now.
    updated_tables = self.client.get_tables(spreadsheet_id)
    self.assertEqual(int(initial_count) + 1,
                     int(updated_tables.total_results.text))
    # Get the records in our new table to make sure it has the correct
    # number of records.
    table_num = int(created_table.get_table_id())
    starting_records = self.client.get_records(spreadsheet_id, table_num)
    self.assertEqual(starting_records.total_results.text, '10')
    self.assert_(starting_records.entry[0].field[0].text is None)
    self.assert_(starting_records.entry[0].field[1].text is None)
    self.assert_(starting_records.entry[1].field[0].text is None)
    self.assert_(starting_records.entry[1].field[1].text is None)
    # NOTE(review): record1 is never used afterwards; kept as-is.
    record1 = self.client.add_record(
        spreadsheet_id, table_num,
        {'Food': 'Cheese', 'Drink': 'Soda', 'Price': '2.99'}, 'icky')
    self.client.add_record(spreadsheet_id, table_num,
                           {'Food': 'Eggs', 'Drink': 'Milk'})
    self.client.add_record(spreadsheet_id, table_num,
                           {'Food': 'Spinach', 'Drink': 'Water'})
    updated_records = self.client.get_records(spreadsheet_id, table_num)
    self.assertEqual(updated_records.entry[10].value_for_name('Price'), '2.99')
    self.assertEqual(updated_records.entry[10].value_for_index('A'), '2.99')
    self.assertEqual(updated_records.entry[10].value_for_name('Drink'),
                     'Soda')
    self.assert_(updated_records.entry[11].value_for_name('Price') is None)
    self.assertEqual(updated_records.entry[11].value_for_name('Drink'),
                     'Milk')
    self.assertEqual(updated_records.entry[12].value_for_name('Drink'),
                     'Water')
    self.assert_(updated_records.entry[1].value_for_index('A') is None)
    self.assert_(updated_records.entry[2].value_for_index('B') is None)
    self.assert_(updated_records.entry[3].value_for_index('C') is None)
    # Cleanup the table.
    self.client.delete(created_table)
    # Delete the test worksheet in which the table was placed.
    self.client.delete(test_worksheet)
    # Make sure we are back to the original count.
    updated_tables = self.client.get_tables(spreadsheet_id)
    self.assertEqual(int(initial_count),
                     int(updated_tables.total_results.text))
def suite():
  """Assemble the live SpreadsheetsClient test suite."""
  live_cases = [SpreadsheetsClientTest]
  return conf.build_suite(live_cases)
if __name__ == '__main__':
  # Run only the suite assembled above with the plain text runner.
  unittest.TextTestRunner().run(suite())
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder@gmail.com (Jeff Scudder)'
import sys
import unittest
import getopt
import getpass
import module_test_runner
import run_data_tests
import run_service_tests
if __name__ == '__main__':
  # Run the data model tests first, then gather test settings (the module
  # imports getpass, presumably for credentials) and run the live
  # service tests.
  run_data_tests.RunAllTests()
  run_service_tests.GetValuesForTestSettingsAndRunAllTests()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import coverage
import all_tests
import atom.core
import atom.http_core
import atom.mock_http_core
import atom.auth
import atom.client
import gdata.gauth
import gdata.client
import gdata.data
import gdata.blogger.data
import gdata.blogger.client
import gdata.spreadsheets.data
from gdata.test_config import settings
# Ensure that coverage tests execute the live requests to the servers, but
# allow use of cached server responses to speed up repeated runs.
# NOTE(review): assumes gdata.test_config.settings exposes these two flags
# and that the individual suites consult them — confirm.
settings.RUN_LIVE_TESTS = True
settings.CLEAR_CACHE = False
def suite():
  """Return the aggregate test suite used for this coverage run.

  Bug fix: the previous body referenced ``atom_tests.core_test``, a name
  never imported in this module, so calling suite() raised NameError.
  Delegate to the imported ``all_tests`` aggregator instead — the same
  suite the ``__main__`` block below runs.
  """
  return unittest.TestSuite((all_tests.suite(),))
if __name__ == '__main__':
  # Record line coverage while the aggregated test suite runs.
  coverage.erase()
  coverage.start()
  unittest.TextTestRunner().run(all_tests.suite())
  coverage.stop()
  # NOTE(review): atom.data and gdata.core are not imported above; this
  # relies on them being loaded as a side effect of the other imports —
  # confirm, otherwise this report call raises AttributeError.
  coverage.report([atom.core, atom.http_core, atom.auth, atom.data,
      atom.mock_http_core, atom.client, gdata.gauth, gdata.client,
      gdata.core, gdata.data, gdata.blogger.data, gdata.blogger.client,
      gdata.spreadsheets.data])
| Python |
#!/usr/bin/python
# -*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import sys
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
from gdata import test_data
import gdata.test_config as conf
class AuthorTest(unittest.TestCase):
  """Covers construction, extension handling and round-trips of atom.Author."""
  def setUp(self):
    self.author = atom.Author()
  def testEmptyAuthorShouldHaveEmptyExtensionsList(self):
    self.assert_(isinstance(self.author.extension_elements, list))
    self.assert_(len(self.author.extension_elements) == 0)
  def testNormalAuthorShouldHaveNoExtensionElements(self):
    self.author.name = atom.Name(text='Jeff Scudder')
    self.assert_(self.author.name.text == 'Jeff Scudder')
    self.assert_(len(self.author.extension_elements) == 0)
    # Round-tripping an author with only known children must not
    # introduce extension elements.
    new_author = atom.AuthorFromString(self.author.ToString())
    self.assert_(len(self.author.extension_elements) == 0)
    self.author.extension_elements.append(atom.ExtensionElement(
        'foo', text='bar'))
    self.assert_(len(self.author.extension_elements) == 1)
    self.assert_(self.author.name.text == 'Jeff Scudder')
    # A manually added unknown child survives serialize-then-parse.
    new_author = atom.AuthorFromString(self.author.ToString())
    self.assert_(len(self.author.extension_elements) == 1)
    self.assert_(new_author.name.text == 'Jeff Scudder')
  def testEmptyAuthorToAndFromStringShouldMatch(self):
    string_from_author = self.author.ToString()
    new_author = atom.AuthorFromString(string_from_author)
    string_from_new_author = new_author.ToString()
    self.assert_(string_from_author == string_from_new_author)
  def testAuthorWithNameToAndFromStringShouldMatch(self):
    self.author.name = atom.Name()
    self.author.name.text = 'Jeff Scudder'
    string_from_author = self.author.ToString()
    new_author = atom.AuthorFromString(string_from_author)
    string_from_new_author = new_author.ToString()
    self.assert_(string_from_author == string_from_new_author)
    self.assert_(self.author.name.text == new_author.name.text)
  def testExtensionElements(self):
    # Extension *attributes* (not elements, despite the test name) must
    # also survive a round-trip.
    self.author.extension_attributes['foo1'] = 'bar'
    self.author.extension_attributes['foo2'] = 'rab'
    self.assert_(self.author.extension_attributes['foo1'] == 'bar')
    self.assert_(self.author.extension_attributes['foo2'] == 'rab')
    new_author = atom.AuthorFromString(self.author.ToString())
    self.assert_(new_author.extension_attributes['foo1'] == 'bar')
    self.assert_(new_author.extension_attributes['foo2'] == 'rab')
  def testConvertFullAuthorToAndFromString(self):
    author = atom.AuthorFromString(test_data.TEST_AUTHOR)
    self.assert_(author.name.text == 'John Doe')
    self.assert_(author.email.text == 'johndoes@someemailadress.com')
    self.assert_(author.uri.text == 'http://www.google.com')
class EmailTest(unittest.TestCase):
  """Round-trips an atom.Email element through its string form."""

  def setUp(self):
    self.email = atom.Email()

  def testEmailToAndFromString(self):
    self.email.text = 'This is a test'
    round_trip = atom.EmailFromString(self.email.ToString())
    self.assert_(round_trip.text == self.email.text)
    self.assert_(round_trip.extension_elements ==
                 self.email.extension_elements)
class NameTest(unittest.TestCase):
  """Covers serialization and extension attributes of atom.Name."""

  def setUp(self):
    self.name = atom.Name()

  def testEmptyNameToAndFromStringShouldMatch(self):
    serialized = self.name.ToString()
    reparsed = atom.NameFromString(serialized)
    self.assert_(serialized == reparsed.ToString())

  def testText(self):
    self.assert_(self.name.text is None)
    self.name.text = 'Jeff Scudder'
    self.assert_(self.name.text == 'Jeff Scudder')
    reparsed = atom.NameFromString(self.name.ToString())
    self.assert_(reparsed.text == self.name.text)

  def testExtensionElements(self):
    self.name.extension_attributes['foo'] = 'bar'
    self.assert_(self.name.extension_attributes['foo'] == 'bar')
    reparsed = atom.NameFromString(self.name.ToString())
    self.assert_(reparsed.extension_attributes['foo'] == 'bar')
class ExtensionElementTest(unittest.TestCase):
  """Parsing and round-trip behavior of atom.ExtensionElement trees."""
  def setUp(self):
    self.ee = atom.ExtensionElement('foo')
  def testEmptyEEShouldProduceEmptyString(self):
    # NOTE(review): the body was left empty by the original author, so
    # this test asserts nothing despite its name.
    pass
  def testEEParsesTreeCorrectly(self):
    deep_tree = atom.ExtensionElementFromString(test_data.EXTENSION_TREE)
    self.assert_(deep_tree.tag == 'feed')
    self.assert_(deep_tree.namespace == 'http://www.w3.org/2005/Atom')
    self.assert_(deep_tree.children[0].tag == 'author')
    self.assert_(deep_tree.children[0].namespace == 'http://www.google.com')
    self.assert_(deep_tree.children[0].children[0].tag == 'name')
    self.assert_(deep_tree.children[0].children[0].namespace ==
        'http://www.google.com')
    self.assert_(deep_tree.children[0].children[0].text.strip() == 'John Doe')
    self.assert_(deep_tree.children[0].children[0].children[0].text.strip() ==
        'Bar')
    # The innermost element keeps its tag, namespace and attributes.
    foo = deep_tree.children[0].children[0].children[0]
    self.assert_(foo.tag == 'foo')
    self.assert_(foo.namespace == 'http://www.google.com')
    self.assert_(foo.attributes['up'] == 'down')
    self.assert_(foo.attributes['yes'] == 'no')
    self.assert_(foo.children == [])
  def testEEToAndFromStringShouldMatch(self):
    string_from_ee = self.ee.ToString()
    new_ee = atom.ExtensionElementFromString(string_from_ee)
    string_from_new_ee = new_ee.ToString()
    self.assert_(string_from_ee == string_from_new_ee)
    deep_tree = atom.ExtensionElementFromString(test_data.EXTENSION_TREE)
    string_from_deep_tree = deep_tree.ToString()
    new_deep_tree = atom.ExtensionElementFromString(string_from_deep_tree)
    string_from_new_deep_tree = new_deep_tree.ToString()
    self.assert_(string_from_deep_tree == string_from_new_deep_tree)
class LinkTest(unittest.TestCase):
  """Round-trip and constructor coverage for atom.Link."""

  def setUp(self):
    self.link = atom.Link()

  def testLinkToAndFromString(self):
    self.link.href = 'test href'
    self.link.hreflang = 'english'
    self.link.type = 'text/html'
    self.link.extension_attributes['foo'] = 'bar'
    self.assert_(self.link.href == 'test href')
    self.assert_(self.link.hreflang == 'english')
    self.assert_(self.link.type == 'text/html')
    self.assert_(self.link.extension_attributes['foo'] == 'bar')
    reparsed = atom.LinkFromString(self.link.ToString())
    for attr in ('href', 'type', 'hreflang'):
      self.assert_(getattr(self.link, attr) == getattr(reparsed, attr))
    self.assert_(self.link.extension_attributes['foo'] ==
                 reparsed.extension_attributes['foo'])

  def testLinkType(self):
    typed_link = atom.Link(link_type='text/html')
    self.assert_(typed_link.type == 'text/html')
class GeneratorTest(unittest.TestCase):
  """Round-trips atom.Generator including extension attributes."""

  def setUp(self):
    self.generator = atom.Generator()

  def testGeneratorToAndFromString(self):
    self.generator.uri = 'www.google.com'
    self.generator.version = '1.0'
    self.generator.extension_attributes['foo'] = 'bar'
    self.assert_(self.generator.uri == 'www.google.com')
    self.assert_(self.generator.version == '1.0')
    self.assert_(self.generator.extension_attributes['foo'] == 'bar')
    reparsed = atom.GeneratorFromString(self.generator.ToString())
    for attr in ('uri', 'version'):
      self.assert_(getattr(self.generator, attr) == getattr(reparsed, attr))
    self.assert_(self.generator.extension_attributes['foo'] ==
                 reparsed.extension_attributes['foo'])
class TitleTest(unittest.TestCase):
  """Round-trips atom.Title, including a markup character in the text."""

  def setUp(self):
    self.title = atom.Title()

  def testTitleToAndFromString(self):
    self.title.type = 'text'
    self.title.text = 'Less: <'
    self.assert_(self.title.type == 'text')
    self.assert_(self.title.text == 'Less: <')
    reparsed = atom.TitleFromString(self.title.ToString())
    for attr in ('type', 'text'):
      self.assert_(getattr(self.title, attr) == getattr(reparsed, attr))
class SubtitleTest(unittest.TestCase):
  """Round-trips atom.Subtitle with an ampersand in the text."""

  def setUp(self):
    self.subtitle = atom.Subtitle()

  # Method name kept from the original (copy-paste of TitleTest) so test
  # discovery stays identical.
  def testTitleToAndFromString(self):
    self.subtitle.type = 'text'
    self.subtitle.text = 'sub & title'
    self.assert_(self.subtitle.type == 'text')
    self.assert_(self.subtitle.text == 'sub & title')
    reparsed = atom.SubtitleFromString(self.subtitle.ToString())
    for attr in ('type', 'text'):
      self.assert_(getattr(self.subtitle, attr) == getattr(reparsed, attr))
class SummaryTest(unittest.TestCase):
  """Round-trips atom.Summary with a markup character in the text."""

  def setUp(self):
    self.summary = atom.Summary()

  # Method name kept from the original (copy-paste of TitleTest) so test
  # discovery stays identical.
  def testTitleToAndFromString(self):
    self.summary.type = 'text'
    self.summary.text = 'Less: <'
    self.assert_(self.summary.type == 'text')
    self.assert_(self.summary.text == 'Less: <')
    reparsed = atom.SummaryFromString(self.summary.ToString())
    for attr in ('type', 'text'):
      self.assert_(getattr(self.summary, attr) == getattr(reparsed, attr))
class CategoryTest(unittest.TestCase):
  """Round-trips atom.Category term, scheme and label attributes."""

  def setUp(self):
    self.category = atom.Category()

  def testCategoryToAndFromString(self):
    expected = {'term': 'x', 'scheme': 'y', 'label': 'z'}
    for attr, value in expected.items():
      setattr(self.category, attr, value)
    for attr, value in expected.items():
      self.assert_(getattr(self.category, attr) == value)
    reparsed = atom.CategoryFromString(self.category.ToString())
    for attr in expected:
      self.assert_(getattr(self.category, attr) == getattr(reparsed, attr))
class ContributorTest(unittest.TestCase):
  """Round-trips atom.Contributor with name, email and uri children."""

  def setUp(self):
    self.contributor = atom.Contributor()

  def testContributorToAndFromString(self):
    self.contributor.name = atom.Name(text='J Scud')
    self.contributor.email = atom.Email(text='nobody@nowhere')
    self.contributor.uri = atom.Uri(text='http://www.google.com')
    self.assert_(self.contributor.name.text == 'J Scud')
    self.assert_(self.contributor.email.text == 'nobody@nowhere')
    self.assert_(self.contributor.uri.text == 'http://www.google.com')
    reparsed = atom.ContributorFromString(self.contributor.ToString())
    for child in ('name', 'email', 'uri'):
      self.assert_(getattr(self.contributor, child).text ==
                   getattr(reparsed, child).text)
class IdTest(unittest.TestCase):
  """Checks that atom.Id keeps its text through a serialization cycle."""

  def setUp(self):
    self.my_id = atom.Id()

  def testIdToAndFromString(self):
    self.my_id.text = 'my nifty id'
    self.assert_(self.my_id.text == 'my nifty id')
    round_trip = atom.IdFromString(self.my_id.ToString())
    self.assert_(round_trip.text == self.my_id.text)
class IconTest(unittest.TestCase):
  """Round-trips atom.Icon, serializing via str() as the original did."""

  def setUp(self):
    self.icon = atom.Icon()

  def testIconToAndFromString(self):
    self.icon.text = 'my picture'
    self.assert_(self.icon.text == 'my picture')
    round_trip = atom.IconFromString(str(self.icon))
    self.assert_(round_trip.text == self.icon.text)
class LogoTest(unittest.TestCase):
  """Round-trips atom.Logo text through its XML string form."""

  def setUp(self):
    self.logo = atom.Logo()

  def testLogoToAndFromString(self):
    self.logo.text = 'my logo'
    self.assert_(self.logo.text == 'my logo')
    round_trip = atom.LogoFromString(self.logo.ToString())
    self.assert_(round_trip.text == self.logo.text)
class RightsTest(unittest.TestCase):
  """Round-trips atom.Rights text and type."""

  def setUp(self):
    self.rights = atom.Rights()

  # Method name kept from the original (copy-paste of ContributorTest)
  # so test discovery stays identical.
  def testContributorToAndFromString(self):
    self.rights.text = 'you have the right to remain silent'
    self.rights.type = 'text'
    self.assert_(self.rights.text == 'you have the right to remain silent')
    self.assert_(self.rights.type == 'text')
    reparsed = atom.RightsFromString(self.rights.ToString())
    for attr in ('text', 'type'):
      self.assert_(getattr(self.rights, attr) == getattr(reparsed, attr))
class UpdatedTest(unittest.TestCase):
  """Round-trips the atom:updated timestamp element."""

  def setUp(self):
    self.updated = atom.Updated()

  def testUpdatedToAndFromString(self):
    self.updated.text = 'my time'
    self.assert_(self.updated.text == 'my time')
    round_trip = atom.UpdatedFromString(self.updated.ToString())
    self.assert_(round_trip.text == self.updated.text)
class PublishedTest(unittest.TestCase):
  """Round-trips the atom:published timestamp element."""

  def setUp(self):
    self.published = atom.Published()

  def testPublishedToAndFromString(self):
    self.published.text = 'pub time'
    self.assert_(self.published.text == 'pub time')
    round_trip = atom.PublishedFromString(self.published.ToString())
    self.assert_(round_trip.text == self.published.text)
class FeedEntryParentTest(unittest.TestCase):
  """The test accesses hidden methods in atom.FeedEntryParent"""
  def testConvertToAndFromElementTree(self):
    # Use entry because FeedEntryParent doesn't have a tag or namespace.
    original = atom.Entry()
    copy = atom.FeedEntryParent()
    original.author.append(atom.Author(name=atom.Name(text='J Scud')))
    self.assert_(original.author[0].name.text == 'J Scud')
    self.assert_(copy.author == [])
    original.id = atom.Id(text='test id')
    self.assert_(original.id.text == 'test id')
    self.assert_(copy.id is None)
    # _HarvestElementTree reads members out of the tree produced by
    # _ToElementTree; author and id must survive the transfer.
    copy._HarvestElementTree(original._ToElementTree())
    self.assert_(original.author[0].name.text == copy.author[0].name.text)
    self.assert_(original.id.text == copy.id.text)
class EntryTest(unittest.TestCase):
  """Serialization, parsing and app:control handling for atom.Entry."""
  def testConvertToAndFromString(self):
    entry = atom.Entry()
    entry.author.append(atom.Author(name=atom.Name(text='js')))
    entry.title = atom.Title(text='my test entry')
    self.assert_(entry.author[0].name.text == 'js')
    self.assert_(entry.title.text == 'my test entry')
    new_entry = atom.EntryFromString(entry.ToString())
    self.assert_(new_entry.author[0].name.text == 'js')
    self.assert_(new_entry.title.text == 'my test entry')
  def testEntryCorrectlyConvertsActualData(self):
    entry = atom.EntryFromString(test_data.XML_ENTRY_1)
    self.assert_(entry.category[0].scheme ==
        'http://base.google.com/categories/itemtypes')
    self.assert_(entry.category[0].term == 'products')
    # Note: surrounding whitespace in the id text is preserved by parsing.
    self.assert_(entry.id.text == ' http://www.google.com/test/id/url ')
    self.assert_(entry.title.text == 'Testing 2000 series laptop')
    self.assert_(entry.title.type == 'text')
    self.assert_(entry.content.type == 'xhtml')
    #TODO check all other values for the test entry
  def testAppControl(self):
    entry = atom.EntryFromString(test_data.TEST_BASE_ENTRY)
    # The fixture's app:control holds a draft element plus one unknown
    # ('disapproved') child, which must land in extension_elements.
    self.assertEquals(entry.control.draft.text, 'yes')
    self.assertEquals(len(entry.control.extension_elements), 1)
    self.assertEquals(entry.control.extension_elements[0].tag, 'disapproved')
class ControlTest(unittest.TestCase):
  """Round-trips the app:control element with a draft child."""

  def testConvertToAndFromString(self):
    control = atom.Control()
    control.text = 'some text'
    control.draft = atom.Draft(text='yes')
    self.assertEquals(control.draft.text, 'yes')
    self.assertEquals(control.text, 'some text')
    self.assertEquals(isinstance(control.draft, atom.Draft), True)
    reparsed = atom.ControlFromString(str(control))
    self.assertEquals(reparsed.draft.text, control.draft.text)
    self.assertEquals(reparsed.text, control.text)
    self.assertEquals(isinstance(reparsed.draft, atom.Draft), True)
class DraftTest(unittest.TestCase):
  """Round-trips app:draft text plus an extension attribute."""

  def testConvertToAndFromString(self):
    draft = atom.Draft()
    draft.text = 'maybe'
    draft.extension_attributes['foo'] = 'bar'
    self.assertEquals(draft.text, 'maybe')
    self.assertEquals(draft.extension_attributes['foo'], 'bar')
    reparsed = atom.DraftFromString(str(draft))
    self.assertEquals(reparsed.text, draft.text)
    self.assertEquals(reparsed.extension_attributes['foo'],
                      draft.extension_attributes['foo'])
class SourceTest(unittest.TestCase):
  """Round-trips atom.Source with author, title and generator set."""

  def testConvertToAndFromString(self):
    source = atom.Source()
    source.author.append(atom.Author(name=atom.Name(text='js')))
    source.title = atom.Title(text='my test source')
    source.generator = atom.Generator(text='gen')
    self.assert_(source.author[0].name.text == 'js')
    self.assert_(source.title.text == 'my test source')
    self.assert_(source.generator.text == 'gen')
    reparsed = atom.SourceFromString(source.ToString())
    self.assert_(reparsed.author[0].name.text == 'js')
    self.assert_(reparsed.title.text == 'my test source')
    self.assert_(reparsed.generator.text == 'gen')
class FeedTest(unittest.TestCase):
  """Serialization and document-order preservation for atom.Feed."""
  def testConvertToAndFromString(self):
    feed = atom.Feed()
    feed.author.append(atom.Author(name=atom.Name(text='js')))
    feed.title = atom.Title(text='my test source')
    feed.generator = atom.Generator(text='gen')
    feed.entry.append(atom.Entry(author=[atom.Author(name=atom.Name(
        text='entry author'))]))
    self.assert_(feed.author[0].name.text == 'js')
    self.assert_(feed.title.text == 'my test source')
    self.assert_(feed.generator.text == 'gen')
    self.assert_(feed.entry[0].author[0].name.text == 'entry author')
    new_feed = atom.FeedFromString(feed.ToString())
    self.assert_(new_feed.author[0].name.text == 'js')
    self.assert_(new_feed.title.text == 'my test source')
    self.assert_(new_feed.generator.text == 'gen')
    self.assert_(new_feed.entry[0].author[0].name.text == 'entry author')
  def testPreserveEntryOrder(self):
    # Entries are interleaved with other feed children to prove that
    # entry order is preserved even with elements between them.
    test_xml = (
        '<feed xmlns="http://www.w3.org/2005/Atom">'
        '<entry><id>0</id></entry>'
        '<entry><id>1</id></entry>'
        '<title>Testing Order</title>'
        '<entry><id>2</id></entry>'
        '<entry><id>3</id></entry>'
        '<entry><id>4</id></entry>'
        '<entry><id>5</id></entry>'
        '<entry><id>6</id></entry>'
        '<entry><id>7</id></entry>'
        '<author/>'
        '<entry><id>8</id></entry>'
        '<id>feed_id</id>'
        '<entry><id>9</id></entry>'
        '</feed>')
    feed = atom.FeedFromString(test_xml)
    for i in xrange(10):  # xrange: this module targets Python 2.
      self.assert_(feed.entry[i].id.text == str(i))
    feed = atom.FeedFromString(feed.ToString())
    for i in xrange(10):
      self.assert_(feed.entry[i].id.text == str(i))
    # Swapping two entries must be reflected after reserialization too.
    temp = feed.entry[3]
    feed.entry[3] = feed.entry[4]
    feed.entry[4] = temp
    self.assert_(feed.entry[2].id.text == '2')
    self.assert_(feed.entry[3].id.text == '4')
    self.assert_(feed.entry[4].id.text == '3')
    self.assert_(feed.entry[5].id.text == '5')
    feed = atom.FeedFromString(feed.ToString())
    self.assert_(feed.entry[2].id.text == '2')
    self.assert_(feed.entry[3].id.text == '4')
    self.assert_(feed.entry[4].id.text == '3')
    self.assert_(feed.entry[5].id.text == '5')
class ContentEntryParentTest(unittest.TestCase):
  """Round-trips atom.Content (text, type, src) through XML."""

  def setUp(self):
    self.content = atom.Content()

  def testConvertToAndFromElementTree(self):
    self.content.text = 'my content'
    self.content.type = 'text'
    self.content.src = 'my source'
    self.assert_(self.content.text == 'my content')
    self.assert_(self.content.type == 'text')
    self.assert_(self.content.src == 'my source')
    reparsed = atom.ContentFromString(self.content.ToString())
    for attr in ('text', 'type', 'src'):
      self.assert_(getattr(self.content, attr) == getattr(reparsed, attr))

  def testContentConstructorSetsSrc(self):
    self.assertEquals(atom.Content(src='abcd').src, 'abcd')
class PreserveUnkownElementTest(unittest.TestCase):
  """Tests correct preservation of XML elements which are non Atom"""
  # NOTE(review): class name typo ('Unkown') kept — renaming would change
  # the module's public interface and any suite definitions using it.
  def setUp(self):
    self.feed = atom.FeedFromString(test_data.GBASE_ATTRIBUTE_FEED)
  def testCaptureOpenSearchElements(self):
    # Non-Atom (OpenSearch) children must be reachable via FindExtensions.
    self.assertEquals(self.feed.FindExtensions('totalResults')[0].tag,
        'totalResults')
    self.assertEquals(self.feed.FindExtensions('totalResults')[0].namespace,
        'http://a9.com/-/spec/opensearchrss/1.0/')
    open_search_extensions = self.feed.FindExtensions(
        namespace='http://a9.com/-/spec/opensearchrss/1.0/')
    self.assertEquals(len(open_search_extensions), 3)
    for element in open_search_extensions:
      self.assertEquals(element.namespace,
          'http://a9.com/-/spec/opensearchrss/1.0/')
  def testCaptureMetaElements(self):
    # The fixture's first entry carries one metadata element with a
    # 'count' attribute and ten children.
    meta_elements = self.feed.entry[0].FindExtensions(
        namespace='http://base.google.com/ns-metadata/1.0')
    self.assertEquals(len(meta_elements), 1)
    self.assertEquals(meta_elements[0].attributes['count'], '4416629')
    self.assertEquals(len(meta_elements[0].children), 10)
  def testCaptureMetaChildElements(self):
    meta_elements = self.feed.entry[0].FindExtensions(
        namespace='http://base.google.com/ns-metadata/1.0')
    meta_children = meta_elements[0].FindChildren(
        namespace='http://base.google.com/ns-metadata/1.0')
    self.assertEquals(len(meta_children), 10)
    for child in meta_children:
      self.assertEquals(child.tag, 'value')
class LinkFinderTest(unittest.TestCase):
  """Verifies GetLicenseLink and GetAlternateLink on a parsed entry."""

  def setUp(self):
    self.entry = atom.EntryFromString(test_data.XML_ENTRY_1)

  def testLinkFinderGetsLicenseLink(self):
    license_link = self.entry.GetLicenseLink()
    self.assertEquals(isinstance(license_link, atom.Link), True)
    self.assertEquals(license_link.href,
                      'http://creativecommons.org/licenses/by-nc/2.5/rdf')
    self.assertEquals(license_link.rel, 'license')

  def testLinkFinderGetsAlternateLink(self):
    alternate_link = self.entry.GetAlternateLink()
    self.assertEquals(isinstance(alternate_link, atom.Link), True)
    self.assertEquals(alternate_link.href,
                      'http://www.provider-host.com/123456789')
    self.assertEquals(alternate_link.rel, 'alternate')
class AtomBaseTest(unittest.TestCase):
  """Checks extension elements convert to nested ElementTree nodes."""
  def testAtomBaseConvertsExtensions(self):
    # Using Id because it adds no additional members.
    atom_base = atom.Id()
    extension_child = atom.ExtensionElement('foo', namespace='http://ns0.com')
    extension_grandchild = atom.ExtensionElement('bar',
        namespace='http://ns0.com')
    extension_child.children.append(extension_grandchild)
    atom_base.extension_elements.append(extension_child)
    self.assertEquals(len(atom_base.extension_elements), 1)
    self.assertEquals(len(atom_base.extension_elements[0].children), 1)
    self.assertEquals(atom_base.extension_elements[0].tag, 'foo')
    self.assertEquals(atom_base.extension_elements[0].children[0].tag, 'bar')
    # The nested extension elements must appear as namespaced children in
    # the tree produced by the hidden _ToElementTree method.
    element_tree = atom_base._ToElementTree()
    self.assert_(element_tree.find('{http://ns0.com}foo') is not None)
    self.assert_(element_tree.find('{http://ns0.com}foo').find(
        '{http://ns0.com}bar') is not None)
class UtfParsingTest(unittest.TestCase):
  """Tests parsing/serializing non-ASCII member strings.

  NOTE(review): testMemberStringEncoding mutates the module-level
  atom.MEMBER_STRING_ENCODING and does not restore it afterwards, so any
  later test relying on the default encoding may be affected.
  """
  def setUp(self):
    # Greek letters (alpha lambda phi alpha) appear both as an attribute
    # value and as element text.
    self.test_xml = u"""<?xml version="1.0" encoding="utf-8"?>
<entry xmlns='http://www.w3.org/2005/Atom'>
  <id>http://www.google.com/test/id/url</id>
  <title type='\u03B1\u03BB\u03C6\u03B1'>\u03B1\u03BB\u03C6\u03B1</title>
</entry>"""
  def testMemberStringEncoding(self):
    """Members may be set to unicode or plain strings under any encoding."""
    atom_entry = atom.EntryFromString(self.test_xml)
    #self.assertEqual(atom_entry.title.type.encode('utf-8'),
    #    u'\u03B1\u03BB\u03C6\u03B1'.encode('utf-8'))
    #self.assertEqual(atom_entry.title.text.encode('utf-8'),
    #    u'\u03B1\u03BB\u03C6\u03B1'.encode('utf-8'))
    # Setting object members to unicode strings is supported even if
    # MEMBER_STRING_ENCODING is set 'utf-8' (should it be?)
    atom_entry.title.type = u'\u03B1\u03BB\u03C6\u03B1'
    xml = atom_entry.ToString()
    self.assert_(u'\u03B1\u03BB\u03C6\u03B1'.encode('utf-8') in xml)
    # Make sure that we can use plain text when MEMBER_STRING_ENCODING is utf8
    atom_entry.title.type = "plain text"
    atom_entry.title.text = "more text"
    xml = atom_entry.ToString()
    self.assert_("plain text" in xml)
    self.assert_("more text" in xml)
    # Test something else than utf-8
    atom.MEMBER_STRING_ENCODING = 'iso8859_7'
    atom_entry = atom.EntryFromString(self.test_xml)
    self.assert_(atom_entry.title.type == u'\u03B1\u03BB\u03C6\u03B1'.encode(
        'iso8859_7'))
    self.assert_(atom_entry.title.text == u'\u03B1\u03BB\u03C6\u03B1'.encode(
        'iso8859_7'))
    # Test using unicode strings directly for object members
    atom.MEMBER_STRING_ENCODING = unicode
    atom_entry = atom.EntryFromString(self.test_xml)
    self.assert_(atom_entry.title.type == u'\u03B1\u03BB\u03C6\u03B1')
    self.assert_(atom_entry.title.text == u'\u03B1\u03BB\u03C6\u03B1')
    # Make sure that we can use plain text when MEMBER_STRING_ENCODING is
    # unicode
    atom_entry.title.type = "plain text"
    atom_entry.title.text = "more text"
    xml = atom_entry.ToString()
    self.assert_("plain text" in xml)
    self.assert_("more text" in xml)
  def testConvertExampleXML(self):
    """A mixed-encoding sample document must parse without decode errors."""
    try:
      entry = atom.CreateClassFromXMLString(atom.Entry,
          test_data.GBASE_STRING_ENCODING_ENTRY)
    except UnicodeDecodeError:
      self.fail('Error when converting XML')
class DeprecationDecoratorTest(unittest.TestCase):
  """Checks that atom.deprecated wraps a function without renaming it."""
  def testDeprecationWarning(self):
    def to_deprecate():
      return 5
    # func_name is the Python 2 spelling of a function's __name__.
    self.assertEqual(to_deprecate.func_name, 'to_deprecate')
    deprecated = atom.deprecated('test')(to_deprecate)
    self.assertNotEqual(to_deprecate, deprecated)
    # After decorating a function as deprecated, the function name should
    # still be the name of the original function.
    self.assertEqual(deprecated.func_name, 'to_deprecate')
    # The decorator is applied manually below (equivalent to the commented
    # @ syntax) so the wrapping step is explicit in the test.
    #@atom.deprecated()
    def also_deprecated():
      return 6
    also_deprecated = atom.deprecated()(also_deprecated)
    self.assertEqual(also_deprecated.func_name, 'also_deprecated')
def suite():
  """Assemble every test case class in this module into one suite."""
  test_classes = [AuthorTest, EmailTest, NameTest,
                  ExtensionElementTest, LinkTest, GeneratorTest, TitleTest,
                  SubtitleTest, SummaryTest, IdTest, IconTest, LogoTest,
                  RightsTest, UpdatedTest, PublishedTest, FeedEntryParentTest,
                  EntryTest, ContentEntryParentTest,
                  PreserveUnkownElementTest, FeedTest, LinkFinderTest,
                  AtomBaseTest, UtfParsingTest, DeprecationDecoratorTest]
  return conf.build_suite(test_classes)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.url
import gdata.test_config as conf
class UrlTest(unittest.TestCase):
  """Tests for atom.url parsing, serialization, and comparison."""

  def testParseUrl(self):
    """parse_url splits protocol, host, port, path, and query params."""
    url = atom.url.parse_url('http://www.google.com/calendar/feeds')
    self.assert_(url.protocol == 'http')
    self.assert_(url.port is None)
    self.assert_(url.host == 'www.google.com')
    self.assert_(url.path == '/calendar/feeds')
    self.assert_(url.params == {})
    url = atom.url.parse_url('http://example.com:6091/calendar/feeds')
    self.assert_(url.protocol == 'http')
    self.assert_(url.host == 'example.com')
    # Note: the port is stored as a string, not an int.
    self.assert_(url.port == '6091')
    self.assert_(url.path == '/calendar/feeds')
    self.assert_(url.params == {})
    # Relative URLs carry no protocol or host.
    url = atom.url.parse_url('/calendar/feeds?foo=bar')
    self.assert_(url.protocol is None)
    self.assert_(url.host is None)
    self.assert_(url.path == '/calendar/feeds')
    self.assert_(len(url.params.keys()) == 1)
    self.assert_('foo' in url.params)
    self.assert_(url.params['foo'] == 'bar')
    # Query parameters are URL-decoded ('+' -> space, '%3D' -> '=').
    url = atom.url.parse_url('/calendar/feeds?my+foo=bar%3Dx')
    self.assert_(len(url.params.keys()) == 1)
    self.assert_('my foo' in url.params)
    self.assert_(url.params['my foo'] == 'bar=x')

  def testUrlToString(self):
    """String conversion includes host:port and escapes query params."""
    url = atom.url.Url(port=80)
    url.host = 'example.com'
    # BUG FIX: the original line was self.assert_(str(url), '//example.com:80')
    # which passes the expected value as the assertion *message*, so the
    # check always succeeded. Compare the two values instead.
    self.assertEqual(str(url), '//example.com:80')
    url = atom.url.Url(protocol='http', host='example.com', path='/feed')
    url.params['has spaces'] = 'sneaky=values?&!'
    self.assert_(url.to_string() == (
        'http://example.com/feed?has+spaces=sneaky%3Dvalues%3F%26%21'))

  def testGetRequestUri(self):
    """The request URI and param string exclude protocol and host."""
    url = atom.url.Url(protocol='http', host='example.com', path='/feed')
    url.params['has spaces'] = 'sneaky=values?&!'
    self.assert_(url.get_request_uri() == (
        '/feed?has+spaces=sneaky%3Dvalues%3F%26%21'))
    self.assert_(url.get_param_string() == (
        'has+spaces=sneaky%3Dvalues%3F%26%21'))

  def testComparisons(self):
    """Urls compare equal regardless of parameter dict ordering.

    Renamed from the original typo 'testComparistons'; unittest still
    discovers the method by its 'test' prefix.
    """
    url1 = atom.url.Url(protocol='http', host='example.com', path='/feed',
                        params={'x':'1', 'y':'2'})
    url2 = atom.url.Url(host='example.com', port=80, path='/feed',
                        params={'y':'2', 'x':'1'})
    self.assertEquals(url1, url2)
    url3 = atom.url.Url(host='example.com', port=81, path='/feed',
                        params={'x':'1', 'y':'2'})
    self.assert_(url1 != url3)
    self.assert_(url2 != url3)
    url4 = atom.url.Url(protocol='ftp', host='example.com', path='/feed',
                        params={'x':'1', 'y':'2'})
    self.assert_(url1 != url4)
    self.assert_(url2 != url4)
    self.assert_(url3 != url4)
def suite():
  """Build a suite containing all test cases in this module."""
  test_classes = [UrlTest]
  return conf.build_suite(test_classes)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.mock_http
import atom.http
class MockHttpClientUnitTest(unittest.TestCase):
  """Tests record/replay behavior of atom.mock_http.MockHttpClient.

  NOTE(review): this class uses atom.http_interface, which is not imported
  at the top of this module; it appears reachable only because importing
  atom.mock_http / atom.http pulls it in. An explicit import would be safer.
  """
  def setUp(self):
    self.client = atom.mock_http.MockHttpClient()
  # NOTE(review): method name has a typo ('Repond'); left as-is since
  # unittest discovers it by the 'test' prefix regardless.
  def testRepondToGet(self):
    """A canned response is returned and the request is recorded."""
    mock_response = atom.http_interface.HttpResponse(body='Hooray!',
        status=200, reason='OK')
    self.client.add_response(mock_response, 'GET',
        'http://example.com/hooray')
    response = self.client.request('GET', 'http://example.com/hooray')
    self.assertEquals(len(self.client.recordings), 1)
    self.assertEquals(response.status, 200)
    self.assertEquals(response.read(), 'Hooray!')
  def testRecordResponse(self):
    """Pass-through mode records live responses for later replay.

    NOTE(review): performs a live HTTP request to www.google.com.
    """
    # Turn on pass-through record mode.
    self.client.real_client = atom.http.ProxiedHttpClient()
    live_response = self.client.request('GET',
        'http://www.google.com/base/feeds/snippets?max-results=1')
    live_response_body = live_response.read()
    self.assertEquals(live_response.status, 200)
    self.assertEquals(live_response_body.startswith('<?xml'), True)
    # Requery for the now canned data.
    self.client.real_client = None
    canned_response = self.client.request('GET',
        'http://www.google.com/base/feeds/snippets?max-results=1')
    # The canned response should be the stored response.
    canned_response_body = canned_response.read()
    self.assertEquals(canned_response.status, 200)
    self.assertEquals(canned_response_body, live_response_body)
  def testUnrecordedRequest(self):
    """A request with no recording raises NoRecordingFound."""
    try:
      self.client.request('POST', 'http://example.org')
      self.fail()
    except atom.mock_http.NoRecordingFound:
      pass
def suite():
  """Wrap the mock HTTP client tests in a TestSuite."""
  mock_client_suite = unittest.makeSuite(MockHttpClientUnitTest, 'test')
  return unittest.TestSuite((mock_client_suite,))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom.core
import gdata.test_config as conf
# XML fixtures used by the parsing tests below.
SAMPLE_XML = ('<outer xmlns="http://example.com/xml/1" '
              'xmlns:two="http://example.com/xml/2">'
              '<inner x="123"/>'
              '<inner x="234" y="abc"/>'
              '<inner>'
              '<two:nested>Some Test</two:nested>'
              '<nested>Different Namespace</nested>'
              '</inner>'
              '<other two:z="true"></other>'
              '</outer>')
NO_NAMESPACE_XML = ('<foo bar="123"><baz>Baz Text!</baz></foo>')
# BUG FIX: in the original, the <same> element in V1_XML and V2_XML was
# opened but never closed, leaving both documents malformed (the closing
# root tag appeared while <same> was still open). The element is now
# self-closing so the fixtures are well-formed XML.
V1_XML = ('<able xmlns="http://example.com/1" '
          'xmlns:ex="http://example.com/ex/1">'
          '<baker foo="42"/>'
          '<ex:charlie>Greetings!</ex:charlie>'
          '<same xmlns="http://example.com/s" x="true"/>'
          '</able>')
V2_XML = ('<alpha xmlns="http://example.com/2" '
          'xmlns:ex="http://example.com/ex/2">'
          '<bravo bar="42"/>'
          '<ex:charlie>Greetings!</ex:charlie>'
          '<same xmlns="http://example.com/s" x="true"/>'
          '</alpha>')
class Child(atom.core.XmlElement):
  """Test element whose qualified name differs between versions 1 and 2."""
  _qname = ('{http://example.com/1}child', '{http://example.com/2}child')
class Foo(atom.core.XmlElement):
  """Test element with a namespace-free tag."""
  _qname = 'foo'
class Example(atom.core.XmlElement):
  """Test element exercising single-child, repeated-child, attribute, and
  version-dependent attribute rules."""
  _qname = '{http://example.com}foo'
  child = Child  # single nested Child element
  foos = [Foo]  # repeated Foo child elements
  tag = 'tag'  # XML attribute named 'tag'
  versioned_attr = ('attr', '{http://new_ns}attr')  # name varies by version
# Example XmlElement subclass declarations.
class Inner(atom.core.XmlElement):
  """Maps the <inner> element; its 'x' attribute is exposed as my_x."""
  _qname = '{http://example.com/xml/1}inner'
  my_x = 'x'
class Outer(atom.core.XmlElement):
  """Maps the <outer> root element with repeated Inner children."""
  _qname = '{http://example.com/xml/1}outer'
  innards = [Inner]
class XmlElementTest(unittest.TestCase):
  """Tests schema-driven parsing, rule generation, and round-tripping of
  atom.core.XmlElement subclasses."""
  def testGetQName(self):
    """_get_qname returns the version-appropriate qualified name."""
    class Unversioned(atom.core.XmlElement):
      _qname = '{http://example.com}foo'
    class Versioned(atom.core.XmlElement):
      _qname = ('{http://example.com/1}foo', '{http://example.com/2}foo')
    # An unversioned qname is the same for every requested version.
    self.assert_(
        atom.core._get_qname(Unversioned, 1) == '{http://example.com}foo')
    self.assert_(
        atom.core._get_qname(Unversioned, 2) == '{http://example.com}foo')
    self.assert_(
        atom.core._get_qname(Versioned, 1) == '{http://example.com/1}foo')
    self.assert_(
        atom.core._get_qname(Versioned, 2) == '{http://example.com/2}foo')
  def testConstructor(self):
    """A default-constructed Example has empty/None members."""
    e = Example()
    self.assert_(e.child is None)
    self.assert_(e.tag is None)
    self.assert_(e.versioned_attr is None)
    self.assert_(e.foos == [])
    self.assert_(e.text is None)
  def testGetRules(self):
    """_get_rules yields (qname, element rules, attribute rules) per version."""
    rules1 = Example._get_rules(1)
    self.assert_(rules1[0] == '{http://example.com}foo')
    # Element rule values are (member name, class, is_repeated).
    self.assert_(rules1[1]['{http://example.com/1}child'] == ('child', Child,
        False))
    self.assert_(rules1[1]['foo'] == ('foos', Foo, True))
    self.assert_(rules1[2]['tag'] == 'tag')
    self.assert_(rules1[2]['attr'] == 'versioned_attr')
    # Check to make sure we don't recalculate the rules.
    self.assert_(rules1 == Example._get_rules(1))
    rules2 = Example._get_rules(2)
    self.assert_(rules2[0] == '{http://example.com}foo')
    self.assert_(rules2[1]['{http://example.com/2}child'] == ('child', Child,
        False))
    self.assert_(rules2[1]['foo'] == ('foos', Foo, True))
    self.assert_(rules2[2]['tag'] == 'tag')
    self.assert_(rules2[2]['{http://new_ns}attr'] == 'versioned_attr')
  def testGetElements(self):
    """get_elements filters declared and undeclared children by
    tag, namespace, and version."""
    e = Example()
    e.child = Child()
    e.child.text = 'child text'
    e.foos.append(Foo())
    e.foos[0].text = 'foo1'
    e.foos.append(Foo())
    e.foos[1].text = 'foo2'
    e._other_elements.append(atom.core.XmlElement())
    e._other_elements[0]._qname = 'bar'
    e._other_elements[0].text = 'other1'
    e._other_elements.append(atom.core.XmlElement())
    e._other_elements[1]._qname = 'child'
    e._other_elements[1].text = 'other2'
    self.contains_expected_elements(e.get_elements(),
        ['foo1', 'foo2', 'child text', 'other1', 'other2'])
    self.contains_expected_elements(e.get_elements('child'),
        ['child text', 'other2'])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/1'), ['child text'])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/2'), [])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/2', 2), ['child text'])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/1', 2), [])
    # Version 3 behaves like version 2 here — presumably the highest
    # declared version's qname applies; confirm against atom.core.
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/2', 3), ['child text'])
    self.contains_expected_elements(e.get_elements('bar'), ['other1'])
    self.contains_expected_elements(e.get_elements('bar', version=2),
        ['other1'])
    self.contains_expected_elements(e.get_elements('bar', version=3),
        ['other1'])
  def contains_expected_elements(self, elements, expected_texts):
    """Asserts elements' text values are exactly expected_texts (any order)."""
    self.assert_(len(elements) == len(expected_texts))
    for element in elements:
      self.assert_(element.text in expected_texts)
  def testConstructorKwargs(self):
    """Keyword args populate declared members; unknown kwargs are dropped."""
    e = Example('hello', child=Child('world'), versioned_attr='1')
    self.assert_(e.text == 'hello')
    self.assert_(e.child.text == 'world')
    self.assert_(e.versioned_attr == '1')
    self.assert_(e.foos == [])
    self.assert_(e.tag is None)
    e = Example(foos=[Foo('1', ignored=1), Foo(text='2')], tag='ok')
    self.assert_(e.text is None)
    self.assert_(e.child is None)
    self.assert_(e.versioned_attr is None)
    self.assert_(len(e.foos) == 2)
    self.assert_(e.foos[0].text == '1')
    self.assert_(e.foos[1].text == '2')
    # The unrecognized 'ignored' kwarg must not become an instance member.
    self.assert_('ignored' not in e.foos[0].__dict__)
    self.assert_(e.tag == 'ok')
  def testParseBasicXmlElement(self):
    """Parsing into the generic XmlElement preserves elements/attributes."""
    element = atom.core.xml_element_from_string(SAMPLE_XML,
        atom.core.XmlElement)
    inners = element.get_elements('inner')
    self.assert_(len(inners) == 3)
    self.assert_(inners[0].get_attributes('x')[0].value == '123')
    self.assert_(inners[0].get_attributes('y') == [])
    self.assert_(inners[1].get_attributes('x')[0].value == '234')
    self.assert_(inners[1].get_attributes('y')[0].value == 'abc')
    self.assert_(inners[2].get_attributes('x') == [])
    inners = element.get_elements('inner', 'http://example.com/xml/1')
    self.assert_(len(inners) == 3)
    inners = element.get_elements(None, 'http://example.com/xml/1')
    self.assert_(len(inners) == 4)
    inners = element.get_elements()
    self.assert_(len(inners) == 4)
    inners = element.get_elements('other')
    self.assert_(len(inners) == 1)
    self.assert_(inners[0].get_attributes(
        'z', 'http://example.com/xml/2')[0].value == 'true')
    inners = element.get_elements('missing')
    self.assert_(len(inners) == 0)
  def testBasicXmlElementPreservesMarkup(self):
    """Round-tripping through the generic XmlElement keeps the tree shape."""
    element = atom.core.xml_element_from_string(SAMPLE_XML,
        atom.core.XmlElement)
    tree1 = ElementTree.fromstring(SAMPLE_XML)
    tree2 = ElementTree.fromstring(element.to_string())
    self.assert_trees_similar(tree1, tree2)
  def testSchemaParse(self):
    """Declared members are populated when parsing into a typed class."""
    outer = atom.core.xml_element_from_string(SAMPLE_XML, Outer)
    self.assert_(isinstance(outer.innards, list))
    self.assert_(len(outer.innards) == 3)
    self.assert_(outer.innards[0].my_x == '123')
  def testSchemaParsePreservesMarkup(self):
    """Round-tripping through a typed class keeps undeclared attributes."""
    outer = atom.core.xml_element_from_string(SAMPLE_XML, Outer)
    tree1 = ElementTree.fromstring(SAMPLE_XML)
    tree2 = ElementTree.fromstring(outer.to_string())
    self.assert_trees_similar(tree1, tree2)
    found_x_and_y = False
    found_x_123 = False
    child = tree1.find('{http://example.com/xml/1}inner')
    matching_children = tree2.findall(child.tag)
    for match in matching_children:
      if 'y' in match.attrib and match.attrib['y'] == 'abc':
        if match.attrib['x'] == '234':
          found_x_and_y = True
        self.assert_(match.attrib['x'] == '234')
      if 'x' in match.attrib and match.attrib['x'] == '123':
        self.assert_('y' not in match.attrib)
        found_x_123 = True
    self.assert_(found_x_and_y)
    self.assert_(found_x_123)
  def testGenericTagAndNamespace(self):
    """tag/namespace setters compose the _qname in either order."""
    element = atom.core.XmlElement(text='content')
    # Try setting tag then namespace.
    element.tag = 'foo'
    self.assert_(element._qname == 'foo')
    element.namespace = 'http://example.com/ns'
    self.assert_(element._qname == '{http://example.com/ns}foo')
    element = atom.core.XmlElement()
    # Try setting namespace then tag.
    element.namespace = 'http://example.com/ns'
    self.assert_(element._qname == '{http://example.com/ns}')
    element.tag = 'foo'
    self.assert_(element._qname == '{http://example.com/ns}foo')
  def assert_trees_similar(self, a, b):
    """Compares two XML trees for approximate matching."""
    for child in a:
      self.assert_(len(a.findall(child.tag)) == len(b.findall(child.tag)))
    for child in b:
      self.assert_(len(a.findall(child.tag)) == len(b.findall(child.tag)))
    self.assert_(len(a) == len(b))
    self.assert_(a.text == b.text)
    self.assert_(a.attrib == b.attrib)
class UtilityFunctionTest(unittest.TestCase):
  """Table-driven checks of atom.core._qname_matches."""

  def testMatchQnames(self):
    # (tag, namespace, qname) triples _qname_matches should accept. Based
    # on the accepted cases, None behaves as a wildcard while '' matches
    # only namespace-free qnames.
    matching_cases = [
        ('foo', 'http://example.com', '{http://example.com}foo'),
        (None, None, '{http://example.com}foo'),
        (None, None, 'foo'),
        (None, None, None),
        (None, None, '{http://example.com}'),
        ('foo', None, '{http://example.com}foo'),
        (None, 'http://example.com', '{http://example.com}foo'),
        (None, '', 'foo'),
        ('foo', '', 'foo'),
        ('foo', '', 'foo'),
    ]
    for tag, namespace, qname in matching_cases:
      self.assert_(atom.core._qname_matches(tag, namespace, qname))
    # Triples that must be rejected (mismatched tag or namespace).
    non_matching_cases = [
        ('foo', 'http://google.com', '{http://example.com}foo'),
        ('foo', 'http://example.com', '{http://example.com}bar'),
        ('foo', 'http://example.com', '{http://google.com}foo'),
        ('bar', 'http://example.com', '{http://google.com}foo'),
        ('foo', None, '{http://example.com}bar'),
        (None, 'http://google.com', '{http://example.com}foo'),
        (None, '', '{http://example.com}foo'),
        ('foo', '', 'bar'),
    ]
    for tag, namespace, qname in non_matching_cases:
      self.assert_(atom.core._qname_matches(tag, namespace, qname) == False)
class Chars(atom.core.XmlElement):
  """Test element declared with a unicode qname and single-char attributes."""
  _qname = u'{http://example.com/}chars'
  y = 'y'
  alpha = 'a'
class Strs(atom.core.XmlElement):
  """Test element containing repeated Chars children."""
  _qname = '{http://example.com/}strs'
  chars = [Chars]
  delta = u'd'
def parse(string):
  """Shorthand: parse *string* into a generic XmlElement tree."""
  target_class = atom.core.XmlElement
  return atom.core.xml_element_from_string(string, target_class)
def create(tag, string):
  """Shorthand: build a bare XmlElement with qname *tag* and text *string*."""
  new_element = atom.core.XmlElement(text=string)
  new_element._qname = tag
  return new_element
class CharacterEncodingTest(unittest.TestCase):
  """Exercises parsing/serializing with unicode, UTF-8, and UTF-16 input.

  NOTE(review): written for Python 2 (str/unicode distinction). The raw
  Greek-letter literals below are the UTF-8 bytes '\\xce\\xb4' in source.
  """
  def testUnicodeInputString(self):
    """Unicode input parses, serializes, and round-trips as UTF-8 output."""
    # Test parsing the inner text.
    self.assertEqual(parse(u'<x>δ</x>').text, u'\u03b4')
    self.assertEqual(parse(u'<x>\u03b4</x>').text, u'\u03b4')
    # Test output valid XML.
    self.assertEqual(parse(u'<x>δ</x>').to_string(), '<x>δ</x>')
    self.assertEqual(parse(u'<x>\u03b4</x>').to_string(), '<x>δ</x>')
    # Test setting the inner text and output valid XML.
    e = create(u'x', u'\u03b4')
    self.assertEqual(e.to_string(), '<x>δ</x>')
    self.assertEqual(e.text, u'\u03b4')
    self.assert_(isinstance(e.text, unicode))
    self.assertEqual(create(u'x', '\xce\xb4'.decode('utf-8')).to_string(),
        '<x>δ</x>')
  def testUnicodeTagsAndAttributes(self):
    """Unicode is accepted in tag names, attribute names, and values."""
    # Begin with test to show underlying ElementTree behavior.
    t = ElementTree.fromstring(u'<del\u03b4ta>test</del\u03b4ta>'.encode('utf-8'))
    self.assertEqual(t.tag, u'del\u03b4ta')
    self.assertEqual(parse(u'<\u03b4elta>test</\u03b4elta>')._qname,
        u'\u03b4elta')
    # Test unicode attribute names and values.
    t = ElementTree.fromstring(u'<x \u03b4a="\u03b4b" />'.encode('utf-8'))
    self.assertEqual(t.attrib, {u'\u03b4a': u'\u03b4b'})
    self.assertEqual(parse(u'<x \u03b4a="\u03b4b" />').get_attributes(
        u'\u03b4a')[0].value, u'\u03b4b')
    x = create('x', None)
    x._other_attributes[u'a'] = u'\u03b4elta'
    self.assert_(x.to_string().startswith('<x a="δelta"'))
  def testUtf8InputString(self):
    """UTF-8 byte-string input is decoded on parse; text set as bytes stays
    bytes until serialization."""
    # Test parsing inner text.
    self.assertEqual(parse('<x>δ</x>').text, u'\u03b4')
    self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-8')).text, u'\u03b4')
    self.assertEqual(parse('<x>\xce\xb4</x>').text, u'\u03b4')
    # Test output valid XML.
    self.assertEqual(parse('<x>δ</x>').to_string(), '<x>δ</x>')
    self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-8')).to_string(),
        '<x>δ</x>')
    self.assertEqual(parse('<x>\xce\xb4</x>').to_string(), '<x>δ</x>')
    # Test setting the inner text and output valid XML.
    e = create('x', '\xce\xb4')
    self.assertEqual(e.to_string(), '<x>δ</x>')
    # Don't change the encoding until the we convert to an XML string.
    self.assertEqual(e.text, '\xce\xb4')
    self.assert_(isinstance(e.text, str))
    self.assert_(isinstance(e.to_string(), str))
    self.assertEqual(create('x', u'\u03b4'.encode('utf-8')).to_string(),
        '<x>δ</x>')
    # Test attributes and values with UTF-8 inputs.
    self.assertEqual(parse('<x \xce\xb4a="\xce\xb4b" />').get_attributes(
        u'\u03b4a')[0].value, u'\u03b4b')
  def testUtf8TagsAndAttributes(self):
    """UTF-8 bytes are accepted in tag names and attribute values."""
    self.assertEqual(
        parse(u'<\u03b4elta>test</\u03b4elta>'.encode('utf-8'))._qname,
        u'\u03b4elta')
    self.assertEqual(parse('<\xce\xb4elta>test</\xce\xb4elta>')._qname,
        u'\u03b4elta')
    # Test an element with UTF-8 in the attribute value.
    x = create('x', None)
    x._other_attributes[u'a'] = '\xce\xb4'
    self.assert_(x.to_string(encoding='UTF-8').startswith('<x a="δ"'))
    self.assert_(x.to_string().startswith('<x a="δ"'))
  def testOtherEncodingOnInputString(self):
    """UTF-16 byte-string input (either byte order) is handled."""
    BIG_ENDIAN = 0
    LITTLE_ENDIAN = 1
    # Test parsing inner text.
    self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-16')).text, u'\u03b4')
    # Test output valid XML.
    self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-16')).to_string(),
        '<x>δ</x>')
    # Test setting the inner text and output valid XML.
    e = create('x', u'\u03b4'.encode('utf-16'))
    self.assertEqual(e.to_string(encoding='utf-16'), '<x>δ</x>')
    # Don't change the encoding until the we convert to an XML string.
    # Allow either little-endian or big-endian byte orderings.
    self.assert_(e.text in ['\xff\xfe\xb4\x03', '\xfe\xff\x03\xb4'])
    endianness = LITTLE_ENDIAN
    if e.text == '\xfe\xff\x03\xb4':
      endianness = BIG_ENDIAN
    self.assert_(isinstance(e.text, str))
    self.assert_(isinstance(e.to_string(encoding='utf-16'), str))
    if endianness == LITTLE_ENDIAN:
      self.assertEqual(
          create('x', '\xff\xfe\xb4\x03').to_string(encoding='utf-16'),
          '<x>δ</x>')
    else:
      self.assertEqual(
          create('x', '\xfe\xff\x03\xb4').to_string(encoding='utf-16'),
          '<x>δ</x>')
  def testOtherEncodingInTagsAndAttributes(self):
    """UTF-16 bytes are accepted in tag names and attribute values."""
    self.assertEqual(
        parse(u'<\u03b4elta>test</\u03b4elta>'.encode('utf-16'))._qname,
        u'\u03b4elta')
    # Test an element with UTF-16 in the attribute value.
    x = create('x', None)
    x._other_attributes[u'a'] = u'\u03b4'.encode('utf-16')
    self.assert_(x.to_string(encoding='UTF-16').startswith('<x a="δ"'))
def suite():
  """Assemble the atom.core element test cases into one suite."""
  test_classes = [XmlElementTest, UtilityFunctionTest, CharacterEncodingTest]
  return conf.build_suite(test_classes)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import unittest
import atom.http_interface
import StringIO
class HttpResponseTest(unittest.TestCase):
  """Tests HttpResponse construction from plain string arguments."""

  def testConstructorWithStrings(self):
    response = atom.http_interface.HttpResponse(
        body='Hi there!', status=200, reason='OK',
        headers={'Content-Length':'9'})
    # The body is consumed incrementally, like a file object.
    self.assertEqual(response.read(amt=1), 'H')
    self.assertEqual(response.read(amt=2), 'i ')
    self.assertEqual(response.read(), 'there!')
    # Once exhausted, further reads yield the empty string.
    self.assertEqual(response.read(), '')
    self.assertEqual(response.reason, 'OK')
    self.assertEqual(response.status, 200)
    self.assertEqual(response.getheader('Content-Length'), '9')
    # Absent headers return None, or the caller-supplied default.
    self.assert_(response.getheader('Missing') is None)
    self.assertEqual(response.getheader('Missing', default='yes'), 'yes')
def suite():
  """Wrap the HttpResponse tests in a TestSuite."""
  response_suite = unittest.makeSuite(HttpResponseTest, 'test')
  return unittest.TestSuite((response_suite,))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# This test may make an actual HTTP request.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import StringIO
import os.path
import atom.mock_http_core
import atom.http_core
class EchoClientTest(unittest.TestCase):
  """Tests EchoHttpClient, which reflects request details back in
  Echo-* response headers and echoes the body."""
  def test_echo_response(self):
    client = atom.mock_http_core.EchoHttpClient()
    # Send a bare-bones POST request.
    request = atom.http_core.HttpRequest(method='POST',
        uri=atom.http_core.Uri(host='www.jeffscudder.com', path='/'))
    request.add_body_part('hello world!', 'text/plain')
    response = client.request(request)
    # An unset port is echoed as the literal string 'None'.
    self.assert_(response.getheader('Echo-Host') == 'www.jeffscudder.com:None')
    self.assert_(response.getheader('Echo-Uri') == '/')
    self.assert_(response.getheader('Echo-Scheme') is None)
    self.assert_(response.getheader('Echo-Method') == 'POST')
    self.assert_(response.getheader('Content-Length') == str(len(
        'hello world!')))
    self.assert_(response.getheader('Content-Type') == 'text/plain')
    self.assert_(response.read() == 'hello world!')
    # Test a path of None should default to /
    request = atom.http_core.HttpRequest(method='POST',
        uri=atom.http_core.Uri(host='www.jeffscudder.com', path=None))
    response = client.request(request)
    self.assert_(response.getheader('Echo-Host') == 'www.jeffscudder.com:None')
    self.assert_(response.getheader('Echo-Method') == 'POST')
    self.assert_(response.getheader('Echo-Uri') == '/')
    # Send a multipart request.
    request = atom.http_core.HttpRequest(method='POST',
        uri=atom.http_core.Uri(scheme='https', host='www.jeffscudder.com',
            port=8080, path='/multipart',
            query={'test': 'true', 'happy': 'yes'}),
        headers={'Authorization':'Test xyzzy', 'Testing':'True'})
    request.add_body_part('start', 'text/plain')
    request.add_body_part(StringIO.StringIO('<html><body>hi</body></html>'),
        'text/html', len('<html><body>hi</body></html>'))
    request.add_body_part('alert("Greetings!")', 'text/javascript')
    response = client.request(request)
    self.assert_(response.getheader('Echo-Host') == 'www.jeffscudder.com:8080')
    self.assert_(
        response.getheader('Echo-Uri') == '/multipart?test=true&happy=yes')
    self.assert_(response.getheader('Echo-Scheme') == 'https')
    self.assert_(response.getheader('Echo-Method') == 'POST')
    self.assert_(response.getheader('Content-Type') == (
        'multipart/related; boundary="%s"' % (atom.http_core.MIME_BOUNDARY,)))
    # The multipart body: a leading marker, then each part separated by the
    # MIME boundary with its own Content-Type header.
    expected_body = ('Media multipart posting'
        '\r\n--%s\r\n'
        'Content-Type: text/plain\r\n\r\n'
        'start'
        '\r\n--%s\r\n'
        'Content-Type: text/html\r\n\r\n'
        '<html><body>hi</body></html>'
        '\r\n--%s\r\n'
        'Content-Type: text/javascript\r\n\r\n'
        'alert("Greetings!")'
        '\r\n--%s--') % (atom.http_core.MIME_BOUNDARY,
        atom.http_core.MIME_BOUNDARY, atom.http_core.MIME_BOUNDARY,
        atom.http_core.MIME_BOUNDARY,)
    self.assert_(response.read() == expected_body)
    self.assert_(response.getheader('Content-Length') == str(
        len(expected_body)))
class MockHttpClientTest(unittest.TestCase):
def setUp(self):
self.client = atom.mock_http_core.MockHttpClient()
def test_respond_with_recording(self):
request = atom.http_core.HttpRequest(method='GET')
atom.http_core.parse_uri('http://www.google.com/').modify_request(request)
self.client.add_response(request, 200, 'OK', body='Testing')
response = self.client.request(request)
self.assert_(response.status == 200)
self.assert_(response.reason == 'OK')
self.assert_(response.read() == 'Testing')
def test_save_and_load_recordings(self):
request = atom.http_core.HttpRequest(method='GET')
atom.http_core.parse_uri('http://www.google.com/').modify_request(request)
self.client.add_response(request, 200, 'OK', body='Testing')
response = self.client.request(request)
self.client._save_recordings('test_save_and_load_recordings')
self.client._recordings = []
try:
response = self.client.request(request)
self.fail('There should be no recording for this request.')
except atom.mock_http_core.NoRecordingFound:
pass
self.client._load_recordings('test_save_and_load_recordings')
response = self.client.request(request)
self.assert_(response.status == 200)
self.assert_(response.reason == 'OK')
self.assert_(response.read() == 'Testing')
def test_use_recordings(self):
request = atom.http_core.HttpRequest(method='GET')
atom.http_core.parse_uri('http://www.google.com/').modify_request(request)
self.client._load_or_use_client('test_use_recordings',
atom.http_core.HttpClient())
response = self.client.request(request)
if self.client.real_client:
self.client._save_recordings('test_use_recordings')
self.assert_(response.status == 200)
self.assert_(response.reason == 'OK')
self.assert_(response.getheader('server') == 'gws')
body = response.read()
self.assert_(body.startswith('<!doctype html>'))
def test_match_request(self):
x = atom.http_core.HttpRequest('http://example.com/', 'GET')
y = atom.http_core.HttpRequest('http://example.com/', 'GET')
self.assert_(atom.mock_http_core._match_request(x, y))
y = atom.http_core.HttpRequest('http://example.com/', 'POST')
self.assert_(not atom.mock_http_core._match_request(x, y))
y = atom.http_core.HttpRequest('http://example.com/1', 'GET')
self.assert_(not atom.mock_http_core._match_request(x, y))
y = atom.http_core.HttpRequest('http://example.com/?gsessionid=1', 'GET')
self.assert_(not atom.mock_http_core._match_request(x, y))
y = atom.http_core.HttpRequest('http://example.com/?start_index=1', 'GET')
self.assert_(atom.mock_http_core._match_request(x, y))
x = atom.http_core.HttpRequest('http://example.com/?gsessionid=1', 'GET')
y = atom.http_core.HttpRequest('http://example.com/?gsessionid=1', 'GET')
self.assert_(atom.mock_http_core._match_request(x, y))
y = atom.http_core.HttpRequest('http://example.com/?gsessionid=2', 'GET')
self.assert_(not atom.mock_http_core._match_request(x, y))
y = atom.http_core.HttpRequest('http://example.com/', 'GET')
self.assert_(not atom.mock_http_core._match_request(x, y))
  def test_use_named_sessions(self):
    # Verifies that a named cached session is recorded on first use and
    # replayed by a brand new client on second use.  A marker header is
    # injected into the stored recording so the replayed response can be
    # distinguished from a freshly echoed one.
    self.client._delete_recordings('mock_http_test.test_use_named_sessions')
    self.client.use_cached_session('mock_http_test.test_use_named_sessions',
                                   atom.mock_http_core.EchoHttpClient())
    request = atom.http_core.HttpRequest('http://example.com', 'GET')
    response = self.client.request(request)
    self.assertEqual(response.getheader('Echo-Method'), 'GET')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    # We will insert a Cache-Marker header to indicate that this is a
    # recorded response, but initially it should not be present.
    self.assertEqual(response.getheader('Cache-Marker'), None)
    # Modify the recorded response to allow us to identify a cached result
    # from an echoed result. We need to be able to check to see if this
    # came from a recording.
    self.assert_('Cache-Marker' not in self.client._recordings[0][1]._headers)
    self.client._recordings[0][1]._headers['Cache-Marker'] = '1'
    self.assert_('Cache-Marker' in self.client._recordings[0][1]._headers)
    # Save the recorded responses.
    self.client.close_session()
    # Create a new client, and have it use the recorded session.
    client = atom.mock_http_core.MockHttpClient()
    client.use_cached_session('mock_http_test.test_use_named_sessions',
                              atom.mock_http_core.EchoHttpClient())
    # Make the same request, which should use the recorded result.
    response = client.request(request)
    self.assertEqual(response.getheader('Echo-Method'), 'GET')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    # We should now see the cache marker since the response is replayed.
    self.assertEqual(response.getheader('Cache-Marker'), '1')
def suite():
  """Build the combined test suite for this module's test cases."""
  combined = unittest.TestSuite()
  combined.addTest(unittest.makeSuite(MockHttpClientTest, 'test'))
  combined.addTest(unittest.makeSuite(EchoClientTest, 'test'))
  return combined
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
# -*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder@gmail.com (Jeff Scudder)'
import sys
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom.data
import atom.core
import gdata.test_config as conf
# Sample Google Base Atom entry used as a shared fixture by several test
# cases below (EntryTest, LinkFinderTest).  Note the deliberate whitespace
# inside <id>: tests assert that surrounding spaces are preserved.
XML_ENTRY_1 = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<id> http://www.google.com/test/id/url </id>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<link rel='license'
href='http://creativecommons.org/licenses/by-nc/2.5/rdf'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
class AuthorTest(unittest.TestCase):
  """Tests serialization and extension handling for atom.data.Author."""

  def setUp(self):
    self.author = atom.data.Author()

  def testEmptyAuthorShouldHaveEmptyExtensionLists(self):
    """A fresh Author has empty extension element/attribute containers."""
    self.assertTrue(isinstance(self.author._other_elements, list))
    self.assertEqual(len(self.author._other_elements), 0)
    self.assertTrue(isinstance(self.author._other_attributes, dict))
    self.assertEqual(len(self.author._other_attributes), 0)

  def testNormalAuthorShouldHaveNoExtensionElements(self):
    """Known child elements are not treated as extensions on round trip."""
    self.author.name = atom.data.Name(text='Jeff Scudder')
    self.assertEqual(self.author.name.text, 'Jeff Scudder')
    self.assertEqual(len(self.author._other_elements), 0)
    new_author = atom.core.XmlElementFromString(self.author.ToString(),
                                                atom.data.Author)
    self.assertEqual(len(new_author._other_elements), 0)
    self.assertEqual(new_author.name.text, 'Jeff Scudder')
    # An unknown child element must be preserved as an extension element.
    self.author.extension_elements.append(atom.data.ExtensionElement(
        'foo', text='bar'))
    self.assertEqual(len(self.author.extension_elements), 1)
    self.assertEqual(self.author.name.text, 'Jeff Scudder')
    new_author = atom.core.parse(self.author.ToString(), atom.data.Author)
    self.assertEqual(len(self.author.extension_elements), 1)
    self.assertEqual(new_author.name.text, 'Jeff Scudder')

  def testEmptyAuthorToAndFromStringShouldMatch(self):
    """Serializing an empty Author is stable across a round trip."""
    string_from_author = self.author.ToString()
    new_author = atom.core.XmlElementFromString(string_from_author,
                                                atom.data.Author)
    string_from_new_author = new_author.ToString()
    self.assertEqual(string_from_author, string_from_new_author)

  def testAuthorWithNameToAndFromStringShouldMatch(self):
    """Serializing an Author with a name is stable across a round trip."""
    self.author.name = atom.data.Name()
    self.author.name.text = 'Jeff Scudder'
    string_from_author = self.author.ToString()
    new_author = atom.core.XmlElementFromString(string_from_author,
                                                atom.data.Author)
    string_from_new_author = new_author.ToString()
    self.assertEqual(string_from_author, string_from_new_author)
    self.assertEqual(self.author.name.text, new_author.name.text)

  def testExtensionElements(self):
    """Unknown XML attributes survive a serialization round trip."""
    self.author.extension_attributes['foo1'] = 'bar'
    self.author.extension_attributes['foo2'] = 'rab'
    self.assertEqual(self.author.extension_attributes['foo1'], 'bar')
    self.assertEqual(self.author.extension_attributes['foo2'], 'rab')
    new_author = atom.core.parse(str(self.author), atom.data.Author)
    self.assertEqual(new_author.extension_attributes['foo1'], 'bar')
    self.assertEqual(new_author.extension_attributes['foo2'], 'rab')

  def testConvertFullAuthorToAndFromString(self):
    """Parsing a complete author element populates name, email, and uri."""
    TEST_AUTHOR = """<?xml version="1.0" encoding="utf-8"?>
<author xmlns="http://www.w3.org/2005/Atom">
<name xmlns="http://www.w3.org/2005/Atom">John Doe</name>
<email xmlns="http://www.w3.org/2005/Atom">john@example.com</email>
<uri>http://www.google.com</uri>
</author>"""
    author = atom.core.parse(TEST_AUTHOR, atom.data.Author)
    self.assertEqual(author.name.text, 'John Doe')
    self.assertEqual(author.email.text, 'john@example.com')
    self.assertEqual(author.uri.text, 'http://www.google.com')
class EmailTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Email."""

  def setUp(self):
    self.email = atom.data.Email()

  def testEmailToAndFromString(self):
    """Text content and (empty) extensions survive a round trip."""
    self.email.text = 'This is a test'
    new_email = atom.core.parse(self.email.to_string(), atom.data.Email)
    self.assertEqual(self.email.text, new_email.text)
    self.assertEqual(self.email.extension_elements,
                     new_email.extension_elements)
class NameTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Name."""

  def setUp(self):
    self.name = atom.data.Name()

  def testEmptyNameToAndFromStringShouldMatch(self):
    """Serializing an empty Name is stable across a round trip."""
    string_from_name = self.name.ToString()
    new_name = atom.core.XmlElementFromString(string_from_name,
                                              atom.data.Name)
    string_from_new_name = new_name.ToString()
    self.assertEqual(string_from_name, string_from_new_name)

  def testText(self):
    """Text defaults to None and survives a round trip once set."""
    self.assertTrue(self.name.text is None)
    self.name.text = 'Jeff Scudder'
    self.assertEqual(self.name.text, 'Jeff Scudder')
    new_name = atom.core.parse(self.name.to_string(), atom.data.Name)
    self.assertEqual(new_name.text, self.name.text)

  def testExtensionElements(self):
    """Unknown attributes survive a round trip."""
    self.name.extension_attributes['foo'] = 'bar'
    self.assertEqual(self.name.extension_attributes['foo'], 'bar')
    new_name = atom.core.parse(self.name.ToString(), atom.data.Name)
    self.assertEqual(new_name.extension_attributes['foo'], 'bar')
class ExtensionElementTest(unittest.TestCase):
  """Tests parsing and serialization of atom.data.ExtensionElement trees."""

  def setUp(self):
    self.ee = atom.data.ExtensionElement('foo')
    self.EXTENSION_TREE = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<g:author xmlns:g="http://www.google.com">
<g:name>John Doe
<g:foo yes="no" up="down">Bar</g:foo>
</g:name>
</g:author>
</feed>"""

  def testEmptyEEShouldProduceEmptyString(self):
    # TODO: this test has never been implemented; it only documents intent.
    pass

  def testEEParsesTreeCorrectly(self):
    """A nested tree parses into tag/namespace/text/attributes/children."""
    deep_tree = atom.core.xml_element_from_string(self.EXTENSION_TREE,
                                                  atom.data.ExtensionElement)
    self.assertEqual(deep_tree.tag, 'feed')
    self.assertEqual(deep_tree.namespace, 'http://www.w3.org/2005/Atom')
    self.assertEqual(deep_tree.children[0].tag, 'author')
    self.assertEqual(deep_tree.children[0].namespace, 'http://www.google.com')
    self.assertEqual(deep_tree.children[0].children[0].tag, 'name')
    self.assertEqual(deep_tree.children[0].children[0].namespace,
                     'http://www.google.com')
    self.assertEqual(deep_tree.children[0].children[0].text.strip(),
                     'John Doe')
    self.assertEqual(
        deep_tree.children[0].children[0].children[0].text.strip(), 'Bar')
    foo = deep_tree.children[0].children[0].children[0]
    self.assertEqual(foo.tag, 'foo')
    self.assertEqual(foo.namespace, 'http://www.google.com')
    self.assertEqual(foo.attributes['up'], 'down')
    self.assertEqual(foo.attributes['yes'], 'no')
    self.assertEqual(foo.children, [])

  def testEEToAndFromStringShouldMatch(self):
    """Serialization is stable for both a leaf element and a deep tree."""
    string_from_ee = self.ee.ToString()
    new_ee = atom.core.xml_element_from_string(string_from_ee,
                                               atom.data.ExtensionElement)
    string_from_new_ee = new_ee.ToString()
    self.assertEqual(string_from_ee, string_from_new_ee)
    deep_tree = atom.core.xml_element_from_string(self.EXTENSION_TREE,
                                                  atom.data.ExtensionElement)
    string_from_deep_tree = deep_tree.ToString()
    new_deep_tree = atom.core.xml_element_from_string(string_from_deep_tree,
                                                      atom.data.ExtensionElement)
    string_from_new_deep_tree = new_deep_tree.ToString()
    self.assertEqual(string_from_deep_tree, string_from_new_deep_tree)
class LinkTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Link."""

  def setUp(self):
    self.link = atom.data.Link()

  def testLinkToAndFromString(self):
    """href, hreflang, type, and extension attributes survive a round trip."""
    self.link.href = 'test href'
    self.link.hreflang = 'english'
    self.link.type = 'text/html'
    self.link.extension_attributes['foo'] = 'bar'
    self.assertEqual(self.link.href, 'test href')
    self.assertEqual(self.link.hreflang, 'english')
    self.assertEqual(self.link.type, 'text/html')
    self.assertEqual(self.link.extension_attributes['foo'], 'bar')
    new_link = atom.core.parse(self.link.ToString(), atom.data.Link)
    self.assertEqual(self.link.href, new_link.href)
    self.assertEqual(self.link.type, new_link.type)
    self.assertEqual(self.link.hreflang, new_link.hreflang)
    self.assertEqual(self.link.extension_attributes['foo'],
                     new_link.extension_attributes['foo'])

  def testLinkType(self):
    """The constructor accepts type as a keyword argument."""
    test_link = atom.data.Link(type='text/html')
    self.assertEqual(test_link.type, 'text/html')
class GeneratorTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Generator."""

  def setUp(self):
    self.generator = atom.data.Generator()

  def testGeneratorToAndFromString(self):
    """uri, version, and extension attributes survive a round trip."""
    self.generator.uri = 'www.google.com'
    self.generator.version = '1.0'
    self.generator.extension_attributes['foo'] = 'bar'
    self.assertEqual(self.generator.uri, 'www.google.com')
    self.assertEqual(self.generator.version, '1.0')
    self.assertEqual(self.generator.extension_attributes['foo'], 'bar')
    new_generator = atom.core.parse(self.generator.ToString(),
                                    atom.data.Generator)
    self.assertEqual(self.generator.uri, new_generator.uri)
    self.assertEqual(self.generator.version, new_generator.version)
    self.assertEqual(self.generator.extension_attributes['foo'],
                     new_generator.extension_attributes['foo'])
class TitleTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Title."""

  def setUp(self):
    self.title = atom.data.Title()

  def testTitleToAndFromString(self):
    """type and text (including '<' needing escaping) survive a round trip."""
    self.title.type = 'text'
    self.title.text = 'Less: <'
    self.assertEqual(self.title.type, 'text')
    self.assertEqual(self.title.text, 'Less: <')
    new_title = atom.core.parse(str(self.title), atom.data.Title)
    self.assertEqual(self.title.type, new_title.type)
    self.assertEqual(self.title.text, new_title.text)
class SubtitleTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Subtitle."""

  def setUp(self):
    self.subtitle = atom.data.Subtitle()

  # Renamed from the copy-pasted testTitleToAndFromString.
  def testSubtitleToAndFromString(self):
    """type and text (including '&' needing escaping) survive a round trip."""
    self.subtitle.type = 'text'
    self.subtitle.text = 'sub & title'
    self.assertEqual(self.subtitle.type, 'text')
    self.assertEqual(self.subtitle.text, 'sub & title')
    new_subtitle = atom.core.parse(self.subtitle.ToString(),
                                   atom.data.Subtitle)
    self.assertEqual(self.subtitle.type, new_subtitle.type)
    self.assertEqual(self.subtitle.text, new_subtitle.text)
class SummaryTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Summary."""

  def setUp(self):
    self.summary = atom.data.Summary()

  # Renamed from the copy-pasted testTitleToAndFromString.
  def testSummaryToAndFromString(self):
    """type and text survive a round trip."""
    self.summary.type = 'text'
    self.summary.text = 'Less: <'
    self.assertEqual(self.summary.type, 'text')
    self.assertEqual(self.summary.text, 'Less: <')
    new_summary = atom.core.parse(self.summary.ToString(), atom.data.Summary)
    self.assertEqual(self.summary.type, new_summary.type)
    self.assertEqual(self.summary.text, new_summary.text)
class CategoryTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Category."""

  def setUp(self):
    self.category = atom.data.Category()

  def testCategoryToAndFromString(self):
    """term, scheme, and label survive a round trip."""
    self.category.term = 'x'
    self.category.scheme = 'y'
    self.category.label = 'z'
    self.assertEqual(self.category.term, 'x')
    self.assertEqual(self.category.scheme, 'y')
    self.assertEqual(self.category.label, 'z')
    new_category = atom.core.parse(self.category.to_string(),
                                   atom.data.Category)
    self.assertEqual(self.category.term, new_category.term)
    self.assertEqual(self.category.scheme, new_category.scheme)
    self.assertEqual(self.category.label, new_category.label)
class ContributorTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Contributor."""

  def setUp(self):
    self.contributor = atom.data.Contributor()

  def testContributorToAndFromString(self):
    """name, email, and uri child elements survive a round trip."""
    self.contributor.name = atom.data.Name(text='J Scud')
    self.contributor.email = atom.data.Email(text='nobody@nowhere')
    self.contributor.uri = atom.data.Uri(text='http://www.google.com')
    self.assertEqual(self.contributor.name.text, 'J Scud')
    self.assertEqual(self.contributor.email.text, 'nobody@nowhere')
    self.assertEqual(self.contributor.uri.text, 'http://www.google.com')
    new_contributor = atom.core.parse(self.contributor.ToString(),
                                      atom.data.Contributor)
    self.assertEqual(self.contributor.name.text, new_contributor.name.text)
    self.assertEqual(self.contributor.email.text, new_contributor.email.text)
    self.assertEqual(self.contributor.uri.text, new_contributor.uri.text)
class IdTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Id."""

  def setUp(self):
    self.my_id = atom.data.Id()

  def testIdToAndFromString(self):
    """Text content survives a round trip."""
    self.my_id.text = 'my nifty id'
    self.assertEqual(self.my_id.text, 'my nifty id')
    new_id = atom.core.parse(self.my_id.ToString(), atom.data.Id)
    self.assertEqual(self.my_id.text, new_id.text)
class IconTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Icon."""

  def setUp(self):
    self.icon = atom.data.Icon()

  def testIconToAndFromString(self):
    """Text content survives a round trip."""
    self.icon.text = 'my picture'
    self.assertEqual(self.icon.text, 'my picture')
    new_icon = atom.core.parse(str(self.icon), atom.data.Icon)
    self.assertEqual(self.icon.text, new_icon.text)
class LogoTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Logo."""

  def setUp(self):
    self.logo = atom.data.Logo()

  def testLogoToAndFromString(self):
    """Text content survives a round trip."""
    self.logo.text = 'my logo'
    self.assertEqual(self.logo.text, 'my logo')
    new_logo = atom.core.parse(self.logo.ToString(), atom.data.Logo)
    self.assertEqual(self.logo.text, new_logo.text)
class RightsTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Rights."""

  def setUp(self):
    self.rights = atom.data.Rights()

  # Renamed from the copy-pasted testContributorToAndFromString.
  def testRightsToAndFromString(self):
    """text and type survive a round trip."""
    self.rights.text = 'you have the right to remain silent'
    self.rights.type = 'text'
    self.assertEqual(self.rights.text,
                     'you have the right to remain silent')
    self.assertEqual(self.rights.type, 'text')
    new_rights = atom.core.parse(self.rights.ToString(), atom.data.Rights)
    self.assertEqual(self.rights.text, new_rights.text)
    self.assertEqual(self.rights.type, new_rights.type)
class UpdatedTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Updated."""

  def setUp(self):
    self.updated = atom.data.Updated()

  def testUpdatedToAndFromString(self):
    """Text content survives a round trip."""
    self.updated.text = 'my time'
    self.assertEqual(self.updated.text, 'my time')
    new_updated = atom.core.parse(self.updated.ToString(), atom.data.Updated)
    self.assertEqual(self.updated.text, new_updated.text)
class PublishedTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Published."""

  def setUp(self):
    self.published = atom.data.Published()

  def testPublishedToAndFromString(self):
    """Text content survives a round trip."""
    self.published.text = 'pub time'
    self.assertEqual(self.published.text, 'pub time')
    new_published = atom.core.parse(self.published.ToString(),
                                    atom.data.Published)
    self.assertEqual(self.published.text, new_published.text)
class FeedEntryParentTest(unittest.TestCase):
  """The test accesses hidden methods in atom.FeedEntryParent."""

  def testConvertToAndFromElementTree(self):
    """_harvest_tree copies author and id members from another tree."""
    # Use entry because FeedEntryParent doesn't have a tag or namespace.
    original = atom.data.Entry()
    copy = atom.data.FeedEntryParent()
    original.author.append(atom.data.Author(name=atom.data.Name(
        text='J Scud')))
    self.assertEqual(original.author[0].name.text, 'J Scud')
    self.assertEqual(copy.author, [])
    original.id = atom.data.Id(text='test id')
    self.assertEqual(original.id.text, 'test id')
    self.assertTrue(copy.id is None)
    copy._harvest_tree(original._to_tree())
    self.assertEqual(original.author[0].name.text, copy.author[0].name.text)
    self.assertEqual(original.id.text, copy.id.text)
class EntryTest(unittest.TestCase):
  """Tests parsing and serialization of atom.data.Entry."""

  def testConvertToAndFromString(self):
    """author and title survive a serialization round trip."""
    entry = atom.data.Entry()
    entry.author.append(atom.data.Author(name=atom.data.Name(text='js')))
    entry.title = atom.data.Title(text='my test entry')
    self.assertEqual(entry.author[0].name.text, 'js')
    self.assertEqual(entry.title.text, 'my test entry')
    new_entry = atom.core.parse(entry.ToString(), atom.data.Entry)
    self.assertEqual(new_entry.author[0].name.text, 'js')
    self.assertEqual(new_entry.title.text, 'my test entry')

  def testEntryCorrectlyConvertsActualData(self):
    """Parsing the XML_ENTRY_1 fixture populates the expected members."""
    entry = atom.core.parse(XML_ENTRY_1, atom.data.Entry)
    self.assertEqual(entry.category[0].scheme,
                     'http://base.google.com/categories/itemtypes')
    self.assertEqual(entry.category[0].term, 'products')
    # Whitespace inside <id> must be preserved verbatim.
    self.assertEqual(entry.id.text, ' http://www.google.com/test/id/url ')
    self.assertEqual(entry.title.text, 'Testing 2000 series laptop')
    self.assertEqual(entry.title.type, 'text')
    self.assertEqual(entry.content.type, 'xhtml')
    #TODO check all other values for the test entry

  def testEntryWithFindElementAndFindAttribute(self):
    """get_elements/get_attributes locate links by rel and read href."""
    entry = atom.data.Entry()
    entry.link.append(atom.data.Link(rel='self', href='x'))
    entry.link.append(atom.data.Link(rel='foo', href='y'))
    entry.link.append(atom.data.Link(rel='edit', href='z'))
    self_link = None
    edit_link = None
    for link in entry.get_elements('link', 'http://www.w3.org/2005/Atom'):
      # NOTE(review): the result of _get_rules is unused here; the call
      # looks like leftover debugging but is retained unchanged.
      ignored1, ignored2, attributes = link.__class__._get_rules(2)
      if link.get_attributes('rel')[0].value == 'self':
        self_link = link.get_attributes('href')[0].value
      elif link.get_attributes('rel')[0].value == 'edit':
        edit_link = link.get_attributes('href')[0].value
    self.assertEqual(self_link, 'x')
    self.assertEqual(edit_link, 'z')

  def testAppControl(self):
    """app:control parses with draft text and a preserved gm extension."""
    TEST_BASE_ENTRY = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<app:control xmlns:app='http://purl.org/atom/app#'>
<app:draft>yes</app:draft>
<gm:disapproved
xmlns:gm='http://base.google.com/ns-metadata/1.0'/>
</app:control>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
    entry = atom.core.parse(TEST_BASE_ENTRY, atom.data.Entry)
    self.assertEqual(entry.control.draft.text, 'yes')
    self.assertEqual(len(entry.control.extension_elements), 1)
    self.assertEqual(entry.control.extension_elements[0].tag, 'disapproved')
class ControlTest(unittest.TestCase):
  """Tests versioned rules and parsing of atom.data.Control."""

  def testVersionRuleGeneration(self):
    """Version 1 uses the pre-standard purl.org app namespace."""
    self.assertEqual(atom.core._get_qname(atom.data.Control, 1),
                     '{http://purl.org/atom/app#}control')
    self.assertEqual(atom.data.Control._get_rules(1)[0],
                     '{http://purl.org/atom/app#}control')

  def testVersionedControlFromString(self):
    """Each namespace parses only under its matching version number."""
    xml_v1 = """<control xmlns="http://purl.org/atom/app#">
<draft>no</draft></control>"""
    xml_v2 = """<control xmlns="http://www.w3.org/2007/app">
<draft>no</draft></control>"""
    control_v1 = atom.core.parse(xml_v1, atom.data.Control, 1)
    control_v2 = atom.core.parse(xml_v2, atom.data.Control, 2)
    self.assertTrue(control_v1 is not None)
    self.assertTrue(control_v2 is not None)
    # Parsing with mismatched version numbers should return None.
    self.assertTrue(atom.core.parse(xml_v1, atom.data.Control, 2) is None)
    self.assertTrue(atom.core.parse(xml_v2, atom.data.Control, 1) is None)

  def testConvertToAndFromString(self):
    """text and draft members survive a round trip."""
    control = atom.data.Control()
    control.text = 'some text'
    control.draft = atom.data.Draft(text='yes')
    self.assertEqual(control.draft.text, 'yes')
    self.assertEqual(control.text, 'some text')
    self.assertTrue(isinstance(control.draft, atom.data.Draft))
    new_control = atom.core.parse(str(control), atom.data.Control)
    self.assertEqual(control.draft.text, new_control.draft.text)
    self.assertEqual(control.text, new_control.text)
    self.assertTrue(isinstance(new_control.draft, atom.data.Draft))
class DraftTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Draft."""

  def testConvertToAndFromString(self):
    """text and extension attributes survive a round trip."""
    draft = atom.data.Draft()
    draft.text = 'maybe'
    draft.extension_attributes['foo'] = 'bar'
    self.assertEqual(draft.text, 'maybe')
    self.assertEqual(draft.extension_attributes['foo'], 'bar')
    new_draft = atom.core.parse(str(draft), atom.data.Draft)
    self.assertEqual(draft.text, new_draft.text)
    self.assertEqual(draft.extension_attributes['foo'],
                     new_draft.extension_attributes['foo'])
class SourceTest(unittest.TestCase):
  """Round-trip serialization tests for atom.data.Source."""

  def testConvertToAndFromString(self):
    """author, title, and generator survive a round trip."""
    source = atom.data.Source()
    source.author.append(atom.data.Author(name=atom.data.Name(text='js')))
    source.title = atom.data.Title(text='my test source')
    source.generator = atom.data.Generator(text='gen')
    self.assertEqual(source.author[0].name.text, 'js')
    self.assertEqual(source.title.text, 'my test source')
    self.assertEqual(source.generator.text, 'gen')
    new_source = atom.core.parse(source.ToString(), atom.data.Source)
    self.assertEqual(new_source.author[0].name.text, 'js')
    self.assertEqual(new_source.title.text, 'my test source')
    self.assertEqual(new_source.generator.text, 'gen')
class FeedTest(unittest.TestCase):
  """Tests serialization and entry ordering for atom.data.Feed."""

  def testConvertToAndFromString(self):
    """author, title, generator, and entries survive a round trip."""
    feed = atom.data.Feed()
    feed.author.append(atom.data.Author(name=atom.data.Name(text='js')))
    feed.title = atom.data.Title(text='my test source')
    feed.generator = atom.data.Generator(text='gen')
    feed.entry.append(atom.data.Entry(author=[atom.data.Author(
        name=atom.data.Name(text='entry author'))]))
    self.assertEqual(feed.author[0].name.text, 'js')
    self.assertEqual(feed.title.text, 'my test source')
    self.assertEqual(feed.generator.text, 'gen')
    self.assertEqual(feed.entry[0].author[0].name.text, 'entry author')
    new_feed = atom.core.parse(feed.ToString(), atom.data.Feed)
    self.assertEqual(new_feed.author[0].name.text, 'js')
    self.assertEqual(new_feed.title.text, 'my test source')
    self.assertEqual(new_feed.generator.text, 'gen')
    self.assertEqual(new_feed.entry[0].author[0].name.text, 'entry author')

  def testPreserveEntryOrder(self):
    """Entries keep document order even when interleaved with other elements,
    and a manual reorder survives a serialization round trip."""
    test_xml = (
        '<feed xmlns="http://www.w3.org/2005/Atom">'
        '<entry><id>0</id></entry>'
        '<entry><id>1</id></entry>'
        '<title>Testing Order</title>'
        '<entry><id>2</id></entry>'
        '<entry><id>3</id></entry>'
        '<entry><id>4</id></entry>'
        '<entry><id>5</id></entry>'
        '<entry><id>6</id></entry>'
        '<entry><id>7</id></entry>'
        '<author/>'
        '<entry><id>8</id></entry>'
        '<id>feed_id</id>'
        '<entry><id>9</id></entry>'
        '</feed>')
    feed = atom.core.parse(test_xml, atom.data.Feed)
    for i in xrange(10):
      self.assertEqual(feed.entry[i].id.text, str(i))
    feed = atom.core.parse(feed.ToString(), atom.data.Feed)
    for i in xrange(10):
      self.assertEqual(feed.entry[i].id.text, str(i))
    # Swap two entries in place (idiomatic tuple swap) and make sure the
    # swapped order is what gets serialized.
    feed.entry[3], feed.entry[4] = feed.entry[4], feed.entry[3]
    self.assertEqual(feed.entry[2].id.text, '2')
    self.assertEqual(feed.entry[3].id.text, '4')
    self.assertEqual(feed.entry[4].id.text, '3')
    self.assertEqual(feed.entry[5].id.text, '5')
    feed = atom.core.parse(feed.to_string(), atom.data.Feed)
    self.assertEqual(feed.entry[2].id.text, '2')
    self.assertEqual(feed.entry[3].id.text, '4')
    self.assertEqual(feed.entry[4].id.text, '3')
    self.assertEqual(feed.entry[5].id.text, '5')
class ContentEntryParentTest(unittest.TestCase):
  """The test accesses hidden methods in atom.FeedEntryParent."""

  def setUp(self):
    self.content = atom.data.Content()

  def testConvertToAndFromElementTree(self):
    """text, type, and src survive a serialization round trip."""
    self.content.text = 'my content'
    self.content.type = 'text'
    self.content.src = 'my source'
    self.assertEqual(self.content.text, 'my content')
    self.assertEqual(self.content.type, 'text')
    self.assertEqual(self.content.src, 'my source')
    new_content = atom.core.parse(self.content.ToString(), atom.data.Content)
    self.assertEqual(self.content.text, new_content.text)
    self.assertEqual(self.content.type, new_content.type)
    self.assertEqual(self.content.src, new_content.src)

  def testContentConstructorSetsSrc(self):
    """The constructor accepts src as a keyword argument."""
    new_content = atom.data.Content(src='abcd')
    self.assertEqual(new_content.src, 'abcd')

  def testContentFromString(self):
    """A bare content element parses with its type attribute."""
    content_xml = '<content xmlns="http://www.w3.org/2005/Atom" type="test"/>'
    content = atom.core.parse(content_xml, atom.data.Content)
    self.assertTrue(isinstance(content, atom.data.Content))
    self.assertEqual(content.type, 'test')
class PreserveUnkownElementTest(unittest.TestCase):
  """Tests correct preservation of XML elements which are non Atom."""
  # NOTE(review): class name misspells "Unknown"; kept for backward
  # compatibility with existing suite definitions.

  def setUp(self):
    GBASE_ATTRIBUTE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id>http://www.google.com/base/feeds/attributes</id>
<updated>2006-11-01T20:35:59.578Z</updated>
<category scheme='http://base.google.com/categories/itemtypes'
term='online jobs'></category>
<category scheme='http://base.google.com/categories/itemtypes'
term='jobs'></category>
<title type='text'>histogram for query: [item type:jobs]</title>
<link rel='alternate' type='text/html'
href='http://base.google.com'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/base/attributes/jobs'></link>
<generator version='1.0'
uri='http://base.google.com'>GoogleBase</generator>
<openSearch:totalResults>16</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>16</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/attributes/job+industy</id>
<updated>2006-11-01T20:36:00.100Z</updated>
<title type='text'>job industry(text)</title>
<content type='text'>Attribute"job industry" of type text.
</content>
<gm:attribute name='job industry' type='text' count='4416629'>
<gm:value count='380772'>it internet</gm:value>
<gm:value count='261565'>healthcare</gm:value>
<gm:value count='142018'>information technology</gm:value>
<gm:value count='124622'>accounting</gm:value>
<gm:value count='111311'>clerical and administrative</gm:value>
<gm:value count='82928'>other</gm:value>
<gm:value count='77620'>sales and sales management</gm:value>
<gm:value count='68764'>information systems</gm:value>
<gm:value count='65859'>engineering and architecture</gm:value>
<gm:value count='64757'>sales</gm:value>
</gm:attribute>
</entry>
</feed>"""
    self.feed = atom.core.parse(GBASE_ATTRIBUTE_FEED,
                                atom.data.Feed)

  def testCaptureOpenSearchElements(self):
    """openSearch extension elements are found by tag and by namespace."""
    self.assertEqual(self.feed.FindExtensions('totalResults')[0].tag,
                     'totalResults')
    self.assertEqual(self.feed.FindExtensions('totalResults')[0].namespace,
                     'http://a9.com/-/spec/opensearchrss/1.0/')
    open_search_extensions = self.feed.FindExtensions(
        namespace='http://a9.com/-/spec/opensearchrss/1.0/')
    self.assertEqual(len(open_search_extensions), 3)
    for element in open_search_extensions:
      self.assertEqual(element.namespace,
                       'http://a9.com/-/spec/opensearchrss/1.0/')

  def testCaptureMetaElements(self):
    """gm:attribute is preserved with its attributes and children."""
    meta_elements = self.feed.entry[0].FindExtensions(
        namespace='http://base.google.com/ns-metadata/1.0')
    self.assertEqual(len(meta_elements), 1)
    self.assertEqual(meta_elements[0].attributes['count'], '4416629')
    self.assertEqual(len(meta_elements[0].children), 10)

  def testCaptureMetaChildElements(self):
    """All ten gm:value children are preserved under gm:attribute."""
    meta_elements = self.feed.entry[0].FindExtensions(
        namespace='http://base.google.com/ns-metadata/1.0')
    meta_children = meta_elements[0].FindChildren(
        namespace='http://base.google.com/ns-metadata/1.0')
    self.assertEqual(len(meta_children), 10)
    for child in meta_children:
      self.assertEqual(child.tag, 'value')
class LinkFinderTest(unittest.TestCase):
  """Tests the link-lookup helpers on a parsed XML_ENTRY_1 entry."""

  def setUp(self):
    self.entry = atom.core.parse(XML_ENTRY_1, atom.data.Entry)

  def testLinkFinderGetsLicenseLink(self):
    """GetLink/GetLicenseLink/FindLicenseLink all locate the license link."""
    self.assertTrue(isinstance(self.entry.GetLink('license'), atom.data.Link))
    self.assertTrue(isinstance(self.entry.GetLicenseLink(), atom.data.Link))
    self.assertEqual(self.entry.GetLink('license').href,
                     'http://creativecommons.org/licenses/by-nc/2.5/rdf')
    self.assertEqual(self.entry.get_license_link().href,
                     'http://creativecommons.org/licenses/by-nc/2.5/rdf')
    self.assertEqual(self.entry.GetLink('license').rel, 'license')
    self.assertEqual(self.entry.FindLicenseLink(),
                     'http://creativecommons.org/licenses/by-nc/2.5/rdf')

  def testLinkFinderGetsAlternateLink(self):
    """GetLink/FindAlternateLink locate the alternate link."""
    self.assertTrue(isinstance(self.entry.GetLink('alternate'),
                               atom.data.Link))
    self.assertEqual(self.entry.GetLink('alternate').href,
                     'http://www.provider-host.com/123456789')
    self.assertEqual(self.entry.FindAlternateLink(),
                     'http://www.provider-host.com/123456789')
    self.assertEqual(self.entry.GetLink('alternate').rel, 'alternate')
class AtomBaseTest(unittest.TestCase):
  """Tests extension-element conversion in the shared Atom base class."""

  def testAtomBaseConvertsExtensions(self):
    """Nested extension elements are emitted into the ElementTree output."""
    # Using Id because it adds no additional members.
    atom_base = atom.data.Id()
    extension_child = atom.data.ExtensionElement('foo',
                                                 namespace='http://ns0.com')
    extension_grandchild = atom.data.ExtensionElement('bar',
                                                      namespace='http://ns0.com')
    extension_child.children.append(extension_grandchild)
    atom_base.extension_elements.append(extension_child)
    self.assertEqual(len(atom_base.extension_elements), 1)
    self.assertEqual(len(atom_base.extension_elements[0].children), 1)
    self.assertEqual(atom_base.extension_elements[0].tag, 'foo')
    self.assertEqual(atom_base.extension_elements[0].children[0].tag, 'bar')
    element_tree = atom_base._to_tree()
    self.assertTrue(element_tree.find('{http://ns0.com}foo') is not None)
    self.assertTrue(element_tree.find('{http://ns0.com}foo').find(
        '{http://ns0.com}bar') is not None)
class UtfParsingTest(unittest.TestCase):
  """Tests parsing and serialization of non-ASCII (Greek) content."""

  def setUp(self):
    self.test_xml = u"""<?xml version="1.0" encoding="utf-8"?>
<entry xmlns='http://www.w3.org/2005/Atom'>
<id>http://www.google.com/test/id/url</id>
<title type='αλφα'>αλφα</title>
</entry>"""

  def testMemberStringEncoding(self):
    """Members parse to unicode; plain-text assignment works; alternate
    STRING_ENCODING values are honored."""
    atom_entry = atom.core.parse(self.test_xml, atom.data.Entry)
    self.assertTrue(isinstance(atom_entry.title.type, unicode))
    self.assertEqual(atom_entry.title.type, u'\u03B1\u03BB\u03C6\u03B1')
    self.assertEqual(atom_entry.title.text, u'\u03B1\u03BB\u03C6\u03B1')
    # Setting object members to unicode strings is supported.
    atom_entry.title.type = u'\u03B1\u03BB\u03C6\u03B1'
    xml = atom_entry.ToString()
    # The unicode code points should be converted to XML escaped sequences.
    self.assertTrue('αλφα' in xml)
    # Make sure that we can use plain text when MEMBER_STRING_ENCODING is utf8
    atom_entry.title.type = "plain text"
    atom_entry.title.text = "more text"
    xml = atom_entry.ToString()
    self.assertTrue("plain text" in xml)
    self.assertTrue("more text" in xml)
    # Test something else than utf-8.  BUG FIX: the original test mutated
    # the module-level atom.core.STRING_ENCODING and never restored it,
    # leaking state into every later test; restore it in a finally block.
    original_encoding = atom.core.STRING_ENCODING
    try:
      atom.core.STRING_ENCODING = 'iso8859_7'
      atom_entry = atom.core.parse(self.test_xml, atom.data.Entry)
      self.assertEqual(atom_entry.title.type, u'\u03B1\u03BB\u03C6\u03B1')
      self.assertEqual(atom_entry.title.text, u'\u03B1\u03BB\u03C6\u03B1')
      # Test using unicode strings directly for object members
      atom_entry = atom.core.parse(self.test_xml, atom.data.Entry)
      self.assertEqual(atom_entry.title.type, u'\u03B1\u03BB\u03C6\u03B1')
      self.assertEqual(atom_entry.title.text, u'\u03B1\u03BB\u03C6\u03B1')
      # Make sure that we can use plain text when MEMBER_STRING_ENCODING is
      # unicode
      atom_entry.title.type = "plain text"
      atom_entry.title.text = "more text"
      xml = atom_entry.ToString()
      self.assertTrue("plain text" in xml)
      self.assertTrue("more text" in xml)
    finally:
      atom.core.STRING_ENCODING = original_encoding

  def testConvertExampleXML(self):
    """A real-world UTF-8 entry (with a non-ASCII byte) parses cleanly."""
    GBASE_STRING_ENCODING_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:gm='http://base.google.com/ns-metadata/1.0'
xmlns:g='http://base.google.com/ns/1.0'
xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/base/feeds/snippets/1749</id>
<published>2007-12-09T03:13:07.000Z</published>
<updated>2008-01-07T03:26:46.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes'
term='Products'/>
<title type='text'>Digital Camera Cord Fits DSC-R1 S40</title>
<content type='html'>SONY \xC2\xB7 Cybershot Digital Camera Usb
Cable DESCRIPTION This is a 2.5 USB 2.0 A to Mini B (5 Pin)
high quality digital camera cable used for connecting your
Sony Digital Cameras and Camcoders. Backward
Compatible with USB 2.0, 1.0 and 1.1. Fully ...</content>
<link rel='alternate' type='text/html'
href='http://adfarm.mediaplex.com/ad/ck/711-5256-8196-2mm'/>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/base/feeds/snippets/1749'/>
<author>
<name>eBay</name>
</author>
<g:item_type type='text'>Products</g:item_type>
<g:item_language type='text'>EN</g:item_language>
<g:target_country type='text'>US</g:target_country>
<g:price type='floatUnit'>0.99 usd</g:price>
<g:image_link
type='url'>http://www.example.com/pict/27_1.jpg</g:image_link>
<g:category type='text'>Cameras &amp; Photo&gt;Digital Camera
Accessories&gt;Cables</g:category>
<g:category type='text'>Cords &amp; USB Cables</g:category>
<g:customer_id type='int'>11729</g:customer_id>
<g:id type='text'>270195049057</g:id>
<g:expiration_date
type='dateTime'>2008-02-06T03:26:46Z</g:expiration_date>
</entry>"""
    try:
      entry = atom.core.parse(GBASE_STRING_ENCODING_ENTRY,
                              atom.data.Entry)
    except UnicodeDecodeError:
      self.fail('Error when converting XML')
class VersionedXmlTest(unittest.TestCase):
  """Checks that XML parsing rules are selected by the requested version."""

  def test_monoversioned_parent_with_multiversioned_child(self):
    # The version 2 rule set for Entry must know about the app:control child.
    v2_rules = atom.data.Entry._get_rules(2)
    self.assert_('{http://www.w3.org/2007/app}control' in v2_rules[1])
    entry_xml = """<entry xmlns='http://www.w3.org/2005/Atom'>
        <app:control xmlns:app='http://www.w3.org/2007/app'>
          <app:draft>yes</app:draft>
        </app:control>
      </entry>"""
    # v2 rules should recognize the app:control element. (The original's
    # redundant 'entry = e = ...' double assignment is removed; the extra
    # name 'e' was never used.)
    entry = atom.core.parse(entry_xml, atom.data.Entry, version=2)
    self.assert_(entry is not None)
    self.assert_(entry.control is not None)
    self.assert_(entry.control.draft is not None)
    self.assertEqual(entry.control.draft.text, 'yes')
    # v1 rules should not parse v2 XML.
    entry = atom.core.parse(entry_xml, atom.data.Entry, version=1)
    self.assert_(entry is not None)
    self.assert_(entry.control is None)
    # The default version should be v1.
    entry = atom.core.parse(entry_xml, atom.data.Entry)
    self.assert_(entry is not None)
    self.assert_(entry.control is None)
class DataModelSanityTest(unittest.TestCase):
  """Runs the shared data-class sanity checks over the core atom classes."""

  def test_xml_elements(self):
    # Every class in this list is checked by the shared helper in conf.
    classes_to_check = [
        atom.data.Feed, atom.data.Source, atom.data.Logo,
        atom.data.Control, atom.data.Draft, atom.data.Generator]
    conf.check_data_classes(self, classes_to_check)
def suite():
  """Assembles every test case defined in this module into one suite."""
  test_cases = [AuthorTest, EmailTest, NameTest,
                ExtensionElementTest, LinkTest, GeneratorTest,
                TitleTest, SubtitleTest, SummaryTest, IdTest,
                IconTest, LogoTest, RightsTest, UpdatedTest,
                PublishedTest, FeedEntryParentTest, EntryTest,
                ContentEntryParentTest, PreserveUnkownElementTest,
                FeedTest, LinkFinderTest, AtomBaseTest,
                UtfParsingTest, VersionedXmlTest,
                DataModelSanityTest]
  return conf.build_suite(test_cases)


# Allow running this module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.auth
import atom.http_core
class BasicAuthTest(unittest.TestCase):
  """Verifies HTTP Basic Auth credential encoding and header injection."""

  def test_modify_request(self):
    # 'Aladdin:open sesame' is the canonical Basic Auth example; its
    # base64 encoding is QWxhZGRpbjpvcGVuIHNlc2FtZQ==.
    credentials = atom.auth.BasicAuth('Aladdin', 'open sesame')
    self.assertEqual(credentials.basic_cookie,
                     'QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
    # Applying the credentials must add the Authorization header.
    http_request = atom.http_core.HttpRequest()
    credentials.modify_request(http_request)
    self.assertEqual(http_request.headers['Authorization'],
                     'Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
def suite():
  """Returns a TestSuite holding all BasicAuthTest test methods."""
  basic_auth_suite = unittest.makeSuite(BasicAuthTest, 'test')
  return unittest.TestSuite((basic_auth_suite,))


# Allow running this module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.http_core
import StringIO
class UriTest(unittest.TestCase):
  """Tests URI parsing and request modification in atom.http_core."""

  def test_parse_uri(self):
    uri = atom.http_core.parse_uri('http://www.google.com/test?q=foo&z=bar')
    self.assert_(uri.scheme == 'http')
    self.assert_(uri.host == 'www.google.com')
    self.assert_(uri.port is None)
    self.assert_(uri.path == '/test')
    self.assert_(uri.query == {'z':'bar', 'q':'foo'})

  def test_static_parse_uri(self):
    # The static method should behave like the module-level parse_uri.
    uri = atom.http_core.Uri.parse_uri('http://test.com/?token=foo&x=1')
    self.assertEqual(uri.scheme, 'http')
    self.assertEqual(uri.host, 'test.com')
    self.assert_(uri.port is None)
    self.assertEqual(uri.query, {'token':'foo', 'x':'1'})

  def test_modify_request_no_request(self):
    # With no request argument, modify_request creates a fresh one.
    uri = atom.http_core.parse_uri('http://www.google.com/test?q=foo&z=bar')
    request = uri.modify_request()
    self.assert_(request.uri.scheme == 'http')
    self.assert_(request.uri.host == 'www.google.com')
    # If no port was provided, the HttpClient is responsible for determining
    # the default.
    self.assert_(request.uri.port is None)
    self.assert_(request.uri.path.startswith('/test'))
    self.assertEqual(request.uri.query, {'z': 'bar', 'q': 'foo'})
    self.assert_(request.method is None)
    self.assert_(request.headers == {})
    self.assert_(request._body_parts == [])

  def test_modify_request_http_with_set_port(self):
    request = atom.http_core.HttpRequest(uri=atom.http_core.Uri(port=8080),
                                         method='POST')
    request.add_body_part('hello', 'text/plain')
    uri = atom.http_core.parse_uri('//example.com/greet')
    self.assert_(uri.query == {})
    self.assert_(uri._get_relative_path() == '/greet')
    self.assert_(uri.host == 'example.com')
    self.assert_(uri.port is None)
    # Consistency fix: the original called the camelCase ModifyRequest alias
    # here while every other test in this class uses modify_request.
    uri.modify_request(request)
    self.assert_(request.uri.host == 'example.com')
    # If no scheme was provided, the URI will not add one, but the HttpClient
    # should assume the request is HTTP.
    self.assert_(request.uri.scheme is None)
    self.assert_(request.uri.port == 8080)
    self.assert_(request.uri.path == '/greet')
    self.assert_(request.method == 'POST')
    self.assert_(request.headers['Content-Type'] == 'text/plain')

  def test_modify_request_use_default_ssl_port(self):
    request = atom.http_core.HttpRequest(
        uri=atom.http_core.Uri(scheme='https'), method='PUT')
    request.add_body_part('hello', 'text/plain')
    uri = atom.http_core.parse_uri('/greet')
    uri.modify_request(request)
    self.assert_(request.uri.host is None)
    self.assert_(request.uri.scheme == 'https')
    # If no port was provided, leave the port as None, it is up to the
    # HttpClient to set the correct default port.
    self.assert_(request.uri.port is None)
    self.assert_(request.uri.path == '/greet')
    self.assert_(request.method == 'PUT')
    self.assert_(request.headers['Content-Type'] == 'text/plain')
    self.assert_(len(request._body_parts) == 1)
    self.assert_(request._body_parts[0] == 'hello')

  def test_to_string(self):
    uri = atom.http_core.Uri(host='www.google.com', query={'q':'sippycode'})
    uri_string = uri._to_string()
    self.assert_(uri_string == 'http://www.google.com/?q=sippycode')
class HttpRequestTest(unittest.TestCase):
  """Tests body-part and header management in atom.http_core.HttpRequest."""

  def test_request_with_one_body_part(self):
    # A fresh request carries no body parts and no content headers.
    request = atom.http_core.HttpRequest()
    self.assert_(len(request._body_parts) == 0)
    self.assert_('Content-Length' not in request.headers)
    self.assert_(not 'Content-Type' in request.headers)
    self.assert_(not 'Content-Length' in request.headers)
    # Adding a body part sets Content-Type and a matching Content-Length.
    request.add_body_part('this is a test', 'text/plain')
    self.assert_(len(request._body_parts) == 1)
    self.assert_(request.headers['Content-Type'] == 'text/plain')
    self.assert_(request._body_parts[0] == 'this is a test')
    self.assert_(request.headers['Content-Length'] == str(len(
        'this is a test')))

  def test_add_file_without_size(self):
    # File-like objects have no inherent length, so add_body_part must be
    # given an explicit size; omitting it should raise UnknownSize.
    virtual_file = StringIO.StringIO('this is a test')
    request = atom.http_core.HttpRequest()
    try:
      request.add_body_part(virtual_file, 'text/plain')
      self.fail('We should have gotten an UnknownSize error.')
    except atom.http_core.UnknownSize:
      pass
    # Supplying the size lets the file be used as a body part.
    request.add_body_part(virtual_file, 'text/plain', len('this is a test'))
    self.assert_(len(request._body_parts) == 1)
    self.assert_(request.headers['Content-Type'] == 'text/plain')
    self.assert_(request._body_parts[0].read() == 'this is a test')
    self.assert_(request.headers['Content-Length'] == str(len(
        'this is a test')))

  def test_copy(self):
    # _copy must copy deeply enough that mutating the copy's headers or
    # body parts does not affect the original request.
    request = atom.http_core.HttpRequest(
        uri=atom.http_core.Uri(scheme='https', host='www.google.com'),
        method='POST', headers={'test':'1', 'ok':'yes'})
    request.add_body_part('body1', 'text/plain')
    request.add_body_part('<html>body2</html>', 'text/html')
    copied = request._copy()
    self.assert_(request.uri.scheme == copied.uri.scheme)
    self.assert_(request.uri.host == copied.uri.host)
    self.assert_(request.method == copied.method)
    self.assert_(request.uri.path == copied.uri.path)
    self.assert_(request.headers == copied.headers)
    self.assert_(request._body_parts == copied._body_parts)
    copied.headers['test'] = '2'
    copied._body_parts[1] = '<html>body3</html>'
    self.assert_(request.headers != copied.headers)
    self.assert_(request._body_parts != copied._body_parts)
def suite():
  """Returns a TestSuite combining the Uri and HttpRequest test cases."""
  all_suites = (unittest.makeSuite(UriTest, 'test'),
                unittest.makeSuite(HttpRequestTest, 'test'))
  return unittest.TestSuite(all_suites)


# Allow running this module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.token_store
import atom.http_interface
import atom.service
import atom.url
class TokenStoreTest(unittest.TestCase):
  """Tests scope-based token lookup in atom.token_store.TokenStore."""

  def setUp(self):
    # A token scoped to two sites; note only one scope has a trailing slash.
    self.token = atom.service.BasicAuthToken('aaa1', scopes=[
        'http://example.com/', 'http://example.org'])
    self.tokens = atom.token_store.TokenStore()
    self.tokens.add_token(self.token)

  def testAddAndFindTokens(self):
    self.assert_(self.tokens.find_token('http://example.com/') == self.token)
    self.assert_(self.tokens.find_token('http://example.org/') == self.token)
    self.assert_(self.tokens.find_token('http://example.org/foo?ok=1') == (
        self.token))
    # URLs outside every scope fall back to a GenericToken.
    self.assert_(isinstance(self.tokens.find_token('http://example.net/'),
        atom.http_interface.GenericToken))
    # A URL without a scheme does not match the scoped token either.
    self.assert_(isinstance(self.tokens.find_token('example.com/'),
        atom.http_interface.GenericToken))

  def testFindTokenUsingMultipleUrls(self):
    self.assert_(self.tokens.find_token(
        'http://example.com/') == self.token)
    self.assert_(self.tokens.find_token(
        'http://example.org/bar') == self.token)
    # Empty and unmatched URLs both yield a GenericToken.
    self.assert_(isinstance(self.tokens.find_token(''),
        atom.http_interface.GenericToken))
    self.assert_(isinstance(self.tokens.find_token(
        'http://example.net/'),
        atom.http_interface.GenericToken))

  def testFindTokenWithPartialScopes(self):
    # Scopes may be Url objects; '/foo' matches '/foobar' (path prefix) and
    # an explicit default port (443 for https) still matches.
    token = atom.service.BasicAuthToken('aaa1',
        scopes=[atom.url.Url(host='www.example.com', path='/foo'),
                atom.url.Url(host='www.example.net')])
    token_store = atom.token_store.TokenStore()
    token_store.add_token(token)
    self.assert_(token_store.find_token(
        'http://www.example.com/foobar') == token)
    self.assert_(token_store.find_token(
        'https://www.example.com:443/foobar') == token)
    self.assert_(token_store.find_token(
        'http://www.example.net/xyz') == token)
    self.assert_(token_store.find_token('http://www.example.org/') != token)
    self.assert_(isinstance(token_store.find_token('http://example.org/'),
        atom.http_interface.GenericToken))
def suite():
  """Returns a TestSuite holding all TokenStoreTest test methods."""
  token_store_suite = unittest.makeSuite(TokenStoreTest, 'test')
  return unittest.TestSuite((token_store_suite,))


# Allow running this module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# This test may make an actual HTTP request.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.http_core
import atom.auth
import atom.client
import atom.mock_http_core
class AtomPubClientEchoTest(unittest.TestCase):
  """Tests AtomPubClient using an echo client that reflects each request.

  The EchoHttpClient reports the request's own properties back as Echo-*
  response headers, so assertions inspect what would have been sent.
  """

  def test_simple_request_with_no_client_defaults(self):
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
    self.assert_(client.host is None)
    self.assert_(client.auth_token is None)
    # Make several equivalent requests.
    responses = [client.request('GET', 'http://example.org/'),
        client.request(http_request=atom.http_core.HttpRequest(
            uri=atom.http_core.Uri('http', 'example.org', path='/'),
            method='GET')),
        client.request('GET',
            http_request=atom.http_core.HttpRequest(
                uri=atom.http_core.Uri('http', 'example.org',
                    path='/')))]
    # All three spellings must produce identical requests.
    for response in responses:
      self.assert_(response.getheader('Echo-Host') == 'example.org:None')
      self.assert_(response.getheader('Echo-Uri') == '/')
      self.assert_(response.getheader('Echo-Scheme') == 'http')
      self.assert_(response.getheader('Echo-Method') == 'GET')
      self.assert_(response.getheader('User-Agent').startswith('gdata-py/'))

  def test_auth_request_with_no_client_defaults(self):
    # A per-request auth_token must add the Authorization header.
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
    token = atom.auth.BasicAuth('Jeff', '123')
    response = client.request('POST', 'https://example.net:8080/',
        auth_token=token)
    self.assert_(response.getheader('Echo-Host') == 'example.net:8080')
    self.assert_(response.getheader('Echo-Uri') == '/')
    self.assert_(response.getheader('Echo-Scheme') == 'https')
    self.assert_(response.getheader('Authorization') == 'Basic SmVmZjoxMjM=')
    self.assert_(response.getheader('Echo-Method') == 'POST')

  def test_request_with_client_defaults(self):
    # Host and auth token set on the client apply to every request.
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient(),
        'example.com', atom.auth.BasicAuth('Jeff', '123'))
    self.assert_(client.host == 'example.com')
    self.assert_(client.auth_token is not None)
    self.assert_(client.auth_token.basic_cookie == 'SmVmZjoxMjM=')
    # An absolute URL overrides the client's default host.
    response = client.request('GET', 'http://example.org/')
    self.assert_(response.getheader('Echo-Host') == 'example.org:None')
    self.assert_(response.getheader('Echo-Uri') == '/')
    self.assert_(response.getheader('Echo-Scheme') == 'http')
    self.assert_(response.getheader('Echo-Method') == 'GET')
    self.assert_(response.getheader('Authorization') == 'Basic SmVmZjoxMjM=')
    # A relative path falls back to the client's default host.
    response = client.request('GET', '/')
    self.assert_(response.getheader('Echo-Host') == 'example.com:None')
    self.assert_(response.getheader('Echo-Uri') == '/')
    self.assert_(response.getheader('Echo-Scheme') == 'http')
    self.assert_(response.getheader('Authorization') == 'Basic SmVmZjoxMjM=')
    # A partially-built http_request (port only) is completed with defaults.
    response = client.request('GET', '/',
        http_request=atom.http_core.HttpRequest(
            uri=atom.http_core.Uri(port=99)))
    self.assert_(response.getheader('Echo-Host') == 'example.com:99')
    self.assert_(response.getheader('Echo-Uri') == '/')

  def test_get(self):
    # Both the snake_case and CamelCase spellings should work.
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
    response = client.get('http://example.com/simple')
    self.assert_(response.getheader('Echo-Host') == 'example.com:None')
    self.assert_(response.getheader('Echo-Uri') == '/simple')
    self.assert_(response.getheader('Echo-Method') == 'GET')
    response = client.Get(uri='http://example.com/simple2')
    self.assert_(response.getheader('Echo-Uri') == '/simple2')
    self.assert_(response.getheader('Echo-Method') == 'GET')

  def test_modify_request_using_args(self):
    # Any extra keyword argument with a modify_request method is applied
    # to the outgoing request.
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
    class RequestModifier(object):
      def modify_request(self, http_request):
        http_request.headers['Special'] = 'Set'
    response = client.get('http://example.com/modified',
        extra=RequestModifier())
    self.assert_(response.getheader('Echo-Host') == 'example.com:None')
    self.assert_(response.getheader('Echo-Uri') == '/modified')
    self.assert_(response.getheader('Echo-Method') == 'GET')
    self.assert_(response.getheader('Special') == 'Set')

  def test_post(self):
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
    # The data argument's modify_request hook supplies the request body.
    class TestData(object):
      def modify_request(self, http_request):
        http_request.add_body_part('test body', 'text/testdata')
    response = client.Post(uri='http://example.com/', data=TestData())
    self.assert_(response.getheader('Echo-Host') == 'example.com:None')
    self.assert_(response.getheader('Echo-Uri') == '/')
    self.assert_(response.getheader('Echo-Method') == 'POST')
    self.assert_(response.getheader('Content-Length') == str(len('test body')))
    self.assert_(response.getheader('Content-Type') == 'text/testdata')
    self.assert_(response.read(2) == 'te')
    self.assert_(response.read() == 'st body')
    response = client.post(data=TestData(), uri='http://example.com/')
    self.assert_(response.read() == 'test body')
    self.assert_(response.getheader('Content-Type') == 'text/testdata')
    # Don't pass in a body, but use an extra kwarg to add the body to the
    # http_request.
    response = client.post(x=TestData(), uri='http://example.com/')
    self.assert_(response.read() == 'test body')

  def test_put(self):
    body_text = '<put>test</put>'
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
    class TestData(object):
      def modify_request(self, http_request):
        http_request.add_body_part(body_text, 'application/xml')
    # Positional and keyword forms of put should be equivalent.
    response = client.put('http://example.org', TestData())
    self.assert_(response.getheader('Echo-Host') == 'example.org:None')
    self.assert_(response.getheader('Echo-Uri') == '/')
    self.assert_(response.getheader('Echo-Method') == 'PUT')
    self.assert_(response.getheader('Content-Length') == str(len(body_text)))
    self.assert_(response.getheader('Content-Type') == 'application/xml')
    response = client.put(uri='http://example.org', data=TestData())
    self.assert_(response.getheader('Content-Length') == str(len(body_text)))
    self.assert_(response.getheader('Content-Type') == 'application/xml')

  def test_delete(self):
    # A client 'source' is prepended to the User-Agent header.
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient(),
        source='my new app')
    response = client.Delete('http://example.com/simple')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/simple')
    self.assertEqual(response.getheader('Echo-Method'), 'DELETE')
    response = client.delete(uri='http://example.com/d')
    self.assertEqual(response.getheader('Echo-Uri'), '/d')
    self.assertEqual(response.getheader('Echo-Method'), 'DELETE')
    self.assert_(
        response.getheader('User-Agent').startswith('my new app gdata-py/'))
def suite():
  """Returns a TestSuite holding all AtomPubClientEchoTest methods."""
  echo_suite = unittest.makeSuite(AtomPubClientEchoTest, 'test')
  return unittest.TestSuite((echo_suite,))


# Allow running this module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import os
import unittest
import atom.service
import atom.mock_http_core
import gdata.test_config as conf
class AtomServiceUnitTest(unittest.TestCase):
  """Tests URI building, URL parsing, and auth setup in atom.service."""

  def testBuildUriWithNoParams(self):
    # With no url_params, the URI is returned unchanged.
    x = atom.service.BuildUri('/base/feeds/snippets')
    self.assert_(x == '/base/feeds/snippets')

  def testBuildUriWithParams(self):
    # Add parameters to a URI
    x = atom.service.BuildUri('/base/feeds/snippets', url_params={'foo': 'bar',
        'bq': 'digital camera'})
    self.assert_(x == '/base/feeds/snippets?foo=bar&bq=digital+camera')
    self.assert_(x.startswith('/base/feeds/snippets'))
    self.assert_(x.count('?') == 1)
    self.assert_(x.count('&') == 1)
    self.assert_(x.index('?') < x.index('&'))
    self.assert_(x.index('bq=digital+camera') != -1)
    # Add parameters to a URI that already has parameters
    x = atom.service.BuildUri('/base/feeds/snippets?bq=digital+camera',
        url_params={'foo': 'bar', 'max-results': '250'})
    self.assert_(x.startswith('/base/feeds/snippets?bq=digital+camera'))
    self.assert_(x.count('?') == 1)
    self.assert_(x.count('&') == 2)
    self.assert_(x.index('?') < x.index('&'))
    self.assert_(x.index('max-results=250') != -1)
    self.assert_(x.index('foo=bar') != -1)

  def testBuildUriWithoutParameterEscaping(self):
    # escape_params=False leaves spaces and other characters unescaped.
    x = atom.service.BuildUri('/base/feeds/snippets',
        url_params={'foo': ' bar', 'bq': 'digital camera'},
        escape_params=False)
    self.assert_(x.index('foo= bar') != -1)
    self.assert_(x.index('bq=digital camera') != -1)

  def testParseHttpUrl(self):
    # An absolute http URL overrides the service's default server; the
    # default http port 80 is filled in.
    atom_service = atom.service.AtomService('code.google.com')
    self.assertEquals(atom_service.server, 'code.google.com')
    (host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
        'http://www.google.com/service/subservice?name=value')
    self.assertEquals(ssl, False)
    self.assertEquals(host, 'www.google.com')
    self.assertEquals(port, 80)
    self.assertEquals(path, '/service/subservice?name=value')

  def testParseHttpUrlWithPort(self):
    # An explicit port in the URL is preserved.
    atom_service = atom.service.AtomService('code.google.com')
    self.assertEquals(atom_service.server, 'code.google.com')
    (host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
        'http://www.google.com:12/service/subservice?name=value&newname=newvalue')
    self.assertEquals(ssl, False)
    self.assertEquals(host, 'www.google.com')
    self.assertEquals(port, 12)
    self.assert_(path.startswith('/service/subservice?'))
    self.assert_(path.find('name=value') >= len('/service/subservice?'))
    self.assert_(path.find('newname=newvalue') >= len('/service/subservice?'))

  def testParseHttpsUrl(self):
    # https URLs enable ssl and default to port 443.
    atom_service = atom.service.AtomService('code.google.com')
    self.assertEquals(atom_service.server, 'code.google.com')
    (host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
        'https://www.google.com/service/subservice?name=value&newname=newvalue')
    self.assertEquals(ssl, True)
    self.assertEquals(host, 'www.google.com')
    self.assertEquals(port, 443)
    self.assert_(path.startswith('/service/subservice?'))
    self.assert_(path.find('name=value') >= len('/service/subservice?'))
    self.assert_(path.find('newname=newvalue') >= len('/service/subservice?'))

  def testParseHttpsUrlWithPort(self):
    # An explicit port on an https URL is preserved.
    atom_service = atom.service.AtomService('code.google.com')
    self.assertEquals(atom_service.server, 'code.google.com')
    (host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
        'https://www.google.com:13981/service/subservice?name=value&newname=newvalue')
    self.assertEquals(ssl, True)
    self.assertEquals(host, 'www.google.com')
    self.assertEquals(port, 13981)
    self.assert_(path.startswith('/service/subservice?'))
    self.assert_(path.find('name=value') >= len('/service/subservice?'))
    self.assert_(path.find('newname=newvalue') >= len('/service/subservice?'))

  def testSetBasicAuth(self):
    # UseBasicAuth installs a BasicAuthToken; even empty credentials
    # produce a valid base64 header ('Basic Og==' encodes ':').
    client = atom.service.AtomService()
    client.UseBasicAuth('foo', 'bar')
    token = client.token_store.find_token('http://')
    self.assert_(isinstance(token, atom.service.BasicAuthToken))
    self.assertEquals(token.auth_header, 'Basic Zm9vOmJhcg==')
    client.UseBasicAuth('','')
    token = client.token_store.find_token('http://')
    self.assert_(isinstance(token, atom.service.BasicAuthToken))
    self.assertEquals(token.auth_header, 'Basic Og==')

  def testProcessUrlWithStringForService(self):
    # ProcessUrl accepts a hostname string, an AtomService, or None.
    (server, port, ssl, uri) = atom.service.ProcessUrl(
        service='www.google.com', url='/base/feeds/items')
    self.assertEquals(server, 'www.google.com')
    self.assertEquals(port, 80)
    self.assertEquals(ssl, False)
    self.assert_(uri.startswith('/base/feeds/items'))
    # A service object's ssl setting is honored for relative URLs.
    client = atom.service.AtomService()
    client.server = 'www.google.com'
    client.ssl = True
    (server, port, ssl, uri) = atom.service.ProcessUrl(
        service=client, url='/base/feeds/items')
    self.assertEquals(server, 'www.google.com')
    self.assertEquals(ssl, True)
    self.assert_(uri.startswith('/base/feeds/items'))
    # With service=None, everything is taken from the absolute URL.
    (server, port, ssl, uri) = atom.service.ProcessUrl(service=None,
        url='https://www.google.com/base/feeds/items')
    self.assertEquals(server, 'www.google.com')
    self.assertEquals(port, 443)
    self.assertEquals(ssl, True)
    self.assert_(uri.startswith('/base/feeds/items'))

  def testHostHeaderContainsNonDefaultPort(self):
    # Default ports are reported as None; explicit ports appear in the
    # Host header echoed back by the mock client.
    client = atom.service.AtomService()
    client.http_client.v2_http_client = atom.mock_http_core.EchoHttpClient()
    response = client.Get('http://example.com')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    response = client.Get('https://example.com')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    response = client.Get('https://example.com:8080')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:8080')
    response = client.Get('http://example.com:1234')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:1234')

  def testBadHttpsProxyRaisesRealException(self):
    """Test that real exceptions are raised when there is an error connecting to
    a host with an https proxy
    """
    # NOTE(review): this sets os.environ['https_proxy'] without restoring
    # it afterwards, which leaks into later tests in the same process.
    # NOTE(review): atom.http is not imported by this module directly; it
    # appears to be reachable through the other atom.* imports - confirm.
    client = atom.service.AtomService(server='example.com')
    client.server = 'example.com'
    os.environ['https_proxy'] = 'http://example.com'
    self.assertRaises(atom.http.ProxyError,
        atom.service.PrepareConnection, client, 'https://example.com')
def suite():
  """Builds this module's suite via the shared test configuration helper."""
  cases = [AtomServiceUnitTest]
  return conf.build_suite(cases)


# Allow running this module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import unittest
import gdata.service
import atom.mock_service
# Route all gdata.service HTTP traffic through the mock handler so requests
# are served from atom.mock_service recordings instead of a live connection.
gdata.service.http_request_handler = atom.mock_service
class MockRequestTest(unittest.TestCase):
  """Tests MockRequest.IsMatch, which compares recorded request thumbprints."""

  def setUp(self):
    # The thumbprint that incoming requests are matched against.
    self.request_thumbprint = atom.mock_service.MockRequest('GET',
        'http://www.google.com',
        extra_headers={'Header1':'a', 'Header2':'b'})

  def testIsMatch(self):
    matching_request = atom.mock_service.MockRequest('GET',
        'http://www.google.com', extra_headers={'Header1':'a',
        'Header2':'b', 'Header3':'c'})
    bad_url = atom.mock_service.MockRequest('GET', 'http://example.com',
        extra_headers={'Header1':'a', 'Header2':'b', 'Header3':'c'})
    # Should match because we don't check headers at the moment.
    bad_header = atom.mock_service.MockRequest('GET',
        'http://www.google.com', extra_headers={'Header1':'a',
        'Header2':'1', 'Header3':'c'})
    bad_verb = atom.mock_service.MockRequest('POST', 'http://www.google.com',
        data='post data', extra_headers={'Header1':'a', 'Header2':'b'})
    # Matching considers the HTTP verb and URL but not headers.
    self.assertEquals(self.request_thumbprint.IsMatch(matching_request), True)
    self.assertEquals(self.request_thumbprint.IsMatch(bad_url), False)
    self.assertEquals(self.request_thumbprint.IsMatch(bad_header), True)
    self.assertEquals(self.request_thumbprint.IsMatch(bad_verb), False)
class HttpRequestTest(unittest.TestCase):
  """Tests that GDataService requests are served from mock recordings."""

  def setUp(self):
    # Start each test with an empty recordings list.
    atom.mock_service.recordings = []
    self.client = gdata.service.GDataService()

  def testSimpleRecordedGet(self):
    recorded_request = atom.mock_service.MockRequest('GET', 'http://example.com/')
    recorded_response = atom.mock_service.MockHttpResponse('Got it', 200,
        'OK')
    # Add a tuple mapping the mock request to the mock response
    atom.mock_service.recordings.append((recorded_request, recorded_response))
    # Try a couple of GET requests which should match the recorded request.
    response = self.client.Get('http://example.com/', converter=str)
    self.assertEquals(response, 'Got it')
    # The lower-level handler interface should return the raw mock response.
    self.client.server = 'example.com'
    raw_response = self.client.handler.HttpRequest(self.client, 'GET', None,
        '/')
    self.assertEquals(raw_response.read(), 'Got it')
    self.assertEquals(raw_response.status, 200)
    self.assertEquals(raw_response.reason, 'OK')
class RecordRealHttpRequestsTest(unittest.TestCase):
  """Records a live HTTP response, then replays it from the recording.

  NOTE(review): testRecordAndReuseResponse performs a real network request
  to www.google.com, so it requires connectivity to pass.
  """

  def testRecordAndReuseResponse(self):
    client = gdata.service.GDataService()
    client.server = 'www.google.com'
    atom.mock_service.recordings = []
    # NOTE(review): atom.service is referenced here but this module does not
    # import it directly; it appears to rely on another import binding it
    # onto the atom package - confirm.
    atom.mock_service.real_request_handler = atom.service
    # Record a response
    real_response = atom.mock_service.HttpRequest(client, 'GET', None, 'http://www.google.com/')
    # Enter 'replay' mode
    atom.mock_service.real_request_handler = None
    mock_response = atom.mock_service.HttpRequest(client, 'GET', None, 'http://www.google.com/')
    self.assertEquals(real_response.reason, mock_response.reason)
    self.assertEquals(real_response.status, mock_response.status)
    self.assertEquals(real_response.read(), mock_response.read())
# Allow running this module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.mock_http
import atom.http
class MockHttpClientUnitTest(unittest.TestCase):
  """Tests record and replay modes of atom.mock_http.MockHttpClient."""

  def setUp(self):
    self.client = atom.mock_http.MockHttpClient()

  def testRespondToGet(self):
    """A canned response registered for a GET URL is returned and recorded.

    Renamed from the original 'testRepondToGet' to fix the typo; the
    'test' prefix is unchanged, so unittest discovery is unaffected.
    """
    # NOTE(review): atom.http_interface is not imported by this module
    # directly; it is reachable through the other atom.* imports - confirm.
    mock_response = atom.http_interface.HttpResponse(body='Hooray!',
        status=200, reason='OK')
    self.client.add_response(mock_response, 'GET',
        'http://example.com/hooray')
    response = self.client.request('GET', 'http://example.com/hooray')
    self.assertEquals(len(self.client.recordings), 1)
    self.assertEquals(response.status, 200)
    self.assertEquals(response.read(), 'Hooray!')

  def testRecordResponse(self):
    """Pass-through mode records a live response which replay then reuses.

    NOTE(review): this test performs a real HTTP request to
    www.google.com and needs network access to pass.
    """
    # Turn on pass-through record mode.
    self.client.real_client = atom.http.ProxiedHttpClient()
    live_response = self.client.request('GET',
        'http://www.google.com/base/feeds/snippets?max-results=1')
    live_response_body = live_response.read()
    self.assertEquals(live_response.status, 200)
    self.assertEquals(live_response_body.startswith('<?xml'), True)
    # Requery for the now canned data.
    self.client.real_client = None
    canned_response = self.client.request('GET',
        'http://www.google.com/base/feeds/snippets?max-results=1')
    # The canned response should be the stored response.
    canned_response_body = canned_response.read()
    self.assertEquals(canned_response.status, 200)
    self.assertEquals(canned_response_body, live_response_body)

  def testUnrecordedRequest(self):
    """Requesting a URL with no matching recording raises NoRecordingFound."""
    try:
      self.client.request('POST', 'http://example.org')
      self.fail()
    except atom.mock_http.NoRecordingFound:
      pass
def suite():
    """Build the TestSuite containing every MockHttpClientUnitTest case."""
    cases = unittest.makeSuite(MockHttpClientUnitTest, 'test')
    return unittest.TestSuite((cases,))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
#
# Perforce Defect Tracking Integration Project
# <http://www.ravenbrook.com/project/p4dti/>
#
# COVERAGE.PY -- COVERAGE TESTING
#
# Gareth Rees, Ravenbrook Limited, 2001-12-04
#
#
# 1. INTRODUCTION
#
# This module provides coverage testing for Python code.
#
# The intended readership is all Python developers.
#
# This document is not confidential.
#
# See [GDR 2001-12-04a] for the command-line interface, programmatic
# interface and limitations. See [GDR 2001-12-04b] for requirements and
# design.
"""Usage:
coverage.py -x MODULE.py [ARG1 ARG2 ...]
Execute module, passing the given command-line arguments, collecting
coverage data.
coverage.py -e
Erase collected coverage data.
coverage.py -r [-m] FILE1 FILE2 ...
Report on the statement coverage for the given files. With the -m
option, show line numbers of the statements that weren't executed.
coverage.py -a [-d dir] FILE1 FILE2 ...
Make annotated copies of the given files, marking statements that
are executed with > and statements that are missed with !. With
the -d option, make the copies in that directory. Without the -d
option, make each copy in the same directory as the original.
Coverage data is saved in the file .coverage by default. Set the
COVERAGE_FILE environment variable to save it somewhere else."""
import os
import re
import string
import sys
import types
# 2. IMPLEMENTATION
#
# This uses the "singleton" pattern.
#
# The word "morf" means a module object (from which the source file can
# be deduced by suitable manipulation of the __file__ attribute) or a
# filename.
#
# When we generate a coverage report we have to canonicalize every
# filename in the coverage dictionary just in case it refers to the
# module we are reporting on. It seems a shame to throw away this
# information so the data in the coverage dictionary is transferred to
# the 'cexecuted' dictionary under the canonical filenames.
#
# The coverage dictionary is called "c" and the trace function "t". The
# reason for these short names is that Python looks up variables by name
# at runtime and so execution time depends on the length of variables!
# In the bottleneck of this application it's appropriate to abbreviate
# names to increase speed.
# Executed-line ledger: maps (Python source file name, line number) to 1
# for every line that has run under the trace function below.
c = {}

# t(f, x, y).  Installed via sys.settrace; called by the interpreter for
# every traced event with the current frame.  It records the frame's
# (filename, line number) pair and returns itself so tracing continues
# into nested scopes.  The one-letter names are deliberate: Python
# resolves names at runtime, so short names shave time off this
# extremely hot path.
def t(f, x, y):
    key = (f.f_code.co_filename, f.f_lineno)
    c[key] = 1
    return t

# The singleton coverage object; created near the end of this module.
the_coverage = None
class coverage:
    """Singleton statement-coverage recorder.

    Collects executed (filename, lineno) pairs via sys.settrace, caches
    them in a marshal file, and can report on or annotate source files.
    Only one instance may exist per process (enforced in __init__).
    """

    # Errors are raised as this string (old-style string exceptions,
    # kept for Python 1.5.2 portability).
    error = "coverage error"

    # Name of the cache file (unless environment variable is set).
    cache_default = ".coverage"

    # Environment variable naming the cache file.
    cache_env = "COVERAGE_FILE"

    # A map from canonical Python source file name to a dictionary in
    # which there's an entry for each line number that has been
    # executed.
    cexecuted = {}

    # Cache of results of calling the analysis() method, so that you can
    # specify both -r and -a without doing double work.
    analysis_cache = {}

    # Cache of results of calling the canonical_filename() method, to
    # avoid duplicating work.
    canonical_filename_cache = {}

    def __init__(self):
        """Initialize the singleton; raises `error` if one already exists."""
        global the_coverage
        if the_coverage:
            raise self.error, "Only one coverage object allowed."
        self.cache = os.environ.get(self.cache_env, self.cache_default)
        self.restore()
        self.analysis_cache = {}

    def help(self, error=None):
        """Print an optional error, then the module usage, then exit(1)."""
        if error:
            print error
            print
        print __doc__
        sys.exit(1)

    def command_line(self):
        """Parse sys.argv and perform the requested -e/-x/-r/-a actions."""
        import getopt
        settings = {}
        # Single table mapping option spellings to setting names; a
        # trailing ':'/'=' marks options that take an argument.
        optmap = {
            '-a': 'annotate',
            '-d:': 'directory=',
            '-e': 'erase',
            '-h': 'help',
            '-i': 'ignore-errors',
            '-m': 'show-missing',
            '-r': 'report',
            '-x': 'execute',
            }
        # Derive getopt's short/long option strings from optmap so the
        # two can never drift apart.
        short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '')
        long_opts = optmap.values()
        options, args = getopt.getopt(sys.argv[1:], short_opts,
                                      long_opts)
        for o, a in options:
            if optmap.has_key(o):
                settings[optmap[o]] = 1
            elif optmap.has_key(o + ':'):
                settings[optmap[o + ':']] = a
            elif o[2:] in long_opts:
                settings[o[2:]] = 1
            elif o[2:] + '=' in long_opts:
                settings[o[2:]] = a
            else:
                self.help("Unknown option: '%s'." % o)
        if settings.get('help'):
            self.help()
        # erase/execute may not be combined with annotate/report.
        for i in ['erase', 'execute']:
            for j in ['annotate', 'report']:
                if settings.get(i) and settings.get(j):
                    self.help("You can't specify the '%s' and '%s' "
                              "options at the same time." % (i, j))
        args_needed = (settings.get('execute')
                       or settings.get('annotate')
                       or settings.get('report'))
        action = settings.get('erase') or args_needed
        if not action:
            self.help("You must specify at least one of -e, -x, -r, "
                      "or -a.")
        if not args_needed and args:
            self.help("Unexpected arguments %s." % args)
        if settings.get('erase'):
            self.erase()
        if settings.get('execute'):
            if not args:
                self.help("Nothing to do.")
            sys.argv = args
            self.start()
            import __main__
            # When Python starts a script, sys.path[0] is the directory
            # in which the Python script was found.  So when we run a
            # script, change sys.path so that it matches what the script
            # would have found if it had been run normally.
            sys.path[0] = os.path.dirname(sys.argv[0])
            execfile(sys.argv[0], __main__.__dict__)
        if not args:
            args = self.cexecuted.keys()
        ignore_errors = settings.get('ignore-errors')
        show_missing = settings.get('show-missing')
        directory = settings.get('directory=')
        if settings.get('report'):
            self.report(args, show_missing, ignore_errors)
        if settings.get('annotate'):
            self.annotate(args, directory, ignore_errors)

    def start(self):
        """Begin recording executed lines via sys.settrace."""
        sys.settrace(t)

    def stop(self):
        """Stop recording executed lines."""
        sys.settrace(None)

    def erase(self):
        """Forget all collected data and delete the cache file."""
        global c
        c = {}
        self.analysis_cache = {}
        self.cexecuted = {}
        if os.path.exists(self.cache):
            os.remove(self.cache)

    # save().  Save coverage data to the coverage cache.
    def save(self):
        self.canonicalize_filenames()
        cache = open(self.cache, 'wb')
        import marshal
        marshal.dump(self.cexecuted, cache)
        cache.close()

    # restore().  Restore coverage data from the coverage cache (if it
    # exists).
    def restore(self):
        global c
        c = {}
        self.cexecuted = {}
        if not os.path.exists(self.cache):
            return
        try:
            cache = open(self.cache, 'rb')
            import marshal
            cexecuted = marshal.load(cache)
            cache.close()
            if isinstance(cexecuted, types.DictType):
                self.cexecuted = cexecuted
        except:
            # A corrupt or unreadable cache is treated as no cache at all.
            pass

    # canonical_filename(filename).  Return a canonical filename for the
    # file (that is, an absolute path with no redundant components and
    # normalized case).  See [GDR 2001-12-04b, 3.3].
    def canonical_filename(self, filename):
        if not self.canonical_filename_cache.has_key(filename):
            f = filename
            if os.path.isabs(f) and not os.path.exists(f):
                f = os.path.basename(f)
            if not os.path.isabs(f):
                # Resolve a relative name against the current directory
                # and the import path.
                for path in [os.curdir] + sys.path:
                    g = os.path.join(path, f)
                    if os.path.exists(g):
                        f = g
                        break
            cf = os.path.normcase(os.path.abspath(f))
            self.canonical_filename_cache[filename] = cf
        return self.canonical_filename_cache[filename]

    # canonicalize_filenames().  Copy results from "executed" to
    # "cexecuted", canonicalizing filenames on the way.  Clear the
    # "executed" map.
    def canonicalize_filenames(self):
        global c
        for filename, lineno in c.keys():
            f = self.canonical_filename(filename)
            if not self.cexecuted.has_key(f):
                self.cexecuted[f] = {}
            self.cexecuted[f][lineno] = 1
        c = {}

    # morf_filename(morf).  Return the filename for a module or file.
    def morf_filename(self, morf):
        if isinstance(morf, types.ModuleType):
            if not hasattr(morf, '__file__'):
                raise self.error, "Module has no __file__ attribute."
            file = morf.__file__
        else:
            file = morf
        return self.canonical_filename(file)

    # analyze_morf(morf).  Analyze the module or filename passed as
    # the argument.  If the source code can't be found, raise an error.
    # Otherwise, return a pair of (1) the canonical filename of the
    # source code for the module, and (2) a list of lines of statements
    # in the source code.
    def analyze_morf(self, morf):
        if self.analysis_cache.has_key(morf):
            return self.analysis_cache[morf]
        filename = self.morf_filename(morf)
        ext = os.path.splitext(filename)[1]
        if ext == '.pyc':
            # Map a compiled file back to its .py source next to it.
            if not os.path.exists(filename[0:-1]):
                raise self.error, ("No source for compiled code '%s'."
                                   % filename)
            filename = filename[0:-1]
        elif ext != '.py':
            raise self.error, "File '%s' not Python source." % filename
        source = open(filename, 'r')
        import parser
        tree = parser.suite(source.read()).totuple(1)
        source.close()
        statements = {}
        self.find_statements(tree, statements)
        lines = statements.keys()
        lines.sort()
        result = filename, lines
        self.analysis_cache[morf] = result
        return result

    # find_statements(tree, dict).  Find each statement in the parse
    # tree and record the line on which the statement starts in the
    # dictionary (by assigning it to 1).
    #
    # It works by walking the whole tree depth-first.  Every time it
    # comes across a statement (symbol.stmt -- this includes compound
    # statements like 'if' and 'while') it calls find_statement, which
    # descends the tree below the statement to find the first terminal
    # token in that statement and record the lines on which that token
    # was found.
    #
    # This algorithm may find some lines several times (because of the
    # grammar production statement -> compound statement -> statement),
    # but that doesn't matter because we record lines as the keys of the
    # dictionary.
    #
    # See also [GDR 2001-12-04b, 3.2].
    def find_statements(self, tree, dict):
        import symbol, token
        if token.ISNONTERMINAL(tree[0]):
            for t in tree[1:]:
                self.find_statements(t, dict)
            if tree[0] == symbol.stmt:
                self.find_statement(tree[1], dict)
        elif (tree[0] == token.NAME
              and tree[1] in ['elif', 'except', 'finally']):
            # These keywords start executable lines of their own even
            # though they are not the first token of a symbol.stmt.
            dict[tree[2]] = 1

    def find_statement(self, tree, dict):
        """Record the line of the first terminal token under `tree`."""
        import token
        while token.ISNONTERMINAL(tree[0]):
            tree = tree[1]
        dict[tree[2]] = 1

    # format_lines(statements, lines).  Format a list of line numbers
    # for printing by coalescing groups of lines as long as the lines
    # represent consecutive statements.  This will coalesce even if
    # there are gaps between statements, so if statements =
    # [1,2,3,4,5,10,11,12,13,14] and lines = [1,2,5,10,11,13,14] then
    # format_lines will return "1-2, 5-11, 13-14".
    def format_lines(self, statements, lines):
        pairs = []
        i = 0
        j = 0
        start = None
        pairs = []
        while i < len(statements) and j < len(lines):
            if statements[i] == lines[j]:
                if start == None:
                    start = lines[j]
                end = lines[j]
                j = j + 1
            elif start:
                # A statement not in `lines` closes the current range.
                pairs.append((start, end))
                start = None
            i = i + 1
        if start:
            pairs.append((start, end))
        def stringify(pair):
            start, end = pair
            if start == end:
                return "%d" % start
            else:
                return "%d-%d" % (start, end)
        import string
        return string.join(map(stringify, pairs), ", ")

    def analysis(self, morf):
        """Return (filename, statement lines, missing lines, readable
        missing-line summary) for a module or filename."""
        filename, statements = self.analyze_morf(morf)
        self.canonicalize_filenames()
        if not self.cexecuted.has_key(filename):
            self.cexecuted[filename] = {}
        missing = []
        for line in statements:
            if not self.cexecuted[filename].has_key(line):
                missing.append(line)
        return (filename, statements, missing,
                self.format_lines(statements, missing))

    def morf_name(self, morf):
        """Return a short display name for a module or filename."""
        if isinstance(morf, types.ModuleType):
            return morf.__name__
        else:
            return os.path.splitext(os.path.basename(morf))[0]

    def report(self, morfs, show_missing=1, ignore_errors=0):
        """Print a statement-coverage table for the given morfs."""
        if not isinstance(morfs, types.ListType):
            morfs = [morfs]
        # Column widths follow the longest name (minimum 5 for "TOTAL").
        max_name = max([5,] + map(len, map(self.morf_name, morfs)))
        fmt_name = "%%- %ds  " % max_name
        fmt_err = fmt_name + "%s: %s"
        header = fmt_name % "Name" + " Stmts   Exec  Cover"
        fmt_coverage = fmt_name + "% 6d % 6d % 5d%%"
        if show_missing:
            header = header + "   Missing"
            fmt_coverage = fmt_coverage + "   %s"
        print header
        print "-" * len(header)
        total_statements = 0
        total_executed = 0
        for morf in morfs:
            name = self.morf_name(morf)
            try:
                _, statements, missing, readable = self.analysis(morf)
                n = len(statements)
                m = n - len(missing)
                if n > 0:
                    pc = 100.0 * m / n
                else:
                    pc = 100.0
                args = (name, n, m, pc)
                if show_missing:
                    args = args + (readable,)
                print fmt_coverage % args
                total_statements = total_statements + n
                total_executed = total_executed + m
            except KeyboardInterrupt:
                raise
            except:
                if not ignore_errors:
                    type, msg = sys.exc_info()[0:2]
                    print fmt_err % (name, type, msg)
        if len(morfs) > 1:
            # Summary row across all morfs.
            print "-" * len(header)
            if total_statements > 0:
                pc = 100.0 * total_executed / total_statements
            else:
                pc = 100.0
            args = ("TOTAL", total_statements, total_executed, pc)
            if show_missing:
                args = args + ("",)
            print fmt_coverage % args

    # annotate(morfs, ignore_errors).
    # Lines matching blank_re get no marker; lines matching else_re get
    # special handling because 'else:' is not a statement of its own.
    blank_re = re.compile("\\s*(#|$)")
    else_re = re.compile("\\s*else\\s*:\\s*(#|$)")

    def annotate(self, morfs, directory=None, ignore_errors=0):
        """Write ',cover' copies of each morf's source, marking executed
        statements with '> ' and missed statements with '! '."""
        for morf in morfs:
            try:
                filename, statements, missing, _ = self.analysis(morf)
                source = open(filename, 'r')
                if directory:
                    dest_file = os.path.join(directory,
                                             os.path.basename(filename)
                                             + ',cover')
                else:
                    dest_file = filename + ',cover'
                dest = open(dest_file, 'w')
                lineno = 0
                i = 0
                j = 0
                covered = 1
                while 1:
                    line = source.readline()
                    if line == '':
                        break
                    lineno = lineno + 1
                    # Advance the statement and missing cursors to the
                    # current line.
                    while i < len(statements) and statements[i] < lineno:
                        i = i + 1
                    while j < len(missing) and missing[j] < lineno:
                        j = j + 1
                    if i < len(statements) and statements[i] == lineno:
                        covered = j >= len(missing) or missing[j] > lineno
                    if self.blank_re.match(line):
                        dest.write('  ')
                    elif self.else_re.match(line):
                        # Special logic for lines containing only
                        # 'else:'.  See [GDR 2001-12-04b, 3.2].
                        if i >= len(statements) and j >= len(missing):
                            dest.write('! ')
                        elif i >= len(statements) or j >= len(missing):
                            dest.write('> ')
                        elif statements[i] == missing[j]:
                            dest.write('! ')
                        else:
                            dest.write('> ')
                    elif covered:
                        dest.write('> ')
                    else:
                        dest.write('! ')
                    dest.write(line)
                source.close()
                dest.close()
            except KeyboardInterrupt:
                raise
            except:
                if not ignore_errors:
                    raise
# Singleton object.
the_coverage = coverage()

# Module functions call methods in the singleton object.
# NOTE: apply() is used instead of the f(*args, **kw) call syntax on
# purpose -- this module is documented (see the history below) to stay
# portable back to Python 1.5.2, which lacks that syntax.
def start(*args, **kw): return apply(the_coverage.start, args, kw)
def stop(*args, **kw): return apply(the_coverage.stop, args, kw)
def erase(*args, **kw): return apply(the_coverage.erase, args, kw)
def analysis(*args, **kw): return apply(the_coverage.analysis, args, kw)
def report(*args, **kw): return apply(the_coverage.report, args, kw)
# Save coverage data when Python exits.  (The atexit module wasn't
# introduced until Python 2.0, so use sys.exitfunc when it's not
# available.)
try:
    import atexit
    atexit.register(the_coverage.save)
except ImportError:
    # Pre-2.0 fallback: sys.exitfunc is the single interpreter exit hook.
    sys.exitfunc = the_coverage.save
# Command-line interface: dispatch to the singleton's option parser.
if __name__ == '__main__':
    the_coverage.command_line()
# A. REFERENCES
#
# [GDR 2001-12-04a] "Statement coverage for Python"; Gareth Rees;
# Ravenbrook Limited; 2001-12-04;
# <http://garethrees.org/2001/12/04/python-coverage/>.
#
# [GDR 2001-12-04b] "Statement coverage for Python: design and
# analysis"; Gareth Rees; Ravenbrook Limited; 2001-12-04;
# <http://garethrees.org/2001/12/04/python-coverage/design.html>.
#
# [van Rossum 2001-07-20a] "Python Reference Manual (release 2.1.1)";
# Guido van Rossum; 2001-07-20;
# <http://www.python.org/doc/2.1.1/ref/ref.html>.
#
# [van Rossum 2001-07-20b] "Python Library Reference"; Guido van Rossum;
# 2001-07-20; <http://www.python.org/doc/2.1.1/lib/lib.html>.
#
#
# B. DOCUMENT HISTORY
#
# 2001-12-04 GDR Created.
#
# 2001-12-06 GDR Added command-line interface and source code
# annotation.
#
# 2001-12-09 GDR Moved design and interface to separate documents.
#
# 2001-12-10 GDR Open cache file as binary on Windows. Allow
# simultaneous -e and -x, or -a and -r.
#
# 2001-12-12 GDR Added command-line help. Cache analysis so that it
# only needs to be done once when you specify -a and -r.
#
# 2001-12-13 GDR Improved speed while recording. Portable between
# Python 1.5.2 and 2.1.1.
#
# 2002-01-03 GDR Module-level functions work correctly.
#
# 2002-01-07 GDR Update sys.path when running a file with the -x option,
# so that it matches the value the program would get if it were run on
# its own.
#
#
# C. COPYRIGHT AND LICENCE
#
# Copyright 2001 Gareth Rees. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
#
#
# $Id: //info.ravenbrook.com/user/gdr/www.garethrees.org/2001/12/04/python-coverage/coverage.py#9 $
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata
import atom
from gdata import test_data
import gdata.test_config as conf
class StartIndexTest(unittest.TestCase):
    """Round-trip serialization test for the gdata.StartIndex element."""

    def setUp(self):
        self.start_index = gdata.StartIndex()

    def testToAndFromString(self):
        self.start_index.text = '1'
        self.assert_(self.start_index.text == '1')
        round_tripped = gdata.StartIndexFromString(self.start_index.ToString())
        self.assert_(self.start_index.text == round_tripped.text)
class ItemsPerPageTest(unittest.TestCase):
    """Round-trip serialization test for the gdata.ItemsPerPage element."""

    def setUp(self):
        self.items_per_page = gdata.ItemsPerPage()

    def testToAndFromString(self):
        self.items_per_page.text = '10'
        self.assert_(self.items_per_page.text == '10')
        reparsed = gdata.ItemsPerPageFromString(
            self.items_per_page.ToString())
        self.assert_(self.items_per_page.text == reparsed.text)
class GDataEntryTest(unittest.TestCase):
    """Checks GData-specific cleanup performed while parsing entries/feeds."""

    def testIdShouldBeCleaned(self):
        entry = gdata.GDataEntryFromString(test_data.XML_ENTRY_1)
        tree = ElementTree.fromstring(test_data.XML_ENTRY_1)
        raw_id = tree.findall('{http://www.w3.org/2005/Atom}id')[0].text
        # The parsed entry's id must differ from the raw XML text and
        # match the cleaned value.
        self.assert_(raw_id != entry.id.text)
        self.assert_(entry.id.text == 'http://www.google.com/test/id/url')

    def testGeneratorShouldBeCleaned(self):
        feed = gdata.GDataFeedFromString(test_data.GBASE_FEED)
        tree = ElementTree.fromstring(test_data.GBASE_FEED)
        raw_generator = tree.findall(
            '{http://www.w3.org/2005/Atom}generator')[0].text
        self.assert_(raw_generator != feed.generator.text)
        self.assert_(feed.generator.text == 'GoogleBase')

    def testAllowsEmptyId(self):
        entry = gdata.GDataEntry()
        try:
            entry.id = atom.Id()
        except AttributeError:
            self.fail('Empty id should not raise an attribute error.')
class LinkFinderTest(unittest.TestCase):
    """Verifies the link-lookup helpers on a parsed GData entry."""

    def setUp(self):
        self.entry = gdata.GDataEntryFromString(test_data.XML_ENTRY_1)

    def testLinkFinderGetsLicenseLink(self):
        license_link = self.entry.GetLicenseLink()
        self.assertEquals(isinstance(license_link, atom.Link), True)
        self.assertEquals(license_link.href,
                          'http://creativecommons.org/licenses/by-nc/2.5/rdf')
        self.assertEquals(license_link.rel, 'license')

    def testLinkFinderGetsAlternateLink(self):
        alternate_link = self.entry.GetAlternateLink()
        self.assertEquals(isinstance(alternate_link, atom.Link), True)
        self.assertEquals(alternate_link.href,
                          'http://www.provider-host.com/123456789')
        self.assertEquals(alternate_link.rel, 'alternate')
class GDataFeedTest(unittest.TestCase):
    """Checks conversion of GDataFeed to and from ElementTree form."""

    def testCorrectConversionToElementTree(self):
        test_feed = gdata.GDataFeedFromString(test_data.GBASE_FEED)
        self.assert_(test_feed.total_results is not None)
        element_tree = test_feed._ToElementTree()
        # Fix: the previous revision bound element_tree.find('...feed') to
        # an unused local; that dead statement has been removed.
        # The OpenSearch totalResults element must survive the conversion.
        self.assert_(element_tree.find(
            '{http://a9.com/-/spec/opensearchrss/1.0/}totalResults') is not None)

    def testAllowsEmptyId(self):
        feed = gdata.GDataFeed()
        try:
            feed.id = atom.Id()
        except AttributeError:
            self.fail('Empty id should not raise an attribute error.')
class BatchEntryTest(unittest.TestCase):
    """Round-trip serialization test for gdata.BatchEntry."""

    def testCorrectConversionFromAndToString(self):
        original = gdata.BatchEntryFromString(test_data.BATCH_ENTRY)
        self.assertEquals(original.batch_id.text, 'itemB')
        self.assertEquals(original.id.text,
                          'http://www.google.com/base/feeds/items/'
                          '2173859253842813008')
        self.assertEquals(original.batch_operation.type, 'insert')
        self.assertEquals(original.batch_status.code, '201')
        self.assertEquals(original.batch_status.reason, 'Created')
        # Serializing and reparsing must preserve every batch field.
        reparsed = gdata.BatchEntryFromString(str(original))
        self.assertEquals(original.batch_id.text, reparsed.batch_id.text)
        self.assertEquals(original.id.text, reparsed.id.text)
        self.assertEquals(original.batch_operation.type,
                          reparsed.batch_operation.type)
        self.assertEquals(original.batch_status.code,
                          reparsed.batch_status.code)
        self.assertEquals(original.batch_status.reason,
                          reparsed.batch_status.reason)
class BatchFeedTest(unittest.TestCase):
    """Tests batch-feed parsing plus the AddBatchEntry / AddInsert /
    AddDelete / AddQuery / AddUpdate convenience methods."""

    def setUp(self):
        self.batch_feed = gdata.BatchFeed()
        self.example_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/1'), text='This is a test')

    def testConvertRequestFeed(self):
        batch_feed = gdata.BatchFeedFromString(test_data.BATCH_FEED_REQUEST)
        self.assertEquals(len(batch_feed.entry), 4)
        for entry in batch_feed.entry:
            self.assert_(isinstance(entry, gdata.BatchEntry))
        self.assertEquals(batch_feed.title.text, 'My Batch Feed')
        # Serializing and reparsing must preserve entry count and title.
        new_feed = gdata.BatchFeedFromString(str(batch_feed))
        self.assertEquals(len(new_feed.entry), 4)
        for entry in new_feed.entry:
            self.assert_(isinstance(entry, gdata.BatchEntry))
        self.assertEquals(new_feed.title.text, 'My Batch Feed')

    def testConvertResultFeed(self):
        batch_feed = gdata.BatchFeedFromString(test_data.BATCH_FEED_RESULT)
        self.assertEquals(len(batch_feed.entry), 4)
        for entry in batch_feed.entry:
            self.assert_(isinstance(entry, gdata.BatchEntry))
            # Only the known item carries these batch result fields.
            if entry.id.text == ('http://www.google.com/base/feeds/items/'
                                 '2173859253842813008'):
                self.assertEquals(entry.batch_operation.type, 'insert')
                self.assertEquals(entry.batch_id.text, 'itemB')
                self.assertEquals(entry.batch_status.code, '201')
                self.assertEquals(entry.batch_status.reason, 'Created')
        self.assertEquals(batch_feed.title.text, 'My Batch')
        # The same checks must hold after a serialize/reparse round trip.
        new_feed = gdata.BatchFeedFromString(str(batch_feed))
        self.assertEquals(len(new_feed.entry), 4)
        for entry in new_feed.entry:
            self.assert_(isinstance(entry, gdata.BatchEntry))
            if entry.id.text == ('http://www.google.com/base/feeds/items/'
                                 '2173859253842813008'):
                self.assertEquals(entry.batch_operation.type, 'insert')
                self.assertEquals(entry.batch_id.text, 'itemB')
                self.assertEquals(entry.batch_status.code, '201')
                self.assertEquals(entry.batch_status.reason, 'Created')
        self.assertEquals(new_feed.title.text, 'My Batch')

    def testAddBatchEntry(self):
        # Neither an entry nor an id URL: must raise.
        try:
            self.batch_feed.AddBatchEntry(batch_id_string='a')
            self.fail('AddBatchEntry with neither entry or URL should raise Error')
        except gdata.MissingRequiredParameters:
            pass
        # URL only: batch id is auto-assigned sequentially ('0').
        new_entry = self.batch_feed.AddBatchEntry(
            id_url_string='http://example.com/1')
        self.assertEquals(len(self.batch_feed.entry), 1)
        self.assertEquals(self.batch_feed.entry[0].id.text,
                          'http://example.com/1')
        self.assertEquals(self.batch_feed.entry[0].batch_id.text, '0')
        self.assertEquals(new_entry.id.text, 'http://example.com/1')
        self.assertEquals(new_entry.batch_id.text, '0')
        # Existing entry + explicit batch id: the explicit id wins and the
        # entry's own atom id is kept over id_url_string.
        to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'))
        new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
                                                  batch_id_string='foo')
        self.assertEquals(new_entry.batch_id.text, 'foo')
        self.assertEquals(new_entry.id.text, 'originalId')
        to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'),
                                  batch_id=gdata.BatchId(text='bar'))
        new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
                                                  id_url_string='newId',
                                                  batch_id_string='foo')
        self.assertEquals(new_entry.batch_id.text, 'foo')
        self.assertEquals(new_entry.id.text, 'originalId')
        # Without batch_id_string the entry's existing batch id survives.
        to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'),
                                  batch_id=gdata.BatchId(text='bar'))
        new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
                                                  id_url_string='newId')
        self.assertEquals(new_entry.batch_id.text, 'bar')
        self.assertEquals(new_entry.id.text, 'originalId')
        # An explicit operation_string overrides the entry's operation.
        to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'),
                                  batch_id=gdata.BatchId(text='bar'),
                                  batch_operation=gdata.BatchOperation(
                                      op_type=gdata.BATCH_INSERT))
        self.assertEquals(to_add.batch_operation.type, gdata.BATCH_INSERT)
        new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
                                                  id_url_string='newId',
                                                  batch_id_string='foo',
                                                  operation_string=gdata.BATCH_UPDATE)
        self.assertEquals(new_entry.batch_operation.type, gdata.BATCH_UPDATE)

    def testAddInsert(self):
        first_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/1'), text='This is a test1')
        self.batch_feed.AddInsert(first_entry)
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_INSERT)
        self.assertEquals(self.batch_feed.entry[0].batch_id.text, '0')
        second_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/2'), text='This is a test2')
        self.batch_feed.AddInsert(second_entry, batch_id_string='foo')
        self.assertEquals(self.batch_feed.entry[1].batch_operation.type,
                          gdata.BATCH_INSERT)
        self.assertEquals(self.batch_feed.entry[1].batch_id.text, 'foo')
        third_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/3'), text='This is a test3')
        third_entry.batch_operation = gdata.BatchOperation(
            op_type=gdata.BATCH_DELETE)
        # Add an entry with a delete operation already assigned.
        self.batch_feed.AddInsert(third_entry)
        # The batch entry should not have the original operation, it should
        # have been changed to an insert.
        self.assertEquals(self.batch_feed.entry[2].batch_operation.type,
                          gdata.BATCH_INSERT)
        self.assertEquals(self.batch_feed.entry[2].batch_id.text, '2')

    def testAddDelete(self):
        # Try deleting an entry
        delete_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/1'), text='This is a test')
        self.batch_feed.AddDelete(entry=delete_entry)
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_DELETE)
        self.assertEquals(self.batch_feed.entry[0].id.text,
                          'http://example.com/1')
        self.assertEquals(self.batch_feed.entry[0].text, 'This is a test')
        # Try deleting a URL
        self.batch_feed.AddDelete(url_string='http://example.com/2')
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_DELETE)
        self.assertEquals(self.batch_feed.entry[1].id.text,
                          'http://example.com/2')
        self.assert_(self.batch_feed.entry[1].text is None)

    def testAddQuery(self):
        # Try querying with an existing batch entry
        # NOTE(review): the local is (mis)named delete_entry here and in
        # testAddUpdate; it is just an entry to operate on.
        delete_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/1'))
        self.batch_feed.AddQuery(entry=delete_entry)
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_QUERY)
        self.assertEquals(self.batch_feed.entry[0].id.text,
                          'http://example.com/1')
        # Try querying a URL
        self.batch_feed.AddQuery(url_string='http://example.com/2')
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_QUERY)
        self.assertEquals(self.batch_feed.entry[1].id.text,
                          'http://example.com/2')

    def testAddUpdate(self):
        # Try updating an entry
        delete_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/1'), text='This is a test')
        self.batch_feed.AddUpdate(entry=delete_entry)
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_UPDATE)
        self.assertEquals(self.batch_feed.entry[0].id.text,
                          'http://example.com/1')
        self.assertEquals(self.batch_feed.entry[0].text, 'This is a test')
class ExtendedPropertyTest(unittest.TestCase):
    """Tests XML-blob storage on gdata.ExtendedProperty."""

    def testXmlBlobRoundTrip(self):
        ep = gdata.ExtendedProperty(name='blobby')
        ep.SetXmlBlob('<some_xml attr="test"/>')
        # Both the original property and a serialize/reparse round trip
        # must expose the identical blob extension.
        reparsed = gdata.ExtendedPropertyFromString(ep.ToString())
        for prop in (ep, reparsed):
            extension = prop.GetXmlBlobExtensionElement()
            self.assertEquals(extension.tag, 'some_xml')
            self.assert_(extension.namespace is None)
            self.assertEquals(extension.attributes['attr'], 'test')

    def testGettersShouldReturnNoneWithNoBlob(self):
        ep = gdata.ExtendedProperty(name='no blob')
        self.assert_(ep.GetXmlBlobExtensionElement() is None)
        self.assert_(ep.GetXmlBlobString() is None)

    def testGettersReturnCorrectTypes(self):
        ep = gdata.ExtendedProperty(name='has blob')
        ep.SetXmlBlob('<some_xml attr="test"/>')
        self.assert_(isinstance(ep.GetXmlBlobExtensionElement(),
                                atom.ExtensionElement))
        self.assert_(isinstance(ep.GetXmlBlobString(), str))
class FeedLinkTest(unittest.TestCase):
    """Checks parsing of a standalone gd:feedLink element."""

    def testCorrectFromStringType(self):
        feed_link = gdata.FeedLinkFromString(
            '<feedLink xmlns="http://schemas.google.com/g/2005" countHint="5"/>')
        self.assert_(isinstance(feed_link, gdata.FeedLink))
        self.assertEqual(feed_link.count_hint, '5')
def suite():
    """Build the suite of every test case class defined in this module.

    Fix: the previous revision listed StartIndexTest twice and omitted
    ItemsPerPageTest, so the ItemsPerPage round-trip test never ran.
    """
    return conf.build_suite([StartIndexTest, ItemsPerPageTest, GDataEntryTest,
        LinkFinderTest, GDataFeedTest, BatchEntryTest, BatchFeedTest,
        ExtendedPropertyTest, FeedLinkTest])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import all_tests
import gdata.test_config as conf
# Configure the shared gdata.test_config options: run against the live
# servers, save responses to the cache, and keep the cache between runs.
conf.options.set_value('runlive', 'true')
conf.options.set_value('savecache', 'true')
conf.options.set_value('clearcache', 'false')
def suite():
  """Return a TestSuite containing only the core atom tests.

  Bug fix: this function previously referenced atom_tests.core_test
  without importing it, so calling suite() raised NameError.  The module
  is now imported locally.
  """
  import atom_tests.core_test
  return unittest.TestSuite((atom_tests.core_test.suite(),))


if __name__ == '__main__':
  unittest.TextTestRunner().run(all_tests.suite())
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder@gmail.com (Jeff Scudder)'
import unittest
class ModuleTestRunner(object):
  """Runs the unittest test cases found in a list of modules.

  Before each module's tests run, module-level variables (for example the
  username and password used by service tests) are overridden with the
  values supplied in ``module_settings``.

  NOTE: this class uses Python 2-only constructs (``dict.iteritems`` and
  the ``print`` statement).
  """

  def __init__(self, module_list=None, module_settings=None):
    """Constructor for a runner to run tests in the modules listed.

    Args:
      module_list: list (optional) The modules whose test cases will be run.
      module_settings: dict (optional) A dictionary of module level varables
          which should be set in the modules if they are present. An
          example is the username and password which is a module variable
          in most service_test modules.
    """
    self.modules = module_list or []
    self.settings = module_settings or {}

  def RunAllTests(self):
    """Executes all tests in this objects modules list.

    It also sets any module variables which match the settings keys to the
    corresponding values in the settings member.
    """
    runner = unittest.TextTestRunner()
    for module in self.modules:
      # Set any module variables according to the contents in the settings
      for setting, value in self.settings.iteritems():
        try:
          setattr(module, setting, value)
        except AttributeError:
          # This module did not have a variable for the current setting, so
          # we skip it and try the next setting.
          # NOTE(review): setattr on a module normally succeeds even for
          # new names, so this branch looks unreachable — confirm intent.
          pass
      # We have set all of the applicable settings for the module, now
      # run the tests.
      print '\nRunning all tests in module', module.__name__
      runner.run(unittest.defaultTestLoader.loadTestsFromModule(module))
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import all_tests
import gdata.test_config as conf
# Skip every test that contacts the live servers; run only offline tests.
conf.options.set_value('runlive', 'false')
def suite():
  """Return a TestSuite containing only the core atom tests.

  Bug fix: this function previously referenced atom_tests.core_test
  without importing it, so calling suite() raised NameError.  The module
  is now imported locally.
  """
  import atom_tests.core_test
  return unittest.TestSuite((atom_tests.core_test.suite(),))


if __name__ == '__main__':
  unittest.TextTestRunner().run(all_tests.suite())
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import all_tests
from gdata.test_config import settings
# Enable live-server tests and rebuild the response cache from scratch:
# clear previously cached responses, then record fresh ones.
settings.RUN_LIVE_TESTS = True
settings.CACHE_RESPONSES = True
settings.CLEAR_CACHE = True
def suite():
  """Return a TestSuite containing only the core atom tests.

  Bug fix: this function previously referenced atom_tests.core_test
  without importing it, so calling suite() raised NameError.  The module
  is now imported locally.
  """
  import atom_tests.core_test
  return unittest.TestSuite((atom_tests.core_test.suite(),))


if __name__ == '__main__':
  unittest.TextTestRunner().run(all_tests.suite())
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
# Tests for v2 features.
import atom_tests.core_test
import atom_tests.data_test
import atom_tests.http_core_test
import atom_tests.auth_test
import atom_tests.mock_http_core_test
import atom_tests.client_test
import gdata_tests.client_test
import gdata_tests.core_test
import gdata_tests.data_test
import gdata_tests.data_smoke_test
import gdata_tests.client_smoke_test
import gdata_tests.live_client_test
import gdata_tests.gauth_test
import gdata_tests.blogger.data_test
import gdata_tests.blogger.live_client_test
import gdata_tests.spreadsheets.data_test
import gdata_tests.spreadsheets.live_client_test
import gdata_tests.projecthosting.data_test
import gdata_tests.projecthosting.live_client_test
import gdata_tests.sites.data_test
import gdata_tests.sites.live_client_test
import gdata_tests.analytics.data_test
import gdata_tests.analytics.live_client_test
import gdata_tests.contacts.live_client_test
import gdata_tests.contacts.profiles.live_client_test
import gdata_tests.calendar_resource.live_client_test
import gdata_tests.calendar_resource.data_test
import gdata_tests.apps.emailsettings.data_test
import gdata_tests.apps.emailsettings.live_client_test
import gdata_tests.youtube.live_client_test
def suite():
  """Aggregate the suites of every v2 test module into one TestSuite.

  The modules are listed in the same order their suites should run.
  """
  test_modules = [
      gdata_tests.contacts.profiles.live_client_test,
      atom_tests.core_test,
      atom_tests.data_test,
      atom_tests.http_core_test,
      atom_tests.auth_test,
      atom_tests.mock_http_core_test,
      atom_tests.client_test,
      gdata_tests.client_test,
      gdata_tests.core_test,
      gdata_tests.data_test,
      gdata_tests.data_smoke_test,
      gdata_tests.client_smoke_test,
      gdata_tests.live_client_test,
      gdata_tests.gauth_test,
      gdata_tests.blogger.data_test,
      gdata_tests.blogger.live_client_test,
      gdata_tests.spreadsheets.data_test,
      gdata_tests.spreadsheets.live_client_test,
      gdata_tests.projecthosting.data_test,
      gdata_tests.projecthosting.live_client_test,
      gdata_tests.sites.data_test,
      gdata_tests.sites.live_client_test,
      gdata_tests.analytics.data_test,
      gdata_tests.analytics.live_client_test,
      gdata_tests.contacts.live_client_test,
      gdata_tests.calendar_resource.live_client_test,
      gdata_tests.calendar_resource.data_test,
      gdata_tests.apps.emailsettings.live_client_test,
      gdata_tests.apps.emailsettings.data_test,
      gdata_tests.youtube.live_client_test,
  ]
  return unittest.TestSuite(module.suite() for module in test_modules)


if __name__ == '__main__':
  unittest.TextTestRunner().run(suite())
| Python |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
# read source directories to scan from command line
SOURCE = sys.argv[1:]
# regex to find /* */ style comment blocks
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# regexes used to detect whether a comment block is a license notice:
# a block must mention both "license" and "copyright" to qualify.
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)
# MIME types whose files are never scanned for license comments.
EXCLUDE_TYPES = [
    "application/xml",
    "image/png",
]
# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
KNOWN_LICENSES = {}
class License:
  """One unique license text plus the list of files it applies to."""

  def __init__(self, license_text):
    self.license_text = license_text
    # Files known to carry this exact license text.
    self.filenames = []

  def add_file(self, filename):
    """Record *filename* as covered by this license, ignoring duplicates."""
    already_recorded = filename in self.filenames
    if not already_recorded:
      self.filenames.append(filename)
# Non-word characters; stripped out to normalize a license text into a key.
LICENSE_KEY = re.compile(r"[^\w]")


def find_license(license_text):
  """Return the (possibly newly registered) License for this text.

  Texts are normalized by removing all non-word characters and
  lowercasing, so trivially reformatted copies of the same license
  collapse into a single entry in KNOWN_LICENSES.
  """
  # TODO(alice): a lot these licenses are almost identical Apache licenses.
  # Most of them differ in origin/modifications. Consider combining similar
  # licenses.
  normalized_key = LICENSE_KEY.sub("", license_text).lower()
  existing = KNOWN_LICENSES.get(normalized_key)
  if existing is None:
    existing = License(license_text)
    KNOWN_LICENSES[normalized_key] = existing
  return existing
def discover_license(exact_path, filename):
  """Scan one file for license text and register it via find_license().

  Two discovery modes:
    * A file named ``*LICENSE`` is treated as the full license text for the
      file it is named after (the prefix before "LICENSE").
    * Any other (non-excluded) file is searched for /* ... */ comment
      blocks that mention both "license" and "copyright".

  Args:
    exact_path: path used to open the file.
    filename: bare filename, used for MIME-type detection and reporting.
  """
  # when filename ends with LICENSE, assume applies to filename prefixed
  if filename.endswith("LICENSE"):
    with open(exact_path) as file:
      license_text = file.read()
    target_filename = filename[:-len("LICENSE")]
    if target_filename.endswith("."):
      target_filename = target_filename[:-1]
    find_license(license_text).add_file(target_filename)
    return None
  # try searching for license blocks in raw file
  # Bug fix: mimetypes.guess_type() returns a (type, encoding) tuple, so
  # the old `guess_type(...) in EXCLUDE_TYPES` test could never match;
  # compare only the type component.
  mimetype = mimetypes.guess_type(filename)[0]
  if mimetype in EXCLUDE_TYPES:
    return None
  with open(exact_path) as file:
    raw_file = file.read()
  # include comments that have both "license" and "copyright" in the text
  for comment in COMMENT_BLOCK.finditer(raw_file):
    comment = comment.group(1)
    if COMMENT_LICENSE.search(comment) is None:
      continue
    if COMMENT_COPYRIGHT.search(comment) is None:
      continue
    find_license(comment).add_file(filename)
# Walk every requested source tree and collect license texts per file.
for source in SOURCE:
  for root, dirs, files in os.walk(source):
    for name in files:
      discover_license(os.path.join(root, name), name)
# Emit a single HTML report on stdout: for each distinct license, the list
# of files it covers followed by the license text itself.
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
  print "<h3>Notices for files:</h3><ul>"
  filenames = license.filenames
  filenames.sort()
  for filename in filenames:
    print "<li>%s</li>" % (filename)
  print "</ul>"
  # NOTE(review): license_text is not HTML-escaped, so markup inside a
  # license file would leak into the page — confirm this is acceptable.
  print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| Python |
import os

# Directory containing this module.  NOTE(review): os.path.dirname(__file__)
# is '' when the module is loaded from the current directory — presumably
# consumers join paths against it; confirm they handle the empty string.
root_path = os.path.dirname(__file__)
| Python |
"""Convert to and from Roman numerals"""
__author__ = "Mark Pilgrim (f8dy@diveintopython.org)"
__version__ = "1.4"
__date__ = "8 August 2001"
__copyright__ = """Copyright (c) 2001 Mark Pilgrim
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
"""
import re
#Define exceptions
#Define exceptions
class RomanError(Exception):
  """Base class for all errors raised by this module."""


class OutOfRangeError(RomanError):
  """Raised when the integer cannot be represented (must be 1..4999)."""


class NotIntegerError(RomanError):
  """Raised when a non-integral number is passed to toRoman."""


class InvalidRomanNumeralError(RomanError):
  """Raised when a string is not a well-formed Roman numeral."""
#Define digit mapping
# Ordered largest-to-smallest so the greedy loop in toRoman() works.
romanNumeralMap = (('M', 1000),
                   ('CM', 900),
                   ('D', 500),
                   ('CD', 400),
                   ('C', 100),
                   ('XC', 90),
                   ('L', 50),
                   ('XL', 40),
                   ('X', 10),
                   ('IX', 9),
                   ('V', 5),
                   ('IV', 4),
                   ('I', 1))

def toRoman(n):
    """Convert an integer in 1..4999 to a Roman numeral string.

    Raises:
      OutOfRangeError: if n is not in 1..4999.
      NotIntegerError: if n has a fractional part.

    Bug fix: the Python 2-only ``raise Class, "msg"`` statement syntax has
    been modernized to the call form, which works on Python 2.6+ and 3.x
    with identical behavior.
    """
    if not (0 < n < 5000):
        raise OutOfRangeError("number out of range (must be 1..4999)")
    if int(n) != n:
        raise NotIntegerError("decimals can not be converted")
    result = ""
    # Greedy conversion: repeatedly emit the largest numeral that fits.
    for numeral, integer in romanNumeralMap:
        while n >= integer:
            result += numeral
            n -= integer
    return result

#Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile("""
    ^                   # beginning of string
    M{0,4}              # thousands - 0 to 4 M's
    (CM|CD|D?C{0,3})    # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
                        #            or 500-800 (D, followed by 0 to 3 C's)
    (XC|XL|L?X{0,3})    # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
                        #        or 50-80 (L, followed by 0 to 3 X's)
    (IX|IV|V?I{0,3})    # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
                        #        or 5-8 (V, followed by 0 to 3 I's)
    $                   # end of string
    """, re.VERBOSE)

def fromRoman(s):
    """Convert a Roman numeral string to an integer.

    Raises:
      InvalidRomanNumeralError: if s is empty or not a valid numeral.
    """
    if not s:
        raise InvalidRomanNumeralError('Input can not be blank')
    if not romanNumeralPattern.search(s):
        raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s)
    result = 0
    index = 0
    # Consume numerals left to right, largest values first.
    for numeral, integer in romanNumeralMap:
        while s[index:index+len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
| Python |
"""Tests for query.py."""
import os
import re
import sys
import time
import unittest
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.datastore import datastore_rpc
from google.appengine.datastore import datastore_query
from ndb import context
from ndb import model
from ndb import query
from ndb import tasklets
from ndb import test_utils
class QueryTests(test_utils.DatastoreTest):
  """Tests for the ndb query API: filters, orders, structured properties,
  async iteration, and GQL parsing.

  Bug fix: this class previously defined testGqlAncestor twice; the second
  definition silently shadowed the first, so the binding-based ancestor
  test never ran.  The second definition is renamed
  testGqlAncestorWithKeyLiteral below.
  """

  def setUp(self):
    super(QueryTests, self).setUp()
    tasklets.set_context(context.Context())
    # Create class inside tests because kinds are cleared every test.
    global Foo
    class Foo(model.Model):
      name = model.StringProperty()
      rate = model.IntegerProperty()
      tags = model.StringProperty(repeated=True)
    self.create_entities()

  def create_entities(self):
    """Store the three Foo fixtures (joe, jill, moe) used by every test."""
    self.joe = Foo(name='joe', tags=['joe', 'jill', 'hello'], rate=1)
    self.joe.put()
    self.jill = Foo(name='jill', tags=['jack', 'jill'], rate=2)
    self.jill.put()
    self.moe = Foo(name='moe', rate=1)
    self.moe.put()

  def testBasicQuery(self):
    q = query.Query(kind='Foo')
    # The trailing no-arg filter() call is expected to be a no-op.
    q = q.filter(Foo.name >= 'joe').filter(Foo.name <= 'moe').filter()
    res = list(q)
    self.assertEqual(res, [self.joe, self.moe])

  def testOrderedQuery(self):
    q = query.Query(kind='Foo')
    # The middle no-arg order() call is expected to be a no-op.
    q = q.order(Foo.rate).order().order(-Foo.name)
    res = list(q)
    self.assertEqual(res, [self.moe, self.joe, self.jill])

  def testQueryAttributes(self):
    """kind/ancestor/filters/orders accessors track each builder step."""
    q = query.Query(kind='Foo')
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, None)
    self.assertEqual(q.filters, None)
    self.assertEqual(q.orders, None)
    key = model.Key('Barba', 'papa')
    q = query.Query(kind='Foo', ancestor=key)
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, key)
    self.assertEqual(q.filters, None)
    self.assertEqual(q.orders, None)
    q = q.filter(Foo.rate == 1)
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, key)
    self.assertEqual(q.filters, query.FilterNode('rate', '=', 1))
    self.assertEqual(q.orders, None)
    q = q.order(-Foo.name)
    self.assertEqual(q.kind, 'Foo')
    self.assertEqual(q.ancestor, key)
    self.assertEqual(q.filters, query.FilterNode('rate', '=', 1))
    expected_order = [('name', query.DESC)]
    self.assertEqual(query.orders_to_orderings(q.orders), expected_order)

  def testModernQuerySyntax(self):
    """Model.query() with property comparisons builds the expected nodes."""
    class Employee(model.Model):
      name = model.StringProperty()
      age = model.IntegerProperty('Age')
      rank = model.IntegerProperty()
      @classmethod
      def seniors(cls, min_age, min_rank):
        q = cls.query().filter(cls.age >= min_age, cls.rank <= min_rank)
        q = q.order(cls.name, -cls.age)
        return q
    q = Employee.seniors(42, 5)
    # Note: filters/orders use the datastore name 'Age', not 'age'.
    self.assertEqual(q.filters,
                     query.ConjunctionNode(
                         [query.FilterNode('Age', '>=', 42),
                          query.FilterNode('rank', '<=', 5)]))
    self.assertEqual(query.orders_to_orderings(q.orders),
                     [('name', query.ASC), ('Age', query.DESC)])

  def testQueryForStructuredProperty(self):
    class Bar(model.Model):
      name = model.StringProperty()
      foo = model.StructuredProperty(Foo)
    b1 = Bar(name='b1', foo=Foo(name='nest', rate=1, tags=['tag1', 'tag2']))
    b1.put()
    b2 = Bar(name='b2', foo=Foo(name='best', rate=2, tags=['tag2', 'tag3']))
    b2.put()
    b3 = Bar(name='b3', foo=Foo(name='rest', rate=2, tags=['tag2']))
    b3.put()
    q1 = Bar.query().order(Bar.name)
    self.assertEqual(q1.fetch(10), [b1, b2, b3])
    q2 = Bar.query().filter(Bar.foo.rate >= 2)
    self.assertEqual(q2.fetch(10), [b2, b3])
    # The repeated +Bar.foo.rate order is expected to be tolerated.
    q3 = q2.order(Bar.foo.rate, -Bar.foo.name, +Bar.foo.rate)
    self.assertEqual(q3.fetch(10), [b3, b2])

  def testQueryForNestedStructuredProperty(self):
    class Bar(model.Model):
      name = model.StringProperty()
      foo = model.StructuredProperty(Foo)
    class Bak(model.Model):
      bar = model.StructuredProperty(Bar)
    class Baz(model.Model):
      bar = model.StructuredProperty(Bar)
      bak = model.StructuredProperty(Bak)
      rank = model.IntegerProperty()
    b1 = Baz(bar=Bar(foo=Foo(name='a')))
    b1.put()
    b2 = Baz(bar=Bar(foo=Foo(name='b')), bak=Bak(bar=Bar(foo=Foo(name='c'))))
    b2.put()
    q1 = Baz.query().filter(Baz.bar.foo.name >= 'a')
    self.assertEqual(q1.fetch(10), [b1, b2])
    q2 = Baz.query().filter(Baz.bak.bar.foo.name >= 'a')
    self.assertEqual(q2.fetch(10), [b2])

  def testQueryForWholeStructure(self):
    """Equality against a partially filled entity matches per-field."""
    class Employee(model.Model):
      name = model.StringProperty()
      rank = model.IntegerProperty()
    class Manager(Employee):
      report = model.StructuredProperty(Employee, repeated=True)
    reports_a = []
    for i in range(3):
      e = Employee(name=str(i), rank=i)
      e.put()
      reports_a.append(e)
    reports_b = []
    for i in range(3, 6):
      e = Employee(name=str(i), rank=0)
      e.put()
      reports_b.append(e)
    mgr_a = Manager(name='a', report=reports_a)
    mgr_a.put()
    mgr_b = Manager(name='b', report=reports_b)
    mgr_b.put()
    mgr_c = Manager(name='c', report=reports_a + reports_b)
    mgr_c.put()
    res = list(Manager.query(Manager.report == Employee(name='1', rank=1)))
    self.assertEqual(res, [mgr_a, mgr_c])
    res = list(Manager.query(Manager.report == Employee(rank=0)))
    self.assertEqual(res, [mgr_a, mgr_b, mgr_c])
    res = list(Manager.query(Manager.report == Employee(rank=0, name='3')))
    self.assertEqual(res, [mgr_b, mgr_c])
    res = list(Manager.query(Manager.report == Employee(rank=0, name='1')))
    self.assertEqual(res, [])
    res = list(Manager.query(Manager.report == Employee(rank=0, name='0'),
                             Manager.report == Employee(rank=1, name='1')))
    self.assertEqual(res, [mgr_a, mgr_c])
    q = Manager.query(Manager.report == Employee(rank=2, name='2'))
    res = list(q)
    self.assertEqual(res, [mgr_a, mgr_c])
    res = list(q.iter(options=query.QueryOptions(offset=1)))
    self.assertEqual(res, [mgr_c])
    res = list(q.iter(options=query.QueryOptions(limit=1)))
    self.assertEqual(res, [mgr_a])

  def testMultiQuery(self):
    q1 = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    q2 = query.Query(kind='Foo').filter(Foo.tags == 'joe').order(Foo.name)
    qq = query.MultiQuery([q1, q2],
                          query.ordering_to_order(('name', query.ASC)))
    res = list(qq)
    self.assertEqual(res, [self.jill, self.joe])

  def testIterAsync(self):
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    @tasklets.synctasklet
    def foo():
      it = iter(q)
      res = []
      while (yield it.has_next_async()):
        val = it.next()
        res.append(val)
      self.assertEqual(res, [self.jill, self.joe])
    foo()

  def testMap(self):
    """map() accepts both plain callbacks and tasklet callbacks."""
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    callback = lambda e: e.name
    @tasklets.tasklet
    def callback_async(e):
      yield tasklets.sleep(0.01)
      raise tasklets.Return(e.name)
    self.assertEqual(q.map(callback), ['jill', 'joe'])
    self.assertEqual(q.map(callback_async), ['jill', 'joe'])

  def testMapAsync(self):
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    callback = lambda e: e.name
    @tasklets.tasklet
    def callback_async(e):
      yield tasklets.sleep(0.01)
      raise tasklets.Return(e.name)
    @tasklets.synctasklet
    def foo():
      fut = q.map_async(callback)
      res = yield fut
      self.assertEqual(res, ['jill', 'joe'])
      fut = q.map_async(callback_async)
      res = yield fut
      self.assertEqual(res, ['jill', 'joe'])
    foo()

  def testFetch(self):
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.fetch(10), [self.jill, self.joe])
    self.assertEqual(q.fetch(1), [self.jill])

  def testFetchAsync(self):
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    @tasklets.synctasklet
    def foo():
      res = yield q.fetch_async(10)
      self.assertEqual(res, [self.jill, self.joe])
      res = yield q.fetch_async(1)
      self.assertEqual(res, [self.jill])
    foo()

  def testFetchEmpty(self):
    q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
    self.assertEqual(q.fetch(1), [])

  def testFetchKeysOnly(self):
    qo = query.QueryOptions(keys_only=True)
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.fetch(10, options=qo), [self.jill.key, self.joe.key])

  def testGet(self):
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.get(), self.jill)

  def testGetEmpty(self):
    q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
    self.assertEqual(q.get(), None)

  def testGetKeysOnly(self):
    qo = query.QueryOptions(keys_only=True)
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.get(options=qo), self.jill.key)

  def testCount(self):
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    self.assertEqual(q.count(10), 2)
    self.assertEqual(q.count(1), 1)

  def testCountAsync(self):
    q = query.Query(kind='Foo').filter(Foo.tags == 'jill').order(Foo.name)
    @tasklets.synctasklet
    def foo():
      res = yield q.count_async(10)
      self.assertEqual(res, 2)
      res = yield q.count_async(1)
      self.assertEqual(res, 1)
    foo()

  def testCountEmpty(self):
    q = query.Query(kind='Foo').filter(Foo.tags == 'jillian')
    self.assertEqual(q.count(1), 0)

  def testMultiQueryIterator(self):
    q = query.Query(kind='Foo').filter(Foo.tags.IN(['joe', 'jill']))
    q = q.order(Foo.name)
    @tasklets.synctasklet
    def foo():
      it = iter(q)
      res = []
      while (yield it.has_next_async()):
        val = it.next()
        res.append(val)
      self.assertEqual(res, [self.jill, self.joe])
    foo()

  def testNotEqualOperator(self):
    q = query.Query(kind='Foo').filter(Foo.rate != 2)
    res = list(q)
    self.assertEqual(res, [self.joe, self.moe])

  def testInOperator(self):
    q = query.Query(kind='Foo').filter(Foo.tags.IN(('jill', 'hello')))
    res = list(q)
    self.assertEqual(res, [self.joe, self.jill])

  def testFullDistributiveLaw(self):
    """IN x IN expands into a disjunction of all filter combinations."""
    q = query.Query(kind='Foo').filter(Foo.tags.IN(['jill', 'hello']))
    q = q.filter(Foo.rate.IN([1, 2]))
    DisjunctionNode = query.DisjunctionNode
    ConjunctionNode = query.ConjunctionNode
    FilterNode = query.FilterNode
    expected = DisjunctionNode(
        [ConjunctionNode([FilterNode('tags', '=', 'jill'),
                          FilterNode('rate', '=', 1)]),
         ConjunctionNode([FilterNode('tags', '=', 'jill'),
                          FilterNode('rate', '=', 2)]),
         ConjunctionNode([FilterNode('tags', '=', 'hello'),
                          FilterNode('rate', '=', 1)]),
         ConjunctionNode([FilterNode('tags', '=', 'hello'),
                          FilterNode('rate', '=', 2)])])
    self.assertEqual(q.filters, expected)

  def testHalfDistributiveLaw(self):
    """A single IN inside a conjunction distributes over the other terms."""
    DisjunctionNode = query.DisjunctionNode
    ConjunctionNode = query.ConjunctionNode
    FilterNode = query.FilterNode
    filters = ConjunctionNode(
        [FilterNode('tags', 'in', ['jill', 'hello']),
         ConjunctionNode([FilterNode('rate', '=', 1),
                          FilterNode('name', '=', 'moe')])])
    expected = DisjunctionNode(
        [ConjunctionNode([FilterNode('tags', '=', 'jill'),
                          FilterNode('rate', '=', 1),
                          FilterNode('name', '=', 'moe')]),
         ConjunctionNode([FilterNode('tags', '=', 'hello'),
                          FilterNode('rate', '=', 1),
                          FilterNode('name', '=', 'moe')])])
    self.assertEqual(filters, expected)

  def testGqlMinimal(self):
    qry, options, bindings = query.parse_gql('SELECT * FROM Kind')
    self.assertEqual(qry.kind, 'Kind')
    self.assertEqual(qry.ancestor, None)
    self.assertEqual(qry.filters, None)
    self.assertEqual(qry.orders, None)
    self.assertEqual(bindings, {})

  def testGqlAncestor(self):
    qry, options, bindings = query.parse_gql(
        'SELECT * FROM Kind WHERE ANCESTOR IS :1')
    self.assertEqual(qry.kind, 'Kind')
    self.assertEqual(qry.ancestor, query.Binding(None, 1))
    self.assertEqual(qry.filters, None)
    self.assertEqual(qry.orders, None)
    self.assertEqual(bindings, {1: query.Binding(None, 1)})

  def testGqlAncestorWithKeyLiteral(self):
    # Renamed from a second `testGqlAncestor` definition that shadowed the
    # binding-based ancestor test above, so it never ran.
    key = model.Key('Foo', 42)
    qry, options, bindings = query.parse_gql(
        "SELECT * FROM Kind WHERE ANCESTOR IS KEY('%s')" % key.urlsafe())
    self.assertEqual(qry.kind, 'Kind')
    self.assertEqual(qry.ancestor, key)
    self.assertEqual(qry.filters, None)
    self.assertEqual(qry.orders, None)
    self.assertEqual(bindings, {})

  def testGqlFilter(self):
    qry, options, bindings = query.parse_gql(
        "SELECT * FROM Kind WHERE prop1 = 1 AND prop2 = 'a'")
    self.assertEqual(qry.kind, 'Kind')
    self.assertEqual(qry.ancestor, None)
    self.assertEqual(qry.filters,
                     query.ConjunctionNode(
                         [query.FilterNode('prop1', '=', 1),
                          query.FilterNode('prop2', '=', 'a')]))
    self.assertEqual(qry.orders, None)
    self.assertEqual(bindings, {})

  def testGqlOrder(self):
    qry, options, bindings = query.parse_gql(
        'SELECT * FROM Kind ORDER BY prop1')
    self.assertEqual(query.orders_to_orderings(qry.orders),
                     [('prop1', query.ASC)])

  def testGqlOffset(self):
    qry, options, bindings = query.parse_gql(
        'SELECT * FROM Kind OFFSET 2')
    self.assertEqual(options.offset, 2)

  def testGqlLimit(self):
    qry, options, bindings = query.parse_gql(
        'SELECT * FROM Kind LIMIT 2')
    self.assertEqual(options.limit, 2)

  def testGqlBindings(self):
    qry, options, bindings = query.parse_gql(
        'SELECT * FROM Kind WHERE prop1 = :1 AND prop2 = :foo')
    self.assertEqual(qry.kind, 'Kind')
    self.assertEqual(qry.ancestor, None)
    self.assertEqual(qry.filters,
                     query.ConjunctionNode(
                         [query.FilterNode('prop1', '=',
                                           query.Binding(None, 1)),
                          query.FilterNode('prop2', '=',
                                           query.Binding(None, 'foo'))]))
    self.assertEqual(qry.orders, None)
    self.assertEqual(bindings, {1: query.Binding(None, 1),
                                'foo': query.Binding(None, 'foo')})

  def testResolveBindings(self):
    qry, options, bindings = query.parse_gql(
        'SELECT * FROM Foo WHERE name = :1')
    bindings[1].value = 'joe'
    self.assertEqual(list(qry), [self.joe])
    bindings[1].value = 'jill'
    self.assertEqual(list(qry), [self.jill])
def main():
  # Standard unittest command-line entry point.
  unittest.main()


if __name__ == '__main__':
  main()
| Python |
"""An event loop.
This event loop should handle both asynchronous App Engine RPC objects
(specifically urlfetch and datastore RPC objects) and arbitrary
callback functions with an optional time delay.
Normally, event loops are singleton objects, though there is no
enforcement of this requirement.
The API here is inspired by Monocle.
"""
import bisect
import logging
import os
import time
from google.appengine.api.apiproxy_rpc import RPC
from google.appengine.datastore import datastore_rpc
IDLE = RPC.IDLE
RUNNING = RPC.RUNNING
FINISHING = RPC.FINISHING
class EventLoop(object):
  """An event loop.

  Holds two kinds of pending work: time-ordered callbacks (self.queue)
  and in-flight datastore/urlfetch RPCs with optional completion
  callbacks (self.rpcs).
  """
  # TODO: Use a separate queue for tasklets with delay=None.

  def __init__(self):
    """Constructor."""
    # Sorted list of (when, callable, args, kwds) tuples, earliest first.
    self.queue = []
    # Maps each pending RPC to its (callable, args, kwds) callback triple.
    self.rpcs = {}

  # TODO: Rename to queue_callback?
  def queue_call(self, delay, callable, *args, **kwds):
    """Schedule a function call at a specific time in the future.

    Args:
      delay: None (run as soon as possible), a relative delay in seconds,
        or — when >= 1e9 — an absolute timestamp.
      callable: the function to call; remaining args/kwds are passed to it.
    """
    if delay is None:
      when = 0
    elif delay < 1e9:
      when = delay + time.time()
    else:
      # Times over a billion seconds are assumed to be absolute.
      when = delay
    # insort keeps the queue sorted by scheduled time.
    bisect.insort(self.queue, (when, callable, args, kwds))

  def queue_rpc(self, rpc, callable=None, *args, **kwds):
    """Schedule an RPC with an optional callback.

    The caller must have previously sent the call to the service.
    The optional callback is called with the remaining arguments.

    NOTE: If the rpc is a MultiRpc, the callback will be called once
    for each sub-RPC. TODO: Is this a good idea?
    """
    if rpc is None:
      return
    assert rpc.state in (RUNNING, FINISHING), rpc.state
    if isinstance(rpc, datastore_rpc.MultiRpc):
      # Track each sub-RPC individually; the callback fires per sub-RPC.
      rpcs = rpc.rpcs
    else:
      rpcs = [rpc]
    for rpc in rpcs:
      self.rpcs[rpc] = (callable, args, kwds)

  # TODO: A way to add a datastore Connection

  def run0(self):
    """Run one item (a callback or an RPC wait_any).

    Returns:
      A time to sleep if something happened (may be 0);
      None if all queues are empty.
    """
    delay = None
    if self.queue:
      delay = self.queue[0][0] - time.time()
      if delay is None or delay <= 0:
        # The earliest callback is due: pop and invoke it now.
        when, callable, args, kwds = self.queue.pop(0)
        logging.debug('event: %s', callable.__name__)
        callable(*args, **kwds)
        # TODO: What if it raises an exception?
        return 0
    if self.rpcs:
      rpc = datastore_rpc.MultiRpc.wait_any(self.rpcs)
      if rpc is not None:
        logging.debug('rpc: %s', rpc.method)
        # Yes, wait_any() may return None even for a non-empty argument.
        # But no, it won't ever return an RPC not in its argument.
        assert rpc in self.rpcs, (rpc, self.rpcs)
        callable, args, kwds = self.rpcs[rpc]
        del self.rpcs[rpc]
        if callable is not None:
          callable(*args, **kwds)
        # TODO: Again, what about exceptions?
      return 0
    return delay

  def run1(self):
    """Run one item (a callback or an RPC wait_any) or sleep.

    Returns:
      True if something happened; False if all queues are empty.
    """
    delay = self.run0()
    if delay is None:
      return False
    if delay > 0:
      time.sleep(delay)
    return True

  def run(self):
    """Run until there's nothing left to do."""
    # TODO: A way to stop running before the queue is empty.
    while True:
      if not self.run1():
        break
# Marker stored in os.environ so a reset environment signals a new request.
_EVENT_LOOP_KEY = '__EVENT_LOOP__'
_event_loop = None


def get_event_loop():
  """Return a singleton EventLoop instance.

  A new singleton is created for each new HTTP request. We determine
  that we're in a new request by inspecting os.environ, which is reset
  at the start of each request.
  """
  # TODO: Use thread-local storage?
  global _event_loop
  loop = _event_loop if os.getenv(_EVENT_LOOP_KEY) else None
  if loop is None:
    loop = EventLoop()
    _event_loop = loop
    os.environ[_EVENT_LOOP_KEY] = '1'
  return loop
def queue_call(*args, **kwds):
  """Schedule a callback on the singleton event loop."""
  get_event_loop().queue_call(*args, **kwds)


def queue_rpc(rpc, callable=None, *args, **kwds):
  """Schedule an RPC (with optional callback) on the singleton event loop."""
  get_event_loop().queue_rpc(rpc, callable, *args, **kwds)


def run():
  """Run the singleton event loop until it is empty."""
  get_event_loop().run()


def run1():
  """Run one step of the singleton loop; True if anything happened."""
  return get_event_loop().run1()


def run0():
  """Run one item on the singleton loop; returns a sleep time or None."""
  return get_event_loop().run0()
| Python |
import logging
import os
import sys
def wrapping(wrapped):
  """Decorator factory: copy *wrapped*'s metadata onto a wrapper function.

  Following the lead of Twisted and Monocle, this is supposed to make
  debugging heavily decorated code easier.
  """
  # TODO: Evaluate; so far it hasn't helped (nor hurt).
  def copy_metadata(wrapper):
    wrapper.__name__ = wrapped.__name__
    wrapper.__doc__ = wrapped.__doc__
    wrapper.__dict__.update(wrapped.__dict__)
    return wrapper
  return copy_metadata
def get_stack(limit=10):
  """Return up to *limit* strings describing the current call stack.

  Frames whose locals set __ndb_debug__ to 'SKIP' are omitted; any other
  non-None __ndb_debug__ value is appended to that frame's line.
  """
  frame = sys._getframe(1)  # Always skip get_stack() itself.
  lines = []
  while frame is not None and len(lines) < limit:
    debug_marker = frame.f_locals.get('__ndb_debug__')
    if debug_marker != 'SKIP':
      entry = frame_info(frame)
      if debug_marker is not None:
        entry += ' # ' + str(debug_marker)
      lines.append(entry)
    frame = frame.f_back
  return lines
def func_info(func, lineno=None):
  """Describe a function as 'name(filename:lineno)'.

  Bug fix: uses func.__code__ instead of the Python 2-only
  func.func_code attribute (__code__ exists on Python 2.6+ and 3.x).
  """
  code = func.__code__
  return code_info(code, lineno)


def gen_info(gen):
  """Describe a generator, including its run state, as a string."""
  frame = gen.gi_frame
  if gen.gi_running:
    prefix = 'running generator '
  elif frame:
    if frame.f_lasti < 0:
      # Execution has not started yet.
      prefix = 'initial generator '
    else:
      prefix = 'suspended generator '
  else:
    # A terminated generator has no frame left.
    prefix = 'terminated generator '
  if frame:
    return prefix + frame_info(frame)
  code = getattr(gen, 'gi_code', None)
  if code:
    return prefix + code_info(code)
  # Last resort: no frame and no code object; identify by address.
  return prefix + hex(id(gen))


def frame_info(frame):
  """Describe a frame as 'funcname(filename:lineno)'."""
  return code_info(frame.f_code, frame.f_lineno)


def code_info(code, lineno=None):
  """Describe a code object as 'funcname(filename:lineno)'.

  Args:
    code: a code object.
    lineno: the line number to report; defaults to the code's first line.
  """
  funcname = code.co_name
  # TODO: Be cleverer about stripping filename,
  # e.g. strip based on sys.path.
  filename = os.path.basename(code.co_filename)
  if lineno is None:
    lineno = code.co_firstlineno
  return '%s(%s:%s)' % (funcname, filename, lineno)
# Hack for running tests with verbose logging. If there are two or
# more -v flags, turn on INFO logging; if there are 3 or more, DEBUG.
# (A single -v just tells unittest.main() to print the name of each
# test; we don't want to interfere with that.)
if sys.argv[0].endswith('_test.py'):
  v = 0
  for arg in sys.argv[1:]:
    if arg.startswith('-v'):
      # Count every 'v' so both repeated '-v' flags and '-vv' accumulate.
      v += arg.count('v')
  if v >= 2:
    level = logging.INFO
    if v >= 3:
      level = logging.DEBUG
    logging.basicConfig(level=level)
| Python |
"""Tests for eventloop.py."""
import os
import time
import unittest
from google.appengine.datastore import datastore_rpc
from ndb import eventloop, test_utils
class EventLoopTests(test_utils.DatastoreTest):
  """Tests for the EventLoop class and the module-level singleton helpers."""

  def setUp(self):
    super(EventLoopTests, self).setUp()
    # Remove the request marker so get_event_loop() builds a fresh
    # singleton for this test rather than reusing a previous one.
    if eventloop._EVENT_LOOP_KEY in os.environ:
      del os.environ[eventloop._EVENT_LOOP_KEY]
    self.ev = eventloop.get_event_loop()

  def testQueueTasklet(self):
    """Callbacks queued out of order end up sorted by scheduled time."""
    def f(): return 1
    def g(): return 2
    def h(): return 3
    t_before = time.time()
    eventloop.queue_call(1, f, 42, 'hello', a=1, b=2)
    eventloop.queue_call(3, h, c=3, d=4)
    eventloop.queue_call(2, g, 100, 'abc')
    t_after = time.time()
    self.assertEqual(len(self.ev.queue), 3)
    [(t1, f1, a1, k1), (t2, f2, a2, k2), (t3, f3, a3, k3)] = self.ev.queue
    self.assertTrue(t1 < t2)
    self.assertTrue(t2 < t3)
    # Each absolute time must fall within the window now + delay, allowing
    # for the elapsed time between t_before and t_after.
    self.assertTrue(abs(t1 - (t_before + 1)) < t_after - t_before)
    self.assertTrue(abs(t2 - (t_before + 2)) < t_after - t_before)
    self.assertTrue(abs(t3 - (t_before + 3)) < t_after - t_before)
    self.assertEqual(f1, f)
    self.assertEqual(f2, g)
    self.assertEqual(f3, h)
    # Positional args and keyword args are stored with each entry.
    self.assertEqual(a1, (42, 'hello'))
    self.assertEqual(a2, (100, 'abc'))
    self.assertEqual(a3, ())
    self.assertEqual(k1, {'a': 1, 'b': 2})
    self.assertEqual(k2, {})
    self.assertEqual(k3, {'c': 3, 'd': 4})

  def testRun(self):
    """run() executes queued callbacks in time order, then returns."""
    record = []
    def foo(arg):
      record.append(arg)
    eventloop.queue_call(0.2, foo, 42)
    eventloop.queue_call(0.1, foo, arg='hello')
    eventloop.run()
    self.assertEqual(record, ['hello', 42])

  def testRunWithRpcs(self):
    """run() waits on queued RPCs and fires their completion callbacks."""
    record = []
    def foo(arg):
      record.append(arg)
    eventloop.queue_call(0.1, foo, 42)
    config = datastore_rpc.Configuration(on_completion=foo)
    rpc = self.conn.async_get(config, [])
    self.assertEqual(len(rpc.rpcs), 1)
    eventloop.queue_rpc(rpc)
    eventloop.run()
    # The RPC completion callback fires before the 0.1s delayed callback.
    self.assertEqual(record, [rpc.rpcs[0], 42])
    self.assertEqual(rpc.state, 2)  # TODO: Use apiproxy_rpc.RPC.FINISHING.
def main():
  """Run all tests in this module via unittest's CLI runner."""
  unittest.main()
if __name__ == '__main__':
  main()
| Python |
"""Tests for context.py."""
import logging
import os
import re
import sys
import time
import unittest
from google.appengine.api import memcache
from google.appengine.datastore import datastore_rpc
from ndb import context
from ndb import eventloop
from ndb import model
from ndb import query
from ndb import tasklets
from ndb import test_utils
class MyAutoBatcher(context.AutoBatcher):
  """AutoBatcher subclass that records every batch invocation for tests."""
  _log = []

  @classmethod
  def reset_log(cls):
    cls._log = []

  def __init__(self, todo_tasklet):
    # Wrap the batch callback: log each call's args on the class, then
    # delegate to the real tasklet unchanged.
    def logged_todo(*args):
      self.__class__._log.append(args)
      return todo_tasklet(*args)
    super(MyAutoBatcher, self).__init__(logged_todo)
class ContextTests(test_utils.DatastoreTest):
  """Tests for context.Context: auto-batching, caching, memcache writes,
  query mapping, transactions, and default-context management."""

  def setUp(self):
    super(ContextTests, self).setUp()
    self.set_up_eventloop()
    MyAutoBatcher.reset_log()
    self.ctx = context.Context(
        conn=model.make_connection(default_model=model.Expando),
        auto_batcher_class=MyAutoBatcher)
  def set_up_eventloop(self):
    # Start each test with a fresh event loop (the loop is cached in
    # os.environ).
    if eventloop._EVENT_LOOP_KEY in os.environ:
      del os.environ[eventloop._EVENT_LOOP_KEY]
    self.ev = eventloop.get_event_loop()
    self.log = []
  def testContext_AutoBatcher_Get(self):
    # Three concurrent get()s should be coalesced into a single batch RPC.
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=['Foo', 1])
      key2 = model.Key(flat=['Foo', 2])
      key3 = model.Key(flat=['Foo', 3])
      fut1 = self.ctx.get(key1)
      fut2 = self.ctx.get(key2)
      fut3 = self.ctx.get(key3)
      ent1 = yield fut1
      ent2 = yield fut2
      ent3 = yield fut3
      raise tasklets.Return([ent1, ent2, ent3])
    ents = foo().get_result()
    self.assertEqual(ents, [None, None, None])
    self.assertEqual(len(MyAutoBatcher._log), 1)
  @tasklets.tasklet
  def create_entities(self):
    # Helper tasklet: put three entities with auto-allocated ids and
    # return their keys (used by several tests below).
    key0 = model.Key(flat=['Foo', None])
    ent1 = model.Model(key=key0)
    ent2 = model.Model(key=key0)
    ent3 = model.Model(key=key0)
    fut1 = self.ctx.put(ent1)
    fut2 = self.ctx.put(ent2)
    fut3 = self.ctx.put(ent3)
    key1 = yield fut1
    key2 = yield fut2
    key3 = yield fut3
    raise tasklets.Return([key1, key2, key3])
  def testContext_AutoBatcher_Put(self):
    keys = self.create_entities().get_result()
    self.assertEqual(len(keys), 3)
    self.assertTrue(None not in keys)
    # All three puts should have been batched into one call.
    self.assertEqual(len(MyAutoBatcher._log), 1)
  def testContext_AutoBatcher_Delete(self):
    # Three concurrent deletes should also collapse into one batch.
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=['Foo', 1])
      key2 = model.Key(flat=['Foo', 2])
      key3 = model.Key(flat=['Foo', 3])
      fut1 = self.ctx.delete(key1)
      fut2 = self.ctx.delete(key2)
      fut3 = self.ctx.delete(key3)
      yield fut1
      yield fut2
      yield fut3
    foo().check_success()
    self.assertEqual(len(MyAutoBatcher._log), 1)
  def testContext_Cache(self):
    # With caching on: repeated gets return the identical object, and a
    # delete leaves a None tombstone in the cache.
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=('Foo', 1))
      ent1 = model.Expando(key=key1, foo=42, bar='hello')
      key = yield self.ctx.put(ent1)
      self.assertTrue(key1 in self.ctx._cache) # Whitebox.
      a = yield self.ctx.get(key1)
      b = yield self.ctx.get(key1)
      self.assertTrue(a is b)
      yield self.ctx.delete(key1)
      self.assertTrue(self.ctx._cache[key] is None) # Whitebox.
      a = yield self.ctx.get(key1)
      self.assertTrue(a is None)
    foo().check_success()
  def testContext_CachePolicy(self):
    # With a never-cache policy: nothing is stored and repeated gets
    # return distinct objects.
    def should_cache(key):
      return False
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=('Foo', 1))
      ent1 = model.Expando(key=key1, foo=42, bar='hello')
      key = yield self.ctx.put(ent1)
      self.assertTrue(key1 not in self.ctx._cache) # Whitebox.
      a = yield self.ctx.get(key1)
      b = yield self.ctx.get(key1)
      self.assertTrue(a is not b)
      yield self.ctx.delete(key1)
      self.assertTrue(key not in self.ctx._cache) # Whitebox.
      a = yield self.ctx.get(key1)
      self.assertTrue(a is None)
    self.ctx.set_cache_policy(should_cache)
    foo().check_success()
  def testContext_CachePolicyDisabledLater(self):
    # If the cache is disabled after an entity is stored in the cache,
    # further get() attempts *must not* return the result stored in cache.
    self.ctx.set_cache_policy(lambda key: True)
    key1 = model.Key(flat=('Foo', 1))
    ent1 = model.Expando(key=key1)
    self.ctx.put(ent1).get_result()
    # get() uses cache
    self.assertTrue(key1 in self.ctx._cache) # Whitebox.
    self.assertEqual(self.ctx.get(key1).get_result(), ent1)
    # get() uses cache
    self.ctx._cache[key1] = None # Whitebox.
    self.assertEqual(self.ctx.get(key1).get_result(), None)
    # get() doesn't use cache
    self.ctx.set_cache_policy(lambda key: False)
    self.assertEqual(self.ctx.get(key1).get_result(), ent1)
  def testContext_Memcache(self):
    # Puts should also (asynchronously) write serialized entities to
    # memcache under the keys' urlsafe strings.
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=('Foo', 1))
      key2 = model.Key(flat=('Foo', 2))
      ent1 = model.Expando(key=key1, foo=42, bar='hello')
      ent2 = model.Expando(key=key2, foo=1, bar='world')
      k1, k2 = yield self.ctx.put(ent1), self.ctx.put(ent2)
      self.assertEqual(k1, key1)
      self.assertEqual(k2, key2)
      yield tasklets.sleep(0.01) # Let other tasklet complete.
      keys = [k1.urlsafe(), k2.urlsafe()]
      results = memcache.get_multi(keys)
      self.assertEqual(
          results,
          {key1.urlsafe(): self.ctx._conn.adapter.entity_to_pb(ent1),
           key2.urlsafe(): self.ctx._conn.adapter.entity_to_pb(ent2)})
    foo().check_success()
  def testContext_CacheQuery(self):
    # Entities returned by a query should be the identical cached objects.
    @tasklets.tasklet
    def foo():
      key1 = model.Key(flat=('Foo', 1))
      key2 = model.Key(flat=('Foo', 2))
      ent1 = model.Expando(key=key1, foo=42, bar='hello')
      ent2 = model.Expando(key=key2, foo=1, bar='world')
      key1a, key2a = yield self.ctx.put(ent1), self.ctx.put(ent2)
      self.assertTrue(key1 in self.ctx._cache) # Whitebox.
      self.assertTrue(key2 in self.ctx._cache) # Whitebox.
      self.assertEqual(key1, key1a)
      self.assertEqual(key2, key2a)
      @tasklets.tasklet
      def callback(ent):
        return ent
      qry = query.Query(kind='Foo')
      results = yield self.ctx.map_query(qry, callback)
      self.assertEqual(results, [ent1, ent2])
      self.assertTrue(results[0] is ent1)
      self.assertTrue(results[1] is ent2)
    foo().check_success()
  def testContext_AllocateIds(self):
    @tasklets.tasklet
    def foo():
      key = model.Key(flat=('Foo', 1))
      lo_hi = yield self.ctx.allocate_ids(key, size=10)
      self.assertEqual(lo_hi, (1, 10))
      lo_hi = yield self.ctx.allocate_ids(key, max=20)
      self.assertEqual(lo_hi, (11, 20))
    foo().check_success()
  def testContext_MapQuery(self):
    # map_query with a tasklet callback returns the callback results.
    @tasklets.tasklet
    def callback(ent):
      return ent.key.flat()[-1]
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, callback)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(set(res), set([1, 2, 3]))
  def testContext_MapQuery_NoCallback(self):
    # With callback=None the raw entities are returned.
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, None)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(len(res), 3)
    for i, ent in enumerate(res):
      self.assertTrue(isinstance(ent, model.Model))
      self.assertEqual(ent.key.flat(), ['Foo', i+1])
  def testContext_MapQuery_NonTaskletCallback(self):
    # A plain (non-tasklet) callback works too.
    def callback(ent):
      return ent.key.flat()[-1]
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, callback)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(res, [1, 2, 3])
  def testContext_MapQuery_CustomFuture(self):
    # With a custom merge_future the overall result is None and the
    # individual results are consumed from the supplied queue.
    mfut = tasklets.QueueFuture()
    @tasklets.tasklet
    def callback(ent):
      return ent.key.flat()[-1]
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, callback, merge_future=mfut)
      self.assertEqual(res, None)
      vals = set()
      for i in range(3):
        val = yield mfut.getq()
        vals.add(val)
      fail = mfut.getq()
      self.assertRaises(EOFError, fail.get_result)
      raise tasklets.Return(vals)
    res = foo().get_result()
    self.assertEqual(res, set([1, 2, 3]))
  def testContext_MapQuery_KeysOnly(self):
    # Keys-only queries pass bare keys to the callback.
    qo = query.QueryOptions(keys_only=True)
    @tasklets.tasklet
    def callback(key):
      return key.pairs()[-1]
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      res = yield self.ctx.map_query(qry, callback, options=qo)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(set(res), set([('Foo', 1), ('Foo', 2), ('Foo', 3)]))
  def testContext_IterQuery(self):
    # iter_query yields entities one at a time until EOFError.
    @tasklets.tasklet
    def foo():
      yield self.create_entities()
      qry = query.Query(kind='Foo')
      it = self.ctx.iter_query(qry)
      res = []
      while True:
        try:
          ent = yield it.getq()
        except EOFError:
          break
        res.append(ent)
      raise tasklets.Return(res)
    res = foo().get_result()
    self.assertEqual(len(res), 3)
    for i, ent in enumerate(res):
      self.assertTrue(isinstance(ent, model.Model))
      self.assertEqual(ent.key.flat(), ['Foo', i+1])
  def testContext_TransactionFailed(self):
    # A transaction's callback runs in a separate context with its own
    # (initially empty) cache; the outer cache sees the committed value.
    @tasklets.tasklet
    def foo():
      key = model.Key(flat=('Foo', 1))
      ent = model.Expando(key=key, bar=1)
      yield self.ctx.put(ent)
      @tasklets.tasklet
      def callback():
        ctx = tasklets.get_context()
        self.assertTrue(key not in ctx._cache) # Whitebox.
        e = yield key.get_async()
        self.assertTrue(key in ctx._cache) # Whitebox.
        e.bar = 2
        yield e.put_async()
      yield self.ctx.transaction(callback)
      self.assertEqual(self.ctx._cache[key].bar, 2)
    foo().check_success()
  def testContext_GetOrInsert(self):
    # This also tests Context.transaction()
    class Mod(model.Model):
      data = model.StringProperty()
    @tasklets.tasklet
    def foo():
      ent = yield self.ctx.get_or_insert(Mod, 'a', data='hello')
      assert isinstance(ent, Mod)
      ent2 = yield self.ctx.get_or_insert(Mod, 'a', data='hello')
      assert ent2 == ent
    foo().check_success()
  def testContext_GetOrInsertWithParent(self):
    # This also tests Context.transaction()
    class Mod(model.Model):
      data = model.StringProperty()
    @tasklets.tasklet
    def foo():
      parent = model.Key(flat=('Foo', 1))
      ent = yield self.ctx.get_or_insert(Mod, 'a', parent=parent, data='hello')
      assert isinstance(ent, Mod)
      ent2 = yield self.ctx.get_or_insert(Mod, 'a', parent=parent, data='hello')
      assert ent2 == ent
    foo().check_success()
  def testAddContextDecorator(self):
    # @context.toplevel should install a fresh Context for the call.
    class Demo(object):
      @context.toplevel
      def method(self, arg):
        return (tasklets.get_context(), arg)
    a = Demo()
    old_ctx = tasklets.get_context()
    ctx, arg = a.method(42)
    self.assertTrue(isinstance(ctx, context.Context))
    self.assertEqual(arg, 42)
    self.assertTrue(ctx is not old_ctx)
  def testDefaultContextTransaction(self):
    # Inside transaction() the current context is a different one backed
    # by a TransactionalConnection; it is restored afterwards.
    @tasklets.synctasklet
    def outer():
      ctx1 = tasklets.get_context()
      @tasklets.tasklet
      def inner():
        ctx2 = tasklets.get_context()
        self.assertTrue(ctx1 is not ctx2)
        self.assertTrue(isinstance(ctx2._conn,
                                   datastore_rpc.TransactionalConnection))
        return 42
      a = yield tasklets.get_context().transaction(inner)
      ctx1a = tasklets.get_context()
      self.assertTrue(ctx1 is ctx1a)
      raise tasklets.Return(a)
    b = outer()
    self.assertEqual(b, 42)
  def testExplicitTransactionClearsDefaultContext(self):
    # The transactional context must not leak into the caller's context,
    # neither while the transaction future is pending nor afterwards.
    old_ctx = tasklets.get_context()
    @tasklets.synctasklet
    def outer():
      ctx1 = tasklets.get_context()
      @tasklets.tasklet
      def inner():
        ctx = tasklets.get_context()
        self.assertTrue(ctx is not ctx1)
        key = model.Key('Account', 1)
        ent = yield key.get_async()
        self.assertTrue(tasklets.get_context() is ctx)
        self.assertTrue(ent is None)
        raise tasklets.Return(42)
      fut = ctx1.transaction(inner)
      self.assertEqual(tasklets.get_context(), ctx1)
      val = yield fut
      self.assertEqual(tasklets.get_context(), ctx1)
      raise tasklets.Return(val)
    val = outer()
    self.assertEqual(val, 42)
    self.assertTrue(tasklets.get_context() is old_ctx)
  def testKindError(self):
    ctx = context.Context()
    # If the cache is enabled, attempts to retrieve the object we just put will
    # be satisfied from the cache, so the adapter we're testing will never get
    # called.
    ctx.set_cache_policy(lambda key: False)
    @tasklets.tasklet
    def foo():
      # Foo class is declared in query_test, so let's get a unusual class name.
      key1 = model.Key(flat=('ThisModelClassDoesntExist', 1))
      ent1 = model.Expando(key=key1, foo=42, bar='hello')
      key = yield ctx.put(ent1)
      a = yield ctx.get(key1)
    self.assertRaises(model.KindError, foo().check_success)
def main():
  """Run all tests in this module via unittest's CLI runner."""
  ##logging.basicConfig(level=logging.INFO)
  unittest.main()
if __name__ == '__main__':
  main()
| Python |
"""A tasklet decorator.
Tasklets are a way to write concurrently running functions without
threads; tasklets are executed by an event loop and can suspend
themselves blocking for I/O or some other operation using a yield
statement. The notion of a blocking operation is abstracted into the
Future class, but a tasklet may also yield an RPC in order to wait for
that RPC to complete.
The @tasklet decorator wraps generator function so that when it is
called, a Future is returned while the generator is executed by the
event loop. For example:
@tasklet
def foo():
a = yield <some Future>
    b = yield <another Future>
    raise Return(a + b)
def main():
f = foo()
x = f.get_result()
print x
Note that blocking until the Future's result is available using
get_result() is somewhat inefficient (though not vastly -- it is not
busy-waiting). In most cases such code should be rewritten as a tasklet
instead:
@tasklet
def main_tasklet():
f = foo()
x = yield f
print x
Calling a tasklet automatically schedules it with the event loop:
def main():
f = main_tasklet()
eventloop.run() # Run until no tasklets left to do
assert f.done()
As a special feature, if the wrapped function is not a generator
function, its return value is returned via the Future. This makes the
following two equivalent:
@tasklet
def foo():
return 42
@tasklet
def foo():
if False: yield # The presence of 'yield' makes foo a generator
raise Return(42) # Or, after PEP 380, return 42
This feature (inspired by Monocle) is handy in case you are
implementing an interface that expects tasklets but you have no need to
suspend -- there's no need to insert a dummy yield in order to make
the tasklet into a generator.
"""
import collections
import logging
import os
import sys
import types
from google.appengine.api.apiproxy_stub_map import UserRPC
from google.appengine.api.apiproxy_rpc import RPC
from google.appengine.datastore import datastore_rpc
from ndb import eventloop, utils
def is_generator(obj):
  """Helper to test for a generator object.

  NOTE: This tests for the (iterable) object returned by calling a
  generator function, not for a generator function.
  """
  # Generators cannot be subclassed, so an exact type check is
  # equivalent to isinstance() here.
  return type(obj) is types.GeneratorType
class Future(object):
"""A Future has 0 or more callbacks.
The callbacks will be called when the result is ready.
NOTE: This is somewhat inspired but not conformant to the Future interface
defined by PEP 3148. It is also inspired (and tries to be somewhat
compatible with) the App Engine specific UserRPC and MultiRpc classes.
"""
# TODO: Trim the API; there are too many ways to do the same thing.
# TODO: Compare to Monocle's much simpler Callback class.
# Constants for state property.
IDLE = RPC.IDLE # Not yet running (unused)
RUNNING = RPC.RUNNING # Not yet completed.
FINISHING = RPC.FINISHING # Completed.
_all_pending = set() # Set of all pending Future instances.
# XXX Add docstrings to all methods. Separate PEP 3148 API from RPC API.
_geninfo = None # Extra info about suspended generator.
def __init__(self, info=None):
# TODO: Make done a method, to match PEP 3148?
__ndb_debug__ = 'SKIP' # Hide this frame from self._where
self._info = info # Info from the caller about this Future's purpose.
self._where = utils.get_stack()
self._context = None
self._reset()
def _reset(self):
self._done = False
self._result = None
self._exception = None
self._traceback = None
self._callbacks = []
logging.debug('_all_pending: add %s', self)
self._all_pending.add(self)
self._next = None # Links suspended Futures together in a stack.
# TODO: Add a __del__ that complains if neither get_exception() nor
# check_success() was ever called? What if it's not even done?
def __repr__(self):
if self._done:
if self._exception is not None:
state = 'exception %s: %s' % (self._exception.__class__.__name__,
self._exception)
else:
state = 'result %r' % (self._result,)
else:
state = 'pending'
line = '?'
for line in self._where:
if 'ndb/tasklets.py' not in line:
break
if self._info:
line += ' for %s;' % self._info
if self._geninfo:
line += ' %s;' % self._geninfo
return '<%s %x created by %s %s>' % (
self.__class__.__name__, id(self), line, state)
def dump(self):
return '%s\nCreated by %s' % (self.dump_stack(),
'\n called by '.join(self._where))
def dump_stack(self):
lines = []
fut = self
while fut is not None:
lines.append(str(fut))
fut = fut._next
return '\n waiting for '.join(lines)
@classmethod
def clear_all_pending(cls):
if cls._all_pending:
logging.info('_all_pending: clear %s', cls._all_pending)
else:
logging.debug('_all_pending: clear no-op')
cls._all_pending.clear()
@classmethod
def dump_all_pending(cls, verbose=False):
all = []
for fut in cls._all_pending:
if verbose:
line = fut.dump() + ('\n' + '-'*40)
else:
line = fut.dump_stack()
all.append(line)
return '\n'.join(all)
def add_callback(self, callback, *args, **kwds):
if self._done:
eventloop.queue_call(None, callback, *args, **kwds)
else:
self._callbacks.append((callback, args, kwds))
def set_result(self, result):
assert not self._done
self._result = result
self._done = True
logging.debug('_all_pending: remove successful %s', self)
self._all_pending.remove(self)
for callback, args, kwds in self._callbacks:
eventloop.queue_call(None, callback, *args, **kwds)
def set_exception(self, exc, tb=None):
assert isinstance(exc, BaseException)
assert not self._done
self._exception = exc
self._traceback = tb
self._done = True
if self in self._all_pending:
logging.debug('_all_pending: remove failing %s', self)
self._all_pending.remove(self)
else:
logging.debug('_all_pending: not found %s', self)
for callback, args, kwds in self._callbacks:
eventloop.queue_call(None, callback, *args, **kwds)
def done(self):
return self._done
@property
def state(self):
# This is just for compatibility with UserRPC and MultiRpc.
# A Future is considered running as soon as it is created.
if self._done:
return self.FINISHING
else:
return self.RUNNING
def wait(self):
if self._done:
return
ev = eventloop.get_event_loop()
while not self._done:
if not ev.run1():
logging.info('Deadlock in %s', self)
logging.info('All pending Futures:\n%s', self.dump_all_pending())
if logging.getLogger().level <= logging.DEBUG:
logging.debug('All pending Futures (verbose):\n%s',
self.dump_all_pending(verbose=True))
self.set_exception(RuntimeError('Deadlock waiting for %s' % self))
def get_exception(self):
self.wait()
return self._exception
def get_traceback(self):
self.wait()
return self._traceback
def check_success(self):
self.wait()
if self._exception is not None:
raise self._exception.__class__, self._exception, self._traceback
def get_result(self):
self.check_success()
return self._result
@classmethod
def wait_any(cls, futures):
# TODO: Flatten MultiRpcs.
all = set(futures)
ev = eventloop.get_event_loop()
while all:
for f in all:
if f.state == cls.FINISHING:
return f
ev.run1()
return None
@classmethod
def wait_all(cls, futures):
# TODO: Flatten MultiRpcs.
all = set(futures)
ev = eventloop.get_event_loop()
while all:
all = set(f for f in all if f.state == cls.RUNNING)
ev.run1()
def _help_tasklet_along(self, gen, val=None, exc=None, tb=None):
# XXX Docstring
info = utils.gen_info(gen)
__ndb_debug__ = info
try:
save_context = get_context()
try:
set_context(self._context)
if exc is not None:
logging.debug('Throwing %s(%s) into %s',
exc.__class__.__name__, exc, info)
value = gen.throw(exc.__class__, exc, tb)
else:
logging.debug('Sending %r to %s', val, info)
value = gen.send(val)
self._context = get_context()
finally:
set_context(save_context)
except StopIteration, err:
result = get_return_value(err)
logging.debug('%s returned %r', info, result)
self.set_result(result)
return
except Exception, err:
_, _, tb = sys.exc_info()
logging.warning('%s raised %s(%s)',
info, err.__class__.__name__, err,
exc_info=(logging.getLogger().level <= logging.INFO))
self.set_exception(err, tb)
return
else:
logging.debug('%s yielded %r', info, value)
if isinstance(value, datastore_rpc.MultiRpc):
# TODO: Tail recursion if the RPC is already complete.
if len(value.rpcs) == 1:
value = value.rpcs[0]
# Fall through to next isinstance test.
else:
assert False # TODO: Support MultiRpc using MultiFuture.
if isinstance(value, UserRPC):
# TODO: Tail recursion if the RPC is already complete.
eventloop.queue_rpc(value, self._on_rpc_completion, value, gen)
return
if isinstance(value, Future):
# TODO: Tail recursion if the Future is already done.
assert not self._next, self._next
self._next = value
self._geninfo = utils.gen_info(gen)
logging.debug('%s is now blocked waiting for %s', self, value)
value.add_callback(self._on_future_completion, value, gen)
return
if isinstance(value, (tuple, list)):
# Arrange for yield to return a list of results (not Futures).
info = 'multi-yield from ' + utils.gen_info(gen)
mfut = MultiFuture(info)
for subfuture in value:
mfut.add_dependent(subfuture)
mfut.complete()
mfut.add_callback(self._on_future_completion, mfut, gen)
return
if is_generator(value):
assert False # TODO: emulate PEP 380 here?
assert False # A tasklet shouldn't yield plain values.
def _on_rpc_completion(self, rpc, gen):
try:
result = rpc.get_result()
except Exception, err:
_, _, tb = sys.exc_info()
self._help_tasklet_along(gen, exc=err, tb=tb)
else:
self._help_tasklet_along(gen, result)
def _on_future_completion(self, future, gen):
if self._next is future:
self._next = None
self._geninfo = None
logging.debug('%s is no longer blocked waiting for %s', self, future)
exc = future.get_exception()
if exc is not None:
self._help_tasklet_along(gen, exc=exc, tb=future.get_traceback())
else:
val = future.get_result() # This won't raise an exception.
self._help_tasklet_along(gen, val)
def sleep(dt):
  """Return a Future whose result (None) arrives after dt seconds.

  Example:
    yield tasklets.sleep(0.5) # Sleep for half a sec.
  """
  future = Future('sleep(%.3f)' % dt)
  eventloop.queue_call(dt, future.set_result, None)
  return future
class MultiFuture(Future):
  """A Future that depends on multiple other Futures.

  The protocol from the caller's POV is:

    mf = MultiFuture()
    mf.add_dependent(<some other Future>) -OR- mf.putq(<some value>)
    mf.add_dependent(<some other Future>) -OR- mf.putq(<some value>)
      .
      . (More mf.add_dependent() and/or mf.putq() calls)
      .
    mf.complete()  # No more dependents will be added.
      .
      . (Time passes)
      .
    results = mf.get_result()

  Now, results is a list of results from all dependent Futures in
  the order in which they were added.

  It is legal to add the same dependent multiple times.

  Callbacks can be added at any point.

  From a dependent Future POV, there's nothing to be done: a callback
  is automatically added to each dependent Future which will signal
  its completion to the MultiFuture.
  """

  def __init__(self, info=None):
    self._full = False
    self._dependents = set()
    self._results = []
    super(MultiFuture, self).__init__(info=info)

  def __repr__(self):
    # TODO: This may be invoked before __init__() returns,
    # from Future.__init__(). Beware.
    line = super(MultiFuture, self).__repr__()
    lines = [line]
    for fut in self._results:
      lines.append(fut.dump_stack().replace('\n', '\n '))
    return '\n waiting for '.join(lines)

  # TODO: Rename this method? (But to what?)
  def complete(self):
    """Signal that no more dependents will be added."""
    assert not self._full
    self._full = True
    if not self._dependents:
      # TODO: How to get multiple exceptions?
      self.set_result([r.get_result() for r in self._results])

  def putq(self, value):
    """Add a value (wrapped in a Future if necessary) as a dependent."""
    if isinstance(value, Future):
      fut = value
    else:
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Register fut; its result will appear in our result list."""
    assert isinstance(fut, Future)
    assert not self._full
    self._results.append(fut)
    if fut not in self._dependents:
      self._dependents.add(fut)
      fut.add_callback(self._signal_dependent_done, fut)

  def _signal_dependent_done(self, fut):
    # Called when a dependent completes; once all are done (and
    # complete() has been called) the combined result is set.
    self._dependents.remove(fut)
    if self._full and not self._dependents:
      # TODO: How to get multiple exceptions?
      self.set_result([r.get_result() for r in self._results])
class QueueFuture(Future):
  """A Queue following the same protocol as MultiFuture.

  However, instead of returning results as a list, it lets you
  retrieve results as soon as they are ready, one at a time, using
  getq(). The Future itself finishes with a result of None when the
  last result is ready (regardless of whether it was retrieved).

  The getq() method returns a Future which blocks until the next
  result is ready, and then returns that result. Each getq() call
  retrieves one unique result. Extra getq() calls after the last
  result is already returned return EOFError as their Future's
  exception. (I.e., q.getq() returns a Future as always, but yielding
  that Future raises EOFError.)

  NOTE: If .getq() is given a default argument, it will be returned as
  the result instead of raising EOFError. However, other exceptions
  are still passed through.

  NOTE: Values can also be pushed directly via .putq(value). However
  there is no flow control -- if the producer is faster than the
  consumer, the queue will grow unbounded.
  """
  # TODO: Refactor to share code with MultiFuture.
  # TODO: Kill getq(default) or add it uniformly.

  _RAISE_ERROR = object() # Marker for getq() default value.

  def __init__(self, info=None):
    self._full = False
    self._dependents = set()
    self._completed = list()
    self._waiting = list() # List of (Future, default) tuples.
    super(QueueFuture, self).__init__(info=info)

  # TODO: __repr__

  def complete(self):
    """Signal that no more dependents will be added."""
    assert not self._full
    self._full = True
    if not self._dependents:
      self._mark_finished()

  def putq(self, value):
    """Add a value (wrapped in a Future if necessary) as a dependent."""
    if isinstance(value, Future):
      fut = value
    else:
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Register fut; its result will be delivered to some getq() caller."""
    assert isinstance(fut, Future)
    assert not self._full
    if fut not in self._dependents:
      self._dependents.add(fut)
      fut.add_callback(self._signal_dependent_done, fut)

  def _signal_dependent_done(self, fut):
    # Route the finished dependent's outcome either to an already-waiting
    # getq() Future or onto the completed queue.
    assert fut.done()
    self._dependents.remove(fut)
    exc = fut.get_exception()
    tb = fut.get_traceback()
    val = None
    if exc is None:
      val = fut.get_result()
    if self._waiting:
      waiter, default = self._waiting.pop(0)
      self._pass_result(waiter, exc, tb, val)
    else:
      self._completed.append((exc, tb, val))
    if self._full and not self._dependents:
      self._mark_finished()

  def _mark_finished(self):
    # All dependents are done and complete() was called: EOF any
    # remaining waiters and finish this Future with None.
    waiting = self._waiting[:]
    del self._waiting[:]
    for waiter, default in waiting:
      self._pass_eof(waiter, default)
    self.set_result(None)

  def getq(self, default=_RAISE_ERROR):
    """Return a Future for the next result (EOFError once exhausted)."""
    # The default is only used when EOFError is raised.
    fut = Future()
    if self._completed:
      exc, tb, val = self._completed.pop(0)
      self._pass_result(fut, exc, tb, val)
    elif self._full and not self._dependents:
      self._pass_eof(fut, default)
    else:
      self._waiting.append((fut, default))
    return fut

  def _pass_eof(self, fut, default):
    # Deliver end-of-queue: the caller-provided default, or an EOFError.
    if default is self._RAISE_ERROR:
      self._pass_result(fut, EOFError('Queue is empty'), None, None)
    else:
      self._pass_result(fut, None, None, default)

  def _pass_result(self, fut, exc, tb, val):
    # Complete fut with either an exception or a value.
    if exc is not None:
      fut.set_exception(exc, tb)
    else:
      fut.set_result(val)
class SerialQueueFuture(Future):
  """Like QueueFuture but maintains the order of insertion."""

  def __init__(self, info=None):
    self._full = False
    self._queue = collections.deque()
    self._waiting = collections.deque()
    # Invariant: at least one of _queue and _waiting is empty.
    super(SerialQueueFuture, self).__init__(info=info)

  # TODO: __repr__

  def complete(self):
    """Signal no more items; EOF any waiters, finish if queue is drained."""
    assert not self._full
    self._full = True
    while self._waiting:
      waiter = self._waiting.popleft()
      waiter.set_exception(EOFError('Queue is empty'))
    if not self._queue:
      self.set_result(None)

  def putq(self, value):
    """Add a value; hand it straight to a waiter when one is queued."""
    if isinstance(value, Future):
      fut = value
    else:
      if self._waiting:
        waiter = self._waiting.popleft()
        waiter.set_result(value)
        return
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Register fut, pairing it with the oldest waiter if one is queued."""
    assert isinstance(fut, Future)
    assert not self._full
    if self._waiting:
      waiter = self._waiting.popleft()
      # TODO: Transfer errors too.
      fut.add_callback(lambda: waiter.set_result(fut.get_result()))
    else:
      self._queue.append(fut)

  def getq(self):
    """Return a Future for the next item, preserving insertion order."""
    if self._queue:
      fut = self._queue.popleft()
    else:
      fut = Future()
      if self._full:
        fut.set_exception(EOFError('Queue is empty'))
      else:
        self._waiting.append(fut)
    if self._full and not self.done():
      self.set_result(None)
    return fut
class ReducingFuture(Future):
  """A Queue following the same protocol as MultiFuture.

  However the result, instead of being a list of results of dependent
  Futures, is computed by calling a 'reducer' tasklet. The reducer tasklet
  takes a list of values and returns a single value. It may be called
  multiple times on sublists of values and should behave like
  e.g. sum().

  NOTE: The reducer input values may be reordered compared to the
  order in which they were added to the queue.
  """
  # TODO: Refactor to reuse some code with MultiFuture.

  def __init__(self, reducer, info=None, batch_size=20):
    self._reducer = reducer
    self._batch_size = batch_size
    self._full = False
    self._dependents = set()
    # NOTE(review): _completed appears unused within this class -- verify
    # before removing.
    self._completed = list()
    self._queue = list()
    super(ReducingFuture, self).__init__(info=info)

  # TODO: __repr__

  def complete(self):
    """Signal that no more dependents will be added."""
    assert not self._full
    self._full = True
    if not self._dependents:
      self._mark_finished()

  def putq(self, value):
    """Add a value (wrapped in a Future if necessary) as a dependent."""
    if isinstance(value, Future):
      fut = value
    else:
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Public entry point; only valid before complete()."""
    assert not self._full
    self._internal_add_dependent(fut)

  def _internal_add_dependent(self, fut):
    # Also used for Futures returned by the reducer itself, which may
    # be added after complete() was called (see _mark_finished).
    assert isinstance(fut, Future)
    if fut not in self._dependents:
      self._dependents.add(fut)
      fut.add_callback(self._signal_dependent_done, fut)

  def _signal_dependent_done(self, fut):
    # Collect the value; reduce eagerly whenever a full batch is queued.
    assert fut.done()
    self._dependents.remove(fut)
    val = fut.get_result() # TODO: What about exceptions here?
    self._queue.append(val)
    if len(self._queue) >= self._batch_size:
      todo = self._queue[:]
      del self._queue[:]
      nval = self._reducer(todo) # TODO: What if exception?
      if isinstance(nval, Future):
        self._internal_add_dependent(nval)
      else:
        self._queue.append(nval)
    if self._full and not self._dependents:
      self._mark_finished()

  def _mark_finished(self):
    # Perform the final reduction (if any values remain) and set the result.
    if not self._queue:
      self.set_result(None)
    elif len(self._queue) == 1:
      self.set_result(self._queue.pop())
    else:
      todo = self._queue[:]
      del self._queue[:]
      nval = self._reducer(todo) # TODO: What if exception?
      if isinstance(nval, Future):
        self._internal_add_dependent(nval)
      else:
        self.set_result(nval)
# Alias for StopIteration used to mark return values.
# To use this, raise Return(<your return value>). The semantics
# are exactly the same as raise StopIteration(<your return value>)
# but using Return clarifies that you are intending this to be the
# return value of a tasklet.
# TODO: According to Monocle authors Steve and Greg Hazel, Twisted
# used an exception to signal a return value from a generator early
# on, and they found out it was error-prone. Should I worry?
Return = StopIteration
def get_return_value(err):
  """Extract the tasklet return value from a StopIteration/Return.

  No args -> None; exactly one arg -> that value; several args -> the
  whole args tuple.
  """
  args = err.args
  if not args:
    return None
  if len(args) == 1:
    return args[0]
  return args
def tasklet(func):
  """Decorator turning func into a tasklet: calling it returns a Future.

  If func is a generator function, the returned generator is driven by
  the event loop and the Future delivers its eventual return value
  (raised via Return/StopIteration). If func is an ordinary function,
  its return value is delivered immediately through the Future.
  """
  # TODO: make most of this a public function so you can take a bare
  # generator and turn it into a tasklet dynamically. (Monocle has
  # this I believe.)
  @utils.wrapping(func)
  def tasklet_wrapper(*args, **kwds):
    """Call func and wrap the outcome (value or generator) in a Future."""
    # __ndb_debug__ = utils.func_info(func)
    fut = Future('tasklet %s' % utils.func_info(func))
    fut._context = get_context()
    try:
      result = func(*args, **kwds)
    except StopIteration, err:
      # Just in case the function is not a generator but still uses
      # the "raise Return(...)" idiom, we'll extract the return value.
      result = get_return_value(err)
    if is_generator(result):
      eventloop.queue_call(None, fut._help_tasklet_along, result)
    else:
      fut.set_result(result)
    return fut
  return tasklet_wrapper
def synctasklet(func):
    """Decorator to run a function as a tasklet when called.

    Use this to wrap a request handler function that will be called by
    some web application framework (e.g. a Django view function or a
    webapp.RequestHandler.get method).
    """
    @utils.wrapping(func)
    def synctasklet_wrapper(*args):
        __ndb_debug__ = utils.func_info(func)  # Aids tasklet debugging.
        return tasklet(func)(*args).get_result()
    return synctasklet_wrapper
# Environment flag set by set_context(); get_context() only trusts the
# cached module-level context when this flag is present -- presumably
# because os.environ is reset between App Engine requests (TODO confirm).
_CONTEXT_KEY = '__CONTEXT__'
# TODO: Use thread-local for this.
_context = None
def get_context():
    """Return the current default Context, creating one if needed."""
    global _context
    ctx = None
    # Only trust the cached context when the environment flag written by
    # set_context() is still present; see the note at _CONTEXT_KEY.
    if os.getenv(_CONTEXT_KEY):
        ctx = _context
    if ctx is None:
        ctx = make_default_context()
        set_context(ctx)
    return ctx
def make_default_context():
    """Create a fresh default Context instance."""
    import context  # Late import to deal with circular imports.
    return context.Context()
def set_context(new_context):
    """Install new_context as the module-level default context."""
    global _context
    # Mark the cached context as valid for this request; see get_context().
    os.environ[_CONTEXT_KEY] = '1'
    _context = new_context
# TODO: Rework the following into documentation.
# A tasklet/coroutine/generator can yield the following things:
# - Another tasklet/coroutine/generator; this is entirely equivalent to
# "for x in g: yield x"; this is handled entirely by the @tasklet wrapper.
# (Actually, not. @tasklet returns a function that when called returns
# a Future. You can use the pep380 module's @gwrap decorator to support
# yielding bare generators though.)
# - An RPC (or MultiRpc); the tasklet will be resumed when this completes.
# This does not use the RPC's callback mechanism.
# - A Future; the tasklet will be resumed when the Future is done.
# This uses the Future's callback mechanism.
# A Future can be used in several ways:
# - Yield it from a tasklet; see above.
# - Check (poll) its status via f.done.
# - Call its wait() method, perhaps indirectly via check_success()
# or get_result(). This invokes the event loop.
# - Call the Future.wait_any() or Future.wait_all() method.
# This waits for any or all Futures and RPCs in the argument list.
# XXX HIRO XXX
# - A tasklet is a (generator) function decorated with @tasklet.
# - Calling a tasklet schedules the function for execution and returns a Future.
# - A function implementing a tasklet may:
# = yield a Future; this waits for the Future which returns f.get_result();
# = yield an RPC; this waits for the RPC and then returns rpc.get_result();
# = raise Return(result); this sets the outer Future's result;
# = raise StopIteration or return; this sets the outer Future's result;
# = raise another exception: this sets the outer Future's exception.
# - If a function implementing a tasklet is not a generator it will be
# immediately executed to completion and the tasklet wrapper will
# return a Future that is already done. (XXX Alternative behavior:
# it schedules the call to be run by the event loop.)
# - Code not running in a tasklet can call f.get_result() or f.wait() on
# a future. This is implemented by a simple loop like the following:
# while not self.done:
# eventloop.run1()
# - Here eventloop.run1() runs one "atomic" part of the event loop:
# = either it calls one immediately ready callback;
# = or it waits for the first RPC to complete;
# = or it sleeps until the first callback should be ready;
# = or it raises an exception indicating all queues are empty.
# - It is possible but suboptimal to call rpc.get_result() or
# rpc.wait() directly on an RPC object since this will not allow
# other callbacks to run as they become ready. Wrapping an RPC in a
# Future will take care of this issue.
# - The important insight is that when a generator function
# implementing a tasklet yields, raises or returns, there is always a
# wrapper that catches this event and either turns it into a
# callback sent to the event loop, or sets the result or exception
# for the tasklet's Future.
| Python |
"""Unused PEP 380 emulation."""
import sys
from ndb.tasklets import is_generator, Return, get_return_value
def gwrap(func):
    """Decorator to emulate PEP 380 behavior.

    Inside a generator function wrapped in @gwrap, 'yield g', where g is
    a generator object, is equivalent to 'for x in g: yield x', except
    that 'yield g' can also return a value, and that value is whatever g
    passed as the argument to StopIteration when it stopped.

    The idea is that once PEP 380 is implemented, you can drop @gwrap,
    replace 'yield g' with 'yield from g' and 'raise Return(x)' with
    'return x', and everything will work exactly the same as before.

    NOTE: This is not quite the same as @tasklet, which offers event loop
    integration.
    """
    def gwrap_wrapper(*args, **kwds):
        """The wrapper function that is actually returned by gwrap()."""
        # Call the wrapped function.  If it is a generator function, this
        # returns a generator object.  If it raises an exception, let it
        # percolate up unchanged.
        gen = func(*args, **kwds)
        # If that didn't return a generator object, pretend it was a
        # generator that yielded no items.
        if not is_generator(gen):
            if gen is None:
                return  # Don't bother creating a Return() if it returned None.
            raise Return(gen)
        # If this is an immediately recursive call to gwrap_wrapper(),
        # yield out the generator to let the outer call handle things.
        if sys._getframe(1).f_code is sys._getframe(0).f_code:
            result = yield gen
            raise Return(result)
        # The following while loop elaborates on "for x in g: yield x":
        #
        # 1. Pass values or exceptions received from yield back into g.
        # That's just part of a truly transparent wrapper for a generator.
        #
        # 2. When x is a generator, loop over it in turn, using a stack to
        # avoid excessive recursion.  That's part of emulating PEP 380
        # so that "yield g" is interpreted as "yield from g" (which
        # roughly means "for x in g: yield x").
        #
        # 3. Pass values and exceptions up that stack.  This is where my
        # brain keeps hurting.
        to_send = None  # What to send into the top generator.
        to_throw = None  # What to throw into the top generator.
        stack = [gen]  # Stack of generators.
        while stack:
            # Throw or send something into the current generator.
            gen = stack[-1]
            try:
                if to_throw is not None:
                    gen.throw(to_throw)
                else:
                    to_yield = gen.send(to_send)
            except StopIteration, err:
                # The generator has no more items.  Pop it off the stack.
                stack.pop()
                if not stack:
                    raise  # We're done.
                # Prepare to send this value into the next generator on the stack.
                to_send = get_return_value(err)
                to_throw = None
                continue
            except Exception, err:
                # The generator raised an exception.  Pop it off the stack.
                stack.pop()
                if not stack:
                    raise  # We're done.
                # Prepare to throw this exception into the next generator on the stack.
                to_send = None
                to_throw = err
                continue
            else:
                # The generator yielded a value.
                to_throw = None
                to_send = None
                if not is_generator(to_yield):
                    # It yielded some plain value.  Yield this outwards.
                    # Whatever our yield returns or raises will be sent or thrown
                    # into the current generator.
                    # TODO: support "yield Return(...)" as an alternative for
                    # "raise Return(...)"?  Monocle users would like that.
                    try:
                        # If the yield returns a value, prepare to send that into
                        # the current generator.
                        to_send = yield to_yield
                    except (Exception, GeneratorExit), err:
                        # The yield raised an exception.  Prepare to throw it into
                        # the current generator.  (GeneratorExit sometimes inherits
                        # from BaseException, but we do want to catch it.)
                        to_throw = err
                else:
                    # It yielded another generator.  Push it onto the stack.
                    # Note that this new generator is (assumed to be) in the
                    # "initial" state for generators, meaning that it hasn't
                    # executed any code in the generator function's body yet.
                    # In this state we may only call gen.next() or gen.send(None),
                    # so it's a good thing that to_send and to_throw are None.
                    stack.append(to_yield)
    return gwrap_wrapper
def gclose(gen):
"""Substitute for gen.close() that returns a value."""
# TODO: Tweak the result of gwrap() to return an object that defines
# a close method that works this way?
assert is_generator(gen), '%r is not a generator' % g
# Throw GeneratorExit until it obeys.
while True:
try:
gen.throw(GeneratorExit)
except StopIteration, err:
return get_return_value(err)
except GeneratorExit:
return None
# Note: other exceptions are passed out untouched.
| Python |
"""Context class."""
# TODO: Handle things like request size limits. E.g. what if we've
# batched up 1000 entities to put and now the memcache call fails?
import logging
import sys
from google.appengine.api import datastore_errors
from google.appengine.api import memcache
from google.appengine.datastore import datastore_rpc
import ndb.key
from ndb import model, tasklets, eventloop, utils
class AutoBatcher(object):
    """Accumulates work items and hands them to one tasklet per batch.

    Items queued with add() are collected until the event loop runs the
    scheduled callback, which passes the whole batch to todo_tasklet.
    """

    def __init__(self, todo_tasklet):
        # todo_tasklet is a tasklet to be called with list of (future, arg) pairs.
        self._todo_tasklet = todo_tasklet
        self._todo = []  # List of (future, arg) pairs
        self._running = None  # Currently running tasklet, if any

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._todo_tasklet.__name__)

    def add(self, arg):
        """Queue arg for the next batch; returns a Future for its result."""
        fut = tasklets.Future('%s.add(%s)' % (self, arg))
        if not self._todo:  # Schedule the callback
            # We use the fact that regular tasklets are queued at time None,
            # which puts them at absolute time 0 (i.e. ASAP -- still on a
            # FIFO basis).  Callbacks explicitly scheduled with a delay of 0
            # are only run after all immediately runnable tasklets have run.
            eventloop.queue_call(0, self._autobatcher_callback)
        self._todo.append((fut, arg))
        return fut

    def _autobatcher_callback(self):
        """Event-loop callback that kicks off processing of one batch."""
        if not self._todo:
            return
        if self._running is not None:
            # Another callback may still be running.
            if not self._running.done():
                # Wait for it to complete first, then try again.
                self._running.add_callback(self._autobatcher_callback)
                return
            self._running = None
        # We cannot postpone the inevitable any longer.
        todo = self._todo
        self._todo = []  # Get ready for the next batch
        logging.info('AutoBatcher(%s): %d items',
                     self._todo_tasklet.__name__, len(todo))
        self._running = self._todo_tasklet(todo)
        # Add a callback to the Future to propagate exceptions,
        # since this Future is not normally checked otherwise.
        self._running.add_callback(self._running.check_success)

    @tasklets.tasklet
    def flush(self):
        """Tasklet that waits until all queued and running work is done."""
        while self._running or self._todo:
            if self._running:
                if self._running.done():
                    # Propagate any exception, then make room for the next batch.
                    self._running.check_success()
                    self._running = None
                else:
                    yield self._running
            else:
                self._autobatcher_callback()
# TODO: Rename?  To what?  Session???
class Context(object):
    """Per-request state: a datastore connection, caches, and batchers.

    Provides tasklet-based get/put/delete/query/transaction operations
    that share an in-memory entity cache and (optionally) memcache.
    """

    def __init__(self, conn=None, auto_batcher_class=AutoBatcher):
        if conn is None:
            conn = model.make_connection()
        self._conn = conn
        self._auto_batcher_class = auto_batcher_class
        self._get_batcher = auto_batcher_class(self._get_tasklet)
        self._put_batcher = auto_batcher_class(self._put_tasklet)
        self._delete_batcher = auto_batcher_class(self._delete_tasklet)
        self._cache = {}  # In-memory entity cache, keyed by Key.
        self._cache_policy = lambda key: True
        self._memcache_policy = lambda key: True
        # TODO: Also add a way to compute the memcache expiration time.

    @tasklets.tasklet
    def flush(self):
        """Tasklet that flushes all three auto-batchers in parallel."""
        yield (self._get_batcher.flush(),
               self._put_batcher.flush(),
               self._delete_batcher.flush())

    @tasklets.tasklet
    def _get_tasklet(self, todo):
        """Batched get: serve from memcache where possible, else datastore."""
        assert todo
        # First check memcache.
        keys = set(key for _, key in todo)
        memkeymap = dict((key, key.urlsafe())
                         for key in keys if self.should_memcache(key))
        if memkeymap:
            results = memcache.get_multi(memkeymap.values())
            leftover = []
            ## del todo[1:]  # Uncommenting this creates an interesting bug.
            for fut, key in todo:
                mkey = memkeymap[key]
                if mkey in results:
                    pb = results[mkey]
                    ent = self._conn.adapter.pb_to_entity(pb)
                    fut.set_result(ent)
                else:
                    leftover.append((fut, key))
            todo = leftover
        if todo:
            keys = [key for (_, key) in todo]
            # TODO: What if async_get() created a non-trivial MultiRpc?
            results = yield self._conn.async_get(None, keys)
            for ent, (fut, _) in zip(results, todo):
                fut.set_result(ent)

    @tasklets.tasklet
    def _put_tasklet(self, todo):
        """Batched put: write to the datastore, then update memcache."""
        assert todo
        # TODO: What if the same entity is being put twice?
        # TODO: What if two entities with the same key are being put?
        # TODO: Clear entities from memcache before starting the write?
        # TODO: Attempt to prevent dogpile effect while keeping cache consistent?
        ents = [ent for (_, ent) in todo]
        results = yield self._conn.async_put(None, ents)
        for key, (fut, ent) in zip(results, todo):
            if key != ent.key:
                # The datastore assigned an id; reject a mismatch on an
                # already-complete key, otherwise adopt the returned key.
                if ent.has_complete_key():
                    raise datastore_errors.BadKeyError(
                        'Entity key differs from the one returned by the datastore. '
                        'Expected %r, got %r' % (key, ent.key))
                ent.key = key
            fut.set_result(key)
        # Now update memcache.
        # TODO: Could we update memcache *before* calling async_put()?
        # (Hm, not for new entities but possibly for updated ones.)
        mapping = {}
        for _, ent in todo:
            if self.should_memcache(ent.key):
                pb = self._conn.adapter.entity_to_pb(ent)
                mapping[ent.key.urlsafe()] = pb
        if mapping:
            # TODO: Optionally set the memcache expiration time;
            # maybe configurable based on key (or even entity).
            failures = memcache.set_multi(mapping)
            if failures:
                badkeys = []
                for failure in failures:
                    badkeys.append(mapping[failure].key)
                logging.info('memcache failed to set %d out of %d keys: %s',
                             len(failures), len(mapping), badkeys)

    @tasklets.tasklet
    def _delete_tasklet(self, todo):
        """Batched delete: remove from the datastore, then from memcache."""
        assert todo
        keys = set(key for (_, key) in todo)
        yield self._conn.async_delete(None, keys)
        for fut, _ in todo:
            fut.set_result(None)
        # Now update memcache.
        memkeys = [key.urlsafe() for key in keys if self.should_memcache(key)]
        if memkeys:
            memcache.delete_multi(memkeys)
            # The value returned by delete_multi() is pretty much useless, it
            # could be the keys were never cached in the first place.

    def get_cache_policy(self):
        """Returns the current context cache policy.

        Returns:
          A function that accepts a Key instance as argument and returns
          a boolean indicating if it should be cached.
        """
        return self._cache_policy

    def set_cache_policy(self, func):
        """Sets the context cache policy.

        Args:
          func: A function that accepts a Key instance as argument and returns
            a boolean indicating if it should be cached.
        """
        self._cache_policy = func

    def should_cache(self, key):
        """Return whether to use the context cache for this key.

        Args:
          key: Key instance.

        Returns:
          True if the key should be cached, False otherwise.
        """
        return self._cache_policy(key)

    def get_memcache_policy(self):
        """Returns the current memcache policy.

        Returns:
          A function that accepts a Key instance as argument and returns
          a boolean indicating if it should be cached.
        """
        return self._memcache_policy

    def set_memcache_policy(self, func):
        """Sets the memcache policy.

        Args:
          func: A function that accepts a Key instance as argument and returns
            a boolean indicating if it should be cached.
        """
        self._memcache_policy = func

    def should_memcache(self, key):
        """Return whether to use memcache for this key.

        Args:
          key: Key instance.

        Returns:
          True if the key should be cached, False otherwise.
        """
        return self._memcache_policy(key)

    # TODO: What about conflicting requests to different autobatchers,
    # e.g. tasklet A calls get() on a given key while tasklet B calls
    # delete()?  The outcome is nondeterministic, depending on which
    # autobatcher gets run first.  Maybe we should just flag such
    # conflicts as errors, with an overridable policy to resolve them
    # differently?

    @tasklets.tasklet
    def get(self, key):
        """Returns a Model instance given the entity key.

        It will use the context cache if the cache policy for the given
        key is enabled.

        Args:
          key: Key instance.

        Returns:
          A Model instance if the key exists in the datastore; None otherwise.
        """
        should_cache = self.should_cache(key)
        if should_cache and key in self._cache:
            entity = self._cache[key]  # May be None, meaning "doesn't exist".
        else:
            entity = yield self._get_batcher.add(key)
            if should_cache:
                self._cache[key] = entity
        raise tasklets.Return(entity)

    @tasklets.tasklet
    def put(self, entity):
        """Write entity (batched); returns its (possibly new) key."""
        key = yield self._put_batcher.add(entity)
        if entity.key != key:
            logging.info('replacing key %s with %s', entity.key, key)
            entity.key = key
        # TODO: For updated entities, could we update the cache first?
        if self.should_cache(key):
            # TODO: What if by now the entity is already in the cache?
            self._cache[key] = entity
        raise tasklets.Return(key)

    @tasklets.tasklet
    def delete(self, key):
        """Delete the entity for key (batched); caches the non-existence."""
        yield self._delete_batcher.add(key)
        if key in self._cache:
            self._cache[key] = None

    @tasklets.tasklet
    def allocate_ids(self, key, size=None, max=None):
        """Allocate a range of ids; returns a (lo, hi) pair."""
        lo_hi = yield self._conn.async_allocate_ids(None, key, size, max)
        raise tasklets.Return(lo_hi)

    @datastore_rpc._positional(3)
    def map_query(self, query, callback, options=None, merge_future=None):
        """Run query, calling callback for each result; returns a Future.

        The returned future (a MultiFuture unless merge_future is given)
        produces the callback results, or the entities themselves when
        callback is None.
        """
        mfut = merge_future
        if mfut is None:
            mfut = tasklets.MultiFuture('map_query')

        @tasklets.tasklet
        def helper():
            """Inner tasklet draining the query result queue into mfut."""
            inq = tasklets.SerialQueueFuture()
            query.run_to_queue(inq, self._conn, options)
            is_ancestor_query = query.ancestor is not None
            while True:
                try:
                    ent = yield inq.getq()
                except EOFError:
                    break
                if isinstance(ent, model.Key):
                    pass  # It was a keys-only query and ent is really a Key.
                else:
                    key = ent.key
                    if key in self._cache:
                        # Assume the cache is more up to date.
                        if self._cache[key] is None:
                            # This is a weird case.  Apparently this entity was
                            # deleted concurrently with the query.  Let's just
                            # pretend the delete happened first.
                            logging.info('Conflict: entity %s was deleted', key)
                            continue
                        # Replace the entity the callback will see with the one
                        # from the cache.
                        if ent != self._cache[key]:
                            logging.info('Conflict: entity %s was modified', key)
                            ent = self._cache[key]
                    else:
                        if is_ancestor_query and self.should_cache(key):
                            self._cache[key] = ent
                if callback is None:
                    val = ent
                else:
                    val = callback(ent)  # TODO: If this raises, log and ignore
                mfut.putq(val)
            mfut.complete()

        helper()
        return mfut

    @datastore_rpc._positional(2)
    def iter_query(self, query, options=None):
        """Return a future-based iterator over the query results."""
        return self.map_query(query, callback=None, options=options,
                              merge_future=tasklets.SerialQueueFuture())

    @tasklets.tasklet
    def transaction(self, callback, retry=3, entity_group=None):
        # Will invoke callback() one or more times with the default
        # context set to a new, transactional Context.  Returns a Future.
        # Callback may be a tasklet.
        if entity_group is not None:
            app = entity_group.app()
        else:
            app = ndb.key._DefaultAppId()
        yield self.flush()
        for i in range(1 + max(0, retry)):
            transaction = yield self._conn.async_begin_transaction(None, app)
            tconn = datastore_rpc.TransactionalConnection(
                adapter=self._conn.adapter,
                config=self._conn.config,
                transaction=transaction,
                entity_group=entity_group)
            # The transactional context skips memcache entirely.
            tctx = self.__class__(conn=tconn,
                                  auto_batcher_class=self._auto_batcher_class)
            tctx.set_memcache_policy(lambda key: False)
            tasklets.set_context(tctx)
            try:
                try:
                    result = callback()
                    if isinstance(result, tasklets.Future):
                        result = yield result
                finally:
                    yield tctx.flush()
            except Exception, err:
                t, e, tb = sys.exc_info()
                yield tconn.async_rollback(None)  # TODO: Don't block???
                raise t, e, tb
            else:
                ok = yield tconn.async_commit(None)
                if ok:
                    # TODO: This is questionable when self is transactional.
                    self._cache.update(tctx._cache)
                    self._flush_memcache(tctx._cache)
                    raise tasklets.Return(result)
        # Out of retries
        raise datastore_errors.TransactionFailedError(
            'The transaction could not be committed. Please try again.')

    def flush_cache(self):
        """Clears the in-memory cache.

        NOTE: This does not affect memcache.
        """
        self._cache.clear()

    def _flush_memcache(self, keys):
        """Evict the memcache entries for those keys subject to memcaching."""
        keys = set(key for key in keys if self.should_memcache(key))
        if keys:
            memkeys = [key.urlsafe() for key in keys]
            memcache.delete_multi(memkeys)

    @tasklets.tasklet
    def get_or_insert(self, model_class, name,
                      app=None, namespace=None, parent=None,
                      **kwds):
        """Get the entity with the given name, creating it transactionally
        if it does not exist yet."""
        # TODO: Test the heck out of this, in all sorts of evil scenarios.
        assert isinstance(name, basestring) and name
        key = model.Key(model_class, name,
                        app=app, namespace=namespace, parent=parent)
        # TODO: Can (and should) the cache be trusted here?
        ent = yield self.get(key)
        if ent is None:
            @tasklets.tasklet
            def txn():
                # Re-check inside the transaction to avoid a race.
                ent = yield key.get_async()
                if ent is None:
                    ent = model_class(**kwds)  # TODO: Check for forbidden keys
                    ent.key = key
                    yield ent.put_async()
                raise tasklets.Return(ent)
            ent = yield self.transaction(txn)
        raise tasklets.Return(ent)
def toplevel(func):
    """A sync tasklet that sets a fresh default Context.

    Use this for toplevel view functions such as
    webapp.RequestHandler.get() or Django view functions.
    """
    @utils.wrapping(func)
    def add_context_wrapper(*args):
        """Run func as a sync tasklet in a brand-new default context."""
        __ndb_debug__ = utils.func_info(func)
        tasklets.Future.clear_all_pending()
        # Reset context; a new one will be created on the first call to
        # get_context().
        tasklets.set_context(None)
        # Fixed: the result was bound to an unused local; only the call's
        # side effect (creating and installing a context) is needed.
        tasklets.get_context()
        try:
            return tasklets.synctasklet(func)(*args)
        finally:
            eventloop.run()  # Ensure writes are flushed, etc.
    return add_context_wrapper
# Transaction API using the default context.

def transaction(callback):
    """Run callback in a transaction; blocks and returns its result."""
    return transaction_async(callback).get_result()
def transaction_async(callback):
    """Run callback in a transaction on the default context; returns a Future."""
    return tasklets.get_context().transaction(callback)
| Python |
"""Higher-level Query wrapper.
There are perhaps too many query APIs in the world.
The fundamental API here overloads the 6 comparisons operators to
represent filters on property values, and supports AND and OR
operations (implemented as functions -- Python's 'and' and 'or'
operators cannot be overloaded, and the '&' and '|' operators have a
priority that conflicts with the priority of comparison operators).
For example:
class Employee(Model):
name = StringProperty()
age = IntegerProperty()
rank = IntegerProperty()
@classmethod
def demographic(cls, min_age, max_age):
return cls.query().filter(AND(cls.age >= min_age, cls.age <= max_age))
@classmethod
def ranked(cls, rank):
return cls.query(cls.rank == rank).order(cls.age)
for emp in Employee.demographic(42, 50):
print emp.name, emp.age, emp.rank
The 'in' operator cannot be overloaded, but is supported through the
IN() method. For example:
Employee.query().filter(Employee.rank.IN([4, 5, 6]))
Sort orders are supported through the order() method; unary minus is
overloaded on the Property class to represent a descending order:
Employee.query().order(Employee.name, -Employee.age)
Besides using AND() and OR(), filters can also be combined by
repeatedly calling .filter():
q1 = Employee.query() # A query that returns all employees
q2 = q1.filter(Employee.age >= 30) # Only those over 30
q3 = q2.filter(Employee.age < 40) # Only those in their 30s
Query objects are immutable, so these methods always return a new
Query object; the above calls to filter() do not affect q1.
Sort orders can also be combined this way, and .filter() and .order()
calls may be intermixed:
q4 = q3.order(-Employee.age)
q5 = q4.order(Employee.name)
q6 = q5.filter(Employee.rank == 5)
The simplest way to retrieve Query results is a for-loop:
for emp in q3:
print emp.name, emp.age
Some other operations:
q.map(callback) # Call the callback function for each query result
q.fetch(N) # Return a list of the first N results
q.count(N) # Return the number of results, with a maximum of N
These have asynchronous variants as well, which return a Future; to
get the operation's ultimate result, yield the Future (when inside a
tasklet) or call the Future's get_result() method (outside a tasklet):
q.map_async(callback) # Callback may be a task or a plain function
q.fetch_async(N)
q.count_async(N)
Finally, there's an idiom to efficiently loop over the Query results
in a tasklet, properly yielding when appropriate:
it = iter(q)
while (yield it.has_next_async()):
emp = it.next()
print emp.name, emp.age
"""
__author__ = 'guido@google.com (Guido van Rossum)'
import heapq
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import gql
from ndb import context
from ndb import model
from ndb import tasklets
__all__ = ['Binding', 'AND', 'OR', 'parse_gql', 'Query', 'QueryOptions']

QueryOptions = datastore_query.QueryOptions  # For export.

# TODO: Make these protected.
ASC = datastore_query.PropertyOrder.ASCENDING
DESC = datastore_query.PropertyOrder.DESCENDING

_AND = datastore_query.CompositeFilter.AND

# Maps property comparison-method suffixes to GQL operator symbols.
_OPS = {
    '__eq': '=',
    '__ne': '!=',
    '__lt': '<',
    '__le': '<=',
    '__gt': '>',
    '__ge': '>=',
    '__in': 'in',
}
class Binding(object):
    """A late-bound query parameter (a GQL ':1' or ':name' reference)."""

    def __init__(self, value=None, key=None):
        self.value = value
        self.key = key

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.value, self.key)

    def __eq__(self, other):
        if isinstance(other, Binding):
            return (self.value, self.key) == (other.value, other.key)
        return NotImplemented

    def resolve(self):
        """Return the bound value; the value must not itself be a Binding."""
        assert not isinstance(self.value, Binding)
        return self.value
class Node(object):
    """Abstract base class for filter expression tree nodes.

    Leaves are FilterNode/PostFilterNode; combinations are
    ConjunctionNode (AND) and DisjunctionNode (OR).  Nodes compare for
    equality but are deliberately unordered.
    """

    def __new__(cls):
        # Fixed: the guard was the vacuous 'cls is not None'; Node itself
        # is abstract and must not be instantiated directly (subclasses
        # that need to bypass this call super(Node, cls).__new__).
        assert cls is not Node, 'Cannot instantiate Node, only a subclass.'
        return super(Node, cls).__new__(cls)

    def __eq__(self, other):
        # Subclasses must override; the base compares equal to nothing.
        return NotImplemented

    def __ne__(self, other):
        eq = self.__eq__(other)
        # Fixed: the condition was inverted ('if eq is NotImplemented'),
        # which returned __eq__'s answer un-negated for real comparisons.
        # Only a real boolean answer is negated; NotImplemented passes
        # through so Python can try the reflected operation.
        if eq is not NotImplemented:
            eq = not eq
        return eq

    def __unordered(self, other):
        raise TypeError('Nodes cannot be ordered')
    __le__ = __lt__ = __ge__ = __gt__ = __unordered

    def _to_filter(self, bindings):
        raise NotImplementedError

    def _post_filters(self):
        # By default a node contributes no in-memory post-filter.
        return None

    def apply(self, entity):
        # By default a node accepts every entity.
        return True

    def resolve(self):
        raise NotImplementedError
class FalseNode(Node):
    """Tree node for an always-false filter (e.g. 'prop IN []')."""

    def __new__(cls):
        # Deliberately bypass Node.__new__ by delegating straight past it.
        return super(Node, cls).__new__(cls)

    def __eq__(self, other):
        # Fixed: 'isinstance' was misspelled 'isinstane', so any equality
        # comparison raised NameError instead of returning a result.
        if not isinstance(other, FalseNode):
            return NotImplemented
        return True

    def _to_filter(self, bindings):
        # TODO: Or use make_filter(name, '=', []) ?
        raise ValueError('Cannot convert FalseNode to predicate')

    def resolve(self):
        return self
class FilterNode(Node):
    """Tree node for a single comparison filter: (name opsymbol value).

    __new__ normalizes: '!=' becomes an OR of '<' and '>', and 'in'
    (with a concrete sequence) becomes an OR of '=' tests, so instances
    only ever hold datastore-native operators (or 'in' with a Binding).
    """

    def __new__(cls, name, opsymbol, value):
        if opsymbol == '!=':
            # a != v  is rewritten as  a < v OR a > v.
            n1 = FilterNode(name, '<', value)
            n2 = FilterNode(name, '>', value)
            return DisjunctionNode([n1, n2])
        if opsymbol == 'in' and not isinstance(value, Binding):
            assert isinstance(value, (list, tuple, set, frozenset)), value
            nodes = [FilterNode(name, '=', v) for v in value]
            if not nodes:
                return FalseNode()
            if len(nodes) == 1:
                return nodes[0]
            return DisjunctionNode(nodes)
        self = super(FilterNode, cls).__new__(cls)
        self.__name = name
        self.__opsymbol = opsymbol
        self.__value = value
        return self

    def _sort_key(self):
        return (self.__name, self.__opsymbol, self.__value)

    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__,
                                   self.__name, self.__opsymbol, self.__value)

    def __eq__(self, other):
        if not isinstance(other, FilterNode):
            return NotImplemented
        return (self.__name == other.__name and
                self.__opsymbol == other.__opsymbol and
                self.__value == other.__value)

    def __lt__(self, other):
        # Unlike the Node base class, FilterNodes are orderable so that
        # filter lists can be sorted for predictable output.
        if not isinstance(other, FilterNode):
            return NotImplemented
        return self._sort_key() < other._sort_key()

    def _to_filter(self, bindings):
        """Convert to a datastore_query filter, recording any Binding used."""
        assert self.__opsymbol not in ('!=', 'in'), self.__opsymbol
        value = self.__value
        if isinstance(value, Binding):
            bindings[value.key] = value
            value = value.resolve()
        return datastore_query.make_filter(self.__name, self.__opsymbol, value)

    def resolve(self):
        if self.__opsymbol == 'in':
            # Only 'in' with a Binding survives __new__; resolving it
            # re-runs the normalization with the bound concrete sequence.
            assert isinstance(self.__value, Binding)
            return FilterNode(self.__name, self.__opsymbol, self.__value.resolve())
        else:
            return self
class PostFilterNode(Node):
    """Tree node for a predicate evaluated in memory after the query runs."""

    def __new__(cls, filter_func, filter_arg):
        self = super(PostFilterNode, cls).__new__(cls)
        self.filter_func = filter_func
        self.filter_arg = filter_arg
        return self

    def __eq__(self, other):
        # Post-filters wrap arbitrary callables; only identity is meaningful.
        if isinstance(other, PostFilterNode):
            return self is other
        return NotImplemented

    def apply(self, entity):
        return self.filter_func(self.filter_arg, entity)

    def _to_filter(self, bindings):
        # Post-filters have no datastore representation.
        return None

    def resolve(self):
        return self
class ConjunctionNode(Node):
    """Tree node for a logical AND of filter nodes.

    The constructor normalizes its input with the distributive law, so
    the result is either a flat conjunction of leaves or a disjunction
    of such conjunctions (possibly collapsing to a single node).
    """
    # AND

    def __new__(cls, nodes):
        assert nodes
        if len(nodes) == 1:
            return nodes[0]
        clauses = [[]]  # Outer: Disjunction; inner: Conjunction.
        # TODO: Remove duplicates?
        for node in nodes:
            assert isinstance(node, Node), node
            if isinstance(node, DisjunctionNode):
                # Apply the distributive law: (X or Y) and (A or B) becomes
                # (X and A) or (X and B) or (Y and A) or (Y and B).
                new_clauses = []
                for clause in clauses:
                    for subnode in node:
                        new_clause = clause + [subnode]
                        new_clauses.append(new_clause)
                clauses = new_clauses
            elif isinstance(node, ConjunctionNode):
                # Apply half of the distributive law: (X or Y) and A becomes
                # (X and A) or (Y and A).
                for clause in clauses:
                    clause.extend(node.__nodes)
            else:
                # Ditto.
                for clause in clauses:
                    clause.append(node)
        if not clauses:
            return FalseNode()
        if len(clauses) > 1:
            return DisjunctionNode([ConjunctionNode(clause) for clause in clauses])
        self = super(ConjunctionNode, cls).__new__(cls)
        self.__nodes = clauses[0]
        return self

    def __iter__(self):
        return iter(self.__nodes)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.__nodes)

    def __eq__(self, other):
        if not isinstance(other, ConjunctionNode):
            return NotImplemented
        return self.__nodes == other.__nodes

    def _to_filter(self, bindings):
        # PostFilterNodes contribute None and are dropped by filter().
        filters = filter(None,
                         (node._to_filter(bindings) for node in self.__nodes))
        return datastore_query.CompositeFilter(_AND, filters)

    def _post_filters(self):
        """Return the subset of this conjunction that must run in memory."""
        post_filters = [node for node in self.__nodes
                        if isinstance(node, PostFilterNode)]
        if not post_filters:
            return None
        if len(post_filters) == 1:
            return post_filters[0]
        if post_filters == self.__nodes:
            return self
        return ConjunctionNode(post_filters)

    def apply(self, entity):
        for node in self.__nodes:
            if not node.apply(entity):
                return False
        return True

    def resolve(self):
        nodes = [node.resolve() for node in self.__nodes]
        if nodes == self.__nodes:
            return self
        return ConjunctionNode(nodes)
class DisjunctionNode(Node):
    """Tree node for a logical OR of filter nodes.

    Nested disjunctions are flattened by the constructor.
    """
    # OR

    def __new__(cls, nodes):
        assert nodes
        if len(nodes) == 1:
            return nodes[0]
        self = super(DisjunctionNode, cls).__new__(cls)
        self.__nodes = []
        # TODO: Remove duplicates?
        for node in nodes:
            assert isinstance(node, Node), node
            if isinstance(node, DisjunctionNode):
                # Flatten nested ORs into this one.
                self.__nodes.extend(node.__nodes)
            else:
                self.__nodes.append(node)
        return self

    def __iter__(self):
        return iter(self.__nodes)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.__nodes)

    def __eq__(self, other):
        if not isinstance(other, DisjunctionNode):
            return NotImplemented
        return self.__nodes == other.__nodes

    def resolve(self):
        nodes = [node.resolve() for node in self.__nodes]
        if nodes == self.__nodes:
            return self
        return DisjunctionNode(nodes)
# TODO: Change ConjunctionNode and DisjunctionNode signatures so that
# AND and OR can just be aliases for them -- or possibly even rename.
def AND(*args):
    """Combine one or more filter Nodes with a logical AND."""
    assert args
    assert all(isinstance(arg, Node) for arg in args)
    if len(args) > 1:
        return ConjunctionNode(args)
    return args[0]
def OR(*args):
    """Combine one or more filter Nodes with a logical OR.

    Fixed: the sanity check called isinstance(Node, arg) -- arguments
    swapped -- which raised TypeError for every Node instance passed.
    """
    assert args
    assert all(isinstance(arg, Node) for arg in args)
    if len(args) == 1:
        return args[0]
    return DisjunctionNode(args)
def _args_to_val(func, args, bindings):
    """Convert a parsed GQL (func, args) pair into a value, Binding, or Key.

    Integer/string args are parameter references and become Binding
    instances (shared per parameter via the bindings dict); gql.Literal
    args are unwrapped to their value.
    """
    vals = []
    for arg in args:
        if isinstance(arg, (int, long, basestring)):
            # A numbered (:1) or named (:foo) parameter reference; reuse
            # one Binding per parameter across the whole query.
            if arg in bindings:
                val = bindings[arg]
            else:
                val = Binding(None, arg)
                bindings[arg] = val
        elif isinstance(arg, gql.Literal):
            val = arg.Get()
        else:
            assert False, 'Unexpected arg (%r)' % arg
        vals.append(val)
    if func == 'nop':
        assert len(vals) == 1
        return vals[0]
    if func == 'list':
        return vals
    if func == 'key':
        if len(vals) == 1 and isinstance(vals[0], basestring):
            return model.Key(urlsafe=vals[0])
        assert False, 'Unexpected key args (%r)' % (vals,)
    assert False, 'Unexpected func (%r)' % func
# TODO: Not everybody likes GQL.
# TODO: GQL doesn't support querying for structured property values.
def parse_gql(query_string):
  """Parse a GQL query string.

  Args:
    query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'.

  Returns:
    A tuple (query, options, bindings) where query is a Query instance,
    options a datastore_query.QueryOptions instance, and bindings a dict
    mapping integers and strings to Binding instances.
  """
  gql_qry = gql.GQL(query_string)
  ancestor = None
  flt = gql_qry.filters()
  bindings = {}
  filters = []
  for ((name, op), values) in flt.iteritems():
    op = op.lower()
    # gql.GQL._GQL__ANCESTOR is the (name-mangled) marker gql uses for the
    # special 'ANCESTOR IS <key>' clause; it becomes the query's ancestor
    # rather than an ordinary property filter.
    if op == 'is' and name == gql.GQL._GQL__ANCESTOR:
      assert len(values) == 1
      [(func, args)] = values
      ancestor = _args_to_val(func, args, bindings)
      continue
    assert op in _OPS.values()
    for (func, args) in values:
      val = _args_to_val(func, args, bindings)
      filters.append(FilterNode(name, op, val))
  if filters:
    filters.sort()  # For predictable tests.
    filters = ConjunctionNode(filters)
  else:
    filters = None
  orderings = gql_qry.orderings()
  orders = []
  for (name, direction) in orderings:
    orders.append(datastore_query.PropertyOrder(name, direction))
  # Collapse zero/one/many orders into None / a single order / a composite.
  if not orders:
    orders = None
  elif len(orders) == 1:
    orders = orders[0]
  else:
    orders = datastore_query.CompositeOrder(orders)
  qry = Query(kind=gql_qry._entity,
              ancestor=ancestor,
              filters=filters,
              orders=orders)
  # gql uses negative values to mean "not specified".
  offset = gql_qry.offset()
  if offset < 0:
    offset = None
  limit = gql_qry.limit()
  if limit < 0:
    limit = None
  options = QueryOptions(offset=offset, limit=limit)
  return qry, options, bindings
class Query(object):
  """A datastore query, integrated with NDB's tasklet framework.

  Immutable: filter() and order() return new Query instances.  Holds a
  kind (string), an optional ancestor (Key or Binding), an optional
  filter tree (Node subclass) and optional orders
  (datastore_query.Order).
  """

  @datastore_rpc._positional(1)
  def __init__(self, kind=None, ancestor=None, filters=None, orders=None):
    if ancestor is not None and not isinstance(ancestor, Binding):
      # An ancestor key must be complete (its last id/name must be set).
      lastid = ancestor.pairs()[-1][1]
      assert lastid, 'ancestor cannot be an incomplete key'
    if filters is not None:
      assert isinstance(filters, Node)
    if orders is not None:
      assert isinstance(orders, datastore_query.Order)
    self.__kind = kind  # String
    self.__ancestor = ancestor  # Key
    self.__filters = filters  # None or Node subclass
    self.__orders = orders  # None or datastore_query.Order instance

  # TODO: __repr__().

  def _get_query(self, connection):
    """Build the low-level query.

    Args:
      connection: A datastore connection (provides the adapter).

    Returns:
      A tuple (dsqry, post_filters) where dsqry is a
      datastore_query.Query and post_filters is a filter Node to be
      applied in-memory after the RPC (or None).
    """
    kind = self.__kind
    ancestor = self.__ancestor
    bindings = {}
    if isinstance(ancestor, Binding):
      bindings[ancestor.key] = ancestor
      ancestor = ancestor.resolve()
    if ancestor is not None:
      ancestor = connection.adapter.key_to_pb(ancestor)
    filters = self.__filters
    post_filters = None
    if filters is not None:
      post_filters = filters._post_filters()
      filters = filters._to_filter(bindings)
    dsqry = datastore_query.Query(kind=kind,
                                  ancestor=ancestor,
                                  filter_predicate=filters,
                                  order=self.__orders)
    return dsqry, post_filters

  @tasklets.tasklet
  def run_to_queue(self, queue, conn, options=None):
    """Run this query, putting entities into the given queue."""
    multiquery = self._maybe_multi_query()
    if multiquery is not None:
      multiquery.run_to_queue(queue, conn, options=options)  # No return value.
      return
    dsqry, post_filters = self._get_query(conn)
    orig_options = options
    if (post_filters and options is not None and
        (options.offset or options.limit is not None)):
      # Post-filters must see every result, so strip offset/limit from the
      # RPC options and apply them ourselves below.
      options = datastore_query.QueryOptions(offset=None, limit=None,
                                             config=orig_options)
      # Bug fix: this previously asserted options.limit twice and never
      # checked options.offset.
      assert options.offset is None and options.limit is None
    rpc = dsqry.run_async(conn, options)
    skipped = 0
    count = 0
    while rpc is not None:
      batch = yield rpc
      rpc = batch.next_batch_async(options)
      for ent in batch.results:
        if post_filters:
          if not post_filters.apply(ent):
            continue
          if orig_options is not options:
            # Apply offset/limit manually since the RPC didn't.
            if orig_options.offset and skipped < orig_options.offset:
              skipped += 1
              continue
            if orig_options.limit is not None and count >= orig_options.limit:
              rpc = None  # Quietly throw away the next batch.
              break
            count += 1
        queue.putq(ent)
    queue.complete()

  def _maybe_multi_query(self):
    """Return a MultiQuery if the filters resolve to an OR, else None."""
    filters = self.__filters
    if filters is not None:
      filters = filters.resolve()
      if isinstance(filters, DisjunctionNode):
        # Switch to a MultiQuery: one subquery per OR branch.
        subqueries = []
        for subfilter in filters:
          subquery = Query(kind=self.__kind, ancestor=self.__ancestor,
                           filters=subfilter, orders=self.__orders)
          subqueries.append(subquery)
        return MultiQuery(subqueries, orders=self.__orders)
    return None

  @property
  def kind(self):
    return self.__kind

  @property
  def ancestor(self):
    return self.__ancestor

  @property
  def filters(self):
    return self.__filters

  @property
  def orders(self):
    return self.__orders

  def filter(self, *args):
    """Return a new Query with the given filter Nodes ANDed in."""
    if not args:
      return self
    preds = []
    f = self.filters
    if f:
      preds.append(f)
    for arg in args:
      assert isinstance(arg, Node)
      preds.append(arg)
    if not preds:
      pred = None
    elif len(preds) == 1:
      pred = preds[0]
    else:
      pred = ConjunctionNode(preds)
    return self.__class__(kind=self.kind, ancestor=self.ancestor,
                          orders=self.orders, filters=pred)

  # TODO: Change this to .order(<property>, -<property>, ...).
  def order(self, *args):
    """Return a new Query with the given sort orders appended."""
    # q.order(Employee.name, -Employee.age)
    if not args:
      return self
    orders = []
    o = self.__orders
    if o:
      orders.append(o)
    for arg in args:
      if isinstance(arg, model.Property):
        orders.append(datastore_query.PropertyOrder(arg._name, ASC))
      elif isinstance(arg, datastore_query.Order):
        orders.append(arg)
      else:
        assert False, arg
    if not orders:
      orders = None
    elif len(orders) == 1:
      orders = orders[0]
    else:
      orders = datastore_query.CompositeOrder(orders)
    return self.__class__(kind=self.kind, ancestor=self.ancestor,
                          filters=self.filters, orders=orders)

  # Datastore API using the default context.

  def iter(self, options=None):
    """Return a QueryIterator over this query's results."""
    return QueryIterator(self, options=options)

  __iter__ = iter

  # TODO: support the rest for MultiQuery.

  def map(self, callback, options=None, merge_future=None):
    """Apply callback to each result; blocks until done."""
    return self.map_async(callback, options=options,
                          merge_future=merge_future).get_result()

  def map_async(self, callback, options=None, merge_future=None):
    """Async version of map(); returns a Future."""
    return tasklets.get_context().map_query(self, callback,
                                            options=options,
                                            merge_future=merge_future)

  def fetch(self, limit, offset=0, options=None):
    """Fetch up to limit results, skipping offset; blocks until done."""
    return self.fetch_async(limit, offset, options=options).get_result()

  @tasklets.tasklet
  def fetch_async(self, limit, offset=0, options=None):
    """Async version of fetch(); yields a list of entities."""
    # Size prefetch/batch to the limit so a single round-trip suffices.
    options = QueryOptions(limit=limit,
                           prefetch_size=limit,
                           batch_size=limit,
                           offset=offset,
                           config=options)
    res = []
    it = self.iter(options)
    while (yield it.has_next_async()):
      res.append(it.next())
    raise tasklets.Return(res)

  def get(self, options=None):
    """Return the first result, or None; blocks until done."""
    return self.get_async(options=options).get_result()

  @tasklets.tasklet
  def get_async(self, options=None):
    """Async version of get()."""
    res = yield self.fetch_async(1, options=options)
    if not res:
      raise tasklets.Return(None)
    raise tasklets.Return(res[0])

  def count(self, limit, options=None):
    """Count results, up to limit; blocks until done."""
    return self.count_async(limit, options=options).get_result()

  @tasklets.tasklet
  def count_async(self, limit, options=None):
    """Async version of count().

    Uses the offset=N, limit=0 trick: the backend reports how many
    results it skipped, which equals the count (capped at limit).
    """
    conn = tasklets.get_context()._conn
    options = QueryOptions(offset=limit, limit=0, config=options)
    dsqry, post_filters = self._get_query(conn)
    if post_filters:
      raise datastore_errors.BadQueryError(
        'Post-filters are not supported for count().')
    rpc = dsqry.run_async(conn, options)
    total = 0
    while rpc is not None:
      batch = yield rpc
      rpc = batch.next_batch_async(options)
      total += batch.skipped_results
    raise tasklets.Return(total)
class QueryIterator(object):
  """Iterator over query results, usable both synchronously and async.

  Synchronous callers simply iterate:
    for entity in Account.query():
      <use entity>

  Async callers use:
    it = iter(Account.query())
    while (yield it.has_next_async()):
      entity = it.next()
      <use entity>
  """

  def __init__(self, query, options=None):
    # Delegate the actual iteration to the current context.
    self._iter = tasklets.get_context().iter_query(query, options=options)
    self._fut = None  # Future for the next result, fetched lazily.

  def __iter__(self):
    return self

  def has_next(self):
    """Blocking version of has_next_async()."""
    return self.has_next_async().get_result()

  @tasklets.tasklet
  def has_next_async(self):
    """Yield True if another result is available, False at end of query."""
    if self._fut is None:
      self._fut = self._iter.getq()
    more = True
    try:
      yield self._fut
    except EOFError:
      more = False
    raise tasklets.Return(more)

  def next(self):
    """Return the next entity, raising StopIteration at end of query."""
    if self._fut is None:
      self._fut = self._iter.getq()
    fut, self._fut = self._fut, None  # Always consume the pending future.
    try:
      return fut.get_result()
    except EOFError:
      raise StopIteration
class _SubQueryIteratorState(object):
  # Helper class for MultiQuery.
  """Holds one subquery's front entity and iterator, ordered for heap-merge."""
  def __init__(self, entity, iterator, orderings):
    self.entity = entity  # The current (front) entity from this subquery.
    self.iterator = iterator  # Queue future producing further entities.
    self.orderings = orderings  # List of (property_name, direction) tuples.
  def __cmp__(self, other):
    """Compare by the shared orderings, then by key, so heapq pops the
    smallest entity first."""
    assert isinstance(other, _SubQueryIteratorState)
    assert self.orderings == other.orderings
    our_entity = self.entity
    their_entity = other.entity
    # TODO: Renamed properties again.
    if self.orderings:
      for propname, direction in self.orderings:
        our_value = getattr(our_entity, propname, None)
        their_value = getattr(their_entity, propname, None)
        # NOTE: Repeated properties sort by lowest value when in
        # ascending order and highest value when in descending order.
        # TODO: Use min_max_value_cache as datastore.py does?
        if direction == ASC:
          func = min
        else:
          func = max
        if isinstance(our_value, list):
          our_value = func(our_value)
        if isinstance(their_value, list):
          their_value = func(their_value)
        flag = cmp(our_value, their_value)
        if direction == DESC:
          # Invert the comparison for descending order.
          flag = -flag
        if flag:
          return flag
    # All considered properties are equal; compare by key (ascending).
    # TODO: Comparison between ints and strings is arbitrary.
    return cmp(our_entity.key.pairs(), their_entity.key.pairs())
class MultiQuery(object):
  # This is not created by the user directly, but implicitly by using
  # a where() call with an __in or __ne operator. In the future
  # or_where() can also use this. Note that some options must be
  # interpreted by MultiQuery instead of passed to the underlying
  # Queries' methods, e.g. offset (though not necessarily limit, and
  # I'm not sure about cursors).
  """Merge-sorts the results of several subqueries (used for OR filters)."""
  def __init__(self, subqueries, orders=None):
    assert isinstance(subqueries, list), subqueries
    assert all(isinstance(subq, Query) for subq in subqueries), subqueries
    if orders is not None:
      assert isinstance(orders, datastore_query.Order)
    self.__subqueries = subqueries
    self.__orders = orders
    self.ancestor = None  # Hack for map_query().
  @tasklets.tasklet
  def run_to_queue(self, queue, conn, options=None):
    """Run this query, putting entities into the given queue."""
    # Create a list of (first-entity, subquery-iterator) tuples.
    # TODO: Use the specified sort order.
    assert options is None  # Don't know what to do with these yet.
    state = []
    orderings = orders_to_orderings(self.__orders)
    for subq in self.__subqueries:
      subit = tasklets.SerialQueueFuture('MultiQuery.run_to_queue')
      subq.run_to_queue(subit, conn)
      try:
        ent = yield subit.getq()
      except EOFError:
        # This subquery produced no results at all; drop it.
        continue
      else:
        state.append(_SubQueryIteratorState(ent, subit, orderings))
    # Now turn it into a sorted heap. The heapq module claims that
    # calling heapify() is more efficient than calling heappush() for
    # each item.
    heapq.heapify(state)
    # Repeatedly yield the lowest entity from the state vector,
    # filtering duplicates. This is essentially a multi-way merge
    # sort. One would think it should be possible to filter
    # duplicates simply by dropping other entities already in the
    # state vector that are equal to the lowest entity, but because of
    # the weird sorting of repeated properties, we have to explicitly
    # keep a set of all keys, so we can remove later occurrences.
    # Yes, this means that the output may not be sorted correctly.
    # Too bad. (I suppose you can do this in constant memory bounded
    # by the maximum number of entries in relevant repeated
    # properties, but I'm too lazy for now. And yes, all this means
    # MultiQuery is a bit of a toy. But where it works, it beats
    # expecting the user to do this themselves.)
    keys_seen = set()
    while state:
      item = heapq.heappop(state)
      ent = item.entity
      if ent.key not in keys_seen:
        keys_seen.add(ent.key)
        queue.putq(ent)
      # Advance the subquery the popped entity came from; if it has more
      # results, push its state back onto the heap.
      subit = item.iterator
      try:
        ent = yield subit.getq()
      except EOFError:
        pass
      else:
        item.entity = ent
        heapq.heappush(state, item)
    queue.complete()
  # Datastore API using the default context.
  def iter(self, options=None):
    """Return a QueryIterator over the merged results."""
    return QueryIterator(self, options=options)
  __iter__ = iter
def order_to_ordering(order):
  """Convert a datastore_query.PropertyOrder to a (name, direction) tuple."""
  order_pb = order._to_pb()
  # TODO: What about UTF-8?
  return (order_pb.property(), order_pb.direction())
def orders_to_orderings(orders):
  """Convert an Order (or None) to a list of (name, direction) tuples."""
  if orders is None:
    return []
  if isinstance(orders, datastore_query.PropertyOrder):
    return [order_to_ordering(orders)]
  if isinstance(orders, datastore_query.CompositeOrder):
    # TODO: What about UTF-8?
    pbs = orders._to_pbs()
    return [(pb.property(), pb.direction()) for pb in pbs]
  assert False, orders
def ordering_to_order(ordering):
  """Convert a (name, direction) tuple to a datastore_query.PropertyOrder."""
  prop_name, prop_direction = ordering
  return datastore_query.PropertyOrder(prop_name, prop_direction)
def orderings_to_orders(orderings):
  """Convert (name, direction) tuples to a single Order, or None if empty."""
  orders = [ordering_to_order(ordering) for ordering in orderings]
  if not orders:
    return None
  elif len(orders) == 1:
    return orders[0]
  else:
    return datastore_query.CompositeOrder(orders)
| Python |
"""Tests for tasklets.py."""
import os
import re
import random
import sys
import time
import unittest
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
from google.appengine.datastore import datastore_rpc
from ndb import eventloop
from ndb import model
from ndb import test_utils
from ndb import tasklets
from ndb.tasklets import Future, tasklet
class TaskletTests(test_utils.DatastoreTest):
  """Tests for Future, the queue/reducer futures, and the tasklet decorator."""
  def setUp(self):
    super(TaskletTests, self).setUp()
    # Start each test with a fresh event loop and context; the framework
    # caches them in os.environ under these keys.
    if eventloop._EVENT_LOOP_KEY in os.environ:
      del os.environ[eventloop._EVENT_LOOP_KEY]
    if tasklets._CONTEXT_KEY in os.environ:
      del os.environ[tasklets._CONTEXT_KEY]
    self.ev = eventloop.get_event_loop()
    self.log = []
  def universal_callback(self, *args):
    # Records every call's args; tests assert on self.log afterwards.
    self.log.append(args)
  def testFuture_Constructor(self):
    f = tasklets.Future()
    self.assertEqual(f._result, None)
    self.assertEqual(f._exception, None)
    self.assertEqual(f._callbacks, [])
  def testFuture_Repr(self):
    # repr() should include creator info and the current state/result.
    f = tasklets.Future()
    prefix = (r'<Future [\da-f]+ created by '
              r'testFuture_Repr\(tasklets_test.py:\d+\) ')
    self.assertTrue(re.match(prefix + r'pending>$', repr(f)), repr(f))
    f.set_result('abc')
    self.assertTrue(re.match(prefix + r'result \'abc\'>$', repr(f)), repr(f))
    f = tasklets.Future()
    f.set_exception(RuntimeError('abc'))
    self.assertTrue(re.match(prefix + r'exception RuntimeError: abc>$',
                             repr(f)),
                    repr(f))
  def testFuture_Done_State(self):
    f = tasklets.Future()
    self.assertFalse(f.done())
    self.assertEqual(f.state, f.RUNNING)
    f.set_result(42)
    self.assertTrue(f.done())
    self.assertEqual(f.state, f.FINISHING)
  def testFuture_SetResult(self):
    f = tasklets.Future()
    f.set_result(42)
    self.assertEqual(f._result, 42)
    self.assertEqual(f._exception, None)
    self.assertEqual(f.get_result(), 42)
  def testFuture_SetException(self):
    f = tasklets.Future()
    err = RuntimeError(42)
    f.set_exception(err)
    self.assertEqual(f.done(), True)
    self.assertEqual(f._exception, err)
    self.assertEqual(f._result, None)
    self.assertEqual(f.get_exception(), err)
    # get_result() on a failed future re-raises its exception.
    self.assertRaises(RuntimeError, f.get_result)
  def testFuture_AddDoneCallback_SetResult(self):
    f = tasklets.Future()
    f.add_callback(self.universal_callback, f)
    self.assertEqual(self.log, [])  # Nothing happened yet.
    f.set_result(42)
    eventloop.run()
    self.assertEqual(self.log, [(f,)])
  def testFuture_SetResult_AddDoneCallback(self):
    # Callbacks added after completion still fire (on the event loop).
    f = tasklets.Future()
    f.set_result(42)
    self.assertEqual(f.get_result(), 42)
    f.add_callback(self.universal_callback, f)
    eventloop.run()
    self.assertEqual(self.log, [(f,)])
  def testFuture_AddDoneCallback_SetException(self):
    # Callbacks fire on failure just as on success.
    f = tasklets.Future()
    f.add_callback(self.universal_callback, f)
    f.set_exception(RuntimeError(42))
    eventloop.run()
    self.assertEqual(self.log, [(f,)])
    self.assertEqual(f.done(), True)
  def create_futures(self):
    # Create five futures that complete at staggered times (0, 10, 20,
    # 30, 40 ms) via queued event-loop calls.  Returns them as a set.
    self.futs = []
    for i in range(5):
      f = tasklets.Future()
      f.add_callback(self.universal_callback, f)
      def wake(fut, result):
        fut.set_result(result)
      self.ev.queue_call(i*0.01, wake, f, i)
      self.futs.append(f)
    return set(self.futs)
  def testFuture_WaitAny(self):
    self.assertEqual(tasklets.Future.wait_any([]), None)
    todo = self.create_futures()
    while todo:
      f = tasklets.Future.wait_any(todo)
      todo.remove(f)
    eventloop.run()
    self.assertEqual(self.log, [(f,) for f in self.futs])
  def testFuture_WaitAll(self):
    todo = self.create_futures()
    tasklets.Future.wait_all(todo)
    self.assertEqual(self.log, [(f,) for f in self.futs])
  def testSleep(self):
    log = []
    @tasklets.tasklet
    def foo():
      log.append(time.time())
      yield tasklets.sleep(0.1)
      log.append(time.time())
    foo()
    eventloop.run()
    t0, t1 = log
    dt = t1-t0
    self.assertAlmostEqual(dt, 0.1, places=2)
  def testMultiFuture(self):
    @tasklets.tasklet
    def foo(dt):
      yield tasklets.sleep(dt)
      raise tasklets.Return('foo-%s' % dt)
    @tasklets.tasklet
    def bar(n):
      for i in range(n):
        yield tasklets.sleep(0.01)
      raise tasklets.Return('bar-%d' % n)
    bar5 = bar(5)
    # bar5 appears twice; the MultiFuture result contains both entries,
    # but set() below collapses them for the comparison.
    futs = [foo(0.05), foo(0.01), foo(0.03), bar(3), bar5, bar5]
    mfut = tasklets.MultiFuture()
    for fut in futs:
      mfut.add_dependent(fut)
    mfut.complete()
    results = mfut.get_result()
    self.assertEqual(set(results),
                     set(['foo-0.01', 'foo-0.03', 'foo-0.05',
                          'bar-3', 'bar-5']))
  def testMultiFuture_PreCompleted(self):
    # A dependent that is already done when added must still be included.
    @tasklets.tasklet
    def foo():
      yield tasklets.sleep(0.01)
      raise tasklets.Return(42)
    mfut = tasklets.MultiFuture()
    dep = foo()
    dep.wait()
    mfut.add_dependent(dep)
    mfut.complete()
    eventloop.run()
    self.assertTrue(mfut.done())
    self.assertEqual(mfut.get_result(), [42])
  def testQueueFuture(self):
    q = tasklets.QueueFuture()
    @tasklets.tasklet
    def produce_one(i):
      yield tasklets.sleep(i * 0.01)
      raise tasklets.Return(i)
    @tasklets.tasklet
    def producer():
      q.putq(0)
      for i in range(1, 10):
        q.add_dependent(produce_one(i))
      q.complete()
    @tasklets.tasklet
    def consumer():
      for i in range(10):
        val = yield q.getq()
        self.assertEqual(val, i)
      yield q
      # After completion, further getq() calls fail with EOFError.
      self.assertRaises(EOFError, q.getq().get_result)
    @tasklets.tasklet
    def foo():
      yield producer(), consumer()
    foo().get_result()
  def testSerialQueueFuture(self):
    # SerialQueueFuture guarantees delivery in add_dependent() order,
    # even though the producers complete at random times.
    q = tasklets.SerialQueueFuture()
    @tasklets.tasklet
    def produce_one(i):
      yield tasklets.sleep(random.randrange(10) * 0.01)
      raise tasklets.Return(i)
    @tasklets.tasklet
    def producer():
      for i in range(10):
        q.add_dependent(produce_one(i))
      q.complete()
    @tasklets.tasklet
    def consumer():
      for i in range(10):
        val = yield q.getq()
        self.assertEqual(val, i)
      yield q
      self.assertRaises(EOFError, q.getq().get_result)
      yield q
    @tasklets.synctasklet
    def foo():
      yield producer(), consumer()
    foo()
  def testReducerFuture(self):
    @tasklets.tasklet
    def sum_tasklet(arg):
      yield tasklets.sleep(0.01)
      raise tasklets.Return(sum(arg))
    @tasklets.tasklet
    def produce_one(i):
      yield tasklets.sleep(i * 0.01)
      raise tasklets.Return(i)
    @tasklets.tasklet
    def producer():
      for i in range(10):
        q.add_dependent(produce_one(i))
      q.complete()
    @tasklets.tasklet
    def consumer():
      total = yield q
      self.assertEqual(total, sum(range(10)))
    @tasklets.tasklet
    def foo():
      yield producer(), consumer()
    # q is assigned after the closures are defined but before they run;
    # exercised with both a tasklet reducer and a plain function reducer.
    q = tasklets.ReducingFuture(sum_tasklet, batch_size=3)
    foo().get_result()
    q = tasklets.ReducingFuture(sum, batch_size=3)
    foo().get_result()
  def testGetReturnValue(self):
    r0 = tasklets.Return()
    r1 = tasklets.Return(42)
    r2 = tasklets.Return(42, 'hello')
    r3 = tasklets.Return((1, 2, 3))
    self.assertEqual(tasklets.get_return_value(r0), None)
    self.assertEqual(tasklets.get_return_value(r1), 42)
    self.assertEqual(tasklets.get_return_value(r2), (42, 'hello'))
    self.assertEqual(tasklets.get_return_value(r3), (1, 2, 3))
  def testTasklets_Basic(self):
    @tasklets.tasklet
    def t1():
      a = yield t2(3)
      b = yield t3(2)
      raise tasklets.Return(a + b)
    @tasklets.tasklet
    def t2(n):
      raise tasklets.Return(n)
    @tasklets.tasklet
    def t3(n):
      # A non-generator tasklet: its plain return value becomes the result.
      return n
    x = t1()
    self.assertTrue(isinstance(x, tasklets.Future))
    y = x.get_result()
    self.assertEqual(y, 5)
  def testTasklets_Raising(self):
    @tasklets.tasklet
    def t1():
      f = t2(True)
      try:
        a = yield f
      except RuntimeError, err:
        # The exception raised inside t2 surfaces at the yield point.
        self.assertEqual(f.get_exception(), err)
        raise tasklets.Return(str(err))
    @tasklets.tasklet
    def t2(error):
      if error:
        raise RuntimeError('hello')
      else:
        yield tasklets.Future()
    x = t1()
    y = x.get_result()
    self.assertEqual(y, 'hello')
  def testTasklets_YieldRpcs(self):
    # Datastore RPCs can be yielded directly from a tasklet.
    @tasklets.tasklet
    def main_tasklet():
      rpc1 = self.conn.async_get(None, [])
      rpc2 = self.conn.async_put(None, [])
      res1 = yield rpc1
      res2 = yield rpc2
      raise tasklets.Return(res1, res2)
    f = main_tasklet()
    result = f.get_result()
    self.assertEqual(result, ([], []))
  def testTasklet_YieldTuple(self):
    # Yielding a tuple of futures waits for all of them in parallel.
    @tasklets.tasklet
    def fib(n):
      if n <= 1:
        raise tasklets.Return(n)
      a, b = yield fib(n - 1), fib(n - 2)
      # print 'fib(%r) = %r + %r = %r' % (n, a, b, a + b)
      self.assertTrue(a >= b, (a, b))
      raise tasklets.Return(a + b)
    fut = fib(10)
    val = fut.get_result()
    self.assertEqual(val, 55)
class TracebackTests(unittest.TestCase):
  """Checks that errors result in reasonable tracebacks."""
  def testBasicError(self):
    # Collect the frames we expect in the traceback, outermost first.
    frames = [sys._getframe()]
    @tasklets.tasklet
    def level3():
      frames.append(sys._getframe())
      raise RuntimeError('hello')
      yield  # Unreachable; present only to make level3 a generator.
    @tasklets.tasklet
    def level2():
      frames.append(sys._getframe())
      yield level3()
    @tasklets.tasklet
    def level1():
      frames.append(sys._getframe())
      yield level2()
    @tasklets.tasklet
    def level0():
      frames.append(sys._getframe())
      yield level1()
    fut = level0()
    try:
      fut.check_success()
    except RuntimeError, err:
      _, _, tb = sys.exc_info()
      self.assertEqual(str(err), 'hello')
      tbframes = []
      while tb is not None:
        # It's okay if some _help_tasklet_along frames are present.
        if tb.tb_frame.f_code.co_name != '_help_tasklet_along':
          tbframes.append(tb.tb_frame)
        tb = tb.tb_next
      # The traceback should thread through every tasklet level, in order.
      self.assertEqual(frames, tbframes)
    else:
      self.fail('Expected RuntimeError not raised')
def main():
  """Run all tests in this module."""
  unittest.main()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  main()
| Python |
"""Tests for model.py."""
import base64
import datetime
import difflib
import pickle
import re
import unittest
from google.appengine.api import datastore_errors
from google.appengine.api import users
from google.appengine.datastore import entity_pb
from ndb import model, query, test_utils
# Shared fixtures used by the golden-protobuf tests below: a canonical
# User (email, auth domain, obfuscated gaia id) and a GeoPt (Amsterdam).
TESTUSER = users.User('test@example.com', 'example.com', '123')
AMSTERDAM = model.GeoPt(52.35, 4.9166667)
GOLDEN_PB = """\
key <
app: "_"
path <
Element {
type: "Model"
id: 42
}
>
>
entity_group <
Element {
type: "Model"
id: 42
}
>
property <
name: "b"
value <
booleanValue: true
>
multiple: false
>
property <
name: "d"
value <
doubleValue: 2.5
>
multiple: false
>
property <
name: "k"
value <
ReferenceValue {
app: "_"
PathElement {
type: "Model"
id: 42
}
}
>
multiple: false
>
property <
name: "p"
value <
int64Value: 42
>
multiple: false
>
property <
name: "q"
value <
stringValue: "hello"
>
multiple: false
>
property <
name: "u"
value <
UserValue {
email: "test@example.com"
auth_domain: "example.com"
gaiaid: 0
obfuscated_gaiaid: "123"
}
>
multiple: false
>
property <
name: "xy"
value <
PointValue {
x: 52.35
y: 4.9166667
}
>
multiple: false
>
"""
INDEXED_PB = re.sub('Model', 'MyModel', GOLDEN_PB)
UNINDEXED_PB = """\
key <
app: "_"
path <
Element {
type: "MyModel"
id: 0
}
>
>
entity_group <
>
raw_property <
meaning: 14
name: "b"
value <
stringValue: "\\000\\377"
>
multiple: false
>
raw_property <
meaning: 15
name: "t"
value <
stringValue: "Hello world\\341\\210\\264"
>
multiple: false
>
"""
PERSON_PB = """\
key <
app: "_"
path <
Element {
type: "Person"
id: 0
}
>
>
entity_group <
>
property <
name: "address.city"
value <
stringValue: "Mountain View"
>
multiple: false
>
property <
name: "address.street"
value <
stringValue: "1600 Amphitheatre"
>
multiple: false
>
property <
name: "name"
value <
stringValue: "Google"
>
multiple: false
>
"""
NESTED_PB = """\
key <
app: "_"
path <
Element {
type: "Person"
id: 0
}
>
>
entity_group <
>
property <
name: "address.home.city"
value <
stringValue: "Mountain View"
>
multiple: false
>
property <
name: "address.home.street"
value <
stringValue: "1600 Amphitheatre"
>
multiple: false
>
property <
name: "address.work.city"
value <
stringValue: "San Francisco"
>
multiple: false
>
property <
name: "address.work.street"
value <
stringValue: "345 Spear"
>
multiple: false
>
property <
name: "name"
value <
stringValue: "Google"
>
multiple: false
>
"""
RECURSIVE_PB = """\
key <
app: "_"
path <
Element {
type: "Tree"
id: 0
}
>
>
entity_group <
>
raw_property <
meaning: 15
name: "root.left.left.name"
value <
stringValue: "a1a"
>
multiple: false
>
raw_property <
meaning: 15
name: "root.left.name"
value <
stringValue: "a1"
>
multiple: false
>
raw_property <
meaning: 15
name: "root.left.rite.name"
value <
stringValue: "a1b"
>
multiple: false
>
raw_property <
meaning: 15
name: "root.name"
value <
stringValue: "a"
>
multiple: false
>
raw_property <
meaning: 15
name: "root.rite.name"
value <
stringValue: "a2"
>
multiple: false
>
raw_property <
meaning: 15
name: "root.rite.rite.name"
value <
stringValue: "a2b"
>
multiple: false
>
"""
MULTI_PB = """\
key <
app: "_"
path <
Element {
type: "Person"
id: 0
}
>
>
entity_group <
>
property <
name: "address"
value <
stringValue: "345 Spear"
>
multiple: true
>
property <
name: "address"
value <
stringValue: "San Francisco"
>
multiple: true
>
property <
name: "name"
value <
stringValue: "Google"
>
multiple: false
>
"""
MULTIINSTRUCT_PB = """\
key <
app: "_"
path <
Element {
type: "Person"
id: 0
}
>
>
entity_group <
>
property <
name: "address.label"
value <
stringValue: "work"
>
multiple: false
>
property <
name: "address.line"
value <
stringValue: "345 Spear"
>
multiple: true
>
property <
name: "address.line"
value <
stringValue: "San Francisco"
>
multiple: true
>
property <
name: "name"
value <
stringValue: "Google"
>
multiple: false
>
"""
MULTISTRUCT_PB = """\
key <
app: "_"
path <
Element {
type: "Person"
id: 0
}
>
>
entity_group <
>
property <
name: "address.label"
value <
stringValue: "work"
>
multiple: true
>
property <
name: "address.text"
value <
stringValue: "San Francisco"
>
multiple: true
>
property <
name: "address.label"
value <
stringValue: "home"
>
multiple: true
>
property <
name: "address.text"
value <
stringValue: "Mountain View"
>
multiple: true
>
property <
name: "name"
value <
stringValue: "Google"
>
multiple: false
>
"""
class ModelTests(test_utils.DatastoreTest):
def tearDown(self):
self.assertTrue(model.Model._properties == {})
self.assertTrue(model.Expando._properties == {})
super(ModelTests, self).tearDown()
def testKey(self):
m = model.Model()
self.assertEqual(m.key, None)
k = model.Key(flat=['ParentModel', 42, 'Model', 'foobar'])
m.key = k
self.assertEqual(m.key, k)
del m.key
self.assertEqual(m.key, None)
# incomplete key
k2 = model.Key(flat=['ParentModel', 42, 'Model', None])
m.key = k2
self.assertEqual(m.key, k2)
def testIncompleteKey(self):
m = model.Model()
k = model.Key(flat=['Model', None])
m.key = k
pb = m.ToPb()
m2 = model.Model()
m2.FromPb(pb)
self.assertEqual(m2, m)
def testIdAndParent(self):
p = model.Key('ParentModel', 'foo')
# key name
m = model.Model(id='bar')
m2 = model.Model()
m2.FromPb(m.ToPb())
self.assertEqual(m2.key, model.Key('Model', 'bar'))
# key name + parent
m = model.Model(id='bar', parent=p)
m2 = model.Model()
m2.FromPb(m.ToPb())
self.assertEqual(m2.key, model.Key('ParentModel', 'foo', 'Model', 'bar'))
# key id
m = model.Model(id=42)
m2 = model.Model()
m2.FromPb(m.ToPb())
self.assertEqual(m2.key, model.Key('Model', 42))
# key id + parent
m = model.Model(id=42, parent=p)
m2 = model.Model()
m2.FromPb(m.ToPb())
self.assertEqual(m2.key, model.Key('ParentModel', 'foo', 'Model', 42))
# parent
m = model.Model(parent=p)
m2 = model.Model()
m2.FromPb(m.ToPb())
self.assertEqual(m2.key, model.Key('ParentModel', 'foo', 'Model', None))
# not key -- invalid
self.assertRaises(datastore_errors.BadValueError, model.Model, key='foo')
# wrong key kind -- invalid
k = model.Key('OtherModel', 'bar')
self.assertRaises(model.KindError, model.Model, key=k)
# incomplete parent -- invalid
p2 = model.Key('ParentModel', None)
self.assertRaises(datastore_errors.BadArgumentError, model.Model,
parent=p2)
self.assertRaises(datastore_errors.BadArgumentError, model.Model,
id='bar', parent=p2)
# key + id -- invalid
k = model.Key('Model', 'bar')
self.assertRaises(datastore_errors.BadArgumentError, model.Model, key=k,
id='bar')
# key + parent -- invalid
k = model.Key('Model', 'bar', parent=p)
self.assertRaises(datastore_errors.BadArgumentError, model.Model, key=k,
parent=p)
# key + id + parent -- invalid
self.assertRaises(datastore_errors.BadArgumentError, model.Model, key=k,
id='bar', parent=p)
def testQuery(self):
class MyModel(model.Model):
p = model.IntegerProperty()
q = MyModel.query()
self.assertTrue(isinstance(q, query.Query))
self.assertEqual(q.kind, 'MyModel')
self.assertEqual(q.ancestor, None)
k = model.Key(flat=['Model', 1])
q = MyModel.query(ancestor=k)
self.assertEqual(q.kind, 'MyModel')
self.assertEqual(q.ancestor, k)
k0 = model.Key(flat=['Model', None])
self.assertRaises(Exception, MyModel.query, ancestor=k0)
def testQueryWithFilter(self):
class MyModel(model.Model):
p = model.IntegerProperty()
q = MyModel.query(MyModel.p >= 0)
self.assertTrue(isinstance(q, query.Query))
self.assertEqual(q.kind, 'MyModel')
self.assertEqual(q.ancestor, None)
self.assertTrue(q.filters is not None)
q2 = MyModel.query().filter(MyModel.p >= 0)
self.assertEqual(q.filters, q2.filters)
def testProperty(self):
class MyModel(model.Model):
b = model.BooleanProperty()
p = model.IntegerProperty()
q = model.StringProperty()
d = model.FloatProperty()
k = model.KeyProperty()
u = model.UserProperty()
xy = model.GeoPtProperty()
ent = MyModel()
k = model.Key(flat=['MyModel', 42])
ent.key = k
MyModel.b.SetValue(ent, True)
MyModel.p.SetValue(ent, 42)
MyModel.q.SetValue(ent, 'hello')
MyModel.d.SetValue(ent, 2.5)
MyModel.k.SetValue(ent, k)
MyModel.u.SetValue(ent, TESTUSER)
MyModel.xy.SetValue(ent, AMSTERDAM)
self.assertEqual(MyModel.b.GetValue(ent), True)
self.assertEqual(MyModel.p.GetValue(ent), 42)
self.assertEqual(MyModel.q.GetValue(ent), 'hello')
self.assertEqual(MyModel.d.GetValue(ent), 2.5)
self.assertEqual(MyModel.k.GetValue(ent), k)
self.assertEqual(MyModel.u.GetValue(ent), TESTUSER)
self.assertEqual(MyModel.xy.GetValue(ent), AMSTERDAM)
pb = self.conn.adapter.entity_to_pb(ent)
self.assertEqual(str(pb), INDEXED_PB)
ent = MyModel()
ent.FromPb(pb)
self.assertEqual(ent.GetKind(), 'MyModel')
k = model.Key(flat=['MyModel', 42])
self.assertEqual(ent.key, k)
self.assertEqual(MyModel.p.GetValue(ent), 42)
self.assertEqual(MyModel.q.GetValue(ent), 'hello')
self.assertEqual(MyModel.d.GetValue(ent), 2.5)
self.assertEqual(MyModel.k.GetValue(ent), k)
def testUnindexedProperty(self):
class MyModel(model.Model):
t = model.TextProperty()
b = model.BlobProperty()
ent = MyModel()
MyModel.t.SetValue(ent, u'Hello world\u1234')
MyModel.b.SetValue(ent, '\x00\xff')
self.assertEqual(MyModel.t.GetValue(ent), u'Hello world\u1234')
self.assertEqual(MyModel.b.GetValue(ent), '\x00\xff')
pb = ent.ToPb()
self.assertEqual(str(pb), UNINDEXED_PB)
ent = MyModel()
ent.FromPb(pb)
self.assertEqual(ent.GetKind(), 'MyModel')
k = model.Key(flat=['MyModel', None])
self.assertEqual(ent.key, k)
self.assertEqual(MyModel.t.GetValue(ent), u'Hello world\u1234')
self.assertEqual(MyModel.b.GetValue(ent), '\x00\xff')
def testGeoPt(self):
# Test for the GeoPt type itself.
p = model.GeoPt(3.14, 42)
self.assertEqual(p.lat, 3.14)
self.assertEqual(p.lon, 42.0)
self.assertEqual(repr(p), 'GeoPt(3.14, 42)')
def DateAndOrTimePropertyTest(self, propclass, t1, t2):
class Person(model.Model):
name = model.StringProperty()
ctime = propclass(auto_now_add=True)
mtime = propclass(auto_now=True)
atime = propclass()
times = propclass(repeated=True)
p = Person()
p.atime = t1
p.times = [t1, t2]
self.assertEqual(p.ctime, None)
self.assertEqual(p.mtime, None)
pb = p.ToPb()
self.assertNotEqual(p.ctime, None)
self.assertNotEqual(p.mtime, None)
q = Person()
q.FromPb(pb)
self.assertEqual(q.ctime, p.ctime)
self.assertEqual(q.mtime, p.mtime)
self.assertEqual(q.atime, t1)
self.assertEqual(q.times, [t1, t2])
def testDateTimeProperty(self):
self.DateAndOrTimePropertyTest(model.DateTimeProperty,
datetime.datetime(1982, 12, 1, 9, 0, 0),
datetime.datetime(1995, 4, 15, 5, 0, 0))
def testDateProperty(self):
self.DateAndOrTimePropertyTest(model.DateProperty,
datetime.date(1982, 12, 1),
datetime.date(1995, 4, 15))
def testTimeProperty(self):
self.DateAndOrTimePropertyTest(model.TimeProperty,
datetime.time(9, 0, 0),
datetime.time(5, 0, 0, 500))
  def testStructuredProperty(self):
    # A StructuredProperty embeds one model inside another; verify access
    # through both the descriptor API and attribute syntax, the golden
    # PERSON_PB serialization, and the full round-trip.
    class Address(model.Model):
      street = model.StringProperty()
      city = model.StringProperty()
    class Person(model.Model):
      name = model.StringProperty()
      address = model.StructuredProperty(Address)
    p = Person()
    p.name = 'Google'
    a = Address(street='1600 Amphitheatre')
    p.address = a
    # Mutating through the outer entity updates the shared sub-entity.
    p.address.city = 'Mountain View'
    self.assertEqual(Person.name.GetValue(p), 'Google')
    self.assertEqual(p.name, 'Google')
    self.assertEqual(Person.address.GetValue(p), a)
    self.assertEqual(Address.street.GetValue(a), '1600 Amphitheatre')
    self.assertEqual(Address.city.GetValue(a), 'Mountain View')
    pb = p.ToPb()
    self.assertEqual(str(pb), PERSON_PB)
    p = Person()
    p.FromPb(pb)
    self.assertEqual(p.name, 'Google')
    self.assertEqual(p.address.street, '1600 Amphitheatre')
    self.assertEqual(p.address.city, 'Mountain View')
    self.assertEqual(p.address, a)
  def testNestedStructuredProperty(self):
    # StructuredProperty nested two levels deep (Person -> AddressPair ->
    # Address); verify the golden NESTED_PB serialization and round-trip.
    class Address(model.Model):
      street = model.StringProperty()
      city = model.StringProperty()
    class AddressPair(model.Model):
      home = model.StructuredProperty(Address)
      work = model.StructuredProperty(Address)
    class Person(model.Model):
      name = model.StringProperty()
      address = model.StructuredProperty(AddressPair)
    p = Person()
    p.name = 'Google'
    p.address = AddressPair(home=Address(), work=Address())
    p.address.home.city = 'Mountain View'
    p.address.home.street = '1600 Amphitheatre'
    p.address.work.city = 'San Francisco'
    p.address.work.street = '345 Spear'
    pb = p.ToPb()
    self.assertEqual(str(pb), NESTED_PB)
    p = Person()
    p.FromPb(pb)
    self.assertEqual(p.name, 'Google')
    self.assertEqual(p.address.home.street, '1600 Amphitheatre')
    self.assertEqual(p.address.home.city, 'Mountain View')
    self.assertEqual(p.address.work.street, '345 Spear')
    self.assertEqual(p.address.work.city, 'San Francisco')
  def testRecursiveStructuredProperty(self):
    # A model may reference itself via StructuredProperty, but only by
    # assigning the properties *after* the class body (the class name is
    # not bound yet inside its own body) and then calling
    # FixUpProperties() to re-register them.
    class Node(model.Model):
      name = model.StringProperty(indexed=False)
    Node.left = model.StructuredProperty(Node)
    Node.rite = model.StructuredProperty(Node)
    Node.FixUpProperties()
    class Tree(model.Model):
      root = model.StructuredProperty(Node)
    k = model.Key(flat=['Tree', None])
    tree = Tree()
    tree.key = k
    # Small binary tree exercising both present and absent children.
    tree.root = Node(name='a',
                     left=Node(name='a1',
                               left=Node(name='a1a'),
                               rite=Node(name='a1b')),
                     rite=Node(name='a2',
                               rite=Node(name='a2b')))
    pb = tree.ToPb()
    self.assertEqual(str(pb), RECURSIVE_PB)
    tree2 = Tree()
    tree2.FromPb(pb)
    self.assertEqual(tree2, tree)
  def testRenamedProperty(self):
    # Properties can carry a datastore name different from their Python
    # attribute name (e.g. bb stored as 'b').  With the renames in place
    # the entity should serialize to the same INDEXED_PB golden string as
    # the un-renamed equivalent.
    class MyModel(model.Model):
      bb = model.BooleanProperty('b')
      pp = model.IntegerProperty('p')
      qq = model.StringProperty('q')
      dd = model.FloatProperty('d')
      kk = model.KeyProperty('k')
      uu = model.UserProperty('u')
      xxyy = model.GeoPtProperty('xy')
    ent = MyModel()
    k = model.Key(flat=['MyModel', 42])
    ent.key = k
    MyModel.bb.SetValue(ent, True)
    MyModel.pp.SetValue(ent, 42)
    MyModel.qq.SetValue(ent, 'hello')
    MyModel.dd.SetValue(ent, 2.5)
    MyModel.kk.SetValue(ent, k)
    MyModel.uu.SetValue(ent, TESTUSER)
    MyModel.xxyy.SetValue(ent, AMSTERDAM)
    self.assertEqual(MyModel.pp.GetValue(ent), 42)
    self.assertEqual(MyModel.qq.GetValue(ent), 'hello')
    self.assertEqual(MyModel.dd.GetValue(ent), 2.5)
    self.assertEqual(MyModel.kk.GetValue(ent), k)
    self.assertEqual(MyModel.uu.GetValue(ent), TESTUSER)
    self.assertEqual(MyModel.xxyy.GetValue(ent), AMSTERDAM)
    # Serialize via the connection's adapter (not ent.ToPb()) to exercise
    # that code path as well.
    pb = self.conn.adapter.entity_to_pb(ent)
    self.assertEqual(str(pb), INDEXED_PB)
    ent = MyModel()
    ent.FromPb(pb)
    self.assertEqual(ent.GetKind(), 'MyModel')
    k = model.Key(flat=['MyModel', 42])
    self.assertEqual(ent.key, k)
    self.assertEqual(MyModel.pp.GetValue(ent), 42)
    self.assertEqual(MyModel.qq.GetValue(ent), 'hello')
    self.assertEqual(MyModel.dd.GetValue(ent), 2.5)
    self.assertEqual(MyModel.kk.GetValue(ent), k)
  def testRenamedStructuredProperty(self):
    # Renamed properties at every nesting level should still produce the
    # same golden NESTED_PB as the un-renamed nested test above.
    class Address(model.Model):
      st = model.StringProperty('street')
      ci = model.StringProperty('city')
    class AddressPair(model.Model):
      ho = model.StructuredProperty(Address, 'home')
      wo = model.StructuredProperty(Address, 'work')
    class Person(model.Model):
      na = model.StringProperty('name')
      ad = model.StructuredProperty(AddressPair, 'address')
    p = Person()
    p.na = 'Google'
    p.ad = AddressPair(ho=Address(), wo=Address())
    p.ad.ho.ci = 'Mountain View'
    p.ad.ho.st = '1600 Amphitheatre'
    p.ad.wo.ci = 'San Francisco'
    p.ad.wo.st = '345 Spear'
    pb = p.ToPb()
    self.assertEqual(str(pb), NESTED_PB)
    p = Person()
    p.FromPb(pb)
    self.assertEqual(p.na, 'Google')
    self.assertEqual(p.ad.ho.st, '1600 Amphitheatre')
    self.assertEqual(p.ad.ho.ci, 'Mountain View')
    self.assertEqual(p.ad.wo.st, '345 Spear')
    self.assertEqual(p.ad.wo.ci, 'San Francisco')
  def testKindMap(self):
    # Each Model subclass registers itself in a global kind map as it is
    # defined; reset first so earlier tests don't interfere.
    model.Model.ResetKindMap()
    class A1(model.Model):
      pass
    self.assertEqual(model.Model.GetKindMap(), {'A1': A1})
    class A2(model.Model):
      pass
    self.assertEqual(model.Model.GetKindMap(), {'A1': A1, 'A2': A2})
  def testMultipleProperty(self):
    # A repeated scalar property serializes to the golden MULTI_PB and
    # round-trips to an equal entity.
    class Person(model.Model):
      name = model.StringProperty()
      address = model.StringProperty(repeated=True)
    m = Person(name='Google', address=['345 Spear', 'San Francisco'])
    m.key = model.Key(flat=['Person', None])
    self.assertEqual(m.address, ['345 Spear', 'San Francisco'])
    pb = m.ToPb()
    self.assertEqual(str(pb), MULTI_PB)
    m2 = Person()
    m2.FromPb(pb)
    self.assertEqual(m2, m)
  def testMultipleInStructuredProperty(self):
    # A repeated property *inside* a (non-repeated) StructuredProperty;
    # golden serialization is MULTIINSTRUCT_PB.
    class Address(model.Model):
      label = model.StringProperty()
      line = model.StringProperty(repeated=True)
    class Person(model.Model):
      name = model.StringProperty()
      address = model.StructuredProperty(Address)
    m = Person(name='Google',
               address=Address(label='work',
                               line=['345 Spear', 'San Francisco']))
    m.key = model.Key(flat=['Person', None])
    self.assertEqual(m.address.line, ['345 Spear', 'San Francisco'])
    pb = m.ToPb()
    self.assertEqual(str(pb), MULTIINSTRUCT_PB)
    m2 = Person()
    m2.FromPb(pb)
    self.assertEqual(m2, m)
  def testMultipleStructuredProperty(self):
    # A *repeated* StructuredProperty (list of sub-entities); golden
    # serialization is MULTISTRUCT_PB.
    class Address(model.Model):
      label = model.StringProperty()
      text = model.StringProperty()
    class Person(model.Model):
      name = model.StringProperty()
      address = model.StructuredProperty(Address, repeated=True)
    m = Person(name='Google',
               address=[Address(label='work', text='San Francisco'),
                        Address(label='home', text='Mountain View')])
    m.key = model.Key(flat=['Person', None])
    self.assertEqual(m.address[0].label, 'work')
    self.assertEqual(m.address[0].text, 'San Francisco')
    self.assertEqual(m.address[1].label, 'home')
    self.assertEqual(m.address[1].text, 'Mountain View')
    pb = m.ToPb()
    self.assertEqual(str(pb), MULTISTRUCT_PB)
    m2 = Person()
    m2.FromPb(pb)
    self.assertEqual(m2, m)
  def testCannotMultipleInMultiple(self):
    # A repeated StructuredProperty may not wrap a model that itself has
    # a repeated property -- the serialization would be ambiguous.
    class Inner(model.Model):
      innerval = model.StringProperty(repeated=True)
    self.assertRaises(AssertionError,
                      model.StructuredProperty, Inner, repeated=True)
  def testNullProperties(self):
    # Unset properties read back as None, both before serialization and
    # after a round-trip through the protobuf.
    class Address(model.Model):
      street = model.StringProperty()
      city = model.StringProperty()
      zip = model.IntegerProperty()
    class Person(model.Model):
      address = model.StructuredProperty(Address)
      age = model.IntegerProperty()
      name = model.StringProperty()
      k = model.KeyProperty()
    k = model.Key(flat=['Person', 42])
    p = Person()
    p.key = k
    self.assertEqual(p.address, None)
    self.assertEqual(p.age, None)
    self.assertEqual(p.name, None)
    self.assertEqual(p.k, None)
    pb = p.ToPb()
    q = Person()
    q.FromPb(pb)
    self.assertEqual(q.address, None)
    self.assertEqual(q.age, None)
    self.assertEqual(q.name, None)
    self.assertEqual(q.k, None)
    self.assertEqual(q, p)
  def testOrphanProperties(self):
    # Deserializing into a plain model.Model (which declares none of the
    # properties) must preserve all values as 'orphans' so that
    # re-serializing reproduces the original protobuf exactly.
    class Tag(model.Model):
      names = model.StringProperty(repeated=True)
      ratings = model.IntegerProperty(repeated=True)
    class Address(model.Model):
      line = model.StringProperty(repeated=True)
      city = model.StringProperty()
      zip = model.IntegerProperty()
      tags = model.StructuredProperty(Tag)
    class Person(model.Model):
      address = model.StructuredProperty(Address)
      age = model.IntegerProperty(repeated=True)
      name = model.StringProperty()
      k = model.KeyProperty()
    k = model.Key(flat=['Person', 42])
    p = Person(name='White House', k=k, age=[210, 211],
               address=Address(line=['1600 Pennsylvania', 'Washington, DC'],
                               tags=Tag(names=['a', 'b'], ratings=[1, 2]),
                               zip=20500))
    p.key = k
    pb = p.ToPb()
    q = model.Model()
    q.FromPb(pb)
    qb = q.ToPb()
    # On mismatch, show a unified diff of the two protobuf text dumps.
    linesp = str(pb).splitlines(True)
    linesq = str(qb).splitlines(True)
    lines = difflib.unified_diff(linesp, linesq, 'Expected', 'Actual')
    self.assertEqual(pb, qb, ''.join(lines))
  def testModelRepr(self):
    # repr() lists properties alphabetically; the key, when set, comes
    # first.
    class Address(model.Model):
      street = model.StringProperty()
      city = model.StringProperty()
    class Person(model.Model):
      name = model.StringProperty()
      address = model.StructuredProperty(Address)
    p = Person(name='Google', address=Address(street='345 Spear', city='SF'))
    self.assertEqual(
      repr(p),
      "Person(address=Address(city='SF', street='345 Spear'), name='Google')")
    p.key = model.Key(pairs=[('Person', 42)])
    self.assertEqual(
      repr(p),
      "Person(key=Key('Person', 42), "
      "address=Address(city='SF', street='345 Spear'), name='Google')")
  def testModelRepr_RenamedProperty(self):
    # repr() shows the Python attribute (code) names, not the renamed
    # datastore names ('Street'/'City').
    class Address(model.Model):
      street = model.StringProperty('Street')
      city = model.StringProperty('City')
    a = Address(street='345 Spear', city='SF')
    self.assertEqual(repr(a), "Address(city='SF', street='345 Spear')")
  def testModel_RenameAlias(self):
    # For a plain Model, the datastore name of a renamed property is NOT
    # usable as an attribute or constructor keyword -- only the code name.
    class Person(model.Model):
      name = model.StringProperty('Name')
    p = Person(name='Fred')
    self.assertRaises(AttributeError, getattr, p, 'Name')
    self.assertRaises(AttributeError, Person, Name='Fred')
    # Unfortunately, p.Name = 'boo' just sets p.__dict__['Name'] = 'boo'.
    self.assertRaises(AttributeError, getattr, p, 'foo')
def testExpando_RenameAlias(self):
class Person(model.Expando):
name = model.StringProperty('Name')
p = Person(name='Fred')
self.assertEqual(p.name, 'Fred')
self.assertEqual(p.Name, 'Fred')
self.assertEqual(p._values, {'Name': 'Fred'})
self.assertTrue(p._properties, Person._properties)
p = Person(Name='Fred')
self.assertEqual(p.name, 'Fred')
self.assertEqual(p.Name, 'Fred')
self.assertEqual(p._values, {'Name': 'Fred'})
self.assertTrue(p._properties, Person._properties)
p = Person()
p.Name = 'Fred'
self.assertEqual(p.name, 'Fred')
self.assertEqual(p.Name, 'Fred')
self.assertEqual(p._values, {'Name': 'Fred'})
self.assertTrue(p._properties, Person._properties)
self.assertRaises(AttributeError, getattr, p, 'foo')
  def testModel_RenameSwap(self):
    # Pathological case: two properties whose code and datastore names
    # are swapped.  _values is keyed by datastore name, so the stored
    # keys come out crosswise.
    class Person(model.Model):
      foo = model.StringProperty('bar')
      bar = model.StringProperty('foo')
    p = Person(foo='foo', bar='bar')
    self.assertEqual(p._values,
                     {'foo': 'bar', 'bar': 'foo'})
  def testExpando_RenameSwap(self):
    # Same swapped-names case on an Expando, plus a dynamic property
    # ('baz'), set both via the constructor and via attribute assignment.
    class Person(model.Expando):
      foo = model.StringProperty('bar')
      bar = model.StringProperty('foo')
    p = Person(foo='foo', bar='bar', baz='baz')
    self.assertEqual(p._values,
                     {'foo': 'bar', 'bar': 'foo', 'baz': 'baz'})
    p = Person()
    p.foo = 'foo'
    p.bar = 'bar'
    p.baz = 'baz'
    self.assertEqual(p._values,
                     {'foo': 'bar', 'bar': 'foo', 'baz': 'baz'})
  def testPropertyRepr(self):
    # repr() of Property objects shows only non-default constructor args.
    p = model.Property()
    self.assertEqual(repr(p), 'Property()')
    p = model.IntegerProperty('foo', indexed=False, repeated=True)
    self.assertEqual(repr(p),
                     "IntegerProperty('foo', indexed=False, repeated=True)")
    class Address(model.Model):
      street = model.StringProperty()
      city = model.StringProperty()
    p = model.StructuredProperty(Address, 'foo')
    self.assertEqual(repr(p), "StructuredProperty(Address, 'foo')")
def testValidation(self):
class All(model.Model):
s = model.StringProperty()
i = model.IntegerProperty()
f = model.FloatProperty()
t = model.TextProperty()
b = model.BlobProperty()
k = model.KeyProperty()
BVE = datastore_errors.BadValueError
a = All()
a.s = None
a.s = 'abc'
a.s = u'def'
a.s = '\xff' # Not UTF-8.
self.assertRaises(BVE, setattr, a, 's', 0)
a.i = None
a.i = 42
a.i = 123L
self.assertRaises(BVE, setattr, a, 'i', '')
a.f = None
a.f = 42
a.f = 3.14
self.assertRaises(BVE, setattr, a, 'f', '')
a.t = None
a.t = 'abc'
a.t = u'def'
a.t = '\xff' # Not UTF-8.
self.assertRaises(BVE, setattr, a, 't', 0)
a.b = None
a.b = 'abc'
a.b = '\xff'
self.assertRaises(BVE, setattr, a, 'b', u'')
self.assertRaises(BVE, setattr, a, 'b', u'')
a.k = None
a.k = model.Key('Foo', 42)
self.assertRaises(BVE, setattr, a, 'k', '')
  def testLocalStructuredProperty(self):
    # LocalStructuredProperty stores the sub-entity as an opaque blob.
    # Also checks that toggling _compressed still reads old (differently
    # compressed) data, and that an unset value round-trips as None.
    class Address(model.Model):
      street = model.StringProperty()
      city = model.StringProperty()
    class Person(model.Model):
      name = model.StringProperty()
      address = model.LocalStructuredProperty(Address)
    p = Person()
    p.name = 'Google'
    a = Address(street='1600 Amphitheatre')
    p.address = a
    p.address.city = 'Mountain View'
    self.assertEqual(Person.name.GetValue(p), 'Google')
    self.assertEqual(p.name, 'Google')
    self.assertEqual(Person.address.GetValue(p), a)
    self.assertEqual(Address.street.GetValue(a), '1600 Amphitheatre')
    self.assertEqual(Address.city.GetValue(a), 'Mountain View')
    pb = p.ToPb()
    # TODO: Validate pb
    # Check we can enable and disable compression and have old data still
    # be understood.
    Person.address._compressed = True
    p = Person()
    p.FromPb(pb)
    self.assertEqual(p.name, 'Google')
    self.assertEqual(p.address.street, '1600 Amphitheatre')
    self.assertEqual(p.address.city, 'Mountain View')
    self.assertEqual(p.address, a)
    self.assertEqual(repr(Person.address),
                     "LocalStructuredProperty(Address, 'address', "
                     "compressed=True)")
    pb = p.ToPb()
    Person.address._compressed = False
    p = Person()
    p.FromPb(pb)
    # Now try with an empty address
    p = Person()
    p.name = 'Google'
    self.assertTrue(p.address is None)
    pb = p.ToPb()
    p = Person()
    p.FromPb(pb)
    self.assertTrue(p.address is None)
    self.assertEqual(p.name, 'Google')
  def testEmptyList(self):
    # A repeated property defaults to [] and an empty list survives a
    # serialization round-trip (still [], not None).
    class Person(model.Model):
      name = model.StringProperty(repeated=True)
    p = Person()
    self.assertEqual(p.name, [])
    pb = p.ToPb()
    q = Person()
    q.FromPb(pb)
    self.assertEqual(q.name, [], str(pb))
  def testEmptyListSerialized(self):
    # Same as testEmptyList but without reading the value before
    # serializing (so the default is never materialized first).
    class Person(model.Model):
      name = model.StringProperty(repeated=True)
    p = Person()
    pb = p.ToPb()
    q = Person()
    q.FromPb(pb)
    self.assertEqual(q.name, [], str(pb))
  def testDatetimeSerializing(self):
    # A GenericProperty holding a datetime must round-trip exactly
    # (including microseconds).
    class Person(model.Model):
      t = model.GenericProperty()
    p = Person(t=datetime.datetime.utcnow())
    pb = p.ToPb()
    q = Person()
    q.FromPb(pb)
    self.assertEqual(p.t, q.t)
  def testExpandoRead(self):
    # An Expando can deserialize an entity written by a regular Model,
    # picking up its properties dynamically.
    class Person(model.Model):
      name = model.StringProperty()
      city = model.StringProperty()
    p = Person(name='Guido', city='SF')
    pb = p.ToPb()
    q = model.Expando()
    q.FromPb(pb)
    self.assertEqual(q.name, 'Guido')
    self.assertEqual(q.city, 'SF')
  def testExpandoWrite(self):
    # An Expando with one dynamic property of each scalar type should
    # serialize to the same GOLDEN_PB as the equivalent declared Model.
    k = model.Key(flat=['Model', 42])
    p = model.Expando(key=k)
    p.k = k
    p.p = 42
    p.q = 'hello'
    p.u = TESTUSER
    p.d = 2.5
    p.b = True
    p.xy = AMSTERDAM
    pb = p.ToPb()
    self.assertEqual(str(pb), GOLDEN_PB)
def testExpandoRepr(self):
class Person(model.Expando):
name = model.StringProperty('Name')
city = model.StringProperty('City')
p = Person(name='Guido', zip='00000')
p.city= 'SF'
self.assertEqual(repr(p),
"Person(city='SF', name='Guido', zip='00000')")
# White box confirmation.
self.assertEqual(p._values,
{'City': 'SF', 'Name': 'Guido', 'zip': '00000'})
  def testExpandoNested(self):
    # An Expando assigned as an attribute of another Expando behaves as a
    # nested structured value and round-trips through the protobuf.
    p = model.Expando()
    nest = model.Expando()
    nest.foo = 42
    nest.bar = 'hello'
    p.nest = nest
    self.assertEqual(p.nest.foo, 42)
    self.assertEqual(p.nest.bar, 'hello')
    pb = p.ToPb()
    q = model.Expando()
    q.FromPb(pb)
    self.assertEqual(q.nest.foo, 42)
    self.assertEqual(q.nest.bar, 'hello')
  def testExpandoSubclass(self):
    # An Expando subclass mixes declared properties (name) with dynamic
    # ones (age).
    class Person(model.Expando):
      name = model.StringProperty()
    p = Person()
    p.name = 'Joe'
    p.age = 7
    self.assertEqual(p.name, 'Joe')
    self.assertEqual(p.age, 7)
  def testExpandoConstructor(self):
    # Dynamic properties may be passed straight to the Expando
    # constructor and survive a serialization round-trip.
    p = model.Expando(foo=42, bar='hello')
    self.assertEqual(p.foo, 42)
    self.assertEqual(p.bar, 'hello')
    pb = p.ToPb()
    q = model.Expando()
    q.FromPb(pb)
    self.assertEqual(q.foo, 42)
    self.assertEqual(q.bar, 'hello')
  def testExpandoNestedConstructor(self):
    # A nested Expando passed as a constructor keyword also round-trips.
    p = model.Expando(foo=42, bar=model.Expando(hello='hello'))
    self.assertEqual(p.foo, 42)
    self.assertEqual(p.bar.hello, 'hello')
    pb = p.ToPb()
    q = model.Expando()
    q.FromPb(pb)
    self.assertEqual(q.foo, 42)
    self.assertEqual(q.bar.hello, 'hello')
  def testComputedProperty(self):
    # ComputedProperty in its three spellings: lambda, decorator, and
    # explicit function with a renamed datastore name ('hashcode').
    # Computed values are written into the PB and recomputed on read.
    class ComputedTest(model.Model):
      name = model.StringProperty()
      name_lower = model.ComputedProperty(lambda self: self.name.lower())
      @model.ComputedProperty
      def size(self):
        return len(self.name)
      def _compute_hash(self):
        return hash(self.name)
      hash = model.ComputedProperty(_compute_hash, name='hashcode')
    m = ComputedTest(name='Foobar')
    pb = m.ToPb()
    # Confirm the computed value was actually serialized.
    for p in pb.property_list():
      if p.name() == 'name_lower':
        self.assertEqual(p.value().stringvalue(), 'foobar')
        break
    else:
      self.assert_(False, "name_lower not found in PB")
    m = ComputedTest()
    m.FromPb(pb)
    self.assertEqual(m.name, 'Foobar')
    self.assertEqual(m.name_lower, 'foobar')
    self.assertEqual(m.size, 6)
    self.assertEqual(m.hash, hash('Foobar'))
  def testLargeValues(self):
    # Values above the indexed-size limit still round-trip through
    # put()/get(), and keep their Python types (str for blob, unicode
    # for text).
    class Demo(model.Model):
      bytes = model.BlobProperty()
      text = model.TextProperty()
    x = Demo(bytes='x'*1000, text=u'a'*1000)
    key = x.put()
    y = key.get()
    self.assertEqual(x, y)
    self.assertTrue(isinstance(y.bytes, str))
    self.assertTrue(isinstance(y.text, unicode))
def testMultipleStructuredProperty(self):
class Address(model.Model):
label = model.StringProperty()
text = model.StringProperty()
class Person(model.Model):
name = model.StringProperty()
address = model.StructuredProperty(Address, repeated=True)
m = Person(name='Google',
address=[Address(label='work', text='San Francisco'),
Address(label='home', text='Mountain View')])
m.key = model.Key(flat=['Person', None])
self.assertEqual(m.address[0].label, 'work')
self.assertEqual(m.address[0].text, 'San Francisco')
self.assertEqual(m.address[1].label, 'home')
self.assertEqual(m.address[1].text, 'Mountain View')
[k] = self.conn.put([m])
m.key = k # Connection.put() doesn't do this!
[m2] = self.conn.get([k])
self.assertEqual(m2, m)
  def testIdAndParent(self):
    # The Model constructor accepts id= and parent= shortcuts instead of
    # an explicit key.
    # id
    m = model.Model(id='bar')
    self.assertEqual(m.put(), model.Key('Model', 'bar'))
    # id + parent
    p = model.Key('ParentModel', 'foo')
    m = model.Model(id='bar', parent=p)
    self.assertEqual(m.put(), model.Key('ParentModel', 'foo', 'Model', 'bar'))
  def testAllocateIds(self):
    # allocate_ids() returns an inclusive (start, end) range; a second
    # allocation (here scoped under a parent key) continues after the
    # first against the test datastore stub.
    class MyModel(model.Model):
      pass
    res = MyModel.allocate_ids(size=100)
    self.assertEqual(res, (1, 100))
    # with parent
    key = model.Key(flat=(MyModel.GetKind(), 1))
    res = MyModel.allocate_ids(size=200, parent=key)
    self.assertEqual(res, (101, 300))
  def testGetOrInsert(self):
    # get_or_insert() creates the entity under the given string id when
    # it does not exist yet.
    class MyModel(model.Model):
      text = model.StringProperty()
    key = model.Key(flat=(MyModel.GetKind(), 'baz'))
    self.assertEqual(key.get(), None)
    MyModel.get_or_insert('baz', text='baz')
    self.assertNotEqual(key.get(), None)
    self.assertEqual(key.get().text, 'baz')
  def testGetById(self):
    # get_by_id() lookup by integer id, string name, with and without a
    # parent key; missing entities return None and a non-Key parent
    # raises BadValueError.
    class MyModel(model.Model):
      pass
    kind = MyModel.GetKind()
    # key id
    ent1 = MyModel(key=model.Key(pairs=[(kind, 1)]))
    key = ent1.put()
    res = MyModel.get_by_id(1)
    self.assertEqual(res, ent1)
    # key name
    ent2 = MyModel(key=model.Key(pairs=[(kind, 'foo')]))
    key = ent2.put()
    res = MyModel.get_by_id('foo')
    self.assertEqual(res, ent2)
    # key id + parent
    ent3 = MyModel(key=model.Key(pairs=[(kind, 1), (kind, 2)]))
    key = ent3.put()
    res = MyModel.get_by_id(2, parent=model.Key(pairs=[(kind, 1)]))
    self.assertEqual(res, ent3)
    # key name + parent
    ent4 = MyModel(key=model.Key(pairs=[(kind, 1), (kind, 'bar')]))
    key = ent4.put()
    res = MyModel.get_by_id('bar', parent=ent1.key)
    self.assertEqual(res, ent4)
    # None
    res = MyModel.get_by_id('idontexist')
    self.assertEqual(res, None)
    # Invalid parent
    self.assertRaises(datastore_errors.BadValueError, MyModel.get_by_id,
                      'bar', parent=1)
def main():
  """Entry point: discover and run all tests in this module."""
  unittest.main()
if __name__ == '__main__':
  main()
| Python |
"""Tests for pep380.py."""
import unittest
from ndb import pep380
class PEP380Tests(unittest.TestCase):
  """Test cases to verify the equivalence of yielding a generator to PEP 380.
  E.g. in a pre-PEP-380 world, this:
    @gwrap
    def g1():
      x = yield g2(0, 10)
      y = yield g2(5, 20)
      yield (x, y)
    @gwrap  # Optional
    def g2(a, b):
      for i in range(a, b):
        yield i
      raise Return(b - a)
    def main():
      assert list(g1()) == range(0, 10) + range(5, 20) + [(10, 15)]
  should be equivalent to this in a PEP-380 world:
    def g1():
      x = yield from g2(0, 10)
      y = yield from g2(5, 20)
      yield x, y
    def g2(a, b):
      yield from range(a, b)  # Maybe?
      return b - a
    def main():
      assert list(g1()) == range(0, 10) + range(5, 20) + [(10, 15)]
  """

  def testBasics(self):
    # Yielding a gwrap-ped generator splices its values into the outer
    # stream; the value raised via Return() becomes the yield's result.
    @pep380.gwrap
    def g1(a, b, c, d):
      x = yield g2(a, b)
      y = yield g2(c, d)
      yield (x, y)
    @pep380.gwrap
    def g2(a, b):
      for i in range(a, b):
        yield i
      raise pep380.Return(b - a)
    actual = []
    for val in g1(0, 3, 5, 7):
      actual.append(val)
    # 0..2 from the first g2, 5..6 from the second, then the pair of
    # Return values (b - a for each call).
    expected = [0, 1, 2, 5, 6, (3, 2)]
    self.assertEqual(actual, expected)

  def testGClose(self):
    # gclose() closes a generator and returns the value it raised via
    # Return() in response to GeneratorExit.
    @pep380.gwrap
    def foo():
      total = 0
      try:
        while True:
          total += (yield)
      except GeneratorExit:
        raise pep380.Return(total)
    gen = foo()
    gen.next()  # Prime the generator so send() works (Python 2 protocol).
    gen.send(3)
    gen.send(2)
    val = pep380.gclose(gen)
    self.assertEqual(val, 5)
    gen = foo()
    gen.next()
    gen.send(3)
    val = pep380.gclose(gen)
    self.assertEqual(val, 3)

  def testGClose_Vanilla(self):
    # gclose() on a plain (non-gwrap) generator returns None, whether the
    # generator is unstarted, running, or already exhausted.
    def vanilla():
      yield 1
    v = vanilla()
    self.assertEqual(pep380.gclose(v), None)
    v = vanilla()
    v.next()
    self.assertEqual(pep380.gclose(v), None)
    v = vanilla()
    v.next()
    self.assertRaises(StopIteration, v.next)
    self.assertEqual(pep380.gclose(v), None)

  def testGClose_KeepTrying(self):
    # A generator that swallows GeneratorExit keeps running; gclose()
    # must keep closing until it finally returns its Return value.
    def hard_to_get():
      for i in range(5):
        try:
          yield i
        except GeneratorExit:
          continue
      raise pep380.Return(42)
    gen = hard_to_get()
    self.assertEqual(gen.next(), 0)
    self.assertEqual(gen.next(), 1)
    self.assertEqual(pep380.gclose(gen), 42)
def main():
  """Entry point: discover and run all tests in this module."""
  unittest.main()
if __name__ == '__main__':
  main()
| Python |
"""Some tests for datastore_rpc.py."""
import unittest
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
from google.appengine.datastore import entity_pb
from google.appengine.datastore import datastore_rpc
from ndb import key, model, test_utils
class PendingTests(test_utils.DatastoreTest):
  """Tests for the 'pending RPC' management."""

  def testBasicSetup1(self):
    # Sanity check: async_put returns an RPC whose result is the list of
    # assigned keys.
    # NOTE(review): the local name 'key' shadows the 'key' module imported
    # at file scope; harmless here but easy to trip over.
    ent = model.Expando()
    ent.foo = 'bar'
    rpc = self.conn.async_put(None, [ent])
    [key] = rpc.get_result()
    self.assertEqual(key, model.Key(flat=['Expando', 1]))

  def testBasicSetup2(self):
    # Sanity check: async_get of a nonexistent key yields [None].
    key = model.Key(flat=['Expando', 1])
    rpc = self.conn.async_get(None, [key])
    [ent] = rpc.get_result()
    self.assertTrue(ent is None)

  def SetUpCallHooks(self):
    # Register pre/post call hooks that record their arguments so tests
    # can assert on when the stub was actually invoked.
    self.pre_args = []
    self.post_args = []
    apiproxy_stub_map.apiproxy.GetPreCallHooks().Append('test1',
                                                        self.PreCallHook)
    apiproxy_stub_map.apiproxy.GetPostCallHooks().Append('test1',
                                                         self.PostCallHook)

  def PreCallHook(self, service, call, request, response, rpc=None):
    # Invoked by the apiproxy before each RPC is issued.
    self.pre_args.append((service, call, request, response, rpc))

  def PostCallHook(self, service, call, request, response,
                   rpc=None, error=None):
    # Invoked by the apiproxy after each RPC completes.
    self.post_args.append((service, call, request, response, rpc, error))

  def testCallHooks(self):
    # The pre hook fires when the RPC is issued; the post hook only after
    # get_result() waits for completion.
    self.SetUpCallHooks()
    key = model.Key(flat=['Expando', 1])
    rpc = self.conn.async_get(None, [key])
    self.assertEqual(len(self.pre_args), 1)
    self.assertEqual(self.post_args, [])
    [ent] = rpc.get_result()
    self.assertEqual(len(self.pre_args), 1)
    self.assertEqual(len(self.post_args), 1)
    self.assertEqual(self.pre_args[0][:2], ('datastore_v3', 'Get'))
    self.assertEqual(self.post_args[0][:2], ('datastore_v3', 'Get'))

  def testCallHooks_Pending(self):
    # wait_for_all_pending_rpcs() must drive the RPC to completion (post
    # hook fired) and drain the pending set.
    self.SetUpCallHooks()
    key = model.Key(flat=['Expando', 1])
    rpc = self.conn.async_get(None, [key])
    self.conn.wait_for_all_pending_rpcs()
    self.assertEqual(rpc.state, 2)  # FINISHING
    self.assertEqual(len(self.pre_args), 1)
    self.assertEqual(len(self.post_args), 1)  # NAILED IT!
    self.assertEqual(self.conn.get_pending_rpcs(), set())

  def NastyCallback(self, rpc):
    # Completion callback that deliberately starts a *new* RPC from
    # inside the callback (the result is intentionally unused); used to
    # stress wait_for_all_pending_rpcs below.
    [ent] = rpc.get_result()
    key = model.Key(flat=['Expando', 1])
    newrpc = self.conn.async_get(None, [key])

  def testCallHooks_Pending_CallbackAddsMore(self):
    # Even when a completion callback issues another RPC,
    # wait_for_all_pending_rpcs() must not return until the pending set
    # is empty.
    self.SetUpCallHooks()
    conf = datastore_rpc.Configuration(on_completion=self.NastyCallback)
    key = model.Key(flat=['Expando', 1])
    rpc = self.conn.async_get(conf, [key])
    self.conn.wait_for_all_pending_rpcs()
    self.assertEqual(self.conn.get_pending_rpcs(), set())
def main():
  """Entry point: discover and run all tests in this module."""
  unittest.main()
if __name__ == '__main__':
  main()
| Python |
"""The Key class, and associated utilities.
A Key encapsulates the following pieces of information, which together
uniquely designate a (possible) entity in the App Engine datastore:
- an application id (a string)
- a namespace (a string)
- a list of one or more (kind, id) pairs where kind is a string and id
is either a string or an integer.
The application id must always be part of the key, but since most
applications can only access their own entities, it defaults to the
current application id and you rarely need to worry about it. It must
not be empty.
The namespace designates a top-level partition of the key space for a
particular application. If you've never heard of namespaces, you can
safely ignore this feature.
Most of the action is in the (kind, id) pairs. A key must have at
least one (kind, id) pair. The last (kind, id) pair gives the kind
and the id of the entity that the key refers to, the others merely
specify a 'parent key'.
The kind is a string giving the name of the model class used to
represent the entity. (In more traditional databases this would be
the table name.) A model class is a Python class derived from
ndb.Model; see the documentation for ndb/model.py. Only the class
name itself is used as the kind. This means all your model classes
must be uniquely named within one application. You can override this
on a per-class basis.
The id is either a string or an integer. When the id is a string, the
application is in control of how it assigns ids: For example, if you
could use an email address as the id for Account entities.
To use integer ids, you must let the datastore choose a unique id for
an entity when it is first inserted into the datastore. You can set
the id to None to represent the key for an entity that hasn't yet been
inserted into the datastore. The final key (including the assigned
id) will be returned after the entity is successfully inserted into
the datastore.
A key for which the id of the last (kind, id) pair is set to None is
called an incomplete key. Such keys can only be used to insert
entities into the datastore.
A key with exactly one (kind, id) pair is called a toplevel key or a
root key. Toplevel keys are also used as entity groups, which play a
role in transaction management.
If there is more than one (kind, id) pair, all but the last pair
represent the 'ancestor path', also known as the key of the 'parent
entity'.
Other constraints:
- Kinds and string ids must not be empty and must be at most 500 bytes
long (after UTF-8 encoding, if given as Python unicode objects).
- Integer ids must be at least 1 and less than 2**63.
For more info about namespaces, see
http://code.google.com/appengine/docs/python/multitenancy/overview.html.
The namespace defaults to the 'default namespace' selected by the
namespace manager. To explicitly select the empty namespace pass
namespace=''.
"""
__author__ = 'guido@google.com (Guido van Rossum)'
# TODO: Change asserts to better exceptions.
import base64
import os
from google.appengine.api import datastore_errors
from google.appengine.api import namespace_manager
from google.appengine.datastore import datastore_rpc
from google.appengine.datastore import entity_pb
__all__ = ['Key']
class Key(object):
"""An immutable datastore key.
For flexibility and convenience, multiple constructor signatures are
supported.
The primary way to construct a key is using positional arguments:
- Key(kind1, id1, kind2, id2, ...).
This is shorthand for either of the following two longer forms:
- Key(pairs=[(kind1, id1), (kind2, id2), ...])
- Key(flat=[kind1, id1, kind2, id2, ...])
Either of the above constructor forms can additionally pass in another
key using parent=<key>. The (kind, id) pairs of the parent key are
inserted before the (kind, id) pairs passed explicitly.
You can also construct a Key from a 'url-safe' encoded string:
- Key(urlsafe=<string>)
For esoteric purposes the following constructors exist:
- Key(reference=<reference>) -- passing in a low-level Reference object
- Key(serialized=<string>) -- passing in a serialized low-level Reference
- Key(<dict>) -- for unpickling, the same as Key(**<dict>)
The 'url-safe' string is really a websafe-base64-encoded serialized
Reference, but it's best to think of it as just an opaque unique
string.
Additional constructor keyword arguments:
- app=<string> -- specify the application id
- namespace=<string> -- specify the namespace
If a Reference is passed (using one of reference, serialized or
urlsafe), the args and namespace keywords must match what is already
present in the Reference (after decoding if necessary). The parent
keyword cannot be combined with a Reference in any form.
Keys are immutable, which means that a Key object cannot be modified
once it has been created. This is enforced by the implementation as
well as Python allows.
For access to the contents of a key, the following methods and
operations are supported:
- repr(key), str(key) -- return a string representation resembling
the shortest constructor form, omitting the app and namespace
unless they differ from the default value.
- key1 == key2, key1 != key2 -- comparison for equality between Keys.
- hash(key) -- a hash value sufficient for storing Keys in a dict.
- key.pairs() -- a list of (kind, id) pairs.
- key.flat() -- a list of flattened kind and id values, i.e.
[kind1, id1, kind2, id2, ...].
- key.app() -- the application id.
- key.id() -- the string or integer id in the last (kind, id) pair,
or None if the key is incomplete.
- key.string_id() -- the string id in the last (kind, id) pair,
or None if the key has an integer id or is incomplete.
- key.integer_id() -- the integer id in the last (kind, id) pair,
or None if the key has a string id or is incomplete.
- key.namespace() -- the namespace.
- key.kind() -- a shortcut for key.pairs()[-1][0].
- key.parent() -- a Key constructed from all but the last (kind, id)
pairs.
- key.urlsafe() -- a websafe-base64-encoded serialized Reference.
- key.serialized() -- a serialized Reference.
- key.reference() -- a Reference object. Since Reference objects are
mutable, this returns a brand new Reference object.
- key._reference() -- the Reference object contained in the Key.
The caller promises not to mutate it.
- key._pairs() -- an iterator, equivalent to iter(key.pairs()).
- key._flat() -- an iterator, equivalent to iter(key.flat()).
Keys also support interaction with the datastore; these methods are
the only ones that engage in any kind of I/O activity. For Future
objects, see the document for ndb/tasklets.py.
- key.get() -- return the entity for the Key.
- key.get_async() -- return a Future whose eventual result is
the entity for the Key.
- key.delete() -- delete the entity for the Key.
- key.delete_async() -- asynchronously delete the entity for the Key.
Keys may be pickled.
Subclassing Key is best avoided; it would be hard to get right.
"""
__slots__ = ['__reference']
def __new__(cls, *_args, **kwargs):
"""Constructor. See the class docstring for arguments."""
if _args:
if len(_args) == 1 and isinstance(_args[0], dict):
# For pickling only: one positional argument is allowed,
# giving a dict specifying the keyword arguments.
assert not kwargs
kwargs = _args[0]
else:
assert 'flat' not in kwargs
kwargs['flat'] = _args
self = super(Key, cls).__new__(cls)
self.__reference = _ConstructReference(cls, **kwargs)
return self
def __repr__(self):
"""String representation, used by str() and repr().
We produce a short string that conveys all relevant information,
suppressing app and namespace when they are equal to the default.
"""
# TODO: Instead of "Key('Foo', 1)" perhaps return "Key(Foo, 1)" ?
args = []
for item in self._flat():
if not item:
args.append('None')
elif isinstance(item, basestring):
assert isinstance(item, str) # No unicode should make it here.
args.append(repr(item))
else:
args.append(str(item))
if self.app() != _DefaultAppId():
args.append('app=%r' % self.app())
if self.namespace() != _DefaultNamespace():
args.append('namespace=%r' % self.namespace())
return 'Key(%s)' % ', '.join(args)
__str__ = __repr__
def __hash__(self):
"""Hash value, for use in dict lookups."""
# This ignores app and namespace, which is fine since hash()
# doesn't need to return a unique value -- it only needs to ensure
# that the hashes of equal keys are equal, not the other way
# around.
return hash(tuple(self._pairs()))
def __eq__(self, other):
"""Equality comparison operation."""
if not isinstance(other, Key):
return NotImplemented
return (tuple(self._pairs()) == tuple(other._pairs()) and
self.app() == other.app() and
self.namespace() == other.namespace())
def __ne__(self, other):
"""The opposite of __eq__."""
if not isinstance(other, Key):
return NotImplemented
return not self.__eq__(other)
def __getstate__(self):
"""Private API used for pickling."""
return ({'pairs': tuple(self._pairs()),
'app': self.app(),
'namespace': self.namespace()},)
def __setstate__(self, state):
"""Private API used for pickling."""
assert len(state) == 1
kwargs = state[0]
assert isinstance(kwargs, dict)
self.__reference = _ConstructReference(self.__class__, **kwargs)
def __getnewargs__(self):
"""Private API used for pickling."""
return ({'pairs': tuple(self._pairs()),
'app': self.app(),
'namespace': self.namespace()},)
def parent(self):
"""Return a Key constructed from all but the last (kind, id) pairs.
If there is only one (kind, id) pair, return None.
"""
pairs = self.pairs()
if len(pairs) <= 1:
return None
return Key(pairs=pairs[:-1], app=self.app(), namespace=self.namespace())
def root(self):
"""Return the root key. This is either self or the highest parent."""
pairs = self.pairs()
if len(pairs) <= 1:
return self
return Key(pairs=pairs[:1], app=self.app(), namespace=self.namespace())
def namespace(self):
"""Return the namespace."""
return self.__reference.name_space()
def app(self):
"""Return the application id."""
return self.__reference.app()
def id(self):
"""Return the string or integer id in the last (kind, id) pair, if any.
Returns:
A string or integer id, or None if the key is incomplete.
"""
elem = self.__reference.path().element(-1)
return elem.name() or elem.id() or None
def string_id(self):
"""Return the string id in the last (kind, id) pair, if any.
Returns:
A string id, or None if the key has an integer id or is incomplete.
"""
elem = self.__reference.path().element(-1)
return elem.name() or None
def integer_id(self):
"""Return the integer id in the last (kind, id) pair, if any.
Returns:
An integer id, or None if the key has a string id or is incomplete.
"""
elem = self.__reference.path().element(-1)
return elem.id() or None
def pairs(self):
"""Return a list of (kind, id) pairs."""
return list(self._pairs())
def _pairs(self):
"""Iterator yielding (kind, id) pairs."""
for elem in self.__reference.path().element_list():
kind = elem.type()
if elem.has_id():
idorname = elem.id()
else:
idorname = elem.name()
if not idorname:
idorname = None
yield (kind, idorname)
def flat(self):
"""Return a list of alternating kind and id values."""
return list(self._flat())
def _flat(self):
"""Iterator yielding alternating kind and id values."""
for kind, idorname in self._pairs():
yield kind
yield idorname
def kind(self):
"""Return the kind of the entity referenced.
This is the kind from the last (kind, id) pair.
"""
return self.__reference.path().element(-1).type()
def reference(self):
"""Return a copy of the Reference object for this Key.
This is a entity_pb.Reference instance -- a protocol buffer class
used by the lower-level API to the datastore.
"""
return _ReferenceFromReference(self.__reference)
def _reference(self):
"""Return the Reference object for this Key.
This is a backdoor API for internal use only. The caller should
not mutate the return value.
"""
return self.__reference
def serialized(self):
"""Return a serialized Reference object for this Key."""
return self.__reference.Encode()
def urlsafe(self):
"""Return a url-safe string encoding this Key's Reference.
This string is compatible with other APIs and languages and with
the strings used to represent Keys in GQL and in the App Engine
Admin Console.
"""
# This is 3-4x faster than urlsafe_b64decode()
urlsafe = base64.b64encode(self.__reference.Encode())
return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')
# Datastore API using the default context.
# These use local import since otherwise they'd be recursive imports.
def get(self):
"""Synchronously get the entity for this Key.
Return None if there is no such entity.
"""
return self.get_async().get_result()
def get_async(self):
"""Return a Future whose result is the entity for this Key.
If no such entity exists, a Future is still returned, and the
Future's eventual return result be None.
"""
from ndb import tasklets
return tasklets.get_context().get(self)
def delete(self):
"""Synchronously delete the entity for this Key.
This is a no-op if no such entity exists.
"""
return self.delete_async().get_result()
def delete_async(self):
"""Schedule deletion of the entity for this Key.
This returns a Future, whose result becomes available once the
deletion is complete. If no such entity exists, a Future is still
returned. In all cases the Future's result is None (i.e. there is
no way to tell whether the entity existed or not).
"""
from ndb import tasklets
return tasklets.get_context().delete(self)
# The remaining functions in this module are private.
@datastore_rpc._positional(1)
def _ConstructReference(cls, pairs=None, flat=None,
                        reference=None, serialized=None, urlsafe=None,
                        app=None, namespace=None, parent=None):
  """Construct a Reference; the signature is the same as for Key.

  Exactly one of pairs, flat, reference, serialized and urlsafe must
  be given.  parent may only be combined with pairs or flat.  When app
  or namespace is given together with a parent or an existing
  reference, it must agree with the value already implied.
  """
  assert cls is Key
  # Exactly one construction source may be supplied.
  howmany = (bool(pairs) + bool(flat) +
             bool(reference) + bool(serialized) + bool(urlsafe))
  assert howmany == 1
  if flat or pairs:
    if flat:
      assert len(flat) % 2 == 0
      pairs = [(flat[i], flat[i+1]) for i in xrange(0, len(flat), 2)]
    assert pairs
    if parent is not None:
      if not isinstance(parent, Key):
        raise datastore_errors.BadValueError(
            'Expected Key instance, got %r' % parent)
      # Prepend the parent's pairs; inherit its app and namespace
      # (or check them for consistency when explicitly given).
      pairs[:0] = parent.pairs()
      if app:
        assert app == parent.app(), (app, parent.app())
      else:
        app = parent.app()
      if namespace is not None:
        assert namespace == parent.namespace(), (namespace,
                                                 parent.namespace())
      else:
        namespace = parent.namespace()
    reference = _ReferenceFromPairs(pairs, app=app, namespace=namespace)
  else:
    # You can't combine parent= with reference=, serialized= or urlsafe=.
    assert parent is None
    if urlsafe:
      serialized = _DecodeUrlSafe(urlsafe)
    if serialized:
      reference = _ReferenceFromSerialized(serialized)
    assert reference.path().element_size()
    # TODO: assert that each element has a type and either an id or a name
    if not serialized:
      # reference= was passed in directly: copy it, so the caller's
      # protobuf is never aliased by the new Key.
      reference = _ReferenceFromReference(reference)
    # You needn't specify app= or namespace= together with reference=,
    # serialized= or urlsafe=, but if you do, their values must match
    # what is already in the reference.
    if app is not None:
      assert app == reference.app(), (app, reference.app())
    if namespace is not None:
      assert namespace == reference.name_space(), (namespace,
                                                   reference.name_space())
  return reference
def _ReferenceFromPairs(pairs, reference=None, app=None, namespace=None):
  """Construct a Reference from a list of pairs.

  If a Reference is passed in as the second argument, it is modified
  in place.  The app and namespace are set from the corresponding
  keyword arguments, with the customary defaults.

  Raises:
    datastore_errors.BadArgumentError: if an incomplete pair (id None)
      is followed by further pairs.
  """
  if reference is None:
    reference = entity_pb.Reference()
  path = reference.mutable_path()
  last = False  # Set once an incomplete (id None) element was seen.
  for kind, idorname in pairs:
    if last:
      raise datastore_errors.BadArgumentError(
          'Incomplete Key entry must be last')
    if not isinstance(kind, basestring):
      if isinstance(kind, type):
        # A Model subclass may stand in for its kind string.
        # Late import to avoid cycles.
        from ndb.model import Model
        modelclass = kind
        assert issubclass(modelclass, Model), repr(modelclass)
        kind = modelclass.GetKind()
      assert isinstance(kind, basestring), (repr(modelclass), repr(kind))
    if isinstance(kind, unicode):
      kind = kind.encode('utf8')
    assert 1 <= len(kind) <= 500
    elem = path.add_element()
    elem.set_type(kind)
    if isinstance(idorname, (int, long)):
      assert 1 <= idorname < 2**63
      elem.set_id(idorname)
    elif isinstance(idorname, basestring):
      if isinstance(idorname, unicode):
        idorname = idorname.encode('utf8')
      assert 1 <= len(idorname) <= 500
      elem.set_name(idorname)
    elif idorname is None:
      # Incomplete key: id 0 means "to be assigned by the datastore".
      elem.set_id(0)
      last = True
    else:
      assert False, 'bad idorname (%r)' % (idorname,)
  # An empty app id means to use the default app id.
  if not app:
    app = _DefaultAppId()
  # Always set the app id, since it is mandatory.
  reference.set_app(app)
  # An empty namespace overrides the default namespace.
  if namespace is None:
    namespace = _DefaultNamespace()
  # Only set the namespace if it is not empty.
  if namespace:
    reference.set_name_space(namespace)
  return reference
def _ReferenceFromReference(reference):
  """Return a fresh copy of the given Reference protobuf."""
  clone = entity_pb.Reference()
  clone.CopyFrom(reference)
  return clone
def _ReferenceFromSerialized(serialized):
  """Build a Reference protobuf by parsing a serialized Reference."""
  assert isinstance(serialized, basestring)
  if isinstance(serialized, unicode):
    # The protobuf parser wants raw bytes.
    serialized = serialized.encode('utf8')
  return entity_pb.Reference(serialized)
def _DecodeUrlSafe(urlsafe):
  """Decode a url-safe base64-encoded string, returning the raw bytes."""
  assert isinstance(urlsafe, basestring)
  if isinstance(urlsafe, unicode):
    urlsafe = urlsafe.encode('utf8')
  # Re-add the stripped '=' padding before decoding.
  remainder = len(urlsafe) % 4
  if remainder:
    urlsafe += '=' * (4 - remainder)
  # b64decode + manual character translation is 3-4x faster than
  # urlsafe_b64decode().
  return base64.b64decode(urlsafe.replace('-', '+').replace('_', '/'))
def _DefaultAppId():
"""Return the default application id.
This is taken from the APPLICATION_ID environment variable.
"""
return os.getenv('APPLICATION_ID', '_')
def _DefaultNamespace():
  """Return the default namespace, as reported by the namespace manager."""
  return namespace_manager.get_namespace()
| Python |
"""Tests for key.py."""
import base64
import pickle
import unittest
from google.appengine.api import datastore_errors
from google.appengine.datastore import entity_pb
from ndb import key
class KeyTests(unittest.TestCase):
  """Unit tests for ndb.key.Key construction, accessors and pickling."""
  def testShort(self):
    """Positional (kind, id) arguments populate flat()."""
    k0 = key.Key('Kind', None)
    self.assertEqual(k0.flat(), ['Kind', None])
    k1 = key.Key('Kind', 1)
    self.assertEqual(k1.flat(), ['Kind', 1])
    k2 = key.Key('Parent', 42, 'Kind', 1)
    self.assertEqual(k2.flat(), ['Parent', 42, 'Kind', 1])
  def testFlat(self):
    """flat= keyword round-trips through pairs(), flat() and kind()."""
    flat = ['Kind', 1]
    pairs = [(flat[i], flat[i+1]) for i in xrange(0, len(flat), 2)]
    k = key.Key(flat=flat)
    self.assertEqual(k.pairs(), pairs)
    self.assertEqual(k.flat(), flat)
    self.assertEqual(k.kind(), 'Kind')
  def testFlatLong(self):
    """kind() reports the kind of the last pair."""
    flat = ['Kind', 1, 'Subkind', 'foobar']
    pairs = [(flat[i], flat[i+1]) for i in xrange(0, len(flat), 2)]
    k = key.Key(flat=flat)
    self.assertEqual(k.pairs(), pairs)
    self.assertEqual(k.flat(), flat)
    self.assertEqual(k.kind(), 'Subkind')
  def testSerialized(self):
    """serialized=, urlsafe= and reference= all build equivalent keys."""
    flat = ['Kind', 1, 'Subkind', 'foobar']
    r = entity_pb.Reference()
    r.set_app('_')
    e = r.mutable_path().add_element()
    e.set_type(flat[0])
    e.set_id(flat[1])
    e = r.mutable_path().add_element()
    e.set_type(flat[2])
    e.set_name(flat[3])
    serialized = r.Encode()
    urlsafe = base64.urlsafe_b64encode(r.Encode()).rstrip('=')
    k = key.Key(flat=flat)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(urlsafe=urlsafe)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(serialized=serialized)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(reference=r)
    # reference= must copy, not alias, the caller's protobuf.
    self.assertTrue(k._reference() is not r)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(reference=r, app=r.app(), namespace='')
    self.assertTrue(k._reference() is not r)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
  def testId(self):
    """id() returns the last pair's id, string or integer."""
    k1 = key.Key('Kind', 'foo', app='app1', namespace='ns1')
    self.assertEqual(k1.id(), 'foo')
    k2 = key.Key('Subkind', 42, parent=k1)
    self.assertEqual(k2.id(), 42)
    k3 = key.Key('Subkind', 'bar', parent=k2)
    self.assertEqual(k3.id(), 'bar')
    # incomplete key
    k4 = key.Key('Subkind', None, parent=k3)
    self.assertEqual(k4.id(), None)
  def testStringId(self):
    """string_id() is None for integer ids and incomplete keys."""
    k1 = key.Key('Kind', 'foo', app='app1', namespace='ns1')
    self.assertEqual(k1.string_id(), 'foo')
    k2 = key.Key('Subkind', 'bar', parent=k1)
    self.assertEqual(k2.string_id(), 'bar')
    k3 = key.Key('Subkind', 42, parent=k2)
    self.assertEqual(k3.string_id(), None)
    # incomplete key
    k4 = key.Key('Subkind', None, parent=k3)
    self.assertEqual(k4.string_id(), None)
  def testIntegerId(self):
    """integer_id() is None for string ids and incomplete keys."""
    k1 = key.Key('Kind', 42, app='app1', namespace='ns1')
    self.assertEqual(k1.integer_id(), 42)
    k2 = key.Key('Subkind', 43, parent=k1)
    self.assertEqual(k2.integer_id(), 43)
    k3 = key.Key('Subkind', 'foobar', parent=k2)
    self.assertEqual(k3.integer_id(), None)
    # incomplete key
    k4 = key.Key('Subkind', None, parent=k3)
    self.assertEqual(k4.integer_id(), None)
  def testParent(self):
    """parent= prepends the parent's pairs; parent() recovers it."""
    p = key.Key('Kind', 1, app='app1', namespace='ns1')
    self.assertEqual(p.parent(), None)
    k = key.Key('Subkind', 'foobar', parent=p)
    self.assertEqual(k.flat(), ['Kind', 1, 'Subkind', 'foobar'])
    self.assertEqual(k.parent(), p)
    k = key.Key('Subkind', 'foobar', parent=p,
                app=p.app(), namespace=p.namespace())
    self.assertEqual(k.flat(), ['Kind', 1, 'Subkind', 'foobar'])
    self.assertEqual(k.parent(), p)
  def testRoot(self):
    """root() is self for top-level keys, else the topmost ancestor."""
    p = key.Key('Kind', 1, app='app1', namespace='ns1')
    self.assertEqual(p.root(), p)
    k = key.Key('Subkind', 'foobar', parent=p)
    self.assertEqual(k.flat(), ['Kind', 1, 'Subkind', 'foobar'])
    self.assertEqual(k.root(), p)
    k2 = key.Key('Subsubkind', 42, parent=k,
                 app=p.app(), namespace=p.namespace())
    self.assertEqual(k2.flat(), ['Kind', 1,
                                 'Subkind', 'foobar',
                                 'Subsubkind', 42])
    self.assertEqual(k2.root(), p)
  def testRepr_Inferior(self):
    """repr() of a multi-level key; longs print like ints."""
    k = key.Key('Kind', 1L, 'Subkind', 'foobar')
    self.assertEqual(repr(k),
                     "Key('Kind', 1, 'Subkind', 'foobar')")
    self.assertEqual(repr(k), str(k))
  def testRepr_Toplevel(self):
    """repr() of a single-pair key."""
    k = key.Key('Kind', 1)
    self.assertEqual(repr(k), "Key('Kind', 1)")
  def testRepr_Incomplete(self):
    """repr() shows None for a missing id."""
    k = key.Key('Kind', None)
    self.assertEqual(repr(k), "Key('Kind', None)")
  def testRepr_UnicodeKind(self):
    """Unicode kinds are shown utf8-encoded."""
    k = key.Key(u'\u1234', 1)
    self.assertEqual(repr(k), "Key('\\xe1\\x88\\xb4', 1)")
  def testRepr_UnicodeId(self):
    """Unicode ids are shown utf8-encoded."""
    k = key.Key('Kind', u'\u1234')
    self.assertEqual(repr(k), "Key('Kind', '\\xe1\\x88\\xb4')")
  def testRepr_App(self):
    """A non-default app is included in repr()."""
    k = key.Key('Kind', 1, app='foo')
    self.assertEqual(repr(k), "Key('Kind', 1, app='foo')")
  def testRepr_Namespace(self):
    """A non-default namespace is included in repr()."""
    k = key.Key('Kind', 1, namespace='foo')
    self.assertEqual(repr(k), "Key('Kind', 1, namespace='foo')")
  def testUnicode(self):
    """Unicode kinds and ids are stored utf8-encoded."""
    flat_input = [u'Kind\u1234', 1, 'Subkind', u'foobar\u4321']
    flat = [flat_input[0].encode('utf8'), flat_input[1],
            flat_input[2], flat_input[3].encode('utf8')]
    pairs = [(flat[i], flat[i+1]) for i in xrange(0, len(flat), 2)]
    k = key.Key(flat=flat_input)
    self.assertEqual(k.pairs(), pairs)
    self.assertEqual(k.flat(), flat)
    # TODO: test these more thoroughly
    r = k.reference()
    serialized = k.serialized()
    urlsafe = k.urlsafe()
    key.Key(urlsafe=urlsafe.decode('utf8'))
    key.Key(serialized=serialized.decode('utf8'))
    key.Key(reference=r)
    # TODO: this may not make sense -- the protobuf utf8-encodes values
    r = entity_pb.Reference()
    r.set_app('_')
    e = r.mutable_path().add_element()
    e.set_type(flat_input[0])
    e.set_name(flat_input[3])
    k = key.Key(reference=r)
    self.assertEqual(k.reference(), r)
  def testHash(self):
    """hash() is derived from the (kind, id) pairs only."""
    flat = ['Kind', 1, 'Subkind', 'foobar']
    pairs = [(flat[i], flat[i+1]) for i in xrange(0, len(flat), 2)]
    k = key.Key(flat=flat)
    self.assertEqual(hash(k), hash(tuple(pairs)))
  def testPickling(self):
    """Keys survive a pickle round-trip at every protocol."""
    flat = ['Kind', 1, 'Subkind', 'foobar']
    pairs = [(flat[i], flat[i+1]) for i in xrange(0, len(flat), 2)]
    k = key.Key(flat=flat)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
      s = pickle.dumps(k, protocol=proto)
      kk = pickle.loads(s)
      self.assertEqual(k, kk)
  def testIncomplete(self):
    """An incomplete pair is only allowed in the last position."""
    k = key.Key(flat=['Kind', None])
    self.assertRaises(datastore_errors.BadArgumentError,
                      key.Key, flat=['Kind', None, 'Subkind', 1])
    self.assertRaises(AssertionError, key.Key, flat=['Kind', ()])
  def testKindFromModel(self):
    """A Model subclass may stand in for its kind string."""
    from ndb import model
    class M(model.Model):
      pass
    class N(model.Model):
      @classmethod
      def GetKind(cls):
        return 'NN'
    k = key.Key(M, 1)
    self.assertEqual(k, key.Key('M', 1))
    k = key.Key('X', 1, N, 2, 'Y', 3)
    self.assertEqual(k, key.Key('X', 1, 'NN', 2, 'Y', 3))
def main():
  """Run the test suite."""
  unittest.main()
# Allow running this module directly as a script.
if __name__ == '__main__':
  main()
| Python |
# This file intentionally left blank.
| Python |
"""Model and Property classes and associated stuff.
A model class represents the structure of entities stored in the
datastore. Applications define model classes to indicate the
structure of their entities, then instantiate those model classes
to create entities.
All model classes must inherit (directly or indirectly) from Model.
Through the magic of metaclasses, straightforward assignments in the
model class definition can be used to declare the model's structure:
class Person(Model):
name = StringProperty()
age = IntegerProperty()
We can now create a Person entity and write it to the datastore:
p = Person(name='Arthur Dent', age=42)
k = p.put()
The return value from put() is a Key (see the documentation for
ndb/key.py), which can be used to retrieve the same entity later:
p2 = k.get()
assert p2 == p
To update an entity, simply change its attributes and write it back
(note that this doesn't change the key):
p2.name = 'Arthur Philip Dent'
p2.put()
We can also delete an entity (by using the key):
k.delete()
The property definitions in the class body tell the system the names
and the types of the fields to be stored in the datastore, whether
they must be indexed, their default value, and more.
Many different Property types exist, including StringProperty (short
strings), IntegerProperty (64-bit signed integers), FloatProperty
(double precision floating point numbers), BooleanProperty (bool
values), and DateTimeProperty (a datetime object -- note that App
Engine always uses UTC for a timezone). Some more specialized
properties also exist: TextProperty represents a longer string that is
not indexed (StringProperty is limited to 500 bytes); BlobProperty
represents an uninterpreted, unindexed byte string; KeyProperty
represents a datastore Key; DateProperty and TimeProperty represent
dates and times separately (although usually DateTimeProperty is more
convenient); GeoPtProperty represents a geographical point (i.e., a
(latitude, longitude) pair); and UserProperty represents a User object
(for backwards compatibility with existing datastore schemas only: we
do not recommend storing User objects directly in the datastore, but
recommend instead storing the user.user_id() value).
Finally, StructuredProperty represents a field that is itself
structured like an entity -- more about these later. And
LocalStructuredProperty is similar at the Python level, but unindexed,
and its on-disk representation is an unencoded blob.
Most Property classes have similar constructor signatures. They
accept several optional keyword arguments: name=<string> to change the
name used to store the property value in the datastore,
indexed=<boolean> to indicate whether the property should be indexed
(allowing queries on this property's value), and repeated=<boolean> to
indicate that this property can have multiple values in the same
entity. Repeated properties are always represented using Python
lists; if there is only one value, the list has only one element.
The StructuredProperty is different; it lets you define a
sub-structure for your entities. The substructure itself is defined
using a model class, and the attribute value is an instance of that
model class. However it is not stored in the datastore as a separate
entity; instead, its attribute values are included in the parent
entity using a naming convention (the name of the structured attribute
followed by a dot followed by the name of the subattribute). For
example:
class Address(Model):
street = StringProperty()
city = StringProperty()
class Person(Model):
name = StringProperty()
addr = StructuredProperty(Address)
p = Person(name='Harry Potter',
           addr=Address(street='4 Privet Drive', city='Little Whinging'))
k = p.put()
This would write a single 'Person' entity with three attributes (as
you could verify using the Datastore Viewer in the Admin Console):
name = 'Harry Potter'
addr.street = '4 Privet Drive'
addr.city = 'Little Whinging'
Structured property types can be nested and have the repeated flag
set, but in a hierarchy of nested structured property types, only one
level can be repeated. It is fine to have multiple structured
properties referencing the same model class.
It is also fine to use the same model class both as a top-level entity
class and for a structured property; however queries for the model
class will only return the top-level entities.
TODO: Document Expando.
TODO: Document Query support.
"""
__author__ = 'guido@google.com (Guido van Rossum)'
# TODO: docstrings.
# TODO: Change asserts to better exceptions.
# TODO: rename CapWords methods of Model to _underscore_names.
# TODO: add _underscore aliases to lowercase_names Model methods.
# TODO: reject unknown property names in assignment (for Model) (?)
# TODO: default, validator, choices arguments to Property.__init__().
# TODO: BlobKeyProperty.
# TODO: Possibly the (rarely used) tagged values:
# Category, Link, Email, IM, PhoneNumber, PostalAddress, Rating.
import copy
import datetime
import logging
import zlib
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import users
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.datastore import entity_pb
import ndb.key
# NOTE: Don't import ndb.query here; it would cause circular import
# problems. It is imported dynamically as needed.
Key = ndb.key.Key # For export.
# Property and Error classes are added later.
__all__ = ['Key', 'ModelAdapter', 'MetaModel', 'Model', 'Expando']
class KindError(datastore_errors.BadValueError):
  """Raised when an implementation for a kind can't be found.

  Also raised by Model._setkey() when a Key's kind doesn't match the
  model class it is assigned to.
  """
class ComputedPropertyError(datastore_errors.Error):
  """Raised when attempting to assign a value to a computed property."""
class ModelAdapter(datastore_rpc.AbstractAdapter):
  """Conversions between 'our' Key and Model classes and protobufs.

  This is needed to construct a Connection object, which in turn is
  needed to construct a Context object.

  See the base class docstring for more info about the signatures.
  """
  def __init__(self, default_model=None):
    """Constructor.

    Args:
      default_model: If an implementation for the kind cannot be found, use this
        model class. If none is specified, an exception will be thrown
        (default).
    """
    self.default_model = default_model
  def pb_to_key(self, pb):
    """Turn an entity_pb.Reference into a Key."""
    return Key(reference=pb)
  def key_to_pb(self, key):
    """Turn a Key into an entity_pb.Reference (a copy)."""
    return key.reference()
  def pb_to_entity(self, pb):
    """Turn an entity_pb.EntityProto into a Model instance.

    The model class is looked up in Model._kind_map by the kind found
    in the protobuf's key, falling back to self.default_model.

    Raises:
      KindError: if neither lookup yields a model class.
    """
    kind = None
    if pb.has_key():
      # TODO: Fix the inefficiency here: we extract the key just so we
      # can get the kind just so we can find the intended model class,
      # but the key is extracted again and stored in the entity by FromPb().
      key = Key(reference=pb.key())
      kind = key.kind()
    modelclass = Model._kind_map.get(kind, self.default_model)
    if modelclass is None:
      raise KindError("No implementation found for kind '%s'" % kind)
    ent = modelclass()
    ent.FromPb(pb)
    return ent
  def entity_to_pb(self, ent):
    """Turn a Model instance into an entity_pb.EntityProto."""
    pb = ent.ToPb()
    return pb
def make_connection(config=None, default_model=None):
  """Create a new Connection object with the right adapter.

  Optionally you can pass in a datastore_rpc.Configuration object.
  """
  adapter = ModelAdapter(default_model)
  return datastore_rpc.Connection(adapter=adapter, config=config)
class MetaModel(type):
  """Metaclass for Model.

  This exists to fix up the properties -- they need to know their name.
  This is accomplished by calling the class's FixUpProperties() method.
  """
  def __init__(cls, name, bases, classdict):
    """Finish class creation by letting properties learn their names."""
    super(MetaModel, cls).__init__(name, bases, classdict)
    cls.FixUpProperties()
class Model(object):
"""A class describing datastore entities.
Model instances are usually called entities. All model classes
inheriting from Model automatically have MetaModel as their
metaclass, so that the properties are fixed up properly after the
class once the class is defined.
Because of this, you cannot use the same Property object to describe
multiple properties -- you must create separate Property objects for
each property. E.g. this does not work:
wrong_prop = StringProperty()
class Wrong(Model):
wrong1 = wrong_prop
wrong2 = wrong_prop
The kind is normally equal to the class name (exclusive of the
module name or any other parent scope). To override the kind,
define a class method named GetKind(), as follows:
class MyModel(Model):
@classmethod
def GetKind(cls):
return 'AnotherKind'
"""
__metaclass__ = MetaModel
# TODO: Prevent accidental attribute assignments
# Class variables updated by FixUpProperties()
_properties = None
_has_repeated = False
_kind_map = {} # Dict mapping {kind: Model subclass}
# Defaults for instance variables.
_key = None
_values = None
# TODO: Make _ versions of all methods, and make non-_ versions
# simple aliases. That way the _ version is still accessible even if
# the non-_ version has been obscured by a property.
# TODO: Distinguish between purposes: to call FromPb() or setvalue() etc.
@datastore_rpc._positional(1)
def __init__(self, key=None, id=None, parent=None, **kwds):
"""Creates a new instance of this model.
Args:
key: Key instance for this model. If key is used, id and parent must
be None.
id: Key id for this model. If id is used, key must be None.
parent: Key instance for the parent model or None for a top-level one.
If parent is used, key must be None.
**kwds: Keyword arguments mapping to properties of this model.
"""
if key is not None:
if id is not None:
raise datastore_errors.BadArgumentError(
'Model constructor accepts key or id, not both.')
if parent is not None:
raise datastore_errors.BadArgumentError(
'Model constructor accepts key or parent, not both.')
# Using _setkey() here to trigger the basic Key checks.
# self.key = key doesn't work because of Expando's __setattr__().
self._setkey(key)
elif id is not None or parent is not None:
# When parent is set but id is not, we have an incomplete key.
# Key construction will fail with invalid ids or parents, so no check
# is needed.
# TODO: should this be restricted to string ids?
self._key = Key(self.GetKind(), id, parent=parent)
self._values = {}
self.SetAttributes(kwds)
def SetAttributes(self, kwds):
cls = self.__class__
for name, value in kwds.iteritems():
prop = getattr(cls, name) # Raises AttributeError for unknown properties.
assert isinstance(prop, Property)
prop.SetValue(self, value)
def __repr__(self):
args = []
done = set()
for prop in self._properties.itervalues():
if prop.HasValue(self):
args.append('%s=%r' % (prop._code_name, prop.RetrieveValue(self)))
done.add(prop._name)
args.sort()
if self._key is not None:
args.insert(0, 'key=%r' % self._key)
s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
return s
# TODO: Make kind a property also?
@classmethod
def GetKind(cls):
return cls.__name__
@classmethod
def GetKindMap(cls):
return cls._kind_map
def has_complete_key(self):
"""Return whether this model has a complete key."""
return self._key is not None and self._key.id() is not None
def _getkey(self):
return self._key
def _setkey(self, key):
if key is not None:
if not isinstance(key, Key):
raise datastore_errors.BadValueError(
'Expected Key instance, got %r' % key)
if self.__class__ not in (Model, Expando):
if key.kind() != self.GetKind():
raise KindError('Expected Key kind to be %s; received %s' %
(self.GetKind(), key.kind()))
self._key = key
def _delkey(self):
self._key = None
key = property(_getkey, _setkey, _delkey)
def __hash__(self):
raise TypeError('Model is not immutable')
def __eq__(self, other):
if other.__class__ is not self.__class__:
return NotImplemented
# It's okay to use private names -- we're the same class
if self._key != other._key:
# TODO: If one key is None and the other is an explicit
# incomplete key of the simplest form, this should be OK.
return False
# TODO: Turn the rest of this into an Equivalent() method.
# Ignore differences in values that are None.
self_values = [(name, value)
for name, value in self._values.iteritems()
if value is not None]
self_values.sort()
other_values = [(name, value)
for name, value in other._values.iteritems()
if value is not None]
other_values.sort()
return self_values == other_values
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
return not eq
# TODO: Refactor ToPb() so pb is an argument?
def ToPb(self):
pb = entity_pb.EntityProto()
# TODO: Move the key stuff into ModelAdapter.entity_to_pb()?
key = self._key
if key is None:
pairs = [(self.GetKind(), None)]
ref = ndb.key._ReferenceFromPairs(pairs, reference=pb.mutable_key())
else:
ref = key._reference() # Don't copy
pb.mutable_key().CopyFrom(ref)
group = pb.mutable_entity_group()
elem = ref.path().element(0)
if elem.id() or elem.name():
group.add_element().CopyFrom(elem)
for name, prop in sorted(self._properties.iteritems()):
prop.Serialize(self, pb)
return pb
# TODO: Make this a class method?
def FromPb(self, pb):
assert not self._key
assert not self._values
assert isinstance(pb, entity_pb.EntityProto)
# TODO: Move the key stuff into ModelAdapter.pb_to_entity()?
if pb.has_key():
self._key = Key(reference=pb.key())
indexed_properties = pb.property_list()
unindexed_properties = pb.raw_property_list()
for plist in [indexed_properties, unindexed_properties]:
for p in plist:
prop = self.GetPropertyFor(p, plist is indexed_properties)
prop.Deserialize(self, p)
def GetPropertyFor(self, p, indexed=True, depth=0):
name = p.name()
parts = name.split('.')
assert len(parts) > depth, (p.name(), parts, depth)
next = parts[depth]
prop = self._properties.get(next)
if prop is None:
prop = self.FakeProperty(p, next, indexed)
return prop
def CloneProperties(self):
cls = self.__class__
if self._properties is cls._properties:
self._properties = dict(cls._properties)
def FakeProperty(self, p, next, indexed=True):
self.CloneProperties()
if p.name() != next and not p.name().endswith('.' + next):
prop = StructuredProperty(Expando, next)
self._values[prop._name] = Expando()
else:
prop = GenericProperty(next,
repeated=p.multiple(),
indexed=indexed)
self._properties[prop._name] = prop
return prop
@classmethod
def FixUpProperties(cls):
# NOTE: This is called by MetaModel, but may also be called manually
# after dynamically updating a model class.
cls._properties = {} # Map of {name: Property}
if cls.__module__ == __name__: # Skip the classes in *this* file.
return
for name in set(dir(cls)):
prop = getattr(cls, name, None)
if isinstance(prop, Property):
assert not name.startswith('_')
# TODO: Tell prop the class, for error message.
prop.FixUp(name)
if prop._repeated:
cls._has_repeated = True
cls._properties[prop._name] = prop
cls._kind_map[cls.GetKind()] = cls
@classmethod
def ResetKindMap(cls):
cls._kind_map.clear()
@classmethod
def query(cls, *args, **kwds):
from ndb.query import Query # Import late to avoid circular imports.
qry = Query(kind=cls.GetKind(), **kwds)
if args:
qry = qry.filter(*args)
return qry
# Datastore API using the default context.
# These use local import since otherwise they'd be recursive imports.
def put(self):
return self.put_async().get_result()
def put_async(self):
from ndb import tasklets
return tasklets.get_context().put(self)
@classmethod
def get_or_insert(cls, name, parent=None, **kwds):
"""Transactionally retrieves an existing entity or creates a new one.
Args:
name: Key name to retrieve or create.
parent: Parent entity key, if any.
**kwds: Keyword arguments to pass to the constructor of the model class
if an instance for the specified key name does not already exist. If
an instance with the supplied key_name and parent already exists,
these arguments will be discarded.
Returns:
Existing instance of Model class with the specified key name and parent
or a new one that has just been created.
"""
return cls.get_or_insert_async(name=name, parent=parent,
**kwds).get_result()
  @classmethod
  def get_or_insert_async(cls, name, parent=None, **kwds):
    """Transactionally retrieves an existing entity or creates a new one.

    This is the asynchronous version of Model.get_or_insert(); it returns
    the async result object from Context.get_or_insert().
    """
    from ndb import tasklets
    ctx = tasklets.get_context()
    return ctx.get_or_insert(cls, name=name, parent=parent, **kwds)
  @classmethod
  def allocate_ids(cls, size=None, max=None, parent=None):
    """Allocates a range of key IDs for this model class.

    Args:
      size: Number of IDs to allocate.  Either size or max can be specified,
        not both.
      max: Maximum ID to allocate.  Either size or max can be specified,
        not both.
      parent: Parent key for which the IDs will be allocated.

    Returns:
      A tuple with (start, end) for the allocated range, inclusive.
    """
    return cls.allocate_ids_async(size=size, max=max,
                                  parent=parent).get_result()
  @classmethod
  def allocate_ids_async(cls, size=None, max=None, parent=None):
    """Allocates a range of key IDs for this model class.

    This is the asynchronous version of Model.allocate_ids().
    """
    from ndb import tasklets
    # An incomplete key (id=None) identifies the kind/parent for which
    # IDs are requested.
    key = Key(cls.GetKind(), None, parent=parent)
    return tasklets.get_context().allocate_ids(key, size=size, max=max)
  @classmethod
  def get_by_id(cls, id, parent=None):
    """Returns an instance of this model class by ID, blocking until done.

    Args:
      id: A string or integer key ID.
      parent: Parent key of the model to get.

    Returns:
      A model instance or None if not found.
    """
    return cls.get_by_id_async(id, parent=parent).get_result()
  @classmethod
  def get_by_id_async(cls, id, parent=None):
    """Returns an instance of this model class by ID.

    This is the asynchronous version of Model.get_by_id().
    """
    from ndb import tasklets
    key = Key(cls.GetKind(), id, parent=parent)
    return tasklets.get_context().get(key)
class Property(object):
  """Base descriptor class for a typed, named entity attribute.

  A Property knows how to validate values, store and retrieve them on a
  Model instance (in entity._values), serialize them to and from the
  datastore protocol buffers, and build query filter nodes via the
  comparison operators (==, !=, <, <=, >, >=, IN).
  """
  # TODO: Separate 'simple' properties from base Property class

  _code_name = None   # Python attribute name; set by FixUp().
  _name = None        # Datastore property name; defaults to _code_name.
  _indexed = True
  _repeated = False

  _attributes = ['_name', '_indexed', '_repeated']  # Shown by __repr__().
  _positional = 1  # Number of constructor args that may be positional.

  @datastore_rpc._positional(1 + _positional)
  def __init__(self, name=None, indexed=None, repeated=None):
    if name is not None:
      assert '.' not in name  # The '.' is used elsewhere.
      self._name = name
    if indexed is not None:
      self._indexed = indexed
    if repeated is not None:
      self._repeated = repeated

  def __repr__(self):
    """Reconstruct a constructor-like representation.

    Only attributes that differ from the class-level defaults are shown;
    attributes past _positional are rendered as keyword arguments.
    """
    args = []
    cls = self.__class__
    for i, attr in enumerate(self._attributes):
      val = getattr(self, attr)
      if val is not getattr(cls, attr):
        if isinstance(val, type):
          s = val.__name__
        else:
          s = repr(val)
        if i >= cls._positional:
          if attr.startswith('_'):
            attr = attr[1:]
          s = '%s=%s' % (attr, s)
        args.append(s)
    s = '%s(%s)' % (self.__class__.__name__, ', '.join(args))
    return s

  def _comparison(self, op, value):
    """Build a FilterNode comparing this property against value."""
    from ndb.query import FilterNode  # Import late to avoid circular imports.
    if value is not None:
      # TODO: Allow query.Binding instances?
      value = self.Validate(value)
    return FilterNode(self._name, op, value)

  def __eq__(self, value):
    return self._comparison('=', value)

  def __ne__(self, value):
    return self._comparison('!=', value)

  def __lt__(self, value):
    return self._comparison('<', value)

  def __le__(self, value):
    return self._comparison('<=', value)

  def __gt__(self, value):
    return self._comparison('>', value)

  def __ge__(self, value):
    return self._comparison('>=', value)

  def IN(self, value):
    """Build an 'in' filter from a list or tuple of candidate values."""
    from ndb.query import FilterNode  # Import late to avoid circular imports.
    if not isinstance(value, (list, tuple)):
      raise datastore_errors.BadValueError('Expected list or tuple, got %r' %
                                           (value,))
    values = []
    for val in value:
      if val is not None:
        # Bug fix: this previously read 'val is self.Validate(val)', an
        # identity test whose result was discarded, so Validate()'s
        # normalization (e.g. IntegerProperty converting to int) was lost.
        val = self.Validate(val)
      values.append(val)
    return FilterNode(self._name, 'in', values)

  def __neg__(self):
    """Return a descending PropertyOrder on this property."""
    return datastore_query.PropertyOrder(
        self._name, datastore_query.PropertyOrder.DESCENDING)

  def __pos__(self):
    # So you can write q.order(-cls.age, +cls.name).
    return datastore_query.PropertyOrder(self._name)

  # TODO: Rename these methods to start with _.

  def Validate(self, value):
    """Validate (and possibly normalize) a value; subclasses override.

    Returns the value (or a normalized form of it), or raises
    datastore_errors.BadValueError.
    """
    # Return value, or default if it is None, or raise BadValueError.
    return value

  def FixUp(self, code_name):
    """Record the Python attribute name; default the datastore name to it."""
    self._code_name = code_name
    if self._name is None:
      self._name = code_name

  def StoreValue(self, entity, value):
    entity._values[self._name] = value

  def SetValue(self, entity, value):
    """Validate value (item-wise if repeated) and store it on entity."""
    if self._repeated:
      if not isinstance(value, (list, tuple)):
        raise datastore_errors.BadValueError('Expected list or tuple, got %r' %
                                             (value,))
      values = []
      for val in value:
        if val is not None:
          val = self.Validate(val)
        values.append(val)
      # Bug fix: previously the original (possibly tuple, unnormalized)
      # value was stored while the validated 'values' list was discarded,
      # inconsistent with the non-repeated branch below which stores
      # Validate()'s return value.
      value = values
    else:
      if value is not None:
        value = self.Validate(value)
    self.StoreValue(entity, value)

  def HasValue(self, entity):
    return self._name in entity._values

  def RetrieveValue(self, entity):
    return entity._values.get(self._name)

  def GetValue(self, entity):
    """Return the stored value; lazily materialize [] for repeated props."""
    value = self.RetrieveValue(entity)
    if value is None and self._repeated:
      value = []
      self.StoreValue(entity, value)
    return value

  def __get__(self, obj, cls=None):
    if obj is None:
      return self  # __get__ called on class
    return self.GetValue(obj)

  def __set__(self, obj, value):
    self.SetValue(obj, value)

  # TODO: __delete__

  def Serialize(self, entity, pb, prefix='', parent_repeated=False):
    """Serialize this property's value(s) from entity into pb.

    entity -> pb; pb is an EntityProto message.  None values produce a
    Property message with an empty value; repeated-with-no-value emits
    nothing.
    """
    value = self.RetrieveValue(entity)
    if value is None and self._repeated:
      value = []
    elif not isinstance(value, list):
      value = [value]
    for val in value:
      if self._indexed:
        p = pb.add_property()
      else:
        p = pb.add_raw_property()
      p.set_name(prefix + self._name)
      p.set_multiple(self._repeated or parent_repeated)
      v = p.mutable_value()
      if val is not None:
        self.DbSetValue(v, p, val)

  def Deserialize(self, entity, p, depth=1):
    """Merge one Property message p into entity.

    entity <- p; p is a Property message.  In this class, depth is unused.
    Repeated values accumulate into a list; for non-repeated properties a
    second value upgrades the stored value to a list (or a null is ignored).
    """
    v = p.value()
    val = self.DbGetValue(v, p)
    if self._repeated:
      if self.HasValue(entity):
        value = self.RetrieveValue(entity)
        if not isinstance(value, list):
          value = [value]
        value.append(val)
      else:
        value = [val]
    else:
      if not self.HasValue(entity):
        value = val
      else:
        oldval = self.RetrieveValue(entity)
        # Maybe upgrade to a list property.  Or ignore null.
        if val is None:
          value = oldval
        elif oldval is None:
          value = val
        elif isinstance(oldval, list):
          oldval.append(val)
          value = oldval
        else:
          value = [oldval, val]
    try:
      self.StoreValue(entity, value)
    except ComputedPropertyError:
      # ComputedProperty values are derived, not stored; ignore.
      # (Also drops the Python-2-only 'except X, e' syntax; the binding
      # was unused.)
      pass
class BooleanProperty(Property):
  """A Property whose value is a Python bool."""

  def Validate(self, value):
    if not isinstance(value, bool):
      raise datastore_errors.BadValueError('Expected bool, got %r' %
                                           (value,))
    return value

  def DbSetValue(self, v, p, value):
    # v is a PropertyValue message; p is the enclosing Property message.
    assert isinstance(value, bool), (self._name)
    v.set_booleanvalue(value)

  def DbGetValue(self, v, p):
    if not v.has_booleanvalue():
      return None
    # The booleanvalue field is an int32, so booleanvalue() returns an
    # int, hence the conversion.
    return bool(v.booleanvalue())
class IntegerProperty(Property):
  """A Property whose value is an integer (stored as int64)."""

  def Validate(self, value):
    # Note: bool is a subclass of int, so True/False pass this check and
    # are normalized to 1/0 by the int() conversion below.
    if not isinstance(value, (int, long)):
      raise datastore_errors.BadValueError('Expected integer, got %r' %
                                           (value,))
    return int(value)

  def DbSetValue(self, v, p, value):
    assert isinstance(value, (bool, int, long)), (self._name)
    v.set_int64value(value)

  def DbGetValue(self, v, p):
    if not v.has_int64value():
      return None
    return int(v.int64value())
class FloatProperty(Property):
  """A Property whose value is a float (ints are accepted and converted)."""

  def Validate(self, value):
    if not isinstance(value, (int, long, float)):
      raise datastore_errors.BadValueError('Expected float, got %r' %
                                           (value,))
    return float(value)

  def DbSetValue(self, v, p, value):
    assert isinstance(value, (bool, int, long, float)), (self._name)
    v.set_doublevalue(float(value))

  def DbGetValue(self, v, p):
    if not v.has_doublevalue():
      return None
    return v.doublevalue()
class StringProperty(Property):
  """A Property whose value is a text string (str or unicode)."""
  # TODO: Enforce size limit when indexed.

  def Validate(self, value):
    if not isinstance(value, basestring):
      raise datastore_errors.BadValueError('Expected string, got %r' %
                                           (value,))
    # TODO: Always convert to Unicode? But what if it's unconvertible?
    return value

  def DbSetValue(self, v, p, value):
    # Stored as UTF-8 bytes; unindexed strings are tagged with the TEXT
    # meaning so they round-trip as text.
    assert isinstance(value, basestring)
    if isinstance(value, unicode):
      value = value.encode('utf-8')
    v.set_stringvalue(value)
    if not self._indexed:
      p.set_meaning(entity_pb.Property.TEXT)

  def DbGetValue(self, v, p):
    if not v.has_stringvalue():
      return None
    raw = v.stringvalue()
    # Return a plain str for pure-ASCII values, unicode for valid UTF-8,
    # and the raw bytes if neither decoding succeeds.
    try:
      raw.decode('ascii')
      return raw # Don't bother with Unicode in this case
    except UnicodeDecodeError:
      try:
        value = raw.decode('utf-8')
        return value
      except UnicodeDecodeError:
        return raw
class TextProperty(StringProperty):
  """An unindexed string property (arbitrary-length text)."""
  # TODO: Maybe just use StringProperty(indexed=False)?
  _indexed = False

  def __init__(self, *args, **kwds):
    # Text is never indexed; reject explicit indexed=True.
    assert not kwds.get('indexed', False)
    super(TextProperty, self).__init__(*args, **kwds)
class BlobProperty(Property):
  """A Property whose value is an uninterpreted 8-bit byte string."""
  # TODO: Enforce size limit when indexed.
  _indexed = False

  def Validate(self, value):
    if not isinstance(value, str):
      raise datastore_errors.BadValueError('Expected 8-bit string, got %r' %
                                           (value,))
    return value

  def DbSetValue(self, v, p, value):
    assert isinstance(value, str)
    v.set_stringvalue(value)
    # Indexed blobs are tagged BYTESTRING, unindexed ones BLOB.
    if self._indexed:
      p.set_meaning(entity_pb.Property.BYTESTRING)
    else:
      p.set_meaning(entity_pb.Property.BLOB)

  def DbGetValue(self, v, p):
    if not v.has_stringvalue():
      return None
    return v.stringvalue()
class GeoPt(tuple):
  """A geographical point.  This is a tuple subclass and immutable.

  Fields:
    lat: latitude, a float in degrees with abs() <= 90.
    lon: longitude, a float in degrees with abs() <= 180.
  """

  __slots__ = []

  def __new__(cls, lat=0.0, lon=0.0):
    # TODO: assert abs(lat) <= 90 and abs(lon) <= 180 ???
    coords = (float(lat), float(lon))
    return tuple.__new__(cls, coords)

  @property
  def lat(self):
    """Latitude in degrees (the first tuple element)."""
    return self[0]

  @property
  def lon(self):
    """Longitude in degrees (the second tuple element)."""
    return self[1]

  def __repr__(self):
    name = self.__class__.__name__
    return '%s(%.16g, %.16g)' % (name, self.lat, self.lon)
class GeoPtProperty(Property):
  """A Property whose value is a GeoPt (latitude/longitude pair)."""

  def Validate(self, value):
    if not isinstance(value, GeoPt):
      raise datastore_errors.BadValueError('Expected GeoPt, got %r' %
                                           (value,))
    return value

  def DbSetValue(self, v, p, value):
    assert isinstance(value, GeoPt), (self._name)
    # PointValue stores lat as x and lon as y.
    pv = v.mutable_pointvalue()
    pv.set_x(value.lat)
    pv.set_y(value.lon)

  def DbGetValue(self, v, p):
    if not v.has_pointvalue():
      return None
    pv = v.pointvalue()
    return GeoPt(pv.x(), pv.y())
def _unpack_user(v):
  """Build a users.User from the uservalue field of PropertyValue v.

  All string fields are decoded from UTF-8 bytes to unicode;
  federated_identity is optional and defaults to None.
  """
  uv = v.uservalue()
  email = unicode(uv.email().decode('utf-8'))
  auth_domain = unicode(uv.auth_domain().decode('utf-8'))
  obfuscated_gaiaid = uv.obfuscated_gaiaid().decode('utf-8')
  obfuscated_gaiaid = unicode(obfuscated_gaiaid)
  federated_identity = None
  if uv.has_federated_identity():
    federated_identity = unicode(
        uv.federated_identity().decode('utf-8'))
  value = users.User(email=email,
                     _auth_domain=auth_domain,
                     _user_id=obfuscated_gaiaid,
                     federated_identity=federated_identity)
  return value
class UserProperty(Property):
  """A Property whose value is a users.User object."""

  def Validate(self, value):
    if not isinstance(value, users.User):
      raise datastore_errors.BadValueError('Expected User, got %r' %
                                           (value,))
    return value

  def DbSetValue(self, v, p, value):
    # Delegate packing to the classic datastore helper.
    datastore_types.PackUser(p.name(), value, v)

  def DbGetValue(self, v, p):
    return _unpack_user(v)
class KeyProperty(Property):
  """A Property whose value is a (complete) Key."""
  # TODO: namespaces
  # TODO: optionally check the kind (validation)

  def Validate(self, value):
    if not isinstance(value, Key):
      raise datastore_errors.BadValueError('Expected Key, got %r' % (value,))
    # Reject incomplete keys.
    if not value.id():
      raise datastore_errors.BadValueError('Expected complete Key, got %r' %
                                           (value,))
    return value

  def DbSetValue(self, v, p, value):
    """Copy the key's Reference (app, namespace, path) into v."""
    assert isinstance(value, Key)
    # See datastore_types.PackKey
    ref = value._reference() # Don't copy
    rv = v.mutable_referencevalue() # A Reference
    rv.set_app(ref.app())
    if ref.has_name_space():
      rv.set_name_space(ref.name_space())
    for elem in ref.path().element_list():
      rv.add_pathelement().CopyFrom(elem)

  def DbGetValue(self, v, p):
    """Rebuild a Key from the referencevalue stored in v."""
    if not v.has_referencevalue():
      return None
    ref = entity_pb.Reference()
    rv = v.referencevalue()
    if rv.has_app():
      ref.set_app(rv.app())
    if rv.has_name_space():
      ref.set_name_space(rv.name_space())
    path = ref.mutable_path()
    for elem in rv.pathelement_list():
      path.add_element().CopyFrom(elem)
    return Key(reference=ref)
# The Unix epoch as a naive datetime; datetime values are stored in the
# datastore as int64 microseconds relative to this (see DateTimeProperty).
_EPOCH = datetime.datetime.utcfromtimestamp(0)
class DateTimeProperty(Property):
  """A Property whose value is a naive (tz-less) datetime.datetime.

  Supports auto_now (stamp on every write) and auto_now_add (stamp on
  first write only); neither may be combined with repeated=True.
  """
  # NOTE: Unlike Django, auto_now_add can be overridden by setting the
  # value before writing the entity. And unlike classic db, auto_now
  # does not supply a default value. Also unlike classic db, when the
  # entity is written, the property values are updated to match what
  # was written. Finally, beware that this also updates the value in
  # the in-process cache, *and* that auto_now_add may interact weirdly
  # with transaction retries (a retry of a property with auto_now_add
  # set will reuse the value that was set on the first try).
  _attributes = Property._attributes + ['_auto_now', '_auto_now_add']

  @datastore_rpc._positional(1 + Property._positional)
  def __init__(self, name=None, indexed=None, repeated=None,
               auto_now=False, auto_now_add=False):
    if repeated:
      # Auto-stamping a repeated property is not supported.
      assert not auto_now
      assert not auto_now_add
    super(DateTimeProperty, self).__init__(name=name,
                                           indexed=indexed,
                                           repeated=repeated)
    self._auto_now = auto_now
    self._auto_now_add = auto_now_add

  def Validate(self, value):
    if not isinstance(value, datetime.datetime):
      raise datastore_errors.BadValueError('Expected datetime, got %r' %
                                           (value,))
    return value

  def Now(self):
    # Naive local time (no tz argument); DbSetValue asserts tzinfo is None.
    return datetime.datetime.now()

  def Serialize(self, entity, *rest):
    # Apply auto_now / auto_now_add before delegating to the base
    # serializer; note this mutates the in-memory entity too (see NOTE).
    if (self._auto_now or
        (self._auto_now_add and self.RetrieveValue(entity) is None)):
      value = self.Now()
      self.StoreValue(entity, value)
    super(DateTimeProperty, self).Serialize(entity, *rest)

  def DbSetValue(self, v, p, value):
    assert isinstance(value, datetime.datetime)
    assert value.tzinfo is None
    # Store as int64 microseconds since the epoch, tagged GD_WHEN.
    dt = value - _EPOCH
    ival = dt.microseconds + 1000000 * (dt.seconds + 24*3600 * dt.days)
    v.set_int64value(ival)
    p.set_meaning(entity_pb.Property.GD_WHEN)

  def DbGetValue(self, v, p):
    if not v.has_int64value():
      return None
    ival = v.int64value()
    return _EPOCH + datetime.timedelta(microseconds=ival)
def _date_to_datetime(value):
"""Convert a date to a datetime for datastore storage.
Args:
value: A datetime.date object.
Returns:
A datetime object with time set to 0:00.
"""
assert isinstance(value, datetime.date)
return datetime.datetime(value.year, value.month, value.day)
def _time_to_datetime(value):
"""Convert a time to a datetime for datastore storage.
Args:
value: A datetime.time object.
Returns:
A datetime object with date set to 1970-01-01.
"""
assert isinstance(value, datetime.time)
return datetime.datetime(1970, 1, 1,
value.hour, value.minute, value.second,
value.microsecond)
class DateProperty(DateTimeProperty):
  """A Property whose value is a datetime.date (stored as a datetime)."""

  def Validate(self, value):
    # datetime is a subclass of date, so exclude it explicitly.
    if (not isinstance(value, datetime.date) or
        isinstance(value, datetime.datetime)):
      raise datastore_errors.BadValueError('Expected date, got %r' %
                                           (value,))
    return value

  def Now(self):
    return datetime.date.today()

  def DbSetValue(self, v, p, value):
    # Widen to a midnight datetime and reuse the datetime encoding.
    value = _date_to_datetime(value)
    super(DateProperty, self).DbSetValue(v, p, value)

  def DbGetValue(self, v, p):
    # NOTE(review): assumes the superclass returned a datetime (i.e. the
    # int64value field was present); a missing value would raise
    # AttributeError on None.date() — confirm callers guarantee this.
    value = super(DateProperty, self).DbGetValue(v, p)
    return value.date()
class TimeProperty(DateTimeProperty):
  """A Property whose value is a datetime.time (stored as a datetime)."""

  def Validate(self, value):
    if not isinstance(value, datetime.time):
      raise datastore_errors.BadValueError('Expected time, got %r' %
                                           (value,))
    return value

  def Now(self):
    return datetime.datetime.now().time()

  def DbSetValue(self, v, p, value):
    # Widen to a 1970-01-01 datetime and reuse the datetime encoding.
    value = _time_to_datetime(value)
    super(TimeProperty, self).DbSetValue(v, p, value)

  def DbGetValue(self, v, p):
    # NOTE(review): like DateProperty.DbGetValue, assumes a non-None
    # result from the superclass — confirm.
    value = super(TimeProperty, self).DbGetValue(v, p)
    return value.time()
class StructuredProperty(Property):
  """A Property whose value is itself a Model instance (or list of them).

  The sub-entity's properties are flattened into the parent entity using
  dotted names ('outer.inner') rather than being stored as a separate
  record; see Serialize()/Deserialize().
  """
  _modelclass = None

  _attributes = ['_modelclass'] + Property._attributes
  _positional = 2  # modelclass and name may be positional.

  @datastore_rpc._positional(1 + _positional)
  def __init__(self, modelclass, name=None, indexed=None, repeated=None):
    super(StructuredProperty, self).__init__(name=name,
                                             indexed=indexed,
                                             repeated=repeated)
    if self._repeated:
      # A repeated structured property may not contain repeated
      # sub-properties — presumably the flattened dotted-name encoding
      # could not represent the nesting unambiguously.
      assert not modelclass._has_repeated
    self._modelclass = modelclass

  def FixUp(self, code_name):
    super(StructuredProperty, self).FixUp(code_name)
    self.FixUpNestedProperties()

  def FixUpNestedProperties(self):
    """Attach dotted-name copies of the sub-model's properties to self.

    This makes expressions like Parent.child.field usable in queries.
    """
    for name, prop in self._modelclass._properties.iteritems():
      prop_copy = copy.copy(prop)
      prop_copy._name = self._name + '.' + prop._name
      if isinstance(prop_copy, StructuredProperty):
        # Guard against simple recursive model definitions.
        # See model_test: testRecursiveStructuredProperty().
        # TODO: Guard against indirect recursion.
        if prop_copy._modelclass is not self._modelclass:
          prop_copy.FixUpNestedProperties()
      setattr(self, prop._code_name, prop_copy)

  def _comparison(self, op, value):
    """Translate prop == sub_entity into a conjunction of sub-filters."""
    if op != '=':
      raise datastore_errors.BadFilterError(
        'StructuredProperty filter can only use ==')
    # Import late to avoid circular imports.
    from ndb.query import FilterNode, ConjunctionNode, PostFilterNode
    value = self.Validate(value)  # None is not allowed!
    filters = []
    for name, prop in value._properties.iteritems():
      val = prop.RetrieveValue(value)
      if val is not None:
        filters.append(FilterNode(self._name + '.' + name, op, val))
    if not filters:
      raise datastore_errors.BadFilterError(
        'StructuredProperty filter without any values')
    if len(filters) == 1:
      return filters[0]
    # A post-filter re-checks that the matched sub-values belong to the
    # *same* sub-entity, which the per-field filters alone cannot ensure.
    filters.append(PostFilterNode(self._filter_func, value))
    return ConjunctionNode(filters)

  def _filter_func(self, value, entity):
    """Post-filter: True iff some sub-entity matches all set fields of value."""
    if isinstance(entity, Key):
      raise datastore_errors.BadQueryError(
        'StructuredProperty filter cannot be used with keys_only query')
    subentities = getattr(entity, self._code_name, None)
    if subentities is None:
      return False
    if not isinstance(subentities, list):
      subentities = [subentities]
    for subentity in subentities:
      for name, prop in value._properties.iteritems():
        val = prop.RetrieveValue(value)
        if val is not None:
          if prop.RetrieveValue(subentity) != val:
            break
      else:
        # All non-None fields matched this sub-entity.
        return True
    return False

  def Validate(self, value):
    if not isinstance(value, self._modelclass):
      raise datastore_errors.BadValueError('Expected %s instance, got %r' %
                                           (self._modelclass.__name__, value))
    return value

  def Serialize(self, entity, pb, prefix='', parent_repeated=False):
    # entity -> pb; pb is an EntityProto message
    value = self.RetrieveValue(entity)
    if value is None:
      # TODO: Is this the right thing for queries?
      # Skip structured values that are None.
      return
    cls = self._modelclass
    if self._repeated:
      assert isinstance(value, list)
      values = value
    else:
      assert isinstance(value, cls)
      values = [value]
    for value in values:
      # TODO: Avoid re-sorting for repeated values.
      # Each sub-property is serialized under 'prefix + name + .'.
      for name, prop in sorted(value._properties.iteritems()):
        prop.Serialize(value, pb, prefix + self._name + '.',
                       self._repeated or parent_repeated)

  def Deserialize(self, entity, p, depth=1):
    """Route one flattened Property message into the right sub-entity.

    depth indicates which dotted-name component of p.name() addresses
    this level of nesting.
    """
    if not self._repeated:
      subentity = self.RetrieveValue(entity)
      if subentity is None:
        subentity = self._modelclass()
        self.StoreValue(entity, subentity)
      assert isinstance(subentity, self._modelclass)
      prop = subentity.GetPropertyFor(p, depth=depth)
      prop.Deserialize(subentity, p, depth + 1)
      return
    # The repeated case is more complicated.
    # TODO: Prove we won't get here for orphans.
    name = p.name()
    parts = name.split('.')
    assert len(parts) > depth, (depth, name, parts)
    next = parts[depth]
    prop = self._modelclass._properties.get(next)
    assert prop is not None # QED
    values = self.RetrieveValue(entity)
    if values is None:
      values = []
    elif not isinstance(values, list):
      values = [values]
    self.StoreValue(entity, values)
    # Find the first subentity that doesn't have a value for this
    # property yet.
    for sub in values:
      assert isinstance(sub, self._modelclass)
      if not prop.HasValue(sub):
        subentity = sub
        break
    else:
      subentity = self._modelclass()
      values.append(subentity)
    prop.Deserialize(subentity, p, depth + 1)
# Property 'meaning' tag marking a zlib-compressed LocalStructuredProperty
# blob (see LocalStructuredProperty.DbSetValue/DbGetValue).
_MEANING_COMPRESSED = 18
class LocalStructuredProperty(Property):
  """Substructure that is serialized to an opaque blob.

  This looks like StructuredProperty on the Python side, but is
  written to the datastore as a single opaque blob. It is not indexed
  and you cannot query for subproperties.
  """
  _indexed = False
  _compressed = False
  _modelclass = None

  _attributes = ['_modelclass'] + Property._attributes + ['_compressed']
  _positional = 2

  @datastore_rpc._positional(1 + _positional)
  def __init__(self, modelclass, name=None, indexed=None, repeated=None,
               compressed=False):
    # Never indexed; indexed=True is rejected and the flag is not
    # forwarded to the superclass.
    assert not indexed
    super(LocalStructuredProperty, self).__init__(name=name, repeated=repeated)
    if self._repeated:
      assert not modelclass._has_repeated
    self._modelclass = modelclass
    self._compressed = compressed

  def Validate(self, value):
    if not isinstance(value, self._modelclass):
      raise datastore_errors.BadValueError('Expected %s instance, got %r' %
                                           (self._modelclass.__name__, value))
    return value

  def DbSetValue(self, v, p, value):
    # Encode the sub-entity as its own EntityProto blob; optionally
    # zlib-compress it and tag it with the custom compressed meaning.
    pb = value.ToPb()
    serialized = pb.Encode()
    if self._compressed:
      p.set_meaning(_MEANING_COMPRESSED)
      v.set_stringvalue(zlib.compress(serialized))
    else:
      p.set_meaning(entity_pb.Property.BLOB)
      v.set_stringvalue(serialized)

  def DbGetValue(self, v, p):
    if not v.has_stringvalue():
      return None
    serialized = v.stringvalue()
    if p.has_meaning() and p.meaning() == _MEANING_COMPRESSED:
      serialized = zlib.decompress(serialized)
    pb = entity_pb.EntityProto(serialized)
    entity = self._modelclass()
    entity.FromPb(pb)
    # The sub-entity has no key of its own.
    entity.key = None
    return entity
class GenericProperty(Property):
  """Property that accepts and round-trips any supported datastore type.

  This is mainly used for orphans but can also be used explicitly
  for properties with dynamically-typed values, and in Expandos.
  """

  def DbGetValue(self, v, p):
    """Decode whichever union member of PropertyValue v is present."""
    # This is awkward but there seems to be no faster way to inspect
    # what union member is present. datastore_types.FromPropertyPb(),
    # the undisputed authority, has the same series of if-elif blocks.
    # (We don't even want to think about multiple members... :-)
    if v.has_stringvalue():
      sval = v.stringvalue()
      if p.meaning() not in (entity_pb.Property.BLOB,
                             entity_pb.Property.BYTESTRING):
        # Non-blob strings: return str for ASCII, unicode for UTF-8,
        # raw bytes otherwise.
        try:
          sval.decode('ascii')
          # If this passes, don't return unicode.
        except UnicodeDecodeError:
          try:
            sval = unicode(sval.decode('utf-8'))
          except UnicodeDecodeError:
            pass
      return sval
    elif v.has_int64value():
      ival = v.int64value()
      if p.meaning() == entity_pb.Property.GD_WHEN:
        # GD_WHEN-tagged int64s are microsecond timestamps.
        return _EPOCH + datetime.timedelta(microseconds=ival)
      return ival
    elif v.has_booleanvalue():
      # The booleanvalue field is an int32, so booleanvalue() returns
      # an int, hence the conversion.
      return bool(v.booleanvalue())
    elif v.has_doublevalue():
      return v.doublevalue()
    elif v.has_referencevalue():
      rv = v.referencevalue()
      pairs = [(elem.type(), elem.id() or elem.name())
               for elem in rv.pathelement_list()]
      return Key(pairs=pairs) # TODO: app, namespace
    elif v.has_pointvalue():
      pv = v.pointvalue()
      return GeoPt(pv.x(), pv.y())
    elif v.has_uservalue():
      return _unpack_user(v)
    else:
      # A missing value implies null.
      return None

  def DbSetValue(self, v, p, value):
    """Encode a Python value into PropertyValue v according to its type."""
    # TODO: use a dict mapping types to functions
    if isinstance(value, str):
      v.set_stringvalue(value)
      # TODO: Set meaning to BLOB or BYTESTRING if it's not UTF-8?
      # (Or TEXT if unindexed.)
    elif isinstance(value, unicode):
      v.set_stringvalue(value.encode('utf8'))
      if not self._indexed:
        p.set_meaning(entity_pb.Property.TEXT)
    elif isinstance(value, bool): # Must test before int!
      v.set_booleanvalue(value)
    elif isinstance(value, (int, long)):
      assert -2**63 <= value < 2**63
      v.set_int64value(value)
    elif isinstance(value, float):
      v.set_doublevalue(value)
    elif isinstance(value, Key):
      # See datastore_types.PackKey
      ref = value._reference() # Don't copy
      rv = v.mutable_referencevalue() # A Reference
      rv.set_app(ref.app())
      if ref.has_name_space():
        # Bug fix: set_name_space() was called with no argument, so the
        # key's namespace was never copied (compare KeyProperty.DbSetValue).
        rv.set_name_space(ref.name_space())
      for elem in ref.path().element_list():
        rv.add_pathelement().CopyFrom(elem)
    elif isinstance(value, datetime.datetime):
      assert value.tzinfo is None
      dt = value - _EPOCH
      ival = dt.microseconds + 1000000 * (dt.seconds + 24*3600 * dt.days)
      v.set_int64value(ival)
      p.set_meaning(entity_pb.Property.GD_WHEN)
    elif isinstance(value, GeoPt):
      pv = v.mutable_pointvalue()
      pv.set_x(value.lat)
      pv.set_y(value.lon)
    elif isinstance(value, users.User):
      datastore_types.PackUser(p.name(), value, v)
    else:
      # TODO: blobkey, atom and gdata types
      assert False, type(value)
class ComputedProperty(GenericProperty):
  """A property that has its value determined by a user-supplied function.

  Computed properties cannot be set directly, but are instead generated by a
  function when required. They are useful to provide fields in the datastore
  that can be used for filtering or sorting without having to manually set the
  value in code - for example, sorting on the length of a BlobProperty, or
  using an equality filter to check if another field is not empty.

  ComputedProperty can be declared as a regular property, passing a function as
  the first argument, or it can be used as a decorator for the function that
  does the calculation.

  Example:

  >>> class DatastoreFile(Model):
  ...   name = StringProperty()
  ...   name_lower = ComputedProperty(lambda self: self.name.lower())
  ...
  ...   data = BlobProperty()
  ...
  ...   @ComputedProperty
  ...   def size(self):
  ...     return len(self.data)
  ...
  ...   def _compute_hash(self):
  ...     return hashlib.sha1(self.data).hexdigest()
  ...   hash = ComputedProperty(_compute_hash, name='sha1')
  """

  def __init__(self, derive_func, *args, **kwargs):
    """Constructor.

    Args:
      derive_func: A function that takes one argument, the model instance,
        and returns a calculated value.
    """
    super(ComputedProperty, self).__init__(*args, **kwargs)
    self.__derive_func = derive_func

  def HasValue(self, entity):
    # A computed value always "exists" since it is derived on demand.
    return True

  def StoreValue(self, entity, value):
    # Direct assignment is forbidden; Property.Deserialize() catches this
    # exception and ignores it.
    raise ComputedPropertyError("Cannot assign to a ComputedProperty")

  def RetrieveValue(self, entity):
    # Invoke the user-supplied function instead of reading entity._values.
    return self.__derive_func(entity)
class Expando(Model):
  """Model subclass accepting arbitrary (dynamic) attributes.

  Setting an attribute that is not a declared Property creates one on the
  fly: a StructuredProperty for Model values, a GenericProperty otherwise.
  """

  def SetAttributes(self, kwds):
    # Route each keyword through __setattr__ so dynamic properties are
    # created as needed.
    for name, value in kwds.iteritems():
      setattr(self, name, value)

  def __getattr__(self, name):
    # Underscore names and declared Property attributes go through the
    # normal lookup; NOTE(review): this assumes the Model base class
    # defines __getattr__ (not visible in this chunk) — confirm.
    if (name.startswith('_') or
        isinstance(getattr(self.__class__, name, None), Property)):
      return super(Expando, self).__getattr__(name)
    prop = self._properties.get(name)
    if prop is None:
      # No dynamic property either; let the default machinery raise
      # AttributeError.
      return super(Expando, self).__getattribute__(name)
    return prop.GetValue(self)

  def __setattr__(self, name, value):
    if (name.startswith('_') or
        isinstance(getattr(self.__class__, name, None), Property)):
      return super(Expando, self).__setattr__(name, value)
    # Create a per-instance dynamic property for this name.
    self.CloneProperties()
    if isinstance(value, Model):
      prop = StructuredProperty(Model, name)
    else:
      prop = GenericProperty(name)
    prop._code_name = name
    self._properties[name] = prop
    prop.SetValue(self, value)
# Update __all__ to contain all Property and Exception subclasses.
# Relies on the naming convention: anything ending in 'Property' or
# 'Error' with a matching base class is considered public API.
for _name, _object in globals().items():
  if ((_name.endswith('Property') and issubclass(_object, Property)) or
      (_name.endswith('Error') and issubclass(_object, Exception))):
    __all__.append(_name)
| Python |
# $Id: nodes.py 6351 2010-07-03 14:19:09Z gbrandl $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Docutils document tree element class library.
Classes in CamelCase are abstract base classes or auxiliary classes. The one
exception is `Text`, for a text (PCDATA) node; uppercase is used to
differentiate from element classes. Classes in lower_case_with_underscores
are element classes, matching the XML element generic identifiers in the DTD_.
The position of each node (the level at which it can occur) is significant and
is represented by abstract base classes (`Root`, `Structural`, `Body`,
`Inline`, etc.). Certain transformations will be easier because we can use
``isinstance(node, base_class)`` to determine the position of the node in the
hierarchy.
.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import warnings
import types
import unicodedata
# ==============================
# Functional Node Base Classes
# ==============================
class Node(object):
"""Abstract base class of nodes in a document tree."""
parent = None
"""Back-reference to the Node immediately containing this Node."""
document = None
"""The `document` node at the root of the tree containing this Node."""
source = None
"""Path or description of the input source which generated this Node."""
line = None
"""The line number (1-based) of the beginning of this Node in `source`."""
    def __nonzero__(self):
        """
        Node instances are always true, even if they're empty.  A node is more
        than a simple container.  Its boolean "truth" does not depend on
        having one or more subnodes in the doctree.

        Use `len()` to check node length.  Use `None` to represent a boolean
        false value.
        """
        # Python 2 truth protocol; Python 3 uses __bool__ instead.
        return True
    if sys.version_info < (3,):
        # on 2.x, str(node) will be a byte string with Unicode
        # characters > 255 escaped; on 3.x this is no longer necessary
        # (relies on subclasses providing __unicode__).
        def __str__(self):
            return unicode(self).encode('raw_unicode_escape')
    def asdom(self, dom=None):
        """Return a DOM **fragment** representation of this Node.

        `dom` is the DOM module to use; defaults to `xml.dom.minidom`.
        """
        if dom is None:
            import xml.dom.minidom as dom
        domroot = dom.Document()
        return self._dom_node(domroot)
    def pformat(self, indent=' ', level=0):
        """
        Return an indented pseudo-XML representation, for test purposes.

        Abstract: subclasses must override; the base implementation raises
        NotImplementedError.
        """
        raise NotImplementedError
    def copy(self):
        """Return a copy of self (abstract; subclasses must override)."""
        raise NotImplementedError
    def deepcopy(self):
        """Return a deep copy of self (also copying children); abstract."""
        raise NotImplementedError
    def setup_child(self, child):
        """Link `child` into the tree: set its parent back-reference and,
        when a document is known, propagate document/source/line info to
        the child where missing."""
        child.parent = self
        if self.document:
            child.document = self.document
            if child.source is None:
                child.source = self.document.current_source
            if child.line is None:
                child.line = self.document.current_line
    def walk(self, visitor):
        """
        Traverse a tree of `Node` objects, calling the
        `dispatch_visit()` method of `visitor` when entering each
        node.  (The `walkabout()` method is similar, except it also
        calls the `dispatch_departure()` method before exiting each
        node.)

        This tree traversal supports limited in-place tree
        modifications.  Replacing one node with one or more nodes is
        OK, as is removing an element.  However, if the node removed
        or replaced occurs after the current node, the old node will
        still be traversed, and any new nodes will not.

        Within ``visit`` methods (and ``depart`` methods for
        `walkabout()`), `TreePruningException` subclasses may be raised
        (`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).

        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit`` implementation for each `Node` subclass encountered.

        Return true if we should stop the traversal.
        """
        stop = 0
        visitor.document.reporter.debug(
            'docutils.nodes.Node.walk calling dispatch_visit for %s'
            % self.__class__.__name__)
        try:
            try:
                visitor.dispatch_visit(self)
            except (SkipChildren, SkipNode):
                # Skip this node's subtree entirely.
                return stop
            except SkipDeparture: # not applicable; ignore
                pass
            children = self.children
            try:
                # Iterate over a copy so in-place removals don't break
                # the loop (see docstring caveats).
                for child in children[:]:
                    if child.walk(visitor):
                        stop = 1
                        break
            except SkipSiblings:
                pass
        except StopTraversal:
            # Visitor requested a full stop; propagate via return value.
            stop = 1
        return stop
    def walkabout(self, visitor):
        """
        Perform a tree traversal similarly to `Node.walk()` (which
        see), except also call the `dispatch_departure()` method
        before exiting each node.

        Parameter `visitor`: A `NodeVisitor` object, containing a
        ``visit`` and ``depart`` implementation for each `Node`
        subclass encountered.

        Return true if we should stop the traversal.
        """
        call_depart = 1
        stop = 0
        visitor.document.reporter.debug(
            'docutils.nodes.Node.walkabout calling dispatch_visit for %s'
            % self.__class__.__name__)
        try:
            try:
                visitor.dispatch_visit(self)
            except SkipNode:
                return stop
            except SkipDeparture:
                # Visit succeeded but the departure call is suppressed.
                call_depart = 0
            children = self.children
            try:
                # Copy so in-place child removals don't break iteration.
                for child in children[:]:
                    if child.walkabout(visitor):
                        stop = 1
                        break
            except SkipSiblings:
                pass
        except SkipChildren:
            pass
        except StopTraversal:
            stop = 1
        if call_depart:
            visitor.document.reporter.debug(
                'docutils.nodes.Node.walkabout calling dispatch_departure '
                'for %s' % self.__class__.__name__)
            visitor.dispatch_departure(self)
        return stop
def _fast_traverse(self, cls):
"""Specialized traverse() that only supports instance checks."""
result = []
if isinstance(self, cls):
result.append(self)
for child in self.children:
result.extend(child._fast_traverse(cls))
return result
def _all_traverse(self):
"""Specialized traverse() that doesn't check for a condition."""
result = []
result.append(self)
for child in self.children:
result.extend(child._all_traverse())
return result
def traverse(self, condition=None,
             include_self=1, descend=1, siblings=0, ascend=0):
    """
    Return an iterable containing

    * self (if include_self is true)
    * all descendants in tree traversal order (if descend is true)
    * all siblings (if siblings is true) and their descendants (if
      also descend is true)
    * the siblings of the parent (if ascend is true) and their
      descendants (if also descend is true), and so on

    If `condition` is not None, the iterable contains only nodes
    for which ``condition(node)`` is true.  If `condition` is a
    node class ``cls``, it is equivalent to a function consisting
    of ``return isinstance(node, cls)``.

    If ascend is true, assume siblings to be true as well.

    For example, given the following tree::

        <paragraph>
            <emphasis>      <--- emphasis.traverse() and
                <strong>    <--- strong.traverse() are called.
                    Foo
                Bar
            <reference name="Baz" refid="baz">
                Baz

    Then list(emphasis.traverse()) equals ::

        [<emphasis>, <strong>, <#text: Foo>, <#text: Bar>]

    and list(strong.traverse(ascend=1)) equals ::

        [<strong>, <#text: Foo>, <#text: Bar>, <reference>, <#text: Baz>]
    """
    if ascend:
        siblings=1
    # Check for special argument combinations that allow using an
    # optimized version of traverse()
    if include_self and descend and not siblings:
        if condition is None:
            return self._all_traverse()
        elif isinstance(condition, (types.ClassType, type)):
            return self._fast_traverse(condition)
    # Check if `condition` is a class (check for TypeType for Python
    # implementations that use only new-style classes, like PyPy).
    if isinstance(condition, (types.ClassType, type)):
        # Wrap the class in a predicate so the generic path below only
        # ever deals with callables.
        node_class = condition
        def condition(node, node_class=node_class):
            return isinstance(node, node_class)
    r = []
    if include_self and (condition is None or condition(self)):
        r.append(self)
    if descend and len(self.children):
        for child in self:
            r.extend(child.traverse(
                include_self=1, descend=1, siblings=0, ascend=0,
                condition=condition))
    if siblings or ascend:
        # Walk the following siblings at each ancestry level; stop
        # after one level unless `ascend` is true.
        node = self
        while node.parent:
            index = node.parent.index(node)
            for sibling in node.parent[index+1:]:
                r.extend(sibling.traverse(include_self=1, descend=descend,
                                          siblings=0, ascend=0,
                                          condition=condition))
            if not ascend:
                break
            else:
                node = node.parent
    return r
def next_node(self, condition=None,
              include_self=0, descend=1, siblings=0, ascend=0):
    """
    Return the first node yielded by `traverse()`, or None if the
    result is empty.

    Parameter list is the same as of traverse.  Note that
    include_self defaults to 0, though.
    """
    found = self.traverse(condition=condition,
                          include_self=include_self,
                          descend=descend, siblings=siblings,
                          ascend=ascend)
    if found:
        return found[0]
    return None
if sys.version_info < (3,):
    class reprunicode(unicode):
        """
        A class that removes the initial u from unicode's repr.
        """

        def __repr__(self):
            # Drop the leading "u" of u'...' so reprs look the same
            # under Python 2 and (2to3-converted) Python 3.
            return unicode.__repr__(self)[1:]
else:
    # Under Python 3 (after 2to3), `unicode` is `str`, whose repr
    # already has no prefix to strip.
    reprunicode = unicode
class Text(Node, reprunicode):

    """
    Instances are terminal nodes (leaves) containing text only; no child
    nodes or attributes.  Initialize by passing a string to the constructor.
    Access the text itself with the `astext` method.
    """

    tagname = '#text'

    children = ()
    """Text nodes have no children, and cannot have children."""

    if sys.version_info > (3,):
        def __new__(cls, data, rawsource=None):
            """Prevent the rawsource argument from propagating to str."""
            if isinstance(data, bytes):
                raise TypeError('expecting str data, not bytes')
            return reprunicode.__new__(cls, data)
    else:
        def __new__(cls, data, rawsource=None):
            """Prevent the rawsource argument from propagating to str."""
            return reprunicode.__new__(cls, data)

    def __init__(self, data, rawsource=''):
        self.rawsource = rawsource
        """The raw text from which this element was constructed."""

    def shortrepr(self, maxlen=18):
        # Truncate text longer than `maxlen`, marking the cut with ' ...'.
        data = self
        if len(data) > maxlen:
            data = data[:maxlen-4] + ' ...'
        return '<%s: %s>' % (self.tagname, repr(reprunicode(data)))

    def __repr__(self):
        return self.shortrepr(maxlen=68)

    def _dom_node(self, domroot):
        # Represent this node as a plain DOM text node.
        return domroot.createTextNode(unicode(self))

    def astext(self):
        return reprunicode(self)

    # Note about __unicode__: The implementation of __unicode__ here,
    # and the one raising NotImplemented in the superclass Node had
    # to be removed when changing Text to a subclass of unicode instead
    # of UserString, since there is no way to delegate the __unicode__
    # call to the superclass unicode:
    # unicode itself does not have __unicode__ method to delegate to
    # and calling unicode(self) or unicode.__new__ directly creates
    # an infinite loop

    def copy(self):
        return self.__class__(reprunicode(self), rawsource=self.rawsource)

    def deepcopy(self):
        # Text is immutable and has no children: a copy is already deep.
        return self.copy()

    def pformat(self, indent=' ', level=0):
        # One output line per input line, each prefixed by the
        # accumulated indent.
        result = []
        indent = indent * level
        for line in self.splitlines():
            result.append(indent + line + '\n')
        return ''.join(result)

    # rstrip and lstrip are used by substitution definitions where
    # they are expected to return a Text instance, this was formerly
    # taken care of by UserString. Note that then and now the
    # rawsource member is lost.

    def rstrip(self, chars=None):
        return self.__class__(reprunicode.rstrip(self, chars))

    def lstrip(self, chars=None):
        return self.__class__(reprunicode.lstrip(self, chars))
class Element(Node):

    """
    `Element` is the superclass to all specific elements.

    Elements contain attributes and child nodes.  Elements emulate
    dictionaries for attributes, indexing by attribute name (a string).  To
    set the attribute 'att' to 'value', do::

        element['att'] = 'value'

    There are two special attributes: 'ids' and 'names'.  Both are
    lists of unique identifiers, and names serve as human interfaces
    to IDs.  Names are case- and whitespace-normalized (see the
    fully_normalize_name() function), and IDs conform to the regular
    expression ``[a-z](-?[a-z0-9]+)*`` (see the make_id() function).

    Elements also emulate lists for child nodes (element nodes and/or text
    nodes), indexing by integer.  To get the first child node, use::

        element[0]

    Elements may be constructed using the ``+=`` operator.  To add one new
    child node to element, do::

        element += node

    This is equivalent to ``element.append(node)``.

    To add a list of multiple child nodes at once, use the same ``+=``
    operator::

        element += [node1, node2]

    This is equivalent to ``element.extend([node1, node2])``.
    """

    list_attributes = ('ids', 'classes', 'names', 'dupnames', 'backrefs')
    """List attributes, automatically initialized to empty lists for
    all nodes."""

    tagname = None
    """The element generic identifier. If None, it is set as an instance
    attribute to the name of the class."""

    child_text_separator = '\n\n'
    """Separator for child nodes, used by `astext()` method."""

    def __init__(self, rawsource='', *children, **attributes):
        self.rawsource = rawsource
        """The raw text from which this element was constructed."""

        self.children = []
        """List of child nodes (elements and/or `Text`)."""

        self.extend(children)  # maintain parent info

        self.attributes = {}
        """Dictionary of attribute {name: value}."""

        # Initialize list attributes.
        for att in self.list_attributes:
            self.attributes[att] = []

        for att, value in attributes.items():
            att = att.lower()
            if att in self.list_attributes:
                # mutable list; make a copy for this node
                self.attributes[att] = value[:]
            else:
                self.attributes[att] = value

        if self.tagname is None:
            self.tagname = self.__class__.__name__

    def _dom_node(self, domroot):
        # Build a DOM element; list-valued attributes are serialized as
        # space-separated escaped strings.
        element = domroot.createElement(self.tagname)
        for attribute, value in self.attlist():
            if isinstance(value, list):
                value = ' '.join([serial_escape('%s' % v) for v in value])
            element.setAttribute(attribute, '%s' % value)
        for child in self.children:
            element.appendChild(child._dom_node(domroot))
        return element

    def __repr__(self):
        # Concatenate child shortreprs, truncated at ~60 characters.
        data = ''
        for c in self.children:
            data += c.shortrepr()
            if len(data) > 60:
                data = data[:56] + ' ...'
                break
        if self['names']:
            return '<%s "%s": %s>' % (self.__class__.__name__,
                                      '; '.join(self['names']), data)
        else:
            return '<%s: %s>' % (self.__class__.__name__, data)

    def shortrepr(self):
        if self['names']:
            return '<%s "%s"...>' % (self.__class__.__name__,
                                     '; '.join(self['names']))
        else:
            return '<%s...>' % self.tagname

    def __unicode__(self):
        # Serialize as pseudo-XML: start tag, children, end tag, or an
        # empty tag when there are no children.
        if self.children:
            return u'%s%s%s' % (self.starttag(),
                                ''.join([unicode(c) for c in self.children]),
                                self.endtag())
        else:
            return self.emptytag()

    if sys.version_info > (3,):
        # 2to3 doesn't convert __unicode__ to __str__
        __str__ = __unicode__

    def starttag(self):
        parts = [self.tagname]
        for name, value in self.attlist():
            if value is None:  # boolean attribute
                parts.append(name)
            elif isinstance(value, list):
                values = [serial_escape('%s' % v) for v in value]
                parts.append('%s="%s"' % (name, ' '.join(values)))
            else:
                parts.append('%s="%s"' % (name, value))
        return '<%s>' % ' '.join(parts)

    def endtag(self):
        return '</%s>' % self.tagname

    def emptytag(self):
        return u'<%s/>' % ' '.join([self.tagname] +
                                   ['%s="%s"' % (n, v)
                                    for n, v in self.attlist()])

    def __len__(self):
        return len(self.children)

    def __contains__(self, key):
        # support both membership test for children and attributes
        # (has_key is translated to "in" by 2to3)
        # NOTE(review): this definition is shadowed by the later
        # ``__contains__ = hasattr`` assignment below, so in practice
        # only the attribute-membership test is effective.
        if isinstance(key, basestring):
            return key in self.attributes
        return key in self.children

    def __getitem__(self, key):
        # String key -> attribute; int/slice key -> child node(s).
        if isinstance(key, basestring):
            return self.attributes[key]
        elif isinstance(key, int):
            return self.children[key]
        elif isinstance(key, types.SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            return self.children[key.start:key.stop]
        else:
            raise TypeError, ('element index must be an integer, a slice, or '
                              'an attribute name string')

    def __setitem__(self, key, item):
        if isinstance(key, basestring):
            self.attributes[str(key)] = item
        elif isinstance(key, int):
            self.setup_child(item)
            self.children[key] = item
        elif isinstance(key, types.SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            for node in item:
                self.setup_child(node)
            self.children[key.start:key.stop] = item
        else:
            raise TypeError, ('element index must be an integer, a slice, or '
                              'an attribute name string')

    def __delitem__(self, key):
        if isinstance(key, basestring):
            del self.attributes[key]
        elif isinstance(key, int):
            del self.children[key]
        elif isinstance(key, types.SliceType):
            assert key.step in (None, 1), 'cannot handle slice with stride'
            del self.children[key.start:key.stop]
        else:
            raise TypeError, ('element index must be an integer, a simple '
                              'slice, or an attribute name string')

    def __add__(self, other):
        return self.children + other

    def __radd__(self, other):
        return other + self.children

    def __iadd__(self, other):
        """Append a node or a list of nodes to `self.children`."""
        if isinstance(other, Node):
            self.append(other)
        elif other is not None:
            self.extend(other)
        return self

    def astext(self):
        return self.child_text_separator.join(
            [child.astext() for child in self.children])

    def non_default_attributes(self):
        # Attributes whose value differs from the (empty-list) default.
        atts = {}
        for key, value in self.attributes.items():
            if self.is_not_default(key):
                atts[key] = value
        return atts

    def attlist(self):
        # Sorted (name, value) pairs for stable serialization.
        attlist = self.non_default_attributes().items()
        attlist.sort()
        return attlist

    def get(self, key, failobj=None):
        return self.attributes.get(key, failobj)

    def hasattr(self, attr):
        return attr in self.attributes

    def delattr(self, attr):
        if attr in self.attributes:
            del self.attributes[attr]

    def setdefault(self, key, failobj=None):
        return self.attributes.setdefault(key, failobj)

    has_key = hasattr

    # support operator in
    __contains__ = hasattr

    def append(self, item):
        self.setup_child(item)
        self.children.append(item)

    def extend(self, item):
        for node in item:
            self.append(node)

    def insert(self, index, item):
        if isinstance(item, Node):
            self.setup_child(item)
            self.children.insert(index, item)
        elif item is not None:
            # Insert a sequence of nodes via slice assignment.
            self[index:index] = item

    def pop(self, i=-1):
        return self.children.pop(i)

    def remove(self, item):
        self.children.remove(item)

    def index(self, item):
        return self.children.index(item)

    def is_not_default(self, key):
        # List attributes default to []; anything else is "not default".
        if self[key] == [] and key in self.list_attributes:
            return 0
        else:
            return 1

    def update_basic_atts(self, dict):
        """
        Update basic attributes ('ids', 'names', 'classes',
        'dupnames', but not 'source') from node or dictionary `dict`.
        """
        if isinstance(dict, Node):
            dict = dict.attributes
        for att in ('ids', 'classes', 'names', 'dupnames'):
            for value in dict.get(att, []):
                if not value in self[att]:
                    self[att].append(value)

    def clear(self):
        self.children = []

    def replace(self, old, new):
        """Replace one child `Node` with another child or children."""
        index = self.index(old)
        if isinstance(new, Node):
            self.setup_child(new)
            self[index] = new
        elif new is not None:
            self[index:index+1] = new

    def replace_self(self, new):
        """
        Replace `self` node with `new`, where `new` is a node or a
        list of nodes.
        """
        update = new
        if not isinstance(new, Node):
            # `new` is a list; update first child.
            try:
                update = new[0]
            except IndexError:
                update = None
        if isinstance(update, Element):
            update.update_basic_atts(self)
        else:
            # `update` is a Text node or `new` is an empty list.
            # Assert that we aren't losing any attributes.
            for att in ('ids', 'names', 'classes', 'dupnames'):
                assert not self[att], \
                       'Losing "%s" attribute: %s' % (att, self[att])
        self.parent.replace(self, new)

    def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
        """
        Return the index of the first child whose class exactly matches.

        Parameters:

        - `childclass`: A `Node` subclass to search for, or a tuple of `Node`
          classes. If a tuple, any of the classes may match.
        - `start`: Initial index to check.
        - `end`: Initial index to *not* check.
        """
        if not isinstance(childclass, tuple):
            childclass = (childclass,)
        for index in range(start, min(len(self), end)):
            for c in childclass:
                if isinstance(self[index], c):
                    return index
        return None

    def first_child_not_matching_class(self, childclass, start=0,
                                       end=sys.maxint):
        """
        Return the index of the first child whose class does *not* match.

        Parameters:

        - `childclass`: A `Node` subclass to skip, or a tuple of `Node`
          classes. If a tuple, none of the classes may match.
        - `start`: Initial index to check.
        - `end`: Initial index to *not* check.
        """
        if not isinstance(childclass, tuple):
            childclass = (childclass,)
        for index in range(start, min(len(self), end)):
            for c in childclass:
                if isinstance(self.children[index], c):
                    break
            else:
                # No class matched this child: it is the one we want.
                return index
        return None

    def pformat(self, indent=' ', level=0):
        return ''.join(['%s%s\n' % (indent * level, self.starttag())] +
                       [child.pformat(indent, level+1)
                        for child in self.children])

    def copy(self):
        # Shallow copy: attributes only, no children.
        return self.__class__(rawsource=self.rawsource, **self.attributes)

    def deepcopy(self):
        copy = self.copy()
        copy.extend([child.deepcopy() for child in self.children])
        return copy

    def set_class(self, name):
        """Add a new class to the "classes" attribute."""
        warnings.warn('docutils.nodes.Element.set_class deprecated; '
                      "append to Element['classes'] list attribute directly",
                      DeprecationWarning, stacklevel=2)
        assert ' ' not in name
        self['classes'].append(name.lower())

    def note_referenced_by(self, name=None, id=None):
        """Note that this Element has been referenced by its name
        `name` or id `id`."""
        self.referenced = 1
        # Element.expect_referenced_by_* dictionaries map names or ids
        # to nodes whose ``referenced`` attribute is set to true as
        # soon as this node is referenced by the given name or id.
        # Needed for target propagation.
        by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
        by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
        if by_name:
            assert name is not None
            by_name.referenced = 1
        if by_id:
            assert id is not None
            by_id.referenced = 1
class TextElement(Element):

    """
    An element which directly contains text.

    Its children are all `Text` or `Inline` subclass nodes.  You can
    check whether an element's context is inline simply by checking whether
    its immediate parent is a `TextElement` instance (including subclasses).
    This is handy for nodes like `image` that can appear both inline and as
    standalone body elements.

    If passing children to `__init__()`, make sure to set `text` to
    ``''`` or some other suitable value.
    """

    child_text_separator = ''
    """Separator for child nodes, used by `astext()` method."""

    def __init__(self, rawsource='', text='', *children, **attributes):
        if text != '':
            # Wrap the plain string in a Text node and make it the
            # first child.
            textnode = Text(text)
            Element.__init__(self, rawsource, textnode, *children,
                             **attributes)
        else:
            Element.__init__(self, rawsource, *children, **attributes)
class FixedTextElement(TextElement):

    """An element which directly contains preformatted text."""

    def __init__(self, rawsource='', text='', *children, **attributes):
        TextElement.__init__(self, rawsource, text, *children, **attributes)
        # Mark the content's whitespace as significant.
        self.attributes['xml:space'] = 'preserve'
# ========
#  Mixins
# ========

class Resolvable:

    # 0 until the reference has been resolved; see also `Referential`
    # and `Targetable` below.
    resolved = 0
class BackLinkable:

    """Mixin for elements that accumulate back-reference ids."""

    def add_backref(self, refid):
        # Record `refid` in this node's 'backrefs' list attribute.
        self['backrefs'].append(refid)
# ====================
#  Element Categories
# ====================

# Mostly-empty marker classes used to categorize element classes via
# isinstance() checks (e.g. `first_child_not_matching_class(Titular)`).

class Root: pass

class Titular: pass

class PreBibliographic:
    """Category of Node which may occur before Bibliographic Nodes."""

class Bibliographic: pass

class Decorative(PreBibliographic): pass

class Structural: pass

class Body: pass

class General(Body): pass

class Sequential(Body):
    """List-like elements."""

class Admonition(Body): pass

class Special(Body):
    """Special internal body elements."""

class Invisible(PreBibliographic):
    """Internal elements that don't appear in output."""

class Part: pass

class Inline: pass

class Referential(Resolvable): pass

class Targetable(Resolvable):

    referenced = 0

    indirect_reference_name = None
    """Holds the whitespace_normalized_name (contains mixed case) of a target.
    Required for MoinMoin/reST compatibility."""

class Labeled:
    """Contains a `label` as its first element."""
# ==============
#  Root Element
# ==============

class document(Root, Structural, Element):

    """
    The document root element.

    Do not instantiate this class directly; use
    `docutils.utils.new_document()` instead.
    """

    def __init__(self, settings, reporter, *args, **kwargs):
        Element.__init__(self, *args, **kwargs)

        self.current_source = None
        """Path to or description of the input source being processed."""

        self.current_line = None
        """Line number (1-based) of `current_source`."""

        self.settings = settings
        """Runtime settings data record."""

        self.reporter = reporter
        """System message generator."""

        self.indirect_targets = []
        """List of indirect target nodes."""

        self.substitution_defs = {}
        """Mapping of substitution names to substitution_definition nodes."""

        self.substitution_names = {}
        """Mapping of case-normalized substitution names to case-sensitive
        names."""

        self.refnames = {}
        """Mapping of names to lists of referencing nodes."""

        self.refids = {}
        """Mapping of ids to lists of referencing nodes."""

        self.nameids = {}
        """Mapping of names to unique id's."""

        self.nametypes = {}
        """Mapping of names to hyperlink type (boolean: True => explicit,
        False => implicit."""

        self.ids = {}
        """Mapping of ids to nodes."""

        self.footnote_refs = {}
        """Mapping of footnote labels to lists of footnote_reference nodes."""

        self.citation_refs = {}
        """Mapping of citation labels to lists of citation_reference nodes."""

        self.autofootnotes = []
        """List of auto-numbered footnote nodes."""

        self.autofootnote_refs = []
        """List of auto-numbered footnote_reference nodes."""

        self.symbol_footnotes = []
        """List of symbol footnote nodes."""

        self.symbol_footnote_refs = []
        """List of symbol footnote_reference nodes."""

        self.footnotes = []
        """List of manually-numbered footnote nodes."""

        self.citations = []
        """List of citation nodes."""

        self.autofootnote_start = 1
        """Initial auto-numbered footnote number."""

        self.symbol_footnote_start = 0
        """Initial symbol footnote symbol index."""

        self.id_start = 1
        """Initial ID number."""

        self.parse_messages = []
        """System messages generated while parsing."""

        self.transform_messages = []
        """System messages generated while applying transforms."""

        import docutils.transforms
        self.transformer = docutils.transforms.Transformer(self)
        """Storage for transforms to be applied to this document."""

        self.decoration = None
        """Document's `decoration` node."""

        self.document = self

    def __getstate__(self):
        """
        Return dict with unpicklable references removed.
        """
        state = self.__dict__.copy()
        state['reporter'] = None
        state['transformer'] = None
        return state

    def asdom(self, dom=None):
        """Return a DOM representation of this document."""
        if dom is None:
            import xml.dom.minidom as dom
        domroot = dom.Document()
        domroot.appendChild(self._dom_node(domroot))
        return domroot

    def set_id(self, node, msgnode=None):
        # Report duplicate explicit IDs (the node keeps them anyway).
        for id in node['ids']:
            if id in self.ids and self.ids[id] is not node:
                msg = self.reporter.severe('Duplicate ID: "%s".' % id)
                if msgnode != None:
                    msgnode += msg
        if not node['ids']:
            # Derive an ID from one of the node's names, or else
            # generate a fresh auto-numbered one.
            for name in node['names']:
                id = self.settings.id_prefix + make_id(name)
                if id and id not in self.ids:
                    break
            else:
                id = ''
                while not id or id in self.ids:
                    id = (self.settings.id_prefix +
                          self.settings.auto_id_prefix + str(self.id_start))
                    self.id_start += 1
            node['ids'].append(id)
        self.ids[id] = node
        return id

    def set_name_id_map(self, node, id, msgnode=None, explicit=None):
        """
        `self.nameids` maps names to IDs, while `self.nametypes` maps names to
        booleans representing hyperlink type (True==explicit,
        False==implicit).  This method updates the mappings.

        The following state transition table shows how `self.nameids` ("ids")
        and `self.nametypes` ("types") change with new input (a call to this
        method), and what actions are performed ("implicit"-type system
        messages are INFO/1, and "explicit"-type system messages are ERROR/3):

        ====  =====  ========  ========  =======  ====  =====  =====
         Old State    Input          Action        New State   Notes
        -----------  --------  -----------------  -----------  -----
        ids   types  new type  sys.msg.  dupname  ids   types
        ====  =====  ========  ========  =======  ====  =====  =====
        -     -      explicit  -         -        new   True
        -     -      implicit  -         -        new   False
        None  False  explicit  -         -        new   True
        old   False  explicit  implicit  old      new   True
        None  True   explicit  explicit  new      None  True
        old   True   explicit  explicit  new,old  None  True   [#]_
        None  False  implicit  implicit  new      None  False
        old   False  implicit  implicit  new,old  None  False
        None  True   implicit  implicit  new      None  True
        old   True   implicit  implicit  new      old   True
        ====  =====  ========  ========  =======  ====  =====  =====

        .. [#] Do not clear the name-to-id map or invalidate the old target if
           both old and new targets are external and refer to identical URIs.
           The new target is invalidated regardless.
        """
        for name in node['names']:
            if name in self.nameids:
                self.set_duplicate_name_id(node, id, name, msgnode, explicit)
            else:
                self.nameids[name] = id
                self.nametypes[name] = explicit

    def set_duplicate_name_id(self, node, id, name, msgnode, explicit):
        # Implements the state-transition table documented in
        # `set_name_id_map()` for the "name already known" cases.
        old_id = self.nameids[name]
        old_explicit = self.nametypes[name]
        self.nametypes[name] = old_explicit or explicit
        if explicit:
            if old_explicit:
                level = 2
                if old_id is not None:
                    old_node = self.ids[old_id]
                    if 'refuri' in node:
                        refuri = node['refuri']
                        if old_node['names'] \
                               and 'refuri' in old_node \
                               and old_node['refuri'] == refuri:
                            level = 1  # just inform if refuri's identical
                    if level > 1:
                        dupname(old_node, name)
                        self.nameids[name] = None
                msg = self.reporter.system_message(
                    level, 'Duplicate explicit target name: "%s".' % name,
                    backrefs=[id], base_node=node)
                if msgnode != None:
                    msgnode += msg
                dupname(node, name)
            else:
                # New explicit target overrides the old implicit one.
                self.nameids[name] = id
                if old_id is not None:
                    old_node = self.ids[old_id]
                    dupname(old_node, name)
        else:
            if old_id is not None and not old_explicit:
                # Two implicit targets: both are invalidated.
                self.nameids[name] = None
                old_node = self.ids[old_id]
                dupname(old_node, name)
            dupname(node, name)
        if not explicit or (not old_explicit and old_id is not None):
            msg = self.reporter.info(
                'Duplicate implicit target name: "%s".' % name,
                backrefs=[id], base_node=node)
            if msgnode != None:
                msgnode += msg

    def has_name(self, name):
        return name in self.nameids

    # "note" here is an imperative verb: "take note of".
    def note_implicit_target(self, target, msgnode=None):
        id = self.set_id(target, msgnode)
        self.set_name_id_map(target, id, msgnode, explicit=None)

    def note_explicit_target(self, target, msgnode=None):
        id = self.set_id(target, msgnode)
        self.set_name_id_map(target, id, msgnode, explicit=1)

    def note_refname(self, node):
        self.refnames.setdefault(node['refname'], []).append(node)

    def note_refid(self, node):
        self.refids.setdefault(node['refid'], []).append(node)

    def note_indirect_target(self, target):
        self.indirect_targets.append(target)
        if target['names']:
            self.note_refname(target)

    def note_anonymous_target(self, target):
        self.set_id(target)

    def note_autofootnote(self, footnote):
        self.set_id(footnote)
        self.autofootnotes.append(footnote)

    def note_autofootnote_ref(self, ref):
        self.set_id(ref)
        self.autofootnote_refs.append(ref)

    def note_symbol_footnote(self, footnote):
        self.set_id(footnote)
        self.symbol_footnotes.append(footnote)

    def note_symbol_footnote_ref(self, ref):
        self.set_id(ref)
        self.symbol_footnote_refs.append(ref)

    def note_footnote(self, footnote):
        self.set_id(footnote)
        self.footnotes.append(footnote)

    def note_footnote_ref(self, ref):
        self.set_id(ref)
        self.footnote_refs.setdefault(ref['refname'], []).append(ref)
        self.note_refname(ref)

    def note_citation(self, citation):
        self.citations.append(citation)

    def note_citation_ref(self, ref):
        self.set_id(ref)
        self.citation_refs.setdefault(ref['refname'], []).append(ref)
        self.note_refname(ref)

    def note_substitution_def(self, subdef, def_name, msgnode=None):
        name = whitespace_normalize_name(def_name)
        if name in self.substitution_defs:
            msg = self.reporter.error(
                'Duplicate substitution definition name: "%s".' % name,
                base_node=subdef)
            if msgnode != None:
                msgnode += msg
            oldnode = self.substitution_defs[name]
            dupname(oldnode, name)
        # keep only the last definition:
        self.substitution_defs[name] = subdef
        # case-insensitive mapping:
        self.substitution_names[fully_normalize_name(name)] = name

    def note_substitution_ref(self, subref, refname):
        subref['refname'] = whitespace_normalize_name(refname)

    def note_pending(self, pending, priority=None):
        self.transformer.add_pending(pending, priority)

    def note_parse_message(self, message):
        self.parse_messages.append(message)

    def note_transform_message(self, message):
        self.transform_messages.append(message)

    def note_source(self, source, offset):
        self.current_source = source
        # `offset` is 0-based; `current_line` is 1-based (or None).
        if offset is None:
            self.current_line = offset
        else:
            self.current_line = offset + 1

    def copy(self):
        return self.__class__(self.settings, self.reporter,
                              **self.attributes)

    def get_decoration(self):
        # Create the `decoration` node on first use and insert it
        # after any leading Titular children.
        if not self.decoration:
            self.decoration = decoration()
            index = self.first_child_not_matching_class(Titular)
            if index is None:
                self.append(self.decoration)
            else:
                self.insert(index, self.decoration)
        return self.decoration
# ================
#  Title Elements
# ================

class title(Titular, PreBibliographic, TextElement): pass
class subtitle(Titular, PreBibliographic, TextElement): pass
class rubric(Titular, TextElement): pass
# ========================
#  Bibliographic Elements
# ========================

class docinfo(Bibliographic, Element): pass
class author(Bibliographic, TextElement): pass
class authors(Bibliographic, Element): pass
class organization(Bibliographic, TextElement): pass
class address(Bibliographic, FixedTextElement): pass
class contact(Bibliographic, TextElement): pass
class version(Bibliographic, TextElement): pass
class revision(Bibliographic, TextElement): pass
class status(Bibliographic, TextElement): pass
class date(Bibliographic, TextElement): pass
class copyright(Bibliographic, TextElement): pass
# =====================
#  Decorative Elements
# =====================

class decoration(Decorative, Element):

    def get_header(self):
        # Create an empty `header` node on demand; it is always kept
        # as the first child.
        if not len(self.children) or not isinstance(self.children[0], header):
            self.insert(0, header())
        return self.children[0]

    def get_footer(self):
        # Create an empty `footer` node on demand; it is always kept
        # as the last child.
        if not len(self.children) or not isinstance(self.children[-1], footer):
            self.append(footer())
        return self.children[-1]


class header(Decorative, Element): pass
class footer(Decorative, Element): pass
# =====================
#  Structural Elements
# =====================

class section(Structural, Element): pass


class topic(Structural, Element):

    """
    Topics are terminal, "leaf" mini-sections, like block quotes with titles,
    or textual figures.  A topic is just like a section, except that it has no
    subsections, and it doesn't have to conform to section placement rules.

    Topics are allowed wherever body elements (list, table, etc.) are allowed,
    but only at the top level of a section or document.  Topics cannot nest
    inside topics, sidebars, or body elements; you can't have a topic inside a
    table, list, block quote, etc.
    """


class sidebar(Structural, Element):

    """
    Sidebars are like miniature, parallel documents that occur inside other
    documents, providing related or reference material.  A sidebar is
    typically offset by a border and "floats" to the side of the page; the
    document's main text may flow around it.  Sidebars can also be likened to
    super-footnotes; their content is outside of the flow of the document's
    main text.

    Sidebars are allowed wherever body elements (list, table, etc.) are
    allowed, but only at the top level of a section or document.  Sidebars
    cannot nest inside sidebars, topics, or body elements; you can't have a
    sidebar inside a table, list, block quote, etc.
    """


class transition(Structural, Element): pass
# ===============
#  Body Elements
# ===============

class paragraph(General, TextElement): pass
class compound(General, Element): pass
class container(General, Element): pass
class bullet_list(Sequential, Element): pass
class enumerated_list(Sequential, Element): pass
class list_item(Part, Element): pass
class definition_list(Sequential, Element): pass
class definition_list_item(Part, Element): pass
class term(Part, TextElement): pass
class classifier(Part, TextElement): pass
class definition(Part, Element): pass
class field_list(Sequential, Element): pass
class field(Part, Element): pass
class field_name(Part, TextElement): pass
class field_body(Part, Element): pass


class option(Part, Element):

    # Option parts are joined with no separator by astext().
    child_text_separator = ''


class option_argument(Part, TextElement):

    def astext(self):
        # Prefix the argument text with its 'delimiter' attribute
        # (default: a single space).
        return self.get('delimiter', ' ') + TextElement.astext(self)


class option_group(Part, Element):

    child_text_separator = ', '


class option_list(Sequential, Element): pass


class option_list_item(Part, Element):

    child_text_separator = ' '


class option_string(Part, TextElement): pass
class description(Part, Element): pass
class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
class line_block(General, Element): pass


class line(Part, TextElement):

    # NOTE(review): presumably the line's indentation, assigned by
    # code outside this class; None until set — confirm with callers.
    indent = None


class block_quote(General, Element): pass
class attribution(Part, TextElement): pass
class attention(Admonition, Element): pass
class caution(Admonition, Element): pass
class danger(Admonition, Element): pass
class error(Admonition, Element): pass
class important(Admonition, Element): pass
class note(Admonition, Element): pass
class tip(Admonition, Element): pass
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass
class comment(Special, Invisible, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass
class footnote(General, BackLinkable, Element, Labeled, Targetable): pass
class citation(General, BackLinkable, Element, Labeled, Targetable): pass
class label(Part, TextElement): pass
class figure(General, Element): pass
class caption(Part, TextElement): pass
class legend(Part, Element): pass
class table(General, Element): pass
class tgroup(Part, Element): pass
class colspec(Part, Element): pass
class thead(Part, Element): pass
class tbody(Part, Element): pass
class row(Part, Element): pass
class entry(Part, Element): pass
class system_message(Special, BackLinkable, PreBibliographic, Element):

    """
    System message element.

    Do not instantiate this class directly; use
    ``document.reporter.info/warning/error/severe()`` instead.
    """

    def __init__(self, message=None, *children, **attributes):
        if message:
            # Wrap the message text in a leading paragraph child.
            p = paragraph('', message)
            children = (p,) + children
        try:
            Element.__init__(self, '', *children, **attributes)
        except:
            # Bare except: report the offending children for debugging,
            # then re-raise the original exception.
            print 'system_message: children=%r' % (children,)
            raise

    def astext(self):
        # Format: "<source>:<line>: (<type>/<level>) <message text>".
        line = self.get('line', '')
        return u'%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
                                       self['level'], Element.astext(self))
class pending(Special, Invisible, Element):

    """
    The "pending" element is used to encapsulate a pending operation: the
    operation (transform), the point at which to apply it, and any data it
    requires.  Only the pending operation's location within the document is
    stored in the public document tree (by the "pending" object itself); the
    operation and its data are stored in the "pending" object's internal
    instance attributes.

    For example, say you want a table of contents in your reStructuredText
    document.  The easiest way to specify where to put it is from within the
    document, with a directive::

        .. contents::

    But the "contents" directive can't do its work until the entire document
    has been parsed and possibly transformed to some extent.  So the directive
    code leaves a placeholder behind that will trigger the second phase of its
    processing, something like this::

        <pending ...public attributes...> + internal attributes

    Use `document.note_pending()` so that the
    `docutils.transforms.Transformer` stage of processing can run all pending
    transforms.
    """

    def __init__(self, transform, details=None,
                 rawsource='', *children, **attributes):
        Element.__init__(self, rawsource, *children, **attributes)

        self.transform = transform
        """The `docutils.transforms.Transform` class implementing the pending
        operation."""

        self.details = details or {}
        """Detail data (dictionary) required by the pending operation."""

    def pformat(self, indent='    ', level=0):
        # Pretty-print: the standard Element dump plus an indented listing of
        # the internal (non-tree) attributes, with nested Nodes expanded.
        internals = [
                      '.. internal attributes:',
                      '     .transform: %s.%s' % (self.transform.__module__,
                                                  self.transform.__name__),
                      '     .details:']
        details = self.details.items()
        details.sort()
        for key, value in details:
            if isinstance(value, Node):
                internals.append('%7s%s:' % ('', key))
                internals.extend(['%9s%s' % ('', line)
                                  for line in value.pformat().splitlines()])
            elif value and isinstance(value, list) \
                  and isinstance(value[0], Node):
                internals.append('%7s%s:' % ('', key))
                for v in value:
                    internals.extend(['%9s%s' % ('', line)
                                      for line in v.pformat().splitlines()])
            else:
                internals.append('%7s%s: %r' % ('', key, value))
        return (Element.pformat(self, indent, level)
                + ''.join([('    %s%s\n' % (indent * level, line))
                           for line in internals]))

    def copy(self):
        # Shallow copy: shares `transform` and `details` with the original.
        return self.__class__(self.transform, self.details, self.rawsource,
                              **self.attributes)
class raw(Special, Inline, PreBibliographic, FixedTextElement):

    """
    Raw data that is to be passed untouched to the Writer.
    """

    pass
# =================
# Inline Elements
# =================
# Simple inline markup elements:
class emphasis(Inline, TextElement): pass
class strong(Inline, TextElement): pass
class literal(Inline, TextElement): pass

# Referential inline elements (hyperlinks, footnote/citation references):
class reference(General, Inline, Referential, TextElement): pass
class footnote_reference(Inline, Referential, TextElement): pass
class citation_reference(Inline, Referential, TextElement): pass

class substitution_reference(Inline, TextElement): pass
class title_reference(Inline, TextElement): pass
class abbreviation(Inline, TextElement): pass
class acronym(Inline, TextElement): pass
class superscript(Inline, TextElement): pass
class subscript(Inline, TextElement): pass

class image(General, Inline, Element):

    def astext(self):
        # An image has no text content; fall back to its 'alt' attribute.
        return self.get('alt', '')

class inline(Inline, TextElement): pass
class problematic(Inline, TextElement): pass
class generated(Inline, TextElement): pass
# ========================================
# Auxiliary Classes, Functions, and Data
# ========================================
node_class_names = """
Text
abbreviation acronym address admonition attention attribution author
authors
block_quote bullet_list
caption caution citation citation_reference classifier colspec comment
compound contact container copyright
danger date decoration definition definition_list definition_list_item
description docinfo doctest_block document
emphasis entry enumerated_list error
field field_body field_list field_name figure footer
footnote footnote_reference
generated
header hint
image important inline
label legend line line_block list_item literal literal_block
note
option option_argument option_group option_list option_list_item
option_string organization
paragraph pending problematic
raw reference revision row rubric
section sidebar status strong subscript substitution_definition
substitution_reference subtitle superscript system_message
table target tbody term tgroup thead tip title title_reference topic
transition
version
warning""".split()
"""A list of names of all concrete Node subclasses."""
class NodeVisitor:

    """
    Abstract base class for document tree traversals, following the
    "Visitor" pattern [GoF95]_.

    `Node.walk()` calls `dispatch_visit()` when it enters a node, and
    `Node.walkabout()` additionally calls `dispatch_departure()` before
    leaving it.  Those dispatchers look up "``visit_`` + node class name"
    and "``depart_`` + node class name" on this object, so subclasses add
    behaviour simply by defining correspondingly named methods.

    Subclass this class directly when methods must be implemented for *all*
    node types encountered (such as for `docutils.writers.Writer`
    subclasses); unimplemented node types then raise exceptions.  For sparse
    traversals, where only certain node types are of interest, subclass
    `SparseNodeVisitor` instead.  When (mostly or entirely) uniform
    processing is desired, subclass `GenericNodeVisitor`.

    .. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
       Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
       1995.
    """

    optional = ()
    """
    Tuple of node class names (strings) for which no exception is raised
    when a visitor implements neither a visit nor a departure method.

    Used to ensure transitional compatibility with existing 3rd-party
    writers.
    """

    def __init__(self, document):
        self.document = document

    def dispatch_visit(self, node):
        """
        Dispatch to "``visit_`` + node class name", falling back to
        `self.unknown_visit` when no such method exists; return its result.
        """
        klass = node.__class__.__name__
        handler = getattr(self, 'visit_' + klass, self.unknown_visit)
        self.document.reporter.debug(
            'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
            % (handler.__name__, klass))
        return handler(node)

    def dispatch_departure(self, node):
        """
        Dispatch to "``depart_`` + node class name", falling back to
        `self.unknown_departure` when no such method exists; return its
        result.
        """
        klass = node.__class__.__name__
        handler = getattr(self, 'depart_' + klass, self.unknown_departure)
        self.document.reporter.debug(
            'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
            % (handler.__name__, klass))
        return handler(node)

    def unknown_visit(self, node):
        """
        Called when entering unknown `Node` types.

        Raises unless the node type is listed in `self.optional` and the
        ``strict_visitor`` setting is off.
        """
        if (self.document.settings.strict_visitor
            or node.__class__.__name__ not in self.optional):
            raise NotImplementedError(
                '%s visiting unknown node type: %s'
                % (self.__class__, node.__class__.__name__))

    def unknown_departure(self, node):
        """
        Called before exiting unknown `Node` types.

        Raises unless the node type is listed in `self.optional` and the
        ``strict_visitor`` setting is off.
        """
        if (self.document.settings.strict_visitor
            or node.__class__.__name__ not in self.optional):
            raise NotImplementedError(
                '%s departing unknown node type: %s'
                % (self.__class__, node.__class__.__name__))
class SparseNodeVisitor(NodeVisitor):

    """
    Base class for sparse traversals, where only certain node types are of
    interest.  When ``visit_...`` & ``depart_...`` methods should be
    implemented for *all* node types (such as for `docutils.writers.Writer`
    subclasses), subclass `NodeVisitor` instead.
    """

    # No-op visit_/depart_ methods for every concrete node class are
    # installed on this class by `_add_node_class_names()` below.
class GenericNodeVisitor(NodeVisitor):

    """
    Generic "Visitor" abstract superclass, for simple traversals.

    Unless overridden, each ``visit_...`` method calls `default_visit()`, and
    each ``depart_...`` method (when using `Node.walkabout()`) calls
    `default_departure()`. `default_visit()` (and `default_departure()`) must
    be overridden in subclasses.

    Define fully generic visitors by overriding `default_visit()` (and
    `default_departure()`) only.  Define semi-generic visitors by overriding
    individual ``visit_...()`` (and ``depart_...()``) methods also.

    `NodeVisitor.unknown_visit()` (`NodeVisitor.unknown_departure()`) should
    be overridden for default behavior.
    """

    # The per-node-class visit_/depart_ delegating methods are installed on
    # this class by `_add_node_class_names()` below.

    def default_visit(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError

    def default_departure(self, node):
        """Override for generic, uniform traversals."""
        raise NotImplementedError
# Module-level functions below are installed as methods on the visitor
# classes by `_add_node_class_names()`; hence the `self` parameter.

def _call_default_visit(self, node):
    # Installed as GenericNodeVisitor.visit_<name> for every node class.
    self.default_visit(node)

def _call_default_departure(self, node):
    # Installed as GenericNodeVisitor.depart_<name> for every node class.
    self.default_departure(node)

def _nop(self, node):
    # Installed as SparseNodeVisitor.visit_<name>/depart_<name>: do nothing.
    pass
def _add_node_class_names(names):
    """Save typing with dynamic assignments:"""
    # For each concrete node class name, give GenericNodeVisitor delegating
    # visit_/depart_ methods and SparseNodeVisitor no-op ones.
    for _name in names:
        setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
        setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
        setattr(SparseNodeVisitor, 'visit_' + _name, _nop)
        setattr(SparseNodeVisitor, 'depart_' + _name, _nop)

# Populate the visitor classes at import time:
_add_node_class_names(node_class_names)
class TreeCopyVisitor(GenericNodeVisitor):

    """
    Make a complete copy of a tree or branch, including element attributes.
    """

    def __init__(self, document):
        GenericNodeVisitor.__init__(self, document)
        self.parent_stack = []   # saved copy targets, innermost last
        self.parent = []         # current copy target; starts as a bare list

    def get_tree_copy(self):
        # After a complete traversal the root list holds the copied tree.
        return self.parent[0]

    def default_visit(self, node):
        """Copy the current node, and make it the new acting parent."""
        newnode = node.copy()
        self.parent.append(newnode)
        self.parent_stack.append(self.parent)
        self.parent = newnode

    def default_departure(self, node):
        """Restore the previous acting parent."""
        self.parent = self.parent_stack.pop()
class TreePruningException(Exception):

    """
    Base class for `NodeVisitor`-related tree pruning exceptions.

    Raise subclasses from within ``visit_...`` or ``depart_...`` methods
    called from `Node.walk()` and `Node.walkabout()` tree traversals to prune
    the tree traversed.
    """

    pass


class SkipChildren(TreePruningException):

    """
    Do not visit any children of the current node.  The current node's
    siblings and ``depart_...`` method are not affected.
    """

    pass


class SkipSiblings(TreePruningException):

    """
    Do not visit any more siblings (to the right) of the current node.  The
    current node's children and its ``depart_...`` method are not affected.
    """

    pass


class SkipNode(TreePruningException):

    """
    Do not visit the current node's children, and do not call the current
    node's ``depart_...`` method.
    """

    pass


class SkipDeparture(TreePruningException):

    """
    Do not call the current node's ``depart_...`` method.  The current node's
    children and siblings are not affected.
    """

    pass


class NodeFound(TreePruningException):

    """
    Raise to indicate that the target of a search has been found.  This
    exception must be caught by the client; it is not caught by the traversal
    code.
    """

    pass


class StopTraversal(TreePruningException):

    """
    Stop the traversal altogether.  The current node's ``depart_...`` method
    is not affected.  The parent nodes' ``depart_...`` methods are also called
    as usual.  No other nodes are visited.  This is an alternative to
    NodeFound that does not cause exception handling to trickle up to the
    caller.
    """

    pass
def make_id(string):
    """
    Convert `string` into an identifier and return it.

    Docutils identifiers conform to the regular expression
    ``[a-z](-?[a-z0-9]+)*``.  For CSS compatibility, identifiers (the
    "class" and "id" attributes) contain only lowercase letters, digits and
    hyphens -- no underscores, colons, or periods.  This is the intersection
    of what the `HTML 4.01 spec`_ allows for ID/NAME tokens and what the
    `CSS1 spec`_ "name" token allows.

    .. _HTML 4.01 spec: http://www.w3.org/TR/html401
    .. _CSS1 spec: http://www.w3.org/TR/REC-CSS1
    """
    candidate = string.lower()
    if not isinstance(candidate, unicode):
        candidate = candidate.decode()
    # Transliterate characters with obvious ASCII equivalents first
    # (two-character digraphs, then single letters).
    candidate = candidate.translate(_non_id_translate_digraphs)
    candidate = candidate.translate(_non_id_translate)
    # Decompose and drop the remaining non-ASCII characters; 'ascii' is
    # lowercase to prevent problems with the Turkish locale.
    candidate = unicodedata.normalize(
        'NFKD', candidate).encode('ascii', 'ignore').decode('ascii')
    # Collapse whitespace runs, replace any other invalid characters with
    # hyphens, and trim characters that may not start or end an identifier.
    candidate = _non_id_chars.sub('-', ' '.join(candidate.split()))
    return str(_non_id_at_ends.sub('', candidate))
# Characters invalid in an identifier (replaced by '-'):
_non_id_chars = re.compile('[^a-z0-9]+')
# Leading digits/hyphens and trailing hyphens (stripped entirely):
_non_id_at_ends = re.compile('^[-0-9]+|-+$')
# Single-character Unicode -> ASCII transliterations used by make_id():
_non_id_translate = {
    0x00f8: u'o',       # o with stroke
    0x0111: u'd',       # d with stroke
    0x0127: u'h',       # h with stroke
    0x0131: u'i',       # dotless i
    0x0142: u'l',       # l with stroke
    0x0167: u't',       # t with stroke
    0x0180: u'b',       # b with stroke
    0x0183: u'b',       # b with topbar
    0x0188: u'c',       # c with hook
    0x018c: u'd',       # d with topbar
    0x0192: u'f',       # f with hook
    0x0199: u'k',       # k with hook
    0x019a: u'l',       # l with bar
    0x019e: u'n',       # n with long right leg
    0x01a5: u'p',       # p with hook
    0x01ab: u't',       # t with palatal hook
    0x01ad: u't',       # t with hook
    0x01b4: u'y',       # y with hook
    0x01b6: u'z',       # z with stroke
    0x01e5: u'g',       # g with stroke
    0x0225: u'z',       # z with hook
    0x0234: u'l',       # l with curl
    0x0235: u'n',       # n with curl
    0x0236: u't',       # t with curl
    0x0237: u'j',       # dotless j
    0x023c: u'c',       # c with stroke
    0x023f: u's',       # s with swash tail
    0x0240: u'z',       # z with swash tail
    0x0247: u'e',       # e with stroke
    0x0249: u'j',       # j with stroke
    0x024b: u'q',       # q with hook tail
    0x024d: u'r',       # r with stroke
    0x024f: u'y',       # y with stroke
    }
# Unicode -> two-character ASCII transliterations used by make_id():
_non_id_translate_digraphs = {
    0x00df: u'sz',      # ligature sz
    0x00e6: u'ae',      # ae
    0x0153: u'oe',      # ligature oe
    0x0238: u'db',      # db digraph
    0x0239: u'qp',      # qp digraph
    }
def dupname(node, name):
    # Demote `name` from the node's unique names to its duplicate names.
    # Order matters: append before remove, so a missing name fails loudly
    # without corrupting 'dupnames'.
    node['dupnames'].append(name)
    node['names'].remove(name)
    # Assume that this method is referenced, even though it isn't; we
    # don't want to throw unnecessary system_messages.
    node.referenced = 1
def fully_normalize_name(name):
    """Return a case- and whitespace-normalized name."""
    # Lowercase, then collapse all whitespace runs to single spaces and
    # strip the ends.
    words = name.lower().split()
    return ' '.join(words)
def whitespace_normalize_name(name):
    """Return a whitespace-normalized name."""
    # Collapse whitespace runs to single spaces; case is preserved.
    normalized_words = name.split()
    return ' '.join(normalized_words)
def serial_escape(value):
    """Escape string values that are elements of a list, for serialization."""
    # Backslashes first, so the backslashes introduced for spaces are not
    # themselves re-escaped.
    escaped = value.replace('\\', '\\\\')
    return escaped.replace(' ', '\\ ')
#
#
# Local Variables:
# indent-tabs-mode: nil
# sentence-end-double-space: t
# fill-column: 78
# End:
| Python |
# $Id: utils.py 6314 2010-04-26 10:04:17Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import warnings
import unicodedata
from docutils import ApplicationError, DataError
from docutils import nodes
from docutils._compat import bytes
class SystemMessage(ApplicationError):

    """Exception raised when a system_message reaches the halt level."""

    def __init__(self, system_message, level):
        # The exception text is the rendered message; `level` records how
        # severe the triggering system_message was.
        Exception.__init__(self, system_message.astext())
        self.level = level
class SystemMessagePropagation(ApplicationError): pass  # marker exception; not raised in this module
class Reporter:

    """
    Info/warning/error reporter and ``system_message`` element generator.

    Five levels of system messages are defined, along with corresponding
    methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.

    There is typically one Reporter object per process.  A Reporter object is
    instantiated with thresholds for reporting (generating warnings) and
    halting processing (raising exceptions), a switch to turn debug output on
    or off, and an I/O stream for warnings.  These are stored as instance
    attributes.

    When a system message is generated, its level is compared to the stored
    thresholds, and a warning or error is generated as appropriate.  Debug
    messages are produced iff the stored debug switch is on, independently of
    other thresholds.  Message output is sent to the stored warning stream if
    not set to ''.

    The Reporter class also employs a modified form of the "Observer" pattern
    [GoF95]_ to track system messages generated.  The `attach_observer`
    method should be called before parsing, with a bound method or function
    which accepts system messages.  The observer can be removed with
    `detach_observer`, and another added in its place.

    .. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
       Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
       1995.
    """

    levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
    """List of names for system message levels, indexed by level."""

    # system message level constants:
    (DEBUG_LEVEL,
     INFO_LEVEL,
     WARNING_LEVEL,
     ERROR_LEVEL,
     SEVERE_LEVEL) = range(5)

    def __init__(self, source, report_level, halt_level, stream=None,
                 debug=0, encoding=None, error_handler='backslashreplace'):
        """
        :Parameters:
            - `source`: The path to or description of the source data.
            - `report_level`: The level at or above which warning output will
              be sent to `stream`.
            - `halt_level`: The level at or above which `SystemMessage`
              exceptions will be raised, halting execution.
            - `debug`: Show debug (level=0) system messages?
            - `stream`: Where warning output is sent.  Can be file-like (has a
              ``.write`` method), a string (file name, opened for writing),
              '' (empty string, for discarding all stream messages) or
              `None` (implies `sys.stderr`; default).
            - `encoding`: The output encoding.
            - `error_handler`: The error handler for stderr output encoding.
        """

        self.source = source
        """The path to or description of the source data."""

        self.error_handler = error_handler
        """The character encoding error handler."""

        self.debug_flag = debug
        """Show debug (level=0) system messages?"""

        self.report_level = report_level
        """The level at or above which warning output will be sent
        to `self.stream`."""

        self.halt_level = halt_level
        """The level at or above which `SystemMessage` exceptions
        will be raised, halting execution."""

        # A string-valued `stream` is a file name to open for writing.
        # NOTE(review): Python 2 idiom -- `unicode` plus the compat `bytes`
        # alias (see docutils._compat import above).
        if stream is None:
            stream = sys.stderr
        elif stream and type(stream) in (unicode, bytes):
            # if `stream` is a file name, open it
            if type(stream) is bytes:
                stream = open(stream, 'w')
            else:
                stream = open(stream.encode(), 'w')

        self.stream = stream
        """Where warning output is sent."""

        # Default the output encoding to the stream's own, when it has one.
        if encoding is None:
            try:
                encoding = stream.encoding
            except AttributeError:
                pass

        self.encoding = encoding or 'ascii'
        """The output character encoding."""

        self.observers = []
        """List of bound methods or functions to call with each system_message
        created."""

        self.max_level = -1
        """The highest level system message generated so far."""

    def set_conditions(self, category, report_level, halt_level,
                       stream=None, debug=0):
        # Deprecated bulk setter; kept for backward compatibility only.
        warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
                      'set attributes via configuration settings or directly',
                      DeprecationWarning, stacklevel=2)
        self.report_level = report_level
        self.halt_level = halt_level
        if stream is None:
            stream = sys.stderr
        self.stream = stream
        self.debug_flag = debug

    def attach_observer(self, observer):
        """
        The `observer` parameter is a function or bound method which takes one
        argument, a `nodes.system_message` instance.
        """
        self.observers.append(observer)

    def detach_observer(self, observer):
        self.observers.remove(observer)

    def notify_observers(self, message):
        # Fan the system_message out to every registered observer.
        for observer in self.observers:
            observer(message)

    def system_message(self, level, message, *children, **kwargs):
        """
        Return a system_message object.

        Raise an exception or generate a warning if appropriate.
        """
        attributes = kwargs.copy()
        # Inherit 'source' and 'line' from a supplied base node (or its
        # closest ancestor carrying them); explicit kwargs take precedence.
        if 'base_node' in kwargs:
            source, line = get_source_line(kwargs['base_node'])
            del attributes['base_node']
            if source is not None:
                attributes.setdefault('source', source)
            if line is not None:
                attributes.setdefault('line', line)
                # assert source is not None, "node has line- but no source-argument"
        if not 'source' in attributes: # 'line' is absolute line number
            try: # look up (source, line-in-source)
                # NOTE(review): `self.locator` is never assigned in this
                # class; presumably attached externally -- the
                # AttributeError fallback below covers its absence.  Confirm.
                source, line = self.locator(attributes.get('line'))
                # print "locator lookup", kwargs.get('line'), "->", source, line
            except AttributeError:
                source, line = None, None
            if source is not None:
                attributes['source'] = source
            if line is not None:
                attributes['line'] = line
        # assert attributes['line'] is not None, (message, kwargs)
        # assert attributes['source'] is not None, (message, kwargs)
        attributes.setdefault('source', self.source)

        msg = nodes.system_message(message, level=level,
                                   type=self.levels[level],
                                   *children, **attributes)
        # Write to the warning stream when the level reaches the reporting
        # threshold (or it is a DEBUG message and debugging is on).
        if self.stream and (level >= self.report_level
                            or self.debug_flag and level == self.DEBUG_LEVEL
                            or level >= self.halt_level):
            msgtext = msg.astext() + '\n'
            try:
                self.stream.write(msgtext)
            except UnicodeEncodeError:
                # Retry with the configured encoding and error handler.
                self.stream.write(msgtext.encode(self.encoding,
                                                 self.error_handler))
        if level >= self.halt_level:
            raise SystemMessage(msg, level)
        if level > self.DEBUG_LEVEL or self.debug_flag:
            self.notify_observers(msg)
        self.max_level = max(level, self.max_level)
        return msg

    def debug(self, *args, **kwargs):
        """
        Level-0, "DEBUG": an internal reporting issue. Typically, there is no
        effect on the processing. Level-0 system messages are handled
        separately from the others.
        """
        # Returns None when debugging is off.
        if self.debug_flag:
            return self.system_message(self.DEBUG_LEVEL, *args, **kwargs)

    def info(self, *args, **kwargs):
        """
        Level-1, "INFO": a minor issue that can be ignored. Typically there is
        no effect on processing, and level-1 system messages are not reported.
        """
        return self.system_message(self.INFO_LEVEL, *args, **kwargs)

    def warning(self, *args, **kwargs):
        """
        Level-2, "WARNING": an issue that should be addressed. If ignored,
        there may be unpredictable problems with the output.
        """
        return self.system_message(self.WARNING_LEVEL, *args, **kwargs)

    def error(self, *args, **kwargs):
        """
        Level-3, "ERROR": an error that should be addressed. If ignored, the
        output will contain errors.
        """
        return self.system_message(self.ERROR_LEVEL, *args, **kwargs)

    def severe(self, *args, **kwargs):
        """
        Level-4, "SEVERE": a severe error that must be addressed. If ignored,
        the output will contain severe errors. Typically level-4 system
        messages are turned into exceptions which halt processing.
        """
        return self.system_message(self.SEVERE_LEVEL, *args, **kwargs)
# Errors raised while extracting directive/extension options (see
# extract_options() and assemble_option_dict() below):
class ExtensionOptionError(DataError): pass          # common base class
class BadOptionError(ExtensionOptionError): pass     # invalid field
class BadOptionDataError(ExtensionOptionError): pass # invalid option data
class DuplicateOptionError(ExtensionOptionError): pass  # option given twice
def extract_extension_options(field_list, options_spec):
    """
    Return a dictionary mapping extension option names to converted values.

    :Parameters:
        - `field_list`: A flat field list without field arguments, where each
          field body consists of a single paragraph only.
        - `options_spec`: Dictionary mapping known option names to a
          conversion function such as `int` or `float`.

    :Exceptions:
        - `KeyError` for unknown option names.
        - `ValueError` for invalid option values (raised by the conversion
           function).
        - `TypeError` for invalid option value types (raised by conversion
           function).
        - `DuplicateOptionError` for duplicate options.
        - `BadOptionError` for invalid fields.
        - `BadOptionDataError` for invalid option data (missing name,
          missing data, bad quotes, etc.).
    """
    # Two phases: pull raw (name, value) pairs out of the field list, then
    # validate and convert them against the spec.
    option_list = extract_options(field_list)
    option_dict = assemble_option_dict(option_list, options_spec)
    return option_dict
def extract_options(field_list):
    """
    Return a list of option (name, value) pairs from field names & bodies.

    :Parameter:
        `field_list`: A flat field list, where each field name is a single
        word and each field body consists of a single paragraph only.

    :Exceptions:
        - `BadOptionError` for invalid fields.
        - `BadOptionDataError` for invalid option data (missing name,
          missing data, bad quotes, etc.).
    """
    option_list = []
    for field in field_list:
        # field[0] is the field name element, field[1] the field body.
        if len(field[0].astext().split()) != 1:
            raise BadOptionError(
                'extension option field name may not contain multiple words')
        name = str(field[0].astext().lower())
        body = field[1]
        if len(body) == 0:
            # Empty body: the option is present but has no data.
            data = None
        elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
              or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
            # Body must be exactly one paragraph containing one Text node.
            raise BadOptionDataError(
                  'extension option field body may contain\n'
                  'a single paragraph only (option "%s")' % name)
        else:
            data = body[0][0].astext()
        option_list.append((name, data))
    return option_list
def assemble_option_dict(option_list, options_spec):
    """
    Return a mapping of option names to values.

    :Parameters:
        - `option_list`: A list of (name, value) pairs (the output of
          `extract_options()`).
        - `options_spec`: Dictionary mapping known option names to a
          conversion function such as `int` or `float`.

    :Exceptions:
        - `KeyError` for unknown option names.
        - `DuplicateOptionError` for duplicate options.
        - `ValueError` for invalid option values (raised by conversion
           function).
        - `TypeError` for invalid option value types (raised by conversion
           function).
    """
    options = {}
    for name, value in option_list:
        convertor = options_spec[name]  # raises KeyError if unknown
        if convertor is None:
            raise KeyError(name)        # or if explicitly disabled
        if name in options:
            raise DuplicateOptionError('duplicate option "%s"' % name)
        try:
            options[name] = convertor(value)
        except (ValueError, TypeError), detail:
            # Re-raise the same exception class with the option context
            # prepended.  (Python 2 ``except ..., name`` syntax.)
            raise detail.__class__('(option: "%s"; value: %r)\n%s'
                                   % (name, value, ' '.join(detail.args)))
    return options
class NameValueError(DataError): pass  # raised by extract_name_value() below
def decode_path(path):
    """
    Decode file/path string. Return `nodes.reprunicode` object.

    Convert to Unicode without the UnicodeDecode error of the
    implicit 'ascii:strict' decoding.
    """
    # see also http://article.gmane.org/gmane.text.docutils.user/2905
    # NOTE(review): relies on Python 2 ``str.decode``; the AttributeError
    # branch also absorbs inputs with no decode method (e.g. None).
    try:
        path = path.decode(sys.getfilesystemencoding(), 'strict')
    except AttributeError: # default value None has no decode method
        return nodes.reprunicode(path)
    except UnicodeDecodeError:
        # Filesystem encoding failed: try UTF-8, then fall back to ASCII
        # with replacement characters so something is always returned.
        try:
            path = path.decode('utf-8', 'strict')
        except UnicodeDecodeError:
            path = path.decode('ascii', 'replace')
    return nodes.reprunicode(path)
def extract_name_value(line):
    """
    Return a list of (name, value) from a line of the form "name=value ...".

    :Exception:
        `NameValueError` for invalid input (missing name, missing data, bad
        quotes, etc.).
    """
    attlist = []
    while line:
        eq_index = line.find('=')
        if eq_index == -1:
            raise NameValueError('missing "="')
        name = line[:eq_index].strip()
        if eq_index == 0 or not name:
            raise NameValueError(
                'missing attribute name before "="')
        line = line[eq_index + 1:].lstrip()
        if not line:
            raise NameValueError(
                'missing value after "%s="' % name)
        if line[0] in '\'"':
            # Quoted value: find the matching close quote; it must be
            # followed by whitespace (or end of line).
            quote = line[0]
            close = line.find(quote, 1)
            if close == -1:
                raise NameValueError(
                    'attribute "%s" missing end quote (%s)'
                    % (name, quote))
            if len(line) > close + 1 and line[close + 1].strip():
                raise NameValueError(
                    'attribute "%s" end quote (%s) not followed by '
                    'whitespace' % (name, quote))
            value = line[1:close]
            line = line[close + 1:].lstrip()
        else:
            # Unquoted value: runs up to the next space (or end of line).
            space_index = line.find(' ')
            if space_index == -1:
                value = line
                line = ''
            else:
                value = line[:space_index]
                line = line[space_index + 1:].lstrip()
        attlist.append((name.lower(), value))
    return attlist
def new_reporter(source_path, settings):
    """
    Return a new Reporter object.

    :Parameters:
        `source` : string
            The path to or description of the source text of the document.
        `settings` : optparse.Values object
            Runtime settings.
    """
    # Thin factory: all thresholds/streams come from the runtime settings.
    reporter = Reporter(
        source_path, settings.report_level, settings.halt_level,
        stream=settings.warning_stream, debug=settings.debug,
        encoding=settings.error_encoding,
        error_handler=settings.error_encoding_error_handler)
    return reporter
def new_document(source_path, settings=None):
    """
    Return a new empty document object.

    :Parameters:
        `source_path` : string
            The path to or description of the source text of the document.
        `settings` : optparse.Values object
            Runtime settings.  If none provided, a default set will be used.
    """
    # Imported here to avoid a circular import at module load time.
    from docutils import frontend
    if settings is None:
        settings = frontend.OptionParser().get_default_values()
    source_path = decode_path(source_path)
    reporter = new_reporter(source_path, settings)
    document = nodes.document(settings, reporter, source=source_path)
    document.note_source(source_path, -1)
    return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
    # Substitute RCS/CVS keywords (e.g. "$Date$") in a single-Text paragraph.
    # `keyword_substitutions` is a sequence of (compiled pattern, replacement)
    # pairs; only the first matching pattern is applied.
    if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
        textnode = paragraph[0]
        for pattern, substitution in keyword_substitutions:
            match = pattern.search(textnode)
            if match:
                paragraph[0] = nodes.Text(pattern.sub(substitution, textnode))
                return
def relative_path(source, target):
    """
    Build and return a path to `target`, relative to `source` (both files).

    If there is no common prefix, return the absolute path to `target`.
    """
    src_parts = os.path.abspath(source or 'dummy_file').split(os.sep)
    tgt_parts = os.path.abspath(target).split(os.sep)
    # Compare the first 2 parts because '/dir'.split('/') == ['', 'dir']:
    if src_parts[:2] != tgt_parts[:2]:
        # Nothing in common between the paths; fall back to the absolute
        # target path, joined with '/' so it is usable as a URL.
        return '/'.join(tgt_parts)
    src_parts.reverse()
    tgt_parts.reverse()
    while (src_parts and tgt_parts
           and src_parts[-1] == tgt_parts[-1]):
        # Drop the path components the two paths share:
        src_parts.pop()
        tgt_parts.pop()
    tgt_parts.reverse()
    # One '..' for each remaining directory component of `source`:
    return '/'.join(['..'] * (len(src_parts) - 1) + tgt_parts)
def get_stylesheet_reference(settings, relative_to=None):
    """
    Retrieve a stylesheet reference from the settings object.

    If `settings.stylesheet_path` is set, return it rewritten relative to
    `relative_to` (defaulting to the destination path); otherwise return
    `settings.stylesheet` unchanged.

    Deprecated. Use get_stylesheet_list() instead to
    enable specification of multiple stylesheets as a comma-separated
    list.
    """
    if settings.stylesheet_path:
        assert not settings.stylesheet, (
            'stylesheet and stylesheet_path are mutually exclusive.')
        # PEP 8: compare to the None singleton with `is`, not `==`.
        if relative_to is None:
            relative_to = settings._destination
        return relative_path(relative_to, settings.stylesheet_path)
    else:
        return settings.stylesheet
# Return 'stylesheet' or 'stylesheet_path' arguments as list.
#
# The original settings arguments are kept unchanged: you can test
# with e.g. ``if settings.stylesheet_path:``
#
# Differences to ``get_stylesheet_reference``:
# * return value is a list
# * no re-writing of the path (and therefore no optional argument)
# (if required, use ``utils.relative_path(source, target)``
# in the calling script)
def get_stylesheet_list(settings):
    """
    Retrieve list of stylesheet references from the settings object.
    """
    # Exactly one of the two settings may be active; an empty result means
    # no stylesheets were configured at all.
    if settings.stylesheet_path:
        assert not settings.stylesheet, (
            'stylesheet and stylesheet_path are mutually exclusive.')
        references = settings.stylesheet_path
    elif settings.stylesheet:
        references = settings.stylesheet
    else:
        return []
    return references.split(",")
def get_trim_footnote_ref_space(settings):
    """
    Return whether or not to trim footnote space.

    If trim_footnote_reference_space is not None, return it.

    If trim_footnote_reference_space is None, return False unless the
    footnote reference style is 'superscript'.
    """
    explicit = settings.trim_footnote_reference_space
    if explicit is not None:
        return explicit
    # Implicit default: trim only for superscript-style references.
    return (hasattr(settings, 'footnote_references')
            and settings.footnote_references == 'superscript')
def get_source_line(node):
    """
    Return the "source" and "line" attributes from the `node` given or from
    its closest ancestor.
    """
    current = node
    while current:
        # First node (walking upward) that carries either attribute wins.
        if current.source or current.line:
            return current.source, current.line
        current = current.parent
    return None, None
def escape2null(text):
    """Return a string with escape-backslashes converted to nulls."""
    pieces = []
    start = 0
    while True:
        backslash = text.find('\\', start)
        if backslash == -1:
            pieces.append(text[start:])
            return ''.join(pieces)
        pieces.append(text[start:backslash])
        # Null marker plus the escaped character (empty at end of text).
        pieces.append('\x00' + text[backslash + 1:backslash + 2])
        start = backslash + 2   # skip the character after the escape
def unescape(text, restore_backslashes=0):
    """
    Return a string with nulls removed or restored to backslashes.
    Backslash-escaped spaces are also removed.
    """
    if restore_backslashes:
        return text.replace('\x00', '\\')
    # Order matters: null+space and null+newline disappear entirely
    # (escaped whitespace), then any remaining null markers are dropped.
    for marker in ('\x00 ', '\x00\n', '\x00'):
        text = text.replace(marker, '')
    return text
# Lookup table used by `east_asian_column_width` below.
east_asian_widths = {'W': 2,   # Wide
                     'F': 2,   # Full-width (wide)
                     'Na': 1,  # Narrow
                     'H': 1,   # Half-width (narrow)
                     'N': 1,   # Neutral (not East Asian, treated as narrow)
                     'A': 1}   # Ambiguous (s/b wide in East Asian context,
                               # narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def east_asian_column_width(text):
    """
    Return the monospace column width of `text`.

    For `unicode` strings (Python 2), each character contributes the
    width given by `east_asian_widths`, so wide/full-width East Asian
    characters count as two columns.  Any other input (byte strings)
    falls back to plain ``len()``.
    """
    if isinstance(text, unicode):
        total = 0
        for c in text:
            total += east_asian_widths[unicodedata.east_asian_width(c)]
        return total
    else:
        return len(text)
# Select the East-Asian-aware width function when this Python's
# `unicodedata` module provides `east_asian_width`; otherwise fall
# back to plain `len`.
if hasattr(unicodedata, 'east_asian_width'):
    column_width = east_asian_column_width
else:
    column_width = len
def uniq(L):
    """Return the items of `L` without duplicates, keeping first-seen order.

    Uses list membership (not a set) so unhashable items are supported.
    """
    seen = []
    for element in L:
        if element not in seen:
            seen.append(element)
    return seen
class DependencyList:

    """
    List of dependencies, with file recording support.

    Note that the output file is not automatically closed.  You have
    to explicitly call the close() method.
    """

    def __init__(self, output_file=None, dependencies=()):
        """
        Initialize the dependency list, automatically setting the
        output file to `output_file` (see `set_output()`) and adding
        all supplied dependencies.

        `dependencies` defaults to an immutable empty tuple (a mutable
        default list would be shared between all instances).
        """
        self.set_output(output_file)
        for i in dependencies:
            self.add(i)

    def set_output(self, output_file):
        """
        Set the output file and clear the list of already added
        dependencies.

        `output_file` must be a string.  The specified file is
        immediately overwritten.

        If output_file is '-', the output will be written to stdout.
        If it is None, no file output is done when calling add().
        """
        self.list = []
        if output_file == '-':
            self.file = sys.stdout
        elif output_file:
            self.file = open(output_file, 'w')
        else:
            self.file = None

    def add(self, *filenames):
        """
        If the dependency `filename` has not already been added,
        append it to self.list and print it to self.file if self.file
        is not None.
        """
        for filename in filenames:
            if filename not in self.list:
                self.list.append(filename)
                if self.file is not None:
                    # equivalent to the Python-2-only ``print >>`` it
                    # replaces; also works under Python 3
                    self.file.write(filename + '\n')

    def close(self):
        """
        Close the output file.  Safe to call when no file is open.
        """
        if self.file is not None:
            self.file.close()
        self.file = None

    def __repr__(self):
        if self.file:
            output_file = self.file.name
        else:
            output_file = None
        return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
| Python |
# -*- coding: utf8 -*-
# $Id: __init__.py 6348 2010-06-28 22:08:05Z milde $
# Author: Engelbert Gruber <grubert@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
"""LaTeX2e document tree Writer."""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention deactivate code by two # i.e. ##.
import sys
import os
import time
import re
import string
from docutils import frontend, nodes, languages, writers, utils, io
from docutils.transforms import writer_aux
# compatibility module for Python 2.3
if not hasattr(string, 'Template'):
import docutils._string_template_compat
string.Template = docutils._string_template_compat.Template
class Writer(writers.Writer):

    """LaTeX2e writer: translates the document tree and fills a template."""

    supported = ('latex','latex2e')
    """Formats this writer supports."""

    default_template = 'default.tex'
    default_template_path = os.path.dirname(__file__)

    default_preamble = '\n'.join([r'% PDF Standard Fonts',
                                  r'\usepackage{mathptmx} % Times',
                                  r'\usepackage[scaled=.90]{helvet}',
                                  r'\usepackage{courier}'])

    settings_spec = (
        'LaTeX-Specific Options',
        None,
        (('Specify documentclass. Default is "article".',
          ['--documentclass'],
          {'default': 'article', }),
         ('Specify document options. Multiple options can be given, '
          'separated by commas. Default is "a4paper".',
          ['--documentoptions'],
          {'default': 'a4paper', }),
         ('Footnotes with numbers/symbols by Docutils. (default)',
          ['--docutils-footnotes'],
          {'default': True, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Alias for --docutils-footnotes (deprecated)',
          ['--use-latex-footnotes'],
          {'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Use figure floats for footnote text (deprecated)',
          ['--figure-footnotes'],
          {'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Format for footnote references: one of "superscript" or '
          '"brackets". Default is "superscript".',
          ['--footnote-references'],
          {'choices': ['superscript', 'brackets'], 'default': 'superscript',
           'metavar': '<format>',
           'overrides': 'trim_footnote_reference_space'}),
         ('Use \\cite command for citations. ',
          ['--use-latex-citations'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Use figure floats for citations '
          '(might get mixed with real figures). (default)',
          ['--figure-citations'],
          {'dest': 'use_latex_citations', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Format for block quote attributions: one of "dash" (em-dash '
          'prefix), "parentheses"/"parens", or "none". Default is "dash".',
          ['--attribution'],
          {'choices': ['dash', 'parentheses', 'parens', 'none'],
           'default': 'dash', 'metavar': '<format>'}),
         ('Specify LaTeX packages/stylesheets. '
          ' A style is referenced with \\usepackage if extension is '
          '".sty" or omitted and with \\input else. '
          ' Overrides previous --stylesheet and --stylesheet-path settings.',
          ['--stylesheet'],
          {'default': '', 'metavar': '<file>',
           'overrides': 'stylesheet_path'}),
         ('Like --stylesheet, but the path is rewritten '
          'relative to the output file. ',
          ['--stylesheet-path'],
          {'metavar': '<file>', 'overrides': 'stylesheet'}),
         ('Link to the stylesheet(s) in the output file. (default)',
          ['--link-stylesheet'],
          {'dest': 'embed_stylesheet', 'action': 'store_false'}),
         ('Embed the stylesheet(s) in the output file. '
          'Stylesheets must be accessible during processing. ',
          ['--embed-stylesheet'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Customization by LaTeX code in the preamble. '
          'Default: select PDF standard fonts (Times, Helvetica, Courier).',
          ['--latex-preamble'],
          {'default': default_preamble}),
         ('Specify the template file. Default: "%s".' % default_template,
          ['--template'],
          {'default': default_template, 'metavar': '<file>'}),
         ('Table of contents by LaTeX. (default) ',
          ['--use-latex-toc'],
          {'default': 1, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Table of contents by Docutils (without page numbers). ',
          ['--use-docutils-toc'],
          {'dest': 'use_latex_toc', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Add parts on top of the section hierarchy.',
          ['--use-part-section'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Attach author and date to the document info table. (default) ',
          ['--use-docutils-docinfo'],
          {'dest': 'use_latex_docinfo', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Attach author and date to the document title.',
          ['--use-latex-docinfo'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ("Typeset abstract as topic. (default)",
          ['--topic-abstract'],
          {'dest': 'use_latex_abstract', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ("Use LaTeX abstract environment for the document's abstract. ",
          ['--use-latex-abstract'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Color of any hyperlinks embedded in text '
          '(default: "blue", "0" to disable).',
          ['--hyperlink-color'], {'default': 'blue'}),
         ('Enable compound enumerators for nested enumerated lists '
          '(e.g. "1.2.a.ii"). Default: disabled.',
          ['--compound-enumerators'],
          {'default': None, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable compound enumerators for nested enumerated lists. '
          'This is the default.',
          ['--no-compound-enumerators'],
          {'action': 'store_false', 'dest': 'compound_enumerators'}),
         ('Enable section ("." subsection ...) prefixes for compound '
          'enumerators. This has no effect without --compound-enumerators. '
          'Default: disabled.',
          ['--section-prefix-for-enumerators'],
          {'default': None, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable section prefixes for compound enumerators. '
          'This is the default.',
          ['--no-section-prefix-for-enumerators'],
          {'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
         ('Set the separator between section number and enumerator '
          'for compound enumerated lists. Default is "-".',
          ['--section-enumerator-separator'],
          {'default': '-', 'metavar': '<char>'}),
         ('When possible, use the specified environment for literal-blocks. '
          'Default is quoting of whitespace and special chars.',
          ['--literal-block-env'],
          {'default': ''}),
         ('When possible, use verbatim for literal-blocks. '
          'Compatibility alias for "--literal-block-env=verbatim".',
          ['--use-verbatim-when-possible'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Table style. "standard" with horizontal and vertical lines, '
          '"booktabs" (LaTeX booktabs style) only horizontal lines '
          'above and below the table and below the header or "borderless". '
          'Default: "standard"',
          ['--table-style'],
          {'choices': ['standard', 'booktabs','nolines', 'borderless'],
           'default': 'standard',
           'metavar': '<format>'}),
         ('LaTeX graphicx package option. '
          'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
          'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
          'Default is no option.',
          ['--graphicx-option'],
          {'default': ''}),
         ('LaTeX font encoding. '
          'Possible values are "", "T1" (default), "OT1", "LGR,T1" or '
          'any other combination of options to the `fontenc` package. ',
          ['--font-encoding'],
          {'default': 'T1'}),
         ('Per default the latex-writer puts the reference title into '
          'hyperreferences. Specify "ref*" or "pageref*" to get the section '
          'number or the page number.',
          ['--reference-label'],
          {'default': None, }),
         ('Specify style and database for bibtex, for example '
          '"--use-bibtex=mystyle,mydb1,mydb2".',
          ['--use-bibtex'],
          {'default': None, }),
         ),)

    settings_defaults = {'sectnum_depth': 0 # updated by SectNum transform
                        }

    relative_path_settings = ('stylesheet_path',)

    config_section = 'latex2e writer'
    config_section_dependencies = ('writers',)

    # Template placeholders filled line-wise (joined with '\n'):
    head_parts = ('head_prefix', 'requirements', 'latex_preamble',
                  'stylesheet', 'fallbacks', 'pdfsetup', 'title', 'subtitle')
    visitor_attributes = head_parts + ('body_pre_docinfo', 'docinfo',
                                       'dedication', 'abstract', 'body')

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        self.translator_class = LaTeXTranslator

    # Override parent method to add latex-specific transforms
    def get_transforms(self):
        """Return parent transforms plus the `Admonitions` converter."""
        transform_list = writers.Writer.get_transforms(self)
        # Convert specific admonitions to generic one
        transform_list.append(writer_aux.Admonitions)
        # TODO: footnote collection transform
        # transform_list.append(footnotes.collect)
        return transform_list

    def translate(self):
        """Visit the document tree, then substitute parts into the template."""
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        # copy parts
        for part in self.visitor_attributes:
            setattr(self, part, getattr(visitor, part))
        # get template string from file: try the path as given, then
        # relative to this module's directory
        try:
            template_file = open(self.document.settings.template, 'rb')
        except IOError:
            template_file = open(os.path.join(
                os.path.dirname(__file__),
                self.document.settings.template), 'rb')
        try:
            template = string.Template(unicode(template_file.read(), 'utf-8'))
        finally:
            # close the file even if reading/decoding raises
            template_file.close()
        # fill template
        self.assemble_parts() # create dictionary of parts
        self.output = template.substitute(self.parts)

    def assemble_parts(self):
        """Assemble the `self.parts` dictionary of output fragments."""
        writers.Writer.assemble_parts(self)
        for part in self.visitor_attributes:
            lines = getattr(self, part)
            if part in self.head_parts:
                if lines:
                    lines.append('') # to get a trailing newline
                self.parts[part] = '\n'.join(lines)
            else:
                # body contains inline elements, so join without newline
                self.parts[part] = ''.join(lines)
class Babel(object):

    """Language specifics for LaTeX.

    Maps ISO 639 language codes to babel option names and provides
    language-dependent quoting helpers for the translator.
    """

    # country code by a.schlock.
    # partly manually converted from iso and babel stuff, dialects and some
    # NOTE(review): the literal below contains duplicate keys ('hu', 'pt');
    # Python keeps the *last* occurrence, so 'hu' maps to 'hungarian' and
    # 'pt' to 'portuguese' (same value twice).
    _ISO639_TO_BABEL = {
        'no': 'norsk',     #XXX added by hand ( forget about nynorsk?)
        'gd': 'scottish',  #XXX added by hand
        'hu': 'magyar',    #XXX added by hand
        'pt': 'portuguese',#XXX added by hand
        'sl': 'slovenian',
        'af': 'afrikaans',
        'bg': 'bulgarian',
        'br': 'breton',
        'ca': 'catalan',
        'cs': 'czech',
        'cy': 'welsh',
        'da': 'danish',
        'fr': 'french',
        # french, francais, canadien, acadian
        'de': 'ngerman',  #XXX rather than german
        # ngerman, naustrian, german, germanb, austrian
        'el': 'greek',
        'en': 'english',
        # english, USenglish, american, UKenglish, british, canadian
        'eo': 'esperanto',
        'es': 'spanish',
        'et': 'estonian',
        'eu': 'basque',
        'fi': 'finnish',
        'ga': 'irish',
        'gl': 'galician',
        'he': 'hebrew',
        'hr': 'croatian',
        'hu': 'hungarian',
        'is': 'icelandic',
        'it': 'italian',
        'la': 'latin',
        'nl': 'dutch',
        'pl': 'polish',
        'pt': 'portuguese',
        'ro': 'romanian',
        'ru': 'russian',
        'sk': 'slovak',
        'sr': 'serbian',
        'sv': 'swedish',
        'tr': 'turkish',
        'uk': 'ukrainian'
        }

    def __init__(self, lang):
        # `lang` is a language code, possibly with dialect ('de_AT').
        self.language = lang
        # Alternating open/close quote state used by `next_quote`.
        self.quote_index = 0
        self.quotes = ('``', "''")
        self.setup = '' # language dependent configuration code
        # double quotes are "active" in some languages (e.g. German).
        # TODO: use \textquotedbl in OT1 font encoding?
        self.literal_double_quote = u'"'
        if self.language.startswith('de'):
            self.quotes = (r'\glqq{}', r'\grqq{}')
            self.literal_double_quote = ur'\dq{}'
        if self.language.startswith('it'):
            self.literal_double_quote = ur'{\char`\"}'
        if self.language.startswith('es'):
            # reset tilde ~ to the original binding (nobreakspace):
            self.setup = ('\n'
                r'\addto\shorthandsspanish{\spanishdeactivate{."~<>}}')

    def next_quote(self):
        # Return the next quote ligature, alternating open/close.
        q = self.quotes[self.quote_index]
        self.quote_index = (self.quote_index+1) % 2
        return q

    def quote_quotes(self,text):
        # Replace plain '"' characters by alternating language quotes.
        t = None
        for part in text.split('"'):
            if t == None:
                t = part
            else:
                t += self.next_quote() + part
        return t

    def get_language(self):
        # Strip dialect suffix ('de_AT' -> 'de'), then look up babel name;
        # returns "" for unknown codes.
        lang = self.language.split('_')[0] # filter dialects
        return self._ISO639_TO_BABEL.get(lang, "")
# Building blocks for the latex preamble
# --------------------------------------
class SortableDict(dict):

    """Dictionary with additional sorting methods.

    Tip: use a key starting with '_' for sorting before small letters
    and with '~' for sorting after small letters.
    """

    def sortedkeys(self):
        """Return sorted list of keys."""
        # sorted() works on both Python 2 lists and Python 3 dict views;
        # the previous ``keys = self.keys(); keys.sort()`` fails on views.
        return sorted(self.keys())

    def sortedvalues(self):
        """Return list of values sorted by keys."""
        return [self[key] for key in self.sortedkeys()]
# PreambleCmds
# `````````````
# A container for LaTeX code snippets that can be
# inserted into the preamble if required in the document.
#
# .. The package 'makecmds' would enable shorter definitions using the
# \providelength and \provideenvironment commands.
# However, it is pretty non-standard (texlive-latex-extra).
class PreambleCmds(object):
    """Building blocks for the latex preamble."""

# The snippets are attached to the class from outside so that each one
# can be referenced as ``PreambleCmds.<name>`` and inserted into the
# preamble only if the document requires it.

PreambleCmds.abstract = r"""
% abstract title
\providecommand*{\DUtitleabstract}[1]{\centerline{\textbf{#1}}}"""

PreambleCmds.admonition = r"""
% admonition (specially marked topic)
\providecommand{\DUadmonition}[2][class-arg]{%
  % try \DUadmonition#1{#2}:
  \ifcsname DUadmonition#1\endcsname%
    \csname DUadmonition#1\endcsname{#2}%
  \else
    \begin{center}
      \fbox{\parbox{0.9\textwidth}{#2}}
    \end{center}
  \fi
}"""

## PreambleCmds.caption = r"""% configure caption layout
## \usepackage{caption}
## \captionsetup{singlelinecheck=false}% no exceptions for one-liners"""

PreambleCmds.color = r"""\usepackage{color}"""

PreambleCmds.docinfo = r"""
% docinfo (width of docinfo table)
\DUprovidelength{\DUdocinfowidth}{0.9\textwidth}"""
# PreambleCmds.docinfo._depends = 'providelength'

PreambleCmds.embedded_package_wrapper = r"""\makeatletter
%% embedded stylesheet: %s
%s
\makeatother"""

PreambleCmds.dedication = r"""
% dedication topic
\providecommand{\DUtopicdedication}[1]{\begin{center}#1\end{center}}"""

PreambleCmds.error = r"""
% error admonition title
\providecommand*{\DUtitleerror}[1]{\DUtitle{\color{red}#1}}"""
# PreambleCmds.errortitle._depends = 'color'

PreambleCmds.fieldlist = r"""
% fieldlist environment
\ifthenelse{\isundefined{\DUfieldlist}}{
  \newenvironment{DUfieldlist}%
    {\quote\description}
    {\enddescription\endquote}
}{}"""

PreambleCmds.float_settings = r"""\usepackage{float} % float configuration
\floatplacement{figure}{H} % place figures here definitely"""

PreambleCmds.footnotes = r"""% numeric or symbol footnotes with hyperlinks
\providecommand*{\DUfootnotemark}[3]{%
   \raisebox{1em}{\hypertarget{#1}{}}%
   \hyperlink{#2}{\textsuperscript{#3}}%
}
\providecommand{\DUfootnotetext}[4]{%
  \begingroup%
   \renewcommand{\thefootnote}{%
       \protect\raisebox{1em}{\protect\hypertarget{#1}{}}%
       \protect\hyperlink{#2}{#3}}%
   \footnotetext{#4}%
  \endgroup%
}"""

PreambleCmds.footnote_floats = r"""% settings for footnotes as floats:
\setlength{\floatsep}{0.5em}
\setlength{\textfloatsep}{\fill}
\addtolength{\textfloatsep}{3em}
\renewcommand{\textfraction}{0.5}
\renewcommand{\topfraction}{0.5}
\renewcommand{\bottomfraction}{0.5}
\setcounter{totalnumber}{50}
\setcounter{topnumber}{50}
\setcounter{bottomnumber}{50}"""
# LaTeX code to load graphicx with the driver option chosen at LaTeX
# run time ("pdftex" when processing with pdf(la)tex, none otherwise).
# Fix: a stray "'))" after \fi -- leftover of an earlier refactoring --
# used to leak verbatim into every document built with
# --graphicx-option=auto; it has been removed.
PreambleCmds.graphicx_auto = r"""% Check output format
\ifx\pdftexversion\undefined
  \usepackage{graphicx}
\else
  \usepackage[pdftex]{graphicx}
\fi"""
PreambleCmds.inline = r"""
% inline markup (custom roles)
% \DUrole{#1}{#2} tries \DUrole#1{#2}
\providecommand*{\DUrole}[2]{%
\ifcsname DUrole#1\endcsname%
\csname DUrole#1\endcsname{#2}%
\else% backwards compatibility: try \docutilsrole#1{#2}
\ifcsname docutilsrole#1\endcsname%
\csname docutilsrole#1\endcsname{#2}%
\else%
#2%
\fi%
\fi%
}"""
PreambleCmds.legend = r"""
% legend environment
\ifthenelse{\isundefined{\DUlegend}}{
\newenvironment{DUlegend}{\small}{}
}{}"""
PreambleCmds.lineblock = r"""
% lineblock environment
\DUprovidelength{\DUlineblockindent}{2.5em}
\ifthenelse{\isundefined{\DUlineblock}}{
\newenvironment{DUlineblock}[1]{%
\list{}{\setlength{\partopsep}{\parskip}
\addtolength{\partopsep}{\baselineskip}
\setlength{\topsep}{0pt}
\setlength{\itemsep}{0.15\baselineskip}
\setlength{\parsep}{0pt}
\setlength{\leftmargin}{#1}}
\raggedright
}
{\endlist}
}{}"""
# PreambleCmds.lineblock._depends = 'providelength'
PreambleCmds.linking = r"""
%% hyperlinks:
\ifthenelse{\isundefined{\hypersetup}}{
\usepackage[unicode,colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}
\urlstyle{same} %% normal text font (alternatives: tt, rm, sf)
}{}"""
PreambleCmds.minitoc = r"""%% local table of contents
\usepackage{minitoc}"""
PreambleCmds.optionlist = r"""
% optionlist environment
\providecommand*{\DUoptionlistlabel}[1]{\bf #1 \hfill}
\DUprovidelength{\DUoptionlistindent}{3cm}
\ifthenelse{\isundefined{\DUoptionlist}}{
\newenvironment{DUoptionlist}{%
\list{}{\setlength{\labelwidth}{\DUoptionlistindent}
\setlength{\rightmargin}{1cm}
\setlength{\leftmargin}{\rightmargin}
\addtolength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
\renewcommand{\makelabel}{\DUoptionlistlabel}}
}
{\endlist}
}{}"""
# PreambleCmds.optionlist._depends = 'providelength'
PreambleCmds.providelength = r"""
% providelength (provide a length variable and set default, if it is new)
\providecommand*{\DUprovidelength}[2]{
\ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{}
}"""
PreambleCmds.rubric = r"""
% rubric (informal heading)
\providecommand*{\DUrubric}[2][class-arg]{%
\subsubsection*{\centering\textit{\textmd{#2}}}}"""
PreambleCmds.sidebar = r"""
% sidebar (text outside the main text flow)
\providecommand{\DUsidebar}[2][class-arg]{%
\begin{center}
\colorbox[gray]{0.80}{\parbox{0.9\textwidth}{#2}}
\end{center}
}"""
PreambleCmds.subtitle = r"""
% subtitle (for topic/sidebar)
\providecommand*{\DUsubtitle}[2][class-arg]{\par\emph{#2}\smallskip}"""
PreambleCmds.table = r"""\usepackage{longtable}
\usepackage{array}
\setlength{\extrarowheight}{2pt}
\newlength{\DUtablewidth} % internal use in tables"""
# Options [force,almostfull] prevent spurious error messages, see
# de.comp.text.tex/2005-12/msg01855
PreambleCmds.textcomp = """\
\\usepackage{textcomp} % text symbol macros"""
PreambleCmds.documenttitle = r"""
%% Document title
\title{%s}
\author{%s}
\date{%s}
\maketitle
"""
PreambleCmds.titlereference = r"""
% titlereference role
\providecommand*{\DUroletitlereference}[1]{\textsl{#1}}"""
PreambleCmds.title = r"""
% title for topics, admonitions and sidebar
\providecommand*{\DUtitle}[2][class-arg]{%
% call \DUtitle#1{#2} if it exists:
\ifcsname DUtitle#1\endcsname%
\csname DUtitle#1\endcsname{#2}%
\else
\smallskip\noindent\textbf{#2}\smallskip%
\fi
}"""
PreambleCmds.topic = r"""
% topic (quote with heading)
\providecommand{\DUtopic}[2][class-arg]{%
\ifcsname DUtopic#1\endcsname%
\csname DUtopic#1\endcsname{#2}%
\else
\begin{quote}#2\end{quote}
\fi
}"""
PreambleCmds.transition = r"""
% transition (break, fancybreak, anonymous section)
\providecommand*{\DUtransition}[1][class-arg]{%
\hspace*{\fill}\hrulefill\hspace*{\fill}
\vskip 0.5\baselineskip
}"""
class DocumentClass(object):

    """Details of a LaTeX document class."""

    def __init__(self, document_class, with_part=False):
        """Record the class name and build the section-command hierarchy."""
        self.document_class = document_class
        self._with_part = with_part
        hierarchy = ['section', 'subsection', 'subsubsection',
                     'paragraph', 'subparagraph']
        # Book-like classes get an extra 'chapter' level on top; 'part'
        # (when requested) sits above everything else.
        if document_class in ('book', 'memoir', 'report',
                              'scrbook', 'scrreprt'):
            hierarchy.insert(0, 'chapter')
        if with_part:
            hierarchy.insert(0, 'part')
        self.sections = hierarchy

    def section(self, level):
        """Return the LaTeX section name for section `level`.

        The name depends on the specific document class.
        Level is 1,2,3..., as level 0 is the title.
        """
        # Levels deeper than the hierarchy clamp to the innermost command.
        depth = min(level, len(self.sections))
        return self.sections[depth-1]
class Table(object):

    """Manage a table while traversing.

    Maybe change to a mixin defining the visit/departs, but then
    class Table internal variables are in the Translator.

    Table style might be

    :standard:   horizontal and vertical lines
    :booktabs:   only horizontal lines (requires "booktabs" LaTeX package)
    :borderless: no borders around table cells
    :nolines:    alias for borderless
    """

    def __init__(self, translator, latex_type, table_style):
        self._translator = translator
        self._latex_type = latex_type
        self._table_style = table_style
        self._open = 0
        # miscellaneous attributes
        self._attrs = {}
        self._col_width = []
        self._rowspan = []
        self.stubs = []
        self._in_thead = 0

    def open(self):
        """Reset per-table state when a table node is entered."""
        self._open = 1
        self._col_specs = []
        self.caption = []
        self._attrs = {}
        # NOTE(review): '_in_head' (sic) is set here while '_in_thead' is
        # the counter used elsewhere -- kept unchanged for compatibility.
        self._in_head = 0 # maybe context with search

    def close(self):
        """Clear per-table state when the table node is left."""
        self._open = 0
        self._col_specs = None
        self.caption = []
        self._attrs = {}
        self.stubs = []

    def is_open(self):
        return self._open

    def set_table_style(self, table_style):
        # silently ignore unknown styles
        if not table_style in ('standard','booktabs','borderless','nolines'):
            return
        self._table_style = table_style

    def get_latex_type(self):
        return self._latex_type

    def set(self, attr, value):
        self._attrs[attr] = value

    def get(self, attr):
        """Return a miscellaneous attribute, or None if unset."""
        if attr in self._attrs:
            return self._attrs[attr]
        return None

    def get_vertical_bar(self):
        # only 'standard' style tables have vertical lines
        if self._table_style == 'standard':
            return '|'
        return ''

    # horizontal lines are drawn below a row,
    def get_opening(self):
        """Return the LaTeX code opening the table environment."""
        if self._latex_type == 'longtable':
            # otherwise longtable might move before paragraph and subparagraph
            prefix = '\\leavevmode\n'
        else:
            prefix = ''
        prefix += r'\setlength{\DUtablewidth}{\linewidth}'
        return '%s\n\\begin{%s}[c]' % (prefix, self._latex_type)

    def get_closing(self):
        """Return the LaTeX code closing the table environment."""
        line = ''
        if self._table_style == 'booktabs':
            line = '\\bottomrule\n'
        # No '\hline' for 'standard' style here: depart_row() already
        # draws a rule below every row, including the last one.  (A dead
        # assignment to an unused variable used to sit here.)
        return '%s\\end{%s}' % (line, self._latex_type)

    def visit_colspec(self, node):
        self._col_specs.append(node)
        # "stubs" list is an attribute of the tgroup element:
        self.stubs.append(node.attributes.get('stub'))

    def get_colspecs(self):
        """Return column specification for longtable.

        Assumes reST line length being 80 characters.
        Table width is hairy.

        === ===
        ABC DEF
        === ===

        usually gets to narrow, therefore we add 1 (fiddlefactor).
        """
        width = 80
        total_width = 0.0
        # first see if we get too wide.
        for node in self._col_specs:
            colwidth = float(node['colwidth']+1) / width
            total_width += colwidth
        self._col_width = []
        self._rowspan = []
        # donot make it full linewidth
        factor = 0.93
        if total_width > 1.0:
            factor /= total_width
        bar = self.get_vertical_bar()
        latex_table_spec = ''
        for node in self._col_specs:
            colwidth = factor * float(node['colwidth']+1) / width
            self._col_width.append(colwidth+0.005)
            self._rowspan.append(0)
            latex_table_spec += '%sp{%.3f\\DUtablewidth}' % (bar, colwidth+0.005)
        return latex_table_spec+bar

    def get_column_width(self):
        """Return columnwidth for current cell (not multicell)."""
        return '%.2f\\DUtablewidth' % self._col_width[self._cell_in_row-1]

    def get_caption(self):
        """Return the \\caption line; continuation pages get a marker."""
        if not self.caption:
            return ''
        caption = ''.join(self.caption)
        if 1 == self._translator.thead_depth():
            return r'\caption{%s}\\' '\n' % caption
        return r'\caption[]{%s (... continued)}\\' '\n' % caption

    def need_recurse(self):
        # longtable heads are walked twice (firsthead/head)
        if self._latex_type == 'longtable':
            return 1 == self._translator.thead_depth()
        return 0

    def visit_thead(self):
        self._in_thead += 1
        if self._table_style == 'standard':
            return ['\\hline\n']
        elif self._table_style == 'booktabs':
            return ['\\toprule\n']
        return []

    def depart_thead(self):
        a = []
        #if self._table_style == 'standard':
        #    a.append('\\hline\n')
        if self._table_style == 'booktabs':
            a.append('\\midrule\n')
        if self._latex_type == 'longtable':
            if 1 == self._translator.thead_depth():
                a.append('\\endfirsthead\n')
            else:
                a.append('\\endhead\n')
                a.append(r'\multicolumn{%d}{c}' % len(self._col_specs) +
                         r'{\hfill ... continued on next page} \\')
                a.append('\n\\endfoot\n\\endlastfoot\n')
        # for longtable one could add firsthead, foot and lastfoot
        self._in_thead -= 1
        return a

    def visit_row(self):
        self._cell_in_row = 0

    def depart_row(self):
        """Close a row; draw \\hline or \\cline depending on open rowspans."""
        res = [' \\\\\n']
        self._cell_in_row = None # remove cell counter
        for i in range(len(self._rowspan)):
            if (self._rowspan[i]>0):
                self._rowspan[i] -= 1
        if self._table_style == 'standard':
            # columns whose rowspan expired in this row
            rowspans = [i+1 for i in range(len(self._rowspan))
                        if (self._rowspan[i]<=0)]
            if len(rowspans)==len(self._rowspan):
                res.append('\\hline\n')
            else:
                cline = ''
                rowspans.reverse()
                # TODO merge clines
                while 1:
                    try:
                        c_start = rowspans.pop()
                    except IndexError:  # no more expired columns
                        break
                    cline += '\\cline{%d-%d}\n' % (c_start,c_start)
                res.append(cline)
        return res

    def set_rowspan(self, cell, value):
        """Record `value` open rowspan rows for column index `cell`."""
        try:
            self._rowspan[cell] = value
        except IndexError:  # column index out of range: ignore
            pass

    def get_rowspan(self, cell):
        """Return open rowspan count for column `cell` (0 if unknown)."""
        try:
            return self._rowspan[cell]
        except IndexError:
            return 0

    def get_entry_number(self):
        return self._cell_in_row

    def visit_entry(self):
        self._cell_in_row += 1

    def is_stub_column(self):
        if len(self.stubs) >= self._cell_in_row:
            return self.stubs[self._cell_in_row-1]
        return False
class LaTeXTranslator(nodes.NodeVisitor):
# When options are given to the documentclass, latex will pass them
# to other packages, as done with babel.
# Dummy settings might be taken from document settings
# Config setting defaults
# -----------------------
# TODO: use mixins for different implementations.
# list environment for docinfo. else tabularx
## use_optionlist_for_docinfo = False # TODO: NOT YET IN USE
# Use compound enumerations (1.A.1.)
compound_enumerators = 0
# If using compound enumerations, include section information.
section_prefix_for_enumerators = 0
# This is the character that separates the section ("." subsection ...)
# prefix from the regular list enumerator.
section_enumerator_separator = '-'
# default link color
hyperlink_color = 'blue'
# Auxiliary variables
# -------------------
has_latex_toc = False # is there a toc in the doc? (needed by minitoc)
is_toc_list = False # is the current bullet_list a ToC?
section_level = 0
# Flags to encode():
# inside citation reference labels underscores dont need to be escaped
inside_citation_reference_label = False
verbatim = False # do not encode
insert_non_breaking_blanks = False # replace blanks by "~"
insert_newline = False # add latex newline commands
literal = False # literal text (block or inline)
    def __init__(self, document):
        """Initialize translator state from `document` and its settings.

        Copies the relevant settings, sets up language/babel support and
        the document class, resolves stylesheets and static LaTeX
        requirements, and creates the output collection lists that the
        visitor methods append to.
        """
        nodes.NodeVisitor.__init__(self, document)
        # Reporter
        # ~~~~~~~~
        self.warn = self.document.reporter.warning
        self.error = self.document.reporter.error
        # Settings
        # ~~~~~~~~
        self.settings = settings = document.settings
        self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
        self.use_latex_toc = settings.use_latex_toc
        self.use_latex_docinfo = settings.use_latex_docinfo
        self._use_latex_citations = settings.use_latex_citations
        self.embed_stylesheet = settings.embed_stylesheet
        self._reference_label = settings.reference_label
        self.hyperlink_color = settings.hyperlink_color
        self.compound_enumerators = settings.compound_enumerators
        self.font_encoding = settings.font_encoding
        self.section_prefix_for_enumerators = (
            settings.section_prefix_for_enumerators)
        self.section_enumerator_separator = (
            settings.section_enumerator_separator.replace('_', '\\_'))
        # literal blocks:
        self.literal_block_env = ''
        self.literal_block_options = ''
        # split "envname options" into environment name and option string
        if settings.literal_block_env != '':
            (none,
             self.literal_block_env,
             self.literal_block_options,
             none ) = re.split('(\w+)(.*)', settings.literal_block_env)
        elif settings.use_verbatim_when_possible:
            self.literal_block_env = 'verbatim'
        #
        # NOTE(review): a value without a comma yields a 1-element list;
        # depart_document() then indexes self.bibtex[1] -- verify upstream
        # validation guarantees "style,db" form.
        if self.settings.use_bibtex:
            self.bibtex = self.settings.use_bibtex.split(',',1)
            # TODO avoid errors on not declared citations.
        else:
            self.bibtex = None
        # language:
        # (labels, bibliographic_fields, and author_separators)
        self.language = languages.get_language(settings.language_code)
        self.babel = Babel(settings.language_code)
        self.author_separator = self.language.author_separators[0]
        self.d_options = [self.settings.documentoptions,
                          self.babel.get_language()]
        self.d_options = ','.join([opt for opt in self.d_options if opt])
        self.d_class = DocumentClass(settings.documentclass,
                                     settings.use_part_section)
        # graphic package options:
        if self.settings.graphicx_option == '':
            self.graphicx_package = r'\usepackage{graphicx}'
        elif self.settings.graphicx_option.lower() == 'auto':
            self.graphicx_package = PreambleCmds.graphicx_auto
        else:
            self.graphicx_package = (r'\usepackage[%s]{graphicx}' %
                                     self.settings.graphicx_option)
        # footnotes:
        self.docutils_footnotes = settings.docutils_footnotes
        if settings.use_latex_footnotes:
            self.docutils_footnotes = True
            self.warn('`use_latex_footnotes` is deprecated. '
                      'The setting has been renamed to `docutils_footnotes` '
                      'and the alias will be removed in a future version.')
        self.figure_footnotes = settings.figure_footnotes
        if self.figure_footnotes:
            self.docutils_footnotes = True
            self.warn('The "figure footnotes" workaround/setting is strongly '
                      'deprecated and will be removed in a future version.')
        # Output collection stacks
        # ~~~~~~~~~~~~~~~~~~~~~~~~
        # Document parts
        self.head_prefix = [r'\documentclass[%s]{%s}' %
                            (self.d_options, self.settings.documentclass)]
        self.requirements = SortableDict() # made a list in depart_document()
        self.latex_preamble = [settings.latex_preamble]
        self.stylesheet = []
        self.fallbacks = SortableDict() # made a list in depart_document()
        self.pdfsetup = [] # PDF properties (hyperref package)
        self.title = []
        self.subtitle = []
        ## self.body_prefix = ['\\begin{document}\n']
        self.body_pre_docinfo = [] # title data and \maketitle
        self.docinfo = []
        self.dedication = []
        self.abstract = []
        self.body = []
        ## self.body_suffix = ['\\end{document}\n']
        # A heterogenous stack used in conjunction with the tree traversal.
        # Make sure that the pops correspond to the pushes:
        self.context = []
        # Title metadata:
        self.title_labels = []
        self.subtitle_labels = []
        # (if use_latex_docinfo: collects lists of
        # author/organization/contact/address lines)
        self.author_stack = []
        # date (the default supresses the "auto-date" feature of \maketitle)
        self.date = []
        # PDF properties: pdftitle, pdfauthor
        # TODO?: pdfcreator, pdfproducer, pdfsubject, pdfkeywords
        self.pdfinfo = []
        self.pdfauthor = []
        # Stack of section counters so that we don't have to use_latex_toc.
        # This will grow and shrink as processing occurs.
        # Initialized for potential first-level sections.
        self._section_number = [0]
        # The current stack of enumerations so that we can expand
        # them into a compound enumeration.
        self._enumeration_counters = []
        # The maximum number of enumeration counters we've used.
        # If we go beyond this number, we need to create a new
        # counter; otherwise, just reuse an old one.
        self._max_enumeration_counters = 0
        self._bibitems = []
        # object for a table while proccessing.
        self.table_stack = []
        self.active_table = Table(self, 'longtable', settings.table_style)
        # Where to collect the output of visitor methods (default: body)
        self.out = self.body
        self.out_stack = [] # stack of output collectors
        # Process settings
        # ~~~~~~~~~~~~~~~~
        # Static requirements
        # TeX font encoding
        if self.font_encoding:
            encodings = [r'\usepackage[%s]{fontenc}' % self.font_encoding]
        else:
            encodings = [r'%\usepackage[OT1]{fontenc}'] # just a comment
        # Docutils' output-encoding => TeX input encoding:
        if self.latex_encoding != 'ascii':
            encodings.append(r'\usepackage[%s]{inputenc}'
                             % self.latex_encoding)
        self.requirements['_static'] = '\n'.join(
            encodings + [
            r'\usepackage{ifthen}',
            # multi-language support (language is in document options)
            '\\usepackage{babel}%s' % self.babel.setup,
            ])
        # page layout with typearea (if there are relevant document options)
        if (settings.documentclass.find('scr') == -1 and
            (self.d_options.find('DIV') != -1 or
             self.d_options.find('BCOR') != -1)):
            self.requirements['typearea'] = r'\usepackage{typearea}'
        # Stylesheets
        # get list of style sheets from settings
        styles = utils.get_stylesheet_list(settings)
        # adapt path if --stylesheet_path is used
        if settings.stylesheet_path and not(self.embed_stylesheet):
            styles = [utils.relative_path(settings._destination, sheet)
                      for sheet in styles]
        for sheet in styles:
            (base, ext) = os.path.splitext(sheet)
            is_package = ext in ['.sty', '']
            if self.embed_stylesheet:
                if is_package:
                    sheet = base + '.sty' # adapt package name
                    # wrap in \makeatletter, \makeatother
                    wrapper = PreambleCmds.embedded_package_wrapper
                else:
                    wrapper = '%% embedded stylesheet: %s\n%s'
                settings.record_dependencies.add(sheet)
                self.stylesheet.append(wrapper %
                    (sheet, io.FileInput(source_path=sheet, encoding='utf-8').read()))
            else: # link to style sheet
                if is_package:
                    self.stylesheet.append(r'\usepackage{%s}' % base)
                else:
                    self.stylesheet.append(r'\input{%s}' % sheet)
        # PDF setup
        if self.hyperlink_color == '0':
            self.hyperlink_color = 'black'
            self.colorlinks = 'false'
        else:
            self.colorlinks = 'true'
        # LaTeX Toc
        # include all supported sections in toc and PDF bookmarks
        # (or use documentclass-default (as currently))?
        ## if self.use_latex_toc:
        ##    self.requirements['tocdepth'] = (r'\setcounter{tocdepth}{%d}' %
        ##                                     len(self.d_class.sections))
        # LaTeX section numbering
        if not self.settings.sectnum_xform: # section numbering by LaTeX:
            # sectnum_depth:
            #   None  "sectnum" directive without depth arg -> LaTeX default
            #   0     no "sectnum" directive -> no section numbers
            #   else  value of the "depth" argument: translate to LaTeX level
            #         -1  part    (0 with "article" document class)
            #          0  chapter (missing in "article" document class)
            #          1  section
            #          2  subsection
            #          3  subsubsection
            #          4  paragraph
            #          5  subparagraph
            if settings.sectnum_depth is not None:
                # limit to supported levels
                sectnum_depth = min(settings.sectnum_depth,
                                    len(self.d_class.sections))
                # adjust to document class and use_part_section settings
                if 'chapter' in self.d_class.sections:
                    sectnum_depth -= 1
                if self.d_class.sections[0] == 'part':
                    sectnum_depth -= 1
                self.requirements['sectnum_depth'] = (
                    r'\setcounter{secnumdepth}{%d}' % sectnum_depth)
            # start with specified number:
            if (hasattr(settings, 'sectnum_start') and
                settings.sectnum_start != 1):
                self.requirements['sectnum_start'] = (
                    r'\setcounter{%s}{%d}' % (self.d_class.sections[0],
                                              settings.sectnum_start-1))
            # currently ignored (configure in a stylesheet):
            ## settings.sectnum_prefix
            ## settings.sectnum_suffix
# Auxiliary Methods
# -----------------
def to_latex_encoding(self,docutils_encoding):
"""Translate docutils encoding name into LaTeX's.
Default method is remove "-" and "_" chars from docutils_encoding.
"""
tr = { 'iso-8859-1': 'latin1', # west european
'iso-8859-2': 'latin2', # east european
'iso-8859-3': 'latin3', # esperanto, maltese
'iso-8859-4': 'latin4', # north european, scandinavian, baltic
'iso-8859-5': 'iso88595', # cyrillic (ISO)
'iso-8859-9': 'latin5', # turkish
'iso-8859-15': 'latin9', # latin9, update to latin1.
'mac_cyrillic': 'maccyr', # cyrillic (on Mac)
'windows-1251': 'cp1251', # cyrillic (on Windows)
'koi8-r': 'koi8-r', # cyrillic (Russian)
'koi8-u': 'koi8-u', # cyrillic (Ukrainian)
'windows-1250': 'cp1250', #
'windows-1252': 'cp1252', #
'us-ascii': 'ascii', # ASCII (US)
# unmatched encodings
#'': 'applemac',
#'': 'ansinew', # windows 3.1 ansi
#'': 'ascii', # ASCII encoding for the range 32--127.
#'': 'cp437', # dos latin us
#'': 'cp850', # dos latin 1
#'': 'cp852', # dos latin 2
#'': 'decmulti',
#'': 'latin10',
#'iso-8859-6': '' # arabic
#'iso-8859-7': '' # greek
#'iso-8859-8': '' # hebrew
#'iso-8859-10': '' # latin6, more complete iso-8859-4
}
encoding = docutils_encoding.lower()
if encoding in tr:
return tr[encoding]
# convert: latin-1, latin_1, utf-8 and similar things
encoding = encoding.replace('_', '').replace('-', '')
# strip the error handler
return encoding.split(':')[0]
def language_label(self, docutil_label):
return self.language.labels[docutil_label]
def ensure_math(self, text):
if not hasattr(self, 'ensure_math_re'):
chars = { # lnot,pm,twosuperior,threesuperior,mu,onesuperior,times,div
'latin1' : '\xac\xb1\xb2\xb3\xb5\xb9\xd7\xf7' , # ¬±²³µ¹×÷
# TODO?: use texcomp instead.
}
self.ensure_math_re = re.compile('([%s])' % chars['latin1'])
text = self.ensure_math_re.sub(r'\\ensuremath{\1}', text)
return text
    def encode(self, text):
        """Return text with 'problematic' characters escaped.

        Escape the ten special printing characters ``# $ % & ~ _ ^ \ { }``,
        square brackets ``[ ]``, double quotes and (in OT1) ``< | >``.
        Separate ``-`` (and more in literal text) to prevent input ligatures.
        Translate non-supported Unicode characters.

        Behaviour is modified by the instance flags `verbatim`, `literal`,
        `insert_non_breaking_blanks`, `insert_newline`, and
        `inside_citation_reference_label` (defaults on the class).
        """
        # verbatim text is emitted unchanged (no escaping at all)
        if self.verbatim:
            return text
        # Separate compound characters, e.g. '--' to '-{}-'.
        separate_chars = '-'
        # In monospace-font, we also separate ',,', '``' and "''" and some
        # other characters which can't occur in non-literal text.
        if self.literal:
            separate_chars += ',`\'"<>'
        # LaTeX encoding maps:
        special_chars = {
            ord('#'): ur'\#',
            ord('$'): ur'\$',
            ord('%'): ur'\%',
            ord('&'): ur'\&',
            ord('~'): ur'\textasciitilde{}',
            ord('_'): ur'\_',
            ord('^'): ur'\textasciicircum{}',
            ord('\\'): ur'\textbackslash{}',
            ord('{'): ur'\{',
            ord('}'): ur'\}',
            # Square brackets are ordinary chars and cannot be escaped with '\',
            # so we put them in a group '{[}'. (Alternative: ensure that all
            # macros with optional arguments are terminated with {} and text
            # inside any optional argument is put in a group ``[{text}]``).
            # Commands with optional args inside an optional arg must be put
            # in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
            ord('['): ur'{[}',
            ord(']'): ur'{]}'
        }
        # Unicode chars that are not recognized by LaTeX's utf8 encoding
        unsupported_unicode_chars = {
            0x00A0: ur'~', # NO-BREAK SPACE
            0x00AD: ur'\-', # SOFT HYPHEN
            #
            0x2011: ur'\hbox{-}', # NON-BREAKING HYPHEN
            0x21d4: ur'$\Leftrightarrow$',
            # Docutils footnote symbols:
            0x2660: ur'$\spadesuit$',
            0x2663: ur'$\clubsuit$',
        }
        # Unicode chars that are recognized by LaTeX's utf8 encoding
        unicode_chars = {
            0x200C: ur'\textcompwordmark', # ZERO WIDTH NON-JOINER
            0x2013: ur'\textendash{}',
            0x2014: ur'\textemdash{}',
            0x2018: ur'\textquoteleft{}',
            0x2019: ur'\textquoteright{}',
            0x201A: ur'\quotesinglbase{}', # SINGLE LOW-9 QUOTATION MARK
            0x201C: ur'\textquotedblleft{}',
            0x201D: ur'\textquotedblright{}',
            0x201E: ur'\quotedblbase{}', # DOUBLE LOW-9 QUOTATION MARK
            0x2030: ur'\textperthousand{}', # PER MILLE SIGN
            0x2031: ur'\textpertenthousand{}', # PER TEN THOUSAND SIGN
            0x2039: ur'\guilsinglleft{}',
            0x203A: ur'\guilsinglright{}',
            0x2423: ur'\textvisiblespace{}', # OPEN BOX
            0x2020: ur'\dag{}',
            0x2021: ur'\ddag{}',
            0x2026: ur'\dots{}',
            0x2122: ur'\texttrademark{}',
        }
        # Unicode chars that require a feature/package to render
        pifont_chars = {
            0x2665: ur'\ding{170}', # black heartsuit
            0x2666: ur'\ding{169}', # black diamondsuit
        }
        # recognized with 'utf8', if textcomp is loaded
        textcomp_chars = {
            # Latin-1 Supplement
            0x00a2: ur'\textcent{}', # ¢ CENT SIGN
            0x00a4: ur'\textcurrency{}', # ¤ CURRENCY SYMBOL
            0x00a5: ur'\textyen{}', # ¥ YEN SIGN
            0x00a6: ur'\textbrokenbar{}', # ¦ BROKEN BAR
            0x00a7: ur'\textsection{}', # § SECTION SIGN
            0x00a8: ur'\textasciidieresis{}', # ¨ DIAERESIS
            0x00a9: ur'\textcopyright{}', # © COPYRIGHT SIGN
            0x00aa: ur'\textordfeminine{}', # ª FEMININE ORDINAL INDICATOR
            0x00ac: ur'\textlnot{}', # ¬ NOT SIGN
            0x00ae: ur'\textregistered{}', # ® REGISTERED SIGN
            0x00af: ur'\textasciimacron{}', # ¯ MACRON
            0x00b0: ur'\textdegree{}', # ° DEGREE SIGN
            0x00b1: ur'\textpm{}', # ± PLUS-MINUS SIGN
            0x00b2: ur'\texttwosuperior{}', # ² SUPERSCRIPT TWO
            0x00b3: ur'\textthreesuperior{}', # ³ SUPERSCRIPT THREE
            0x00b4: ur'\textasciiacute{}', # ´ ACUTE ACCENT
            0x00b5: ur'\textmu{}', # µ MICRO SIGN
            0x00b6: ur'\textparagraph{}', # ¶ PILCROW SIGN # not equal to \textpilcrow
            0x00b9: ur'\textonesuperior{}', # ¹ SUPERSCRIPT ONE
            0x00ba: ur'\textordmasculine{}', # º MASCULINE ORDINAL INDICATOR
            0x00bc: ur'\textonequarter{}', # 1/4 FRACTION
            0x00bd: ur'\textonehalf{}', # 1/2 FRACTION
            0x00be: ur'\textthreequarters{}', # 3/4 FRACTION
            0x00d7: ur'\texttimes{}', # × MULTIPLICATION SIGN
            0x00f7: ur'\textdiv{}', # ÷ DIVISION SIGN
            #
            0x0192: ur'\textflorin{}', # LATIN SMALL LETTER F WITH HOOK
            0x02b9: ur'\textasciiacute{}', # MODIFIER LETTER PRIME
            0x02ba: ur'\textacutedbl{}', # MODIFIER LETTER DOUBLE PRIME
            0x2016: ur'\textbardbl{}', # DOUBLE VERTICAL LINE
            0x2022: ur'\textbullet{}', # BULLET
            0x2032: ur'\textasciiacute{}', # PRIME
            0x2033: ur'\textacutedbl{}', # DOUBLE PRIME
            0x2035: ur'\textasciigrave{}', # REVERSED PRIME
            0x2036: ur'\textgravedbl{}', # REVERSED DOUBLE PRIME
            0x203b: ur'\textreferencemark{}', # REFERENCE MARK
            0x203d: ur'\textinterrobang{}', # INTERROBANG
            0x2044: ur'\textfractionsolidus{}', # FRACTION SLASH
            0x2045: ur'\textlquill{}', # LEFT SQUARE BRACKET WITH QUILL
            0x2046: ur'\textrquill{}', # RIGHT SQUARE BRACKET WITH QUILL
            0x2052: ur'\textdiscount{}', # COMMERCIAL MINUS SIGN
            0x20a1: ur'\textcolonmonetary{}', # COLON SIGN
            0x20a3: ur'\textfrenchfranc{}', # FRENCH FRANC SIGN
            0x20a4: ur'\textlira{}', # LIRA SIGN
            0x20a6: ur'\textnaira{}', # NAIRA SIGN
            0x20a9: ur'\textwon{}', # WON SIGN
            0x20ab: ur'\textdong{}', # DONG SIGN
            0x20ac: ur'\texteuro{}', # EURO SIGN
            0x20b1: ur'\textpeso{}', # PESO SIGN
            0x20b2: ur'\textguarani{}', # GUARANI SIGN
            0x2103: ur'\textcelsius{}', # DEGREE CELSIUS
            0x2116: ur'\textnumero{}', # NUMERO SIGN
            0x2117: ur'\textcircledP{}', # SOUND RECORDING COYRIGHT
            0x211e: ur'\textrecipe{}', # PRESCRIPTION TAKE
            0x2120: ur'\textservicemark{}', # SERVICE MARK
            0x2122: ur'\texttrademark{}', # TRADE MARK SIGN
            0x2126: ur'\textohm{}', # OHM SIGN
            0x2127: ur'\textmho{}', # INVERTED OHM SIGN
            0x212e: ur'\textestimated{}', # ESTIMATED SYMBOL
            0x2190: ur'\textleftarrow{}', # LEFTWARDS ARROW
            0x2191: ur'\textuparrow{}', # UPWARDS ARROW
            0x2192: ur'\textrightarrow{}', # RIGHTWARDS ARROW
            0x2193: ur'\textdownarrow{}', # DOWNWARDS ARROW
            0x2212: ur'\textminus{}', # MINUS SIGN
            0x2217: ur'\textasteriskcentered{}', # ASTERISK OPERATOR
            0x221a: ur'\textsurd{}', # SQUARE ROOT
            0x2422: ur'\textblank{}', # BLANK SYMBOL
            0x25e6: ur'\textopenbullet{}', # WHITE BULLET
            0x25ef: ur'\textbigcircle{}', # LARGE CIRCLE
            0x266a: ur'\textmusicalnote{}', # EIGHTH NOTE
            0x26ad: ur'\textmarried{}', # MARRIAGE SYMBOL
            0x26ae: ur'\textdivorced{}', # DIVORCE SYMBOL
            0x27e8: ur'\textlangle{}', # MATHEMATICAL LEFT ANGLE BRACKET
            0x27e9: ur'\textrangle{}', # MATHEMATICAL RIGHT ANGLE BRACKET
        }
        # TODO: greek alphabet ... ?
        # see also LaTeX codec
        # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
        # and unimap.py from TeXML
        # set up the translation table:
        # (NOTE: `table` aliases `special_chars`; the dict literal is
        # rebuilt on every call, so mutating it here is safe.)
        table = special_chars
        # keep the underscore in citation references
        if self.inside_citation_reference_label:
            del(table[ord('_')])
        # Workarounds for OT1 font-encoding
        if self.font_encoding in ['OT1', '']:
            # * out-of-order characters in cmtt
            if self.literal:
                # replace underscore by underlined blank,
                # because this has correct width.
                table[ord('_')] = u'\\underline{~}'
                # the backslash doesn't work, so we use a mirrored slash.
                # \reflectbox is provided by graphicx:
                self.requirements['graphicx'] = self.graphicx_package
                table[ord('\\')] = ur'\reflectbox{/}'
            # * ``< | >`` come out as different chars (except for cmtt):
            else:
                table[ord('|')] = ur'\textbar{}'
                table[ord('<')] = ur'\textless{}'
                table[ord('>')] = ur'\textgreater{}'
        if self.insert_non_breaking_blanks:
            table[ord(' ')] = ur'~'
        if self.literal:
            # double quotes are 'active' in some languages
            table[ord('"')] = self.babel.literal_double_quote
        else:
            text = self.babel.quote_quotes(text)
        # Unicode chars:
        table.update(unsupported_unicode_chars)
        table.update(pifont_chars)
        # with a utf8 input encoding, LaTeX handles these itself:
        if not self.latex_encoding.startswith('utf8'):
            table.update(unicode_chars)
            table.update(textcomp_chars)
        # Characters that require a feature/package to render
        for ch in text:
            if ord(ch) in pifont_chars:
                self.requirements['pifont'] = '\\usepackage{pifont}'
            if ord(ch) in textcomp_chars:
                self.requirements['textcomp'] = PreambleCmds.textcomp
        text = text.translate(table)
        # Break up input ligatures
        for char in separate_chars * 2:
            # Do it twice ("* 2") because otherwise we would replace
            # '---' by '-{}--'.
            text = text.replace(char + char, char + '{}' + char)
        # Literal line breaks (in address or literal blocks):
        if self.insert_newline:
            # for blank lines, insert a protected space, to avoid
            # ! LaTeX Error: There's no line here to end.
            textlines = [line + '~'*(not line.lstrip())
                         for line in text.split('\n')]
            text = '\\\\\n'.join(textlines)
        if self.literal and not self.insert_non_breaking_blanks:
            # preserve runs of spaces but allow wrapping
            text = text.replace(' ', ' ~')
        if not self.latex_encoding.startswith('utf8'):
            text = self.ensure_math(text)
        return text
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, encode, and return attribute value text."""
return self.encode(whitespace.sub(' ', text))
# TODO: is this used anywhere? (update or delete)
## def astext(self):
## """Assemble document parts and return as string."""
## head = '\n'.join(self.head_prefix + self.stylesheet + self.head)
## body = ''.join(self.body_prefix + self.body + self.body_suffix)
## return head + '\n' + body
def is_inline(self, node):
"""Check whether a node represents an inline element"""
return isinstance(node.parent, nodes.TextElement)
def append_hypertargets(self, node):
"""Append hypertargets for all ids of `node`"""
# hypertarget places the anchor at the target's baseline,
# so we raise it explicitely
self.out.append('%\n'.join(['\\raisebox{1em}{\\hypertarget{%s}{}}' %
id for id in node['ids']]))
def ids_to_labels(self, node, set_anchor=True):
"""Return list of label definitions for all ids of `node`
If `set_anchor` is True, an anchor is set with \phantomsection.
"""
labels = ['\\label{%s}' % id for id in node.get('ids', [])]
if set_anchor and labels:
labels.insert(0, '\\phantomsection')
return labels
def push_output_collector(self, new_out):
self.out_stack.append(self.out)
self.out = new_out
def pop_output_collector(self):
self.out = self.out_stack.pop()
# Visitor methods
# ---------------
def visit_Text(self, node):
self.out.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
self.depart_docinfo_item(node)
def visit_admonition(self, node):
self.fallbacks['admonition'] = PreambleCmds.admonition
if 'error' in node['classes']:
self.fallbacks['error'] = PreambleCmds.error
# strip the generic 'admonition' from the list of classes
node['classes'] = [cls for cls in node['classes']
if cls != 'admonition']
self.out.append('\n\\DUadmonition[%s]{\n' % ','.join(node['classes']))
def depart_admonition(self, node=None):
self.out.append('}\n')
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
self.depart_docinfo_item(node)
def visit_authors(self, node):
# not used: visit_author is called anyway for each author.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
self.out.append( '%\n\\begin{quote}\n')
def depart_block_quote(self, node):
self.out.append( '\n\\end{quote}\n')
def visit_bullet_list(self, node):
if self.is_toc_list:
self.out.append( '%\n\\begin{list}{}{}\n' )
else:
self.out.append( '%\n\\begin{itemize}\n' )
def depart_bullet_list(self, node):
if self.is_toc_list:
self.out.append( '\n\\end{list}\n' )
else:
self.out.append( '\n\\end{itemize}\n' )
def visit_superscript(self, node):
self.out.append(r'\textsuperscript{')
if node['classes']:
self.visit_inline(node)
def depart_superscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_subscript(self, node):
self.out.append(r'\textsubscript{') # requires `fixltx2e`
if node['classes']:
self.visit_inline(node)
def depart_subscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_caption(self, node):
self.out.append( '\\caption{' )
def depart_caption(self, node):
self.out.append('}\n')
def visit_title_reference(self, node):
self.fallbacks['titlereference'] = PreambleCmds.titlereference
self.out.append(r'\DUroletitlereference{')
if node['classes']:
self.visit_inline(node)
def depart_title_reference(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '}' )
    def visit_citation(self, node):
        """Open a citation: collect for \\bibitem or emit a figure float."""
        # TODO maybe use cite bibitems
        if self._use_latex_citations:
            # collect label + text; turned into a \bibitem in depart_citation
            self.push_output_collector([])
        else:
            # TODO: do we need these?
            ## self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
            self.out.append(r'\begin{figure}[b]')
            self.append_hypertargets(node)
    def depart_citation(self, node):
        if self._use_latex_citations:
            # first collected chunk is the label, the rest is the text
            label = self.out[0]
            text = ''.join(self.out[1:])
            self._bibitems.append([label, text])
            self.pop_output_collector()
        else:
            self.out.append('\\end{figure}\n')
    def visit_citation_reference(self, node):
        """Open a \\cite (merging adjacent references) or a hyperlink."""
        if self._use_latex_citations:
            if not self.inside_citation_reference_label:
                self.out.append(r'\cite{')
                self.inside_citation_reference_label = 1
            else:
                # adjacent reference: drop the separating whitespace so the
                # labels end up in one \cite{a,b} command
                assert self.body[-1] in (' ', '\n'),\
                        'unexpected non-whitespace while in reference label'
                del self.body[-1]
        else:
            href = ''
            if 'refid' in node:
                href = node['refid']
            elif 'refname' in node:
                href = self.document.nameids[node['refname']]
            self.out.append('[\\hyperlink{%s}{' % href)
    def depart_citation_reference(self, node):
        if self._use_latex_citations:
            followup_citation = False
            # check for a following citation separated by a space or newline
            next_siblings = node.traverse(descend=0, siblings=1,
                                          include_self=0)
            if len(next_siblings) > 1:
                next = next_siblings[0]
                if (isinstance(next, nodes.Text) and
                    next.astext() in (' ', '\n')):
                    if next_siblings[1].__class__ == node.__class__:
                        followup_citation = True
            if followup_citation:
                # keep the \cite open; separate labels with a comma
                self.out.append(',')
            else:
                self.out.append('}')
                self.inside_citation_reference_label = False
        else:
            self.out.append('}]')
def visit_classifier(self, node):
self.out.append( '(\\textbf{' )
def depart_classifier(self, node):
self.out.append( '})\n' )
def visit_colspec(self, node):
self.active_table.visit_colspec(node)
def depart_colspec(self, node):
pass
def visit_comment(self, node):
# Precede every line with a comment sign, wrap in newlines
self.out.append('\n%% %s\n' % node.astext().replace('\n', '\n% '))
raise nodes.SkipNode
def depart_comment(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
def depart_contact(self, node):
self.depart_docinfo_item(node)
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item(node)
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item(node)
def visit_decoration(self, node):
# header and footer
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
self.out.append('\n')
def visit_definition_list(self, node):
self.out.append( '%\n\\begin{description}\n' )
def depart_definition_list(self, node):
self.out.append( '\\end{description}\n' )
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.out.append(' ')
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self.push_output_collector(self.docinfo)
def depart_docinfo(self, node):
self.pop_output_collector()
# Some itmes (e.g. author) end up at other places
if self.docinfo:
# tabularx: automatic width of columns, no page breaks allowed.
self.requirements['tabularx'] = r'\usepackage{tabularx}'
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['docinfo'] = PreambleCmds.docinfo
#
self.docinfo.insert(0, '\n% Docinfo\n'
'\\begin{center}\n'
'\\begin{tabularx}{\\DUdocinfowidth}{lX}\n')
self.docinfo.append('\\end{tabularx}\n'
'\\end{center}\n')
    def visit_docinfo_item(self, node, name):
        """Render one docinfo field.

        With `use_latex_docinfo`, author-related fields and the date are
        diverted to \\maketitle data (`author_stack`/`date`) and the node
        is skipped; otherwise a tabularx row "label: value" is started.
        """
        if name == 'author':
            self.pdfauthor.append(self.attval(node.astext()))
        if self.use_latex_docinfo:
            if name in ('author', 'organization', 'contact', 'address'):
                # We attach these to the last author. If any of them precedes
                # the first author, put them in a separate "author" group
                # (in lack of better semantics).
                if name == 'author' or not self.author_stack:
                    self.author_stack.append([])
                if name == 'address':   # newlines are meaningful
                    self.insert_newline = 1
                    text = self.encode(node.astext())
                    self.insert_newline = False
                else:
                    text = self.attval(node.astext())
                self.author_stack[-1].append(text)
                raise nodes.SkipNode
            elif name == 'date':
                self.date.append(self.attval(node.astext()))
                raise nodes.SkipNode
        self.out.append('\\textbf{%s}: &\n\t' % self.language_label(name))
        if name == 'address':
            self.insert_newline = 1
            self.out.append('{\\raggedright\n')
            self.context.append(' } \\\\\n')
        else:
            self.context.append(' \\\\\n')
    def depart_docinfo_item(self, node):
        """Close the docinfo row opened by visit_docinfo_item."""
        self.out.append(self.context.pop())
        # for address we did set insert_newline
        self.insert_newline = False
def visit_doctest_block(self, node):
self.visit_literal_block(node)
def depart_doctest_block(self, node):
self.depart_literal_block(node)
def visit_document(self, node):
# titled document?
if (self.use_latex_docinfo or len(node) and
isinstance(node[0], nodes.title)):
self.title_labels += self.ids_to_labels(node)
    def depart_document(self, node):
        """Finalize output: requirements, PDF setup, title data, bibliography."""
        # Complete header with information gained from walkabout
        # a) conditional requirements (before style sheet)
        self.requirements = self.requirements.sortedvalues()
        # b) coditional fallback definitions (after style sheet)
        self.fallbacks = self.fallbacks.sortedvalues()
        # c) PDF properties
        self.pdfsetup.append(PreambleCmds.linking % (self.colorlinks,
                                                     self.hyperlink_color,
                                                     self.hyperlink_color))
        if self.pdfauthor:
            authors = self.author_separator.join(self.pdfauthor)
            self.pdfinfo.append(' pdfauthor={%s}' % authors)
        if self.pdfinfo:
            self.pdfsetup += [r'\hypersetup{'] + self.pdfinfo + ['}']
        # Complete body
        # a) document title (part 'body_prefix'):
        # NOTE: Docutils puts author/date into docinfo, so normally
        # we do not want LaTeX author/date handling (via \maketitle).
        # To deactivate it, we add \title, \author, \date,
        # even if the arguments are empty strings.
        if self.title or self.author_stack or self.date:
            authors = ['\\\\\n'.join(author_entry)
                       for author_entry in self.author_stack]
            title = [''.join(self.title)] + self.title_labels
            if self.subtitle:
                title += [r'\\ % subtitle',
                          r'\large{%s}' % ''.join(self.subtitle)
                         ] + self.subtitle_labels
            self.body_pre_docinfo.append(PreambleCmds.documenttitle % (
                '%\n '.join(title),
                ' \\and\n'.join(authors),
                ', '.join(self.date)))
        # b) bibliography
        # TODO insertion point of bibliography should be configurable.
        if self._use_latex_citations and len(self._bibitems)>0:
            if not self.bibtex:
                # hand-built thebibliography; widest label sets the indent
                widest_label = ''
                for bi in self._bibitems:
                    if len(widest_label)<len(bi[0]):
                        widest_label = bi[0]
                self.out.append('\n\\begin{thebibliography}{%s}\n' %
                                widest_label)
                for bi in self._bibitems:
                    # cite_key: underscores must not be escaped
                    cite_key = bi[0].replace(r'\_','_')
                    self.out.append('\\bibitem[%s]{%s}{%s}\n' %
                                    (bi[0], cite_key, bi[1]))
                self.out.append('\\end{thebibliography}\n')
            else:
                # self.bibtex is [style, database] from --use-bibtex
                self.out.append('\n\\bibliographystyle{%s}\n' %
                                self.bibtex[0])
                self.out.append('\\bibliography{%s}\n' % self.bibtex[1])
        # c) make sure to generate a toc file if needed for local contents:
        if 'minitoc' in self.requirements and not self.has_latex_toc:
            self.out.append('\n\\faketableofcontents % for local ToCs\n')
def visit_emphasis(self, node):
self.out.append('\\emph{')
if node['classes']:
self.visit_inline(node)
def depart_emphasis(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
    def visit_entry(self, node):
        """Open a table cell: column separator, multirow/multicolumn, bolding."""
        self.active_table.visit_entry()
        # cell separation
        # BUG: the following fails, with more than one multirow
        # starting in the second column (or later) see
        # ../../../test/functional/input/data/latex.txt
        if self.active_table.get_entry_number() == 1:
            # if the first row is a multirow, this actually is the second row.
            # this gets hairy if rowspans follow each other.
            if self.active_table.get_rowspan(0):
                count = 0
                while self.active_table.get_rowspan(count):
                    count += 1
                    self.out.append(' & ')
                self.active_table.visit_entry() # increment cell count
        else:
            self.out.append(' & ')
        # multirow, multicolumn
        # IN WORK BUG TODO HACK continues here
        # multirow in LaTeX simply will enlarge the cell over several rows
        # (the following n if n is positive, the former if negative).
        if 'morerows' in node and 'morecols' in node:
            raise NotImplementedError('Cells that '
                'span multiple rows *and* columns are not supported, sorry.')
        if 'morerows' in node:
            self.requirements['multirow'] = r'\usepackage{multirow}'
            count = node['morerows'] + 1
            # remember the span so following rows emit empty separators
            self.active_table.set_rowspan(
                            self.active_table.get_entry_number()-1,count)
            self.out.append('\\multirow{%d}{%s}{%%' %
                            (count,self.active_table.get_column_width()))
            self.context.append('}')
        elif 'morecols' in node:
            # the vertical bar before column is missing if it is the first
            # column. the one after always.
            if self.active_table.get_entry_number() == 1:
                bar1 = self.active_table.get_vertical_bar()
            else:
                bar1 = ''
            count = node['morecols'] + 1
            self.out.append('\\multicolumn{%d}{%sl%s}{' %
                            (count, bar1, self.active_table.get_vertical_bar()))
            self.context.append('}')
        else:
            self.context.append('')
        # header / not header
        if isinstance(node.parent.parent, nodes.thead):
            self.out.append('\\textbf{%')
            self.context.append('}')
        elif self.active_table.is_stub_column():
            self.out.append('\\textbf{')
            self.context.append('}')
        else:
            self.context.append('')
    def depart_entry(self, node):
        """Close the cell, popping the two context markers pushed above."""
        self.out.append(self.context.pop()) # header / not header
        self.out.append(self.context.pop()) # multirow/column
        # if following row is spanned from above.
        if self.active_table.get_rowspan(self.active_table.get_entry_number()):
            self.out.append(' & ')
            self.active_table.visit_entry() # increment cell count
def visit_row(self, node):
self.active_table.visit_row()
def depart_row(self, node):
self.out.extend(self.active_table.depart_row())
    def visit_enumerated_list(self, node):
        """Open an enumerated list as a custom LaTeX ``list`` environment.

        A private environment allows setting the numbering style and
        starting value and supports unlimited nesting (each nesting level
        gets its own LaTeX counter ``listcnt<n>``).
        """
        # We create our own enumeration list environment.
        # This allows to set the style and starting value
        # and unlimited nesting.
        enum_style = {'arabic':'arabic',
                'loweralpha':'alph',
                'upperalpha':'Alph',
                'lowerroman':'roman',
                'upperroman':'Roman' }
        enum_suffix = ''
        if 'suffix' in node:
            enum_suffix = node['suffix']
        enum_prefix = ''
        if 'prefix' in node:
            enum_prefix = node['prefix']
        if self.compound_enumerators:
            pref = ''
            if self.section_prefix_for_enumerators and self.section_level:
                for i in range(self.section_level):
                    pref += '%d.' % self._section_number[i]
                pref = pref[:-1] + self.section_enumerator_separator
                enum_prefix += pref
            # prepend the counters of all enclosing enumerated lists
            for ctype, cname in self._enumeration_counters:
                enum_prefix += '\\%s{%s}.' % (ctype, cname)
        enum_type = 'arabic'
        if 'enumtype' in node:
            enum_type = node['enumtype']
        if enum_type in enum_style:
            enum_type = enum_style[enum_type]
        counter_name = 'listcnt%d' % len(self._enumeration_counters)
        self._enumeration_counters.append((enum_type, counter_name))
        # If we haven't used this counter name before, then create a
        # new counter; otherwise, reset & reuse the old counter.
        if len(self._enumeration_counters) > self._max_enumeration_counters:
            self._max_enumeration_counters = len(self._enumeration_counters)
            self.out.append('\\newcounter{%s}\n' % counter_name)
        else:
            self.out.append('\\setcounter{%s}{0}\n' % counter_name)
        self.out.append('\\begin{list}{%s\\%s{%s}%s}\n' %
                        (enum_prefix,enum_type,counter_name,enum_suffix))
        self.out.append('{\n')
        self.out.append('\\usecounter{%s}\n' % counter_name)
        # set start after usecounter, because it initializes to zero.
        if 'start' in node:
            self.out.append('\\addtocounter{%s}{%d}\n' %
                            (counter_name,node['start']-1))
        ## set rightmargin equal to leftmargin
        self.out.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
        self.out.append('}\n')
    def depart_enumerated_list(self, node):
        """Close the list environment and drop this list's counter."""
        self.out.append('\\end{list}\n')
        self._enumeration_counters.pop()
    def visit_field(self, node):
        """Field node itself produces no output."""
        # real output is done in siblings: _argument, _body, _name
        pass
    def depart_field(self, node):
        """End a field with a newline."""
        self.out.append('\n')
        ##self.out.append('%[depart_field]\n')
    def visit_field_argument(self, node):
        """Placeholder: emits only a marker comment into the LaTeX source."""
        self.out.append('%[visit_field_argument]\n')
    def depart_field_argument(self, node):
        """Placeholder: emits only a marker comment into the LaTeX source."""
        self.out.append('%[depart_field_argument]\n')
    def visit_field_body(self, node):
        """Field body needs no opening markup."""
        pass
    def depart_field_body(self, node):
        """Close a field body; inside the docinfo table, end the row."""
        if self.out is self.docinfo:
            self.out.append(r'\\')
    def visit_field_list(self, node):
        """Open a DUfieldlist environment (unless collecting docinfo)."""
        if self.out is not self.docinfo:
            self.fallbacks['fieldlist'] = PreambleCmds.fieldlist
            self.out.append('%\n\\begin{DUfieldlist}\n')
    def depart_field_list(self, node):
        """Close the DUfieldlist environment (unless collecting docinfo)."""
        if self.out is not self.docinfo:
            self.out.append('\\end{DUfieldlist}\n')
    def visit_field_name(self, node):
        """Open a field name: bold in docinfo, list-item label otherwise."""
        if self.out is self.docinfo:
            self.out.append('\\textbf{')
        else:
            # Commands with optional args inside an optional arg must be put
            # in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
            self.out.append('\\item[{')
    def depart_field_name(self, node):
        """Close the field-name markup opened in visit_field_name."""
        if self.out is self.docinfo:
            self.out.append('}: &')
        else:
            self.out.append(':}]')
    def visit_figure(self, node):
        """Open a LaTeX ``figure`` float (with hyperlink labels, if any)."""
        self.requirements['float_settings'] = PreambleCmds.float_settings
        # ! the 'align' attribute should set "outer alignment" !
        # For "inner alignment" use LaTeX default alignment (similar to HTML)
        ## if ('align' not in node.attributes or
        ##     node.attributes['align'] == 'center'):
        ##     align = '\n\\centering'
        ##     align_end = ''
        ## else:
        ##     # TODO non vertical space for other alignments.
        ##     align = '\\begin{flush%s}' % node.attributes['align']
        ##     align_end = '\\end{flush%s}' % node.attributes['align']
        ## self.out.append( '\\begin{figure}%s\n' % align )
        ## self.context.append( '%s\\end{figure}\n' % align_end )
        self.out.append('\\begin{figure}')
        if node.get('ids'):
            self.out += ['\n'] + self.ids_to_labels(node)
    def depart_figure(self, node):
        """Close the ``figure`` float."""
        self.out.append('\\end{figure}\n')
    def visit_footer(self, node):
        """Collect footer content into a DUfooter command definition."""
        self.push_output_collector([])
        self.out.append(r'\newcommand{\DUfooter}{')
    def depart_footer(self, node):
        """Store the collected footer definition in the requirements."""
        self.out.append('}')
        self.requirements['~footer'] = ''.join(self.out)
        self.pop_output_collector()
    def visit_footnote(self, node):
        """Open a footnote: a figure float or a DUfootnotetext command."""
        # Use the first backreference as link target; fall back to the
        # footnote's own id when there is none.
        try:
            backref = node['backrefs'][0]
        except IndexError:
            backref = node['ids'][0] # no backref, use self-ref instead
        if self.settings.figure_footnotes:
            self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
            self.out.append('\\begin{figure}[b]')
            self.append_hypertargets(node)
            if node.get('id') == node.get('name'):  # explicit label
                self.out += self.ids_to_labels(node)
        elif self.docutils_footnotes:
            self.fallbacks['footnotes'] = PreambleCmds.footnotes
            num,text = node.astext().split(None,1)
            if self.settings.footnote_references == 'brackets':
                num = '[%s]' % num
            self.out.append('%%\n\\DUfootnotetext{%s}{%s}{%s}{' %
                            (node['ids'][0], backref, self.encode(num)))
            if node['ids'] == node['names']:
                self.out += self.ids_to_labels(node)
            # mask newline to prevent spurious whitespace:
            self.out.append('%')
        ## else:  # TODO: "real" LaTeX \footnote{}s
    def depart_footnote(self, node):
        """Close the footnote markup opened in visit_footnote."""
        # NOTE(review): visit_footnote tests self.settings.figure_footnotes
        # while this method reads self.figure_footnotes -- presumably an
        # attribute mirroring the setting; confirm both stay in sync.
        if self.figure_footnotes:
            self.out.append('\\end{figure}\n')
        else:
            self.out.append('}\n')
def visit_footnote_reference(self, node):
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
# if not self.docutils_footnotes:
# TODO: insert footnote content at (or near) this place
# print "footnote-ref to", node['refid']
# footnotes = (self.document.footnotes +
# self.document.autofootnotes +
# self.document.symbol_footnotes)
# for footnote in footnotes:
# # print footnote['ids']
# if node.get('refid', '') in footnote['ids']:
# print 'matches', footnote['ids']
format = self.settings.footnote_references
if format == 'brackets':
self.append_hypertargets(node)
self.out.append('\\hyperlink{%s}{[' % href)
self.context.append(']}')
else:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
self.out.append(r'\DUfootnotemark{%s}{%s}{' %
(node['ids'][0], href))
self.context.append('}')
    def depart_footnote_reference(self, node):
        """Close the reference markup opened in visit_footnote_reference."""
        self.out.append(self.context.pop())
    # footnote/citation label
    def label_delim(self, node, bracket, superscript):
        """Append the opening/closing delimiter for a footnote or
        citation label (bracket or superscript, depending on settings)."""
        if isinstance(node.parent, nodes.footnote):
            if not self.figure_footnotes:
                raise nodes.SkipNode
            if self.settings.footnote_references == 'brackets':
                self.out.append(bracket)
            else:
                self.out.append(superscript)
        else:
            assert isinstance(node.parent, nodes.citation)
            if not self._use_latex_citations:
                self.out.append(bracket)
    def visit_label(self, node):
        """footnote or citation label: in brackets or as superscript"""
        self.label_delim(node, '[', '\\textsuperscript{')
    def depart_label(self, node):
        """Close the label delimiter opened in visit_label."""
        self.label_delim(node, ']', '}')
    # elements generated by the framework e.g. section numbers.
    def visit_generated(self, node):
        """Generated elements (e.g. section numbers) need no markup."""
        pass
    def depart_generated(self, node):
        """Generated elements need no closing markup."""
        pass
    def visit_header(self, node):
        """Collect header content into a DUheader command definition."""
        self.push_output_collector([])
        self.out.append(r'\newcommand{\DUheader}{')
    def depart_header(self, node):
        """Store the collected header definition in the requirements."""
        self.out.append('}')
        self.requirements['~header'] = ''.join(self.out)
        self.pop_output_collector()
def to_latex_length(self, length_str):
"""Convert string with rst lenght to LaTeX"""
match = re.match('(\d*\.?\d*)\s*(\S*)', length_str)
if not match:
return length_str
value, unit = match.groups()[:2]
# no unit or "DTP" points (called 'bp' in TeX):
if unit in ('', 'pt'):
length_str = '%sbp' % value
# percentage: relate to current line width
elif unit == '%':
length_str = '%.3f\\linewidth' % (float(value)/100.0)
return length_str
    def visit_image(self, node):
        """Append an ``includegraphics`` command for an image node,
        wrapped in alignment markup derived from the 'align' attribute."""
        self.requirements['graphicx'] = self.graphicx_package
        attrs = node.attributes
        # Add image URI to dependency list, assuming that it's
        # referring to a local file.
        self.settings.record_dependencies.add(attrs['uri'])
        # alignment defaults:
        if not 'align' in attrs:
            # Set default align of image in a figure to 'center'
            if isinstance(node.parent, nodes.figure):
                attrs['align'] = 'center'
            # query 'align-*' class argument
            for cls in node['classes']:
                if cls.startswith('align-'):
                    attrs['align'] = cls.split('-')[1]
        # pre- and postfix (prefix inserted in reverse order)
        pre = []
        post = []
        include_graphics_options = []
        display_style = ('block-', 'inline-')[self.is_inline(node)]
        align_codes = {
            # inline images: by default latex aligns the bottom.
            'bottom': ('', ''),
            'middle': (r'\raisebox{-0.5\height}{', '}'),
            'top': (r'\raisebox{-\height}{', '}'),
            # block level images:
            'center': (r'\noindent\makebox[\textwidth][c]{', '}'),
            'left': (r'\noindent{', r'\hfill}'),
            'right': (r'\noindent{\hfill', '}'),}
        if 'align' in attrs:
            try:
                align_code = align_codes[attrs['align']]
                pre.append(align_code[0])
                post.append(align_code[1])
            except KeyError:
                pass                    # TODO: warn?
        if 'height' in attrs:
            include_graphics_options.append('height=%s' %
                            self.to_latex_length(attrs['height']))
        if 'scale' in attrs:
            include_graphics_options.append('scale=%f' %
                                            (attrs['scale'] / 100.0))
        if 'width' in attrs:
            include_graphics_options.append('width=%s' %
                            self.to_latex_length(attrs['width']))
        if not self.is_inline(node):
            pre.append('\n')
            post.append('\n')
        pre.reverse()
        self.out.extend(pre)
        options = ''
        if include_graphics_options:
            options = '[%s]' % (','.join(include_graphics_options))
        self.out.append('\\includegraphics%s{%s}' % (options, attrs['uri']))
        self.out.extend(post)
    def depart_image(self, node):
        """Append hyperlink labels after the image, if it has ids."""
        if node.get('ids'):
            self.out += self.ids_to_labels(node) + ['\n']
    def visit_interpreted(self, node):
        """Treat interpreted text like literal text (for now)."""
        # @@@ Incomplete, pending a proper implementation on the
        # Parser/Reader end.
        self.visit_literal(node)
    def depart_interpreted(self, node):
        """Close interpreted text like literal text."""
        self.depart_literal(node)
    def visit_legend(self, node):
        """Open a DUlegend environment for a figure legend."""
        self.fallbacks['legend'] = PreambleCmds.legend
        self.out.append('\\begin{DUlegend}')
    def depart_legend(self, node):
        """Close the DUlegend environment."""
        self.out.append('\\end{DUlegend}\n')
def visit_line(self, node):
self.out.append('\item[] ')
    def depart_line(self, node):
        """End the line with a newline."""
        self.out.append('\n')
    def visit_line_block(self, node):
        """Open a DUlineblock environment (indented when nested)."""
        self.fallbacks['_providelength'] = PreambleCmds.providelength
        self.fallbacks['lineblock'] = PreambleCmds.lineblock
        if isinstance(node.parent, nodes.line_block):
            self.out.append('\\item[]\n'
                            '\\begin{DUlineblock}{\\DUlineblockindent}\n')
        else:
            self.out.append('\n\\begin{DUlineblock}{0em}\n')
    def depart_line_block(self, node):
        """Close the DUlineblock environment."""
        self.out.append('\\end{DUlineblock}\n')
    def visit_list_item(self, node):
        """Open a list item."""
        self.out.append('\n\\item ')
    def depart_list_item(self, node):
        """List items need no closing markup."""
        pass
    def visit_literal(self, node):
        """Open inline literal text in a typewriter font."""
        self.literal = True
        self.out.append('\\texttt{')
        if node['classes']:
            self.visit_inline(node)
    def depart_literal(self, node):
        """Close the inline literal markup opened in visit_literal."""
        self.literal = False
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
# Literal blocks are used for '::'-prefixed literal-indented
# blocks of text, where the inline markup is not recognized,
# but are also the product of the "parsed-literal" directive,
# where the markup is respected.
#
# In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all
# the others we must use \mbox or \alltt.
#
# We can distinguish between the two kinds by the number of
# siblings that compose this node: if it is composed by a
# single element, it's either
# * a real one,
# * a parsed-literal that does not contain any markup, or
# * a parsed-literal containing just one markup construct.
    def is_plaintext(self, node):
        """Check whether a node can be typeset verbatim"""
        # True when the node has exactly one child and it is plain text.
        return (len(node) == 1) and isinstance(node[0], nodes.Text)
    def visit_literal_block(self, node):
        """Render a literal block."""
        # environments and packages to typeset literal blocks
        packages = {'listing': r'\usepackage{moreverb}',
                    'lstlisting': r'\usepackage{listings}',
                    'Verbatim': r'\usepackage{fancyvrb}',
                    # 'verbatim': '',
                    'verbatimtab': r'\usepackage{moreverb}'}
        if not self.active_table.is_open():
            # no quote inside tables, to avoid vertical space between
            # table border and literal block.
            # BUG: fails if normal text precedes the literal block.
            self.out.append('%\n\\begin{quote}')
            self.context.append('\n\\end{quote}\n')
        else:
            self.out.append('\n')
            self.context.append('\n')
        # use a verbatim-like environment only for plain-text content
        if self.literal_block_env != '' and self.is_plaintext(node):
            self.requirements['literal_block'] = packages.get(
                                                  self.literal_block_env, '')
            self.verbatim = True
            self.out.append('\\begin{%s}%s\n' % (self.literal_block_env,
                                                 self.literal_block_options))
        else:
            self.literal = True
            self.insert_newline = True
            self.insert_non_breaking_blanks = True
            self.out.append('{\\ttfamily \\raggedright \\noindent\n')
    def depart_literal_block(self, node):
        """Close the literal block and reset the translator flags."""
        if self.verbatim:
            self.out.append('\n\\end{%s}\n' % self.literal_block_env)
            self.verbatim = False
        else:
            self.out.append('\n}')
            self.insert_non_breaking_blanks = False
            self.insert_newline = False
            self.literal = False
        self.out.append(self.context.pop())
## def visit_meta(self, node):
## self.out.append('[visit_meta]\n')
# TODO: set keywords for pdf?
# But:
# The reStructuredText "meta" directive creates a "pending" node,
# which contains knowledge that the embedded "meta" node can only
# be handled by HTML-compatible writers. The "pending" node is
# resolved by the docutils.transforms.components.Filter transform,
# which checks that the calling writer supports HTML; if it doesn't,
# the "pending" node (and enclosed "meta" node) is removed from the
# document.
# --- docutils/docs/peps/pep-0258.html#transformer
## def depart_meta(self, node):
## self.out.append('[depart_meta]\n')
    def visit_option(self, node):
        """Separate all but the first option of a group with a comma."""
        if self.context[-1]:
            # this is not the first option
            self.out.append(', ')
    def depart_option(self, node):
        # flag that the first option is done.
        self.context[-1] += 1
    def visit_option_argument(self, node):
        """Append the delimiter between an option and its argument to body."""
        self.out.append(node.get('delimiter', ' '))
    def depart_option_argument(self, node):
        """Option arguments need no closing markup."""
        pass
    def visit_option_group(self, node):
        """Open an option group as a list-item label."""
        self.out.append('\n\\item[')
        # flag for first option
        self.context.append(0)
    def depart_option_group(self, node):
        """Close the option-group label opened in visit_option_group."""
        self.context.pop() # the flag
        self.out.append('] ')
    def visit_option_list(self, node):
        """Open a DUoptionlist environment."""
        self.fallbacks['_providelength'] = PreambleCmds.providelength
        self.fallbacks['optionlist'] = PreambleCmds.optionlist
        self.out.append('%\n\\begin{DUoptionlist}\n')
    def depart_option_list(self, node):
        """Close the DUoptionlist environment."""
        self.out.append('\n\\end{DUoptionlist}\n')
    def visit_option_list_item(self, node):
        """Option-list items need no opening markup."""
        pass
    def depart_option_list_item(self, node):
        """Option-list items need no closing markup."""
        pass
    def visit_option_string(self, node):
        """Option strings need no opening markup."""
        ##self.out.append(self.starttag(node, 'span', '', CLASS='option'))
        pass
    def depart_option_string(self, node):
        """Option strings need no closing markup."""
        ##self.out.append('</span>')
        pass
    def visit_organization(self, node):
        """Handle the 'organization' docinfo field."""
        self.visit_docinfo_item(node, 'organization')
    def depart_organization(self, node):
        """Close the 'organization' docinfo field."""
        self.depart_docinfo_item(node)
    def visit_paragraph(self, node):
        """Start a paragraph; suppress the separating blank line where
        LaTeX would add unwanted vertical space."""
        # no newline if the paragraph is first in a list item
        if ((isinstance(node.parent, nodes.list_item) or
             isinstance(node.parent, nodes.description)) and
            node is node.parent[0]):
            return
        index = node.parent.index(node)
        # inside a compound: no newline after a non-paragraph sibling
        if (isinstance(node.parent, nodes.compound) and
            index > 0 and
            not isinstance(node.parent[index - 1], nodes.paragraph) and
            not isinstance(node.parent[index - 1], nodes.compound)):
            return
        self.out.append('\n')
        if node.get('ids'):
            self.out += self.ids_to_labels(node) + ['\n']
    def depart_paragraph(self, node):
        """End the paragraph with a newline."""
        self.out.append('\n')
    def visit_problematic(self, node):
        """Mark problematic text in red, linked to the system message."""
        self.requirements['color'] = PreambleCmds.color
        self.out.append('%\n')
        self.append_hypertargets(node)
        self.out.append(r'\hyperlink{%s}{\textbf{\color{red}' % node['refid'])
    def depart_problematic(self, node):
        """Close the markup opened in visit_problematic."""
        self.out.append('}}')
    def visit_raw(self, node):
        """Pass raw 'latex' content through unchanged; skip other formats."""
        if not 'latex' in node.get('format', '').split():
            raise nodes.SkipNode
        if node['classes']:
            self.visit_inline(node)
        # append "as-is" skipping any LaTeX-encoding
        self.verbatim = True
    def depart_raw(self, node):
        """Reset the verbatim flag set in visit_raw."""
        self.verbatim = False
        if node['classes']:
            self.depart_inline(node)
def has_unbalanced_braces(self, string):
"""Test whether there are unmatched '{' or '}' characters."""
level = 0
for ch in string:
if ch == '{':
level += 1
if ch == '}':
level -= 1
if level < 0:
return True
return level != 0
    def visit_reference(self, node):
        """Open an external (URL) or internal (refid/refname) reference."""
        # We need to escape #, \, and % if we use the URL in a command.
        special_chars = {ord('#'): ur'\#',
                         ord('%'): ur'\%',
                         ord('\\'): ur'\\',
                        }
        # external reference (URL)
        if 'refuri' in node:
            href = unicode(node['refuri']).translate(special_chars)
            # problematic chars double caret and unbalanced braces:
            if href.find('^^') != -1 or self.has_unbalanced_braces(href):
                self.error(
                    'External link "%s" not supported by LaTeX.\n'
                    ' (Must not contain "^^" or unbalanced braces.)' % href)
            if node['refuri'] == node.astext():
                self.out.append(r'\url{%s}' % href)
                raise nodes.SkipNode
            self.out.append(r'\href{%s}{' % href)
            return
        # internal reference
        if 'refid' in node:
            href = node['refid']
        elif 'refname' in node:
            href = self.document.nameids[node['refname']]
        else:
            raise AssertionError('Unknown reference.')
        if not self.is_inline(node):
            self.out.append('\n')
        self.out.append('\\hyperref[%s]{' % href)
        if self._reference_label:
            # replace the link text with a \ref-style label
            self.out.append('\\%s{%s}}' %
                            (self._reference_label, href.replace('#', '')))
            raise nodes.SkipNode
    def depart_reference(self, node):
        """Close the reference markup opened in visit_reference."""
        self.out.append('}')
        if not self.is_inline(node):
            self.out.append('\n')
    def visit_revision(self, node):
        """Handle the 'revision' docinfo field."""
        self.visit_docinfo_item(node, 'revision')
    def depart_revision(self, node):
        """Close the 'revision' docinfo field."""
        self.depart_docinfo_item(node)
    def visit_section(self, node):
        """Enter a section: track nesting level and section numbers."""
        self.section_level += 1
        # Initialize counter for potential subsections:
        self._section_number.append(0)
        # Counter for this section's level (initialized by parent section):
        self._section_number[self.section_level - 1] += 1
    def depart_section(self, node):
        """Leave a section: unwind the bookkeeping from visit_section."""
        # Remove counter for potential subsections:
        self._section_number.pop()
        self.section_level -= 1
    def visit_sidebar(self, node):
        """Open a DUsidebar command."""
        self.requirements['color'] = PreambleCmds.color
        self.fallbacks['sidebar'] = PreambleCmds.sidebar
        self.out.append('\n\\DUsidebar{\n')
    def depart_sidebar(self, node):
        """Close the DUsidebar command."""
        self.out.append('}\n')
attribution_formats = {'dash': ('---', ''),
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
    def visit_attribution(self, node):
        """Open a right-flushed attribution with the configured prefix."""
        prefix, suffix = self.attribution_formats[self.settings.attribution]
        self.out.append('\n\\begin{flushright}\n')
        self.out.append(prefix)
        self.context.append(suffix)
    def depart_attribution(self, node):
        """Close the attribution markup opened in visit_attribution."""
        self.out.append(self.context.pop() + '\n')
        self.out.append('\\end{flushright}\n')
    def visit_status(self, node):
        """Handle the 'status' docinfo field."""
        self.visit_docinfo_item(node, 'status')
    def depart_status(self, node):
        """Close the 'status' docinfo field."""
        self.depart_docinfo_item(node)
    def visit_strong(self, node):
        """Open strong (bold) inline markup."""
        self.out.append('\\textbf{')
        if node['classes']:
            self.visit_inline(node)
    def depart_strong(self, node):
        """Close the strong markup opened in visit_strong."""
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
    def visit_substitution_definition(self, node):
        """Substitution definitions produce no output."""
        raise nodes.SkipNode
    def visit_substitution_reference(self, node):
        """Substitution references should be resolved before writing."""
        self.unimplemented_visit(node)
    def visit_subtitle(self, node):
        """Open a document, section, or sidebar subtitle."""
        if isinstance(node.parent, nodes.document):
            self.push_output_collector(self.subtitle)
            self.subtitle_labels += self.ids_to_labels(node, set_anchor=False)
        # section subtitle: "starred" (no number, not in ToC)
        elif isinstance(node.parent, nodes.section):
            self.out.append(r'\%s*{' %
                             self.d_class.section(self.section_level + 1))
        else:
            self.fallbacks['subtitle'] = PreambleCmds.subtitle
            self.out.append('\n\\DUsubtitle[%s]{' % node.parent.tagname)
    def depart_subtitle(self, node):
        """Close the subtitle markup opened in visit_subtitle."""
        if isinstance(node.parent, nodes.document):
            self.pop_output_collector()
        else:
            self.out.append('}\n')
    def visit_system_message(self, node):
        """Render a system message as an admonition with a red title."""
        self.requirements['color'] = PreambleCmds.color
        self.fallbacks['title'] = PreambleCmds.title
        node['classes'] = ['system-message']
        self.visit_admonition(node)
        self.out.append('\\DUtitle[system-message]{system-message}\n')
        self.append_hypertargets(node)
        try:
            line = ', line~%s' % node['line']
        except KeyError:
            line = ''
        self.out.append('\n\n{\color{red}%s/%s} in \\texttt{%s}%s\n' %
                         (node['type'], node['level'],
                          self.encode(node['source']), line))
        # a single backreference is appended inline, several are listed
        if len(node['backrefs']) == 1:
            self.out.append('\n\\hyperlink{%s}{' % node['backrefs'][0])
            self.context.append('}')
        else:
            backrefs = ['\\hyperlink{%s}{%d}' % (href, i+1)
                        for (i, href) in enumerate(node['backrefs'])]
            self.context.append('backrefs: ' + ' '.join(backrefs))
    def depart_system_message(self, node):
        """Close the system-message admonition."""
        self.out.append(self.context.pop())
        self.depart_admonition()
    def visit_table(self, node):
        """Open a table; nested tables are pushed on a stack."""
        self.requirements['table'] = PreambleCmds.table
        if self.active_table.is_open():
            self.table_stack.append(self.active_table)
            # nesting longtable does not work (e.g. 2007-04-18)
            self.active_table = Table(self,'tabular',self.settings.table_style)
        self.active_table.open()
        for cls in node['classes']:
            self.active_table.set_table_style(cls)
        if self.active_table._table_style == 'booktabs':
            self.requirements['booktabs'] = r'\usepackage{booktabs}'
        self.out.append('\n' + self.active_table.get_opening())
    def depart_table(self, node):
        """Close the table and restore a stacked outer table, if any."""
        self.out.append(self.active_table.get_closing() + '\n')
        self.active_table.close()
        if len(self.table_stack)>0:
            self.active_table = self.table_stack.pop()
        else:
            self.active_table.set_table_style(self.settings.table_style)
        # Insert hyperlabel after (long)table, as
        # other places (beginning, caption) result in LaTeX errors.
        if node.get('ids'):
            self.out += self.ids_to_labels(node, set_anchor=False) + ['\n']
    def visit_target(self, node):
        """Append labels for explicit targets; skip indirect ones."""
        # Skip indirect targets:
        if ('refuri' in node       # external hyperlink
            or 'refid' in node     # resolved internal link
            or 'refname' in node): # unresolved internal link
            ## self.out.append('%% %s\n' % node)   # for debugging
            return
        self.out.append('%\n')
        # do we need an anchor (\phantomsection)?
        set_anchor = not(isinstance(node.parent, nodes.caption) or
                         isinstance(node.parent, nodes.title))
        # TODO: where else can/must we omit the \phantomsection?
        self.out += self.ids_to_labels(node, set_anchor)
    def depart_target(self, node):
        """Targets need no closing markup."""
        pass
    def visit_tbody(self, node):
        """Open the table body; write the preamble for head-less tables."""
        # BUG write preamble if not yet done (colspecs not [])
        # for tables without heads.
        if not self.active_table.get('preamble written'):
            self.visit_thead(None)
            self.depart_thead(None)
    def depart_tbody(self, node):
        """Table bodies need no closing markup."""
        pass
    def visit_term(self, node):
        """definition list term"""
        # Commands with optional args inside an optional arg must be put
        # in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
        self.out.append('\\item[{')
def depart_term(self, node):
# \leavevmode results in a line break if the
# term is followed by an item list.
self.out.append('}] \leavevmode ')
    def visit_tgroup(self, node):
        """Table groups need no opening markup."""
        #self.out.append(self.starttag(node, 'colgroup'))
        #self.context.append('</colgroup>\n')
        pass
    def depart_tgroup(self, node):
        """Table groups need no closing markup."""
        pass
    # current nesting depth of thead processing (see visit/depart_thead)
    _thead_depth = 0
    def thead_depth (self):
        """Return the current thead nesting depth."""
        return self._thead_depth
    def visit_thead(self, node):
        """Open the table head; at depth 1 also write preamble and caption."""
        self._thead_depth += 1
        if 1 == self.thead_depth():
            self.out.append('{%s}\n' % self.active_table.get_colspecs())
            self.active_table.set('preamble written',1)
        self.out.append(self.active_table.get_caption())
        self.out.extend(self.active_table.visit_thead())
    def depart_thead(self, node):
        """Close the table head (node is None for head-less tables)."""
        if node is not None:
            self.out.extend(self.active_table.depart_thead())
            if self.active_table.need_recurse():
                node.walkabout(self)
        self._thead_depth -= 1
    def bookmark(self, node):
        """Return label and pdfbookmark string for titles."""
        result = ['']
        if self.settings.sectnum_xform: # "starred" section cmd
            # add to the toc and pdfbookmarks
            section_name = self.d_class.section(max(self.section_level, 1))
            section_title = self.encode(node.astext())
            result.append(r'\phantomsection')
            result.append(r'\addcontentsline{toc}{%s}{%s}' %
                          (section_name, section_title))
        result += self.ids_to_labels(node.parent, set_anchor=False)
        return '%\n  '.join(result) + '%\n'
    def visit_title(self, node):
        """Append section and other titles."""
        # Document title
        if node.parent.tagname == 'document':
            self.push_output_collector(self.title)
            self.context.append('')
            self.pdfinfo.append('  pdftitle={%s},' %
                                self.encode(node.astext()))
        # Topic titles (topic, admonition, sidebar)
        elif (isinstance(node.parent, nodes.topic) or
              isinstance(node.parent, nodes.admonition) or
              isinstance(node.parent, nodes.sidebar)):
            self.fallbacks['title'] = PreambleCmds.title
            classes = ','.join(node.parent['classes'])
            if not classes:
                classes = node.tagname
            self.out.append('\\DUtitle[%s]{' % classes)
            self.context.append('}\n')
        # Table caption
        elif isinstance(node.parent, nodes.table):
            self.push_output_collector(self.active_table.caption)
            self.context.append('')
        # Section title
        else:
            self.out.append('\n\n')
            self.out.append('%' + '_' * 75)
            self.out.append('\n\n')
            #
            section_name = self.d_class.section(self.section_level)
            # number sections?
            if (self.settings.sectnum_xform # numbering by Docutils
                or (self.section_level > len(self.d_class.sections))):
                section_star = '*'
            else: # LaTeX numbered sections
                section_star = ''
            self.out.append(r'\%s%s{' % (section_name, section_star))
            # System messages heading in red:
            if ('system-messages' in node.parent['classes']):
                self.requirements['color'] = PreambleCmds.color
                self.out.append('\color{red}')
            # label and ToC entry:
            self.context.append(self.bookmark(node) + '}\n')
            # MAYBE postfix paragraph and subparagraph with \leavemode to
            # ensure floats stay in the section and text starts on a new line.
    def depart_title(self, node):
        """Close the title markup opened in visit_title."""
        self.out.append(self.context.pop())
        if (isinstance(node.parent, nodes.table) or
            node.parent.tagname == 'document'):
            self.pop_output_collector()
    def minitoc(self, node, title, depth):
        """Generate a local table of contents with LaTeX package minitoc

        :node:  the 'contents' topic node (used for warnings)
        :title: ToC title string
        :depth: relative ToC depth (0 means unlimited)
        """
        section_name = self.d_class.section(self.section_level)
        # name-prefix for current section level
        minitoc_names = {'part': 'part', 'chapter': 'mini'}
        if 'chapter' not in self.d_class.sections:
            minitoc_names['section'] = 'sect'
        try:
            minitoc_name = minitoc_names[section_name]
        except KeyError: # minitoc only supports part- and toplevel
            self.warn('Skipping local ToC at %s level.\n' % section_name +
                      '  Feature not supported with option "use-latex-toc"',
                      base_node=node)
            return
        # Requirements/Setup
        self.requirements['minitoc'] = PreambleCmds.minitoc
        self.requirements['minitoc-'+minitoc_name] = (r'\do%stoc' %
                                                      minitoc_name)
        # depth: (Docutils defaults to unlimited depth)
        maxdepth = len(self.d_class.sections)
        self.requirements['minitoc-%s-depth' % minitoc_name] = (
            r'\mtcsetdepth{%stoc}{%d}' % (minitoc_name, maxdepth))
        # Process 'depth' argument (!Docutils stores a relative depth while
        # minitoc  expects an absolute depth!):
        offset = {'sect': 1, 'mini': 0, 'part': 0}
        if 'chapter' in self.d_class.sections:
            offset['part'] = -1
        if depth:
            self.out.append('\\setcounter{%stocdepth}{%d}' %
                             (minitoc_name, depth + offset[minitoc_name]))
        # title:
        self.out.append('\\mtcsettitle{%stoc}{%s}\n' % (minitoc_name, title))
        # the toc-generating command:
        self.out.append('\\%stoc\n' % minitoc_name)
def visit_topic(self, node):
# Topic nodes can be generic topic, abstract, dedication, or ToC.
# table of contents:
if 'contents' in node['classes']:
self.out.append('\n')
self.out += self.ids_to_labels(node)
# add contents to PDF bookmarks sidebar
if isinstance(node.next_node(), nodes.title):
self.out.append('\n\\pdfbookmark[%d]{%s}{%s}\n' %
(self.section_level+1,
node.next_node().astext(),
node.get('ids', ['contents'])[0]
))
if self.use_latex_toc:
title = ''
if isinstance(node.next_node(), nodes.title):
title = self.encode(node.pop(0).astext())
depth = node.get('depth', 0)
if 'local' in node['classes']:
self.minitoc(title, node, depth)
self.context.append('')
return
if depth:
self.out.append('\\setcounter{tocdepth}{%d}\n' % depth)
if title != 'Contents':
self.out.append('\\renewcommand{\\contentsname}{%s}\n' %
title)
self.out.append('\\tableofcontents\n\n')
self.has_latex_toc = True
else: # Docutils generated contents list
# set flag for visit_bullet_list() and visit_title()
self.is_toc_list = True
self.context.append('')
elif ('abstract' in node['classes'] and
self.settings.use_latex_abstract):
self.push_output_collector(self.abstract)
self.out.append('\\begin{abstract}')
self.context.append('\\end{abstract}\n')
if isinstance(node.next_node(), nodes.title):
node.pop(0) # LaTeX provides its own title
else:
self.fallbacks['topic'] = PreambleCmds.topic
# special topics:
if 'abstract' in node['classes']:
self.fallbacks['abstract'] = PreambleCmds.abstract
self.push_output_collector(self.abstract)
if 'dedication' in node['classes']:
self.fallbacks['dedication'] = PreambleCmds.dedication
self.push_output_collector(self.dedication)
self.out.append('\n\\DUtopic[%s]{\n' % ','.join(node['classes']))
self.context.append('}\n')
    def depart_topic(self, node):
        """Close the topic markup opened in visit_topic."""
        self.out.append(self.context.pop())
        self.is_toc_list = False
        if ('abstract' in node['classes'] or
            'dedication' in node['classes']):
            self.pop_output_collector()
    def visit_inline(self, node): # <span>, i.e. custom roles
        """Wrap inline content in one DUrole command per class."""
        # insert fallback definition
        self.fallbacks['inline'] = PreambleCmds.inline
        self.out += [r'\DUrole{%s}{' % cls for cls in node['classes']]
        self.context.append('}' * (len(node['classes'])))
    def depart_inline(self, node):
        """Close the DUrole commands opened in visit_inline."""
        self.out.append(self.context.pop())
    def visit_rubric(self, node):
        """Open a DUrubric command."""
        self.fallbacks['rubric'] = PreambleCmds.rubric
        self.out.append('\n\\DUrubric{')
        self.context.append('}\n')
    def depart_rubric(self, node):
        """Close the DUrubric command."""
        self.out.append(self.context.pop())
    def visit_transition(self, node):
        """Append a DUtransition command (horizontal line by default)."""
        self.fallbacks['transition'] = PreambleCmds.transition
        self.out.append('\n\n')
        self.out.append('%' + '_' * 75 + '\n')
        self.out.append(r'\DUtransition')
        self.out.append('\n\n')
    def depart_transition(self, node):
        """Transitions need no closing markup."""
        pass
    def visit_version(self, node):
        """Handle the 'version' docinfo field."""
        self.visit_docinfo_item(node, 'version')
    def depart_version(self, node):
        """Close the 'version' docinfo field."""
        self.depart_docinfo_item(node)
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s' %
node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
| Python |
# -*- coding: utf-8 -*-
# $Id: manpage.py 6270 2010-03-18 22:32:09Z milde $
# Author: Engelbert Gruber <grubert@users.sourceforge.net>
# Copyright: This module is put into the public domain.
"""
Simple man page writer for reStructuredText.
Man pages (short for "manual pages") contain system documentation on unix-like
systems. The pages are grouped in numbered sections:
1 executable programs and shell commands
2 system calls
3 library functions
4 special files
5 file formats
6 games
7 miscellaneous
8 system administration
Man pages are written in *troff*, a text file formatting system.
See http://www.tldp.org/HOWTO/Man-Page for a start.
Man pages have no subsections, only parts.
Standard parts
NAME ,
SYNOPSIS ,
DESCRIPTION ,
OPTIONS ,
FILES ,
SEE ALSO ,
BUGS ,
and
AUTHOR .
A unix-like system keeps an index of the DESCRIPTIONs, which is accessible
by the commands whatis or apropos.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import time
import re
import docutils
from docutils import nodes, utils, writers, languages
import roman
# Base indents (in characters) for the man-page list environments.
FIELD_LIST_INDENT = 7
DEFINITION_LIST_INDENT = 7
OPTION_LIST_INDENT = 7
# NOTE(review): name is misspelled ("BLOCKQOUTE"); kept unchanged because
# other code in this module may reference it by this spelling.
BLOCKQOUTE_INDENT = 3.5

# Define two macros so man/roff can calculate the
# indent/unindent margins by itself
MACRO_DEF = (r""".
.nr rst2man-indent-level 0
.
.de1 rstReportMargin
\\$1 \\n[an-margin]
level \\n[rst2man-indent-level]
level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
-
\\n[rst2man-indent0]
\\n[rst2man-indent1]
\\n[rst2man-indent2]
..
.de1 INDENT
.\" .rstReportMargin pre:
. RS \\$1
. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
. nr rst2man-indent-level +1
.\" .rstReportMargin post:
..
.de UNINDENT
. RE
.\" indent \\n[an-margin]
.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
.nr rst2man-indent-level -1
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
""")
class Writer(writers.Writer):
    """Man-page writer: delegates the work to a `Translator` visitor."""

    supported = ('manpage',)
    """Formats this writer supports."""

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        # visitor class instantiated per document in translate()
        self.translator_class = Translator

    def translate(self):
        # walk the document tree, collecting troff output in the visitor
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.astext()
class Table:
    """Collect table cell content and render it as a *troff* ``tbl``
    block (``.TS`` ... ``.TE``).

    Rows are gathered with `new_row`/`append_cell` and rendered by
    `as_list`.
    """
    def __init__(self):
        self._rows = []
        self._options = ['center', ]
        self._tab_char = '\t'
        self._coldefs = []
    def new_row(self):
        """Start collecting a new (initially empty) table row."""
        self._rows.append([])
    def append_separator(self, separator):
        """Append the separator for table head."""
        self._rows.append([separator])
    def append_cell(self, cell_lines):
        """cell_lines is an array of lines"""
        # Drop a leading paragraph break: it would add blank space
        # at the top of the cell.
        start = 1 if (cell_lines and cell_lines[0] == '.sp\n') else 0
        self._rows[-1].append(cell_lines[start:])
        # Widen the column definitions to match the widest row so far.
        if len(self._coldefs) < len(self._rows[-1]):
            self._coldefs.append('l')
    def _minimize_cell(self, cell_lines):
        """Remove leading and trailing blank and ``.sp`` lines"""
        blank = ('\n', '.sp\n')
        while cell_lines and cell_lines[0] in blank:
            del cell_lines[0]
        while cell_lines and cell_lines[-1] in blank:
            del cell_lines[-1]
    def as_list(self):
        """Return the table as a list of *troff* source lines."""
        lines = ['.TS\n']
        lines.append(' '.join(self._options) + ';\n')
        lines.append('|%s|.\n' % '|'.join(self._coldefs))
        for row in self._rows:
            # row = array of cells. cell = array of lines.
            lines.append('_\n')     # line above
            lines.append('T{\n')
            last = len(row) - 1
            for idx, cell in enumerate(row):
                self._minimize_cell(cell)
                lines.extend(cell)
                # tbl requires each cell chunk to end with a newline
                if not lines[-1].endswith('\n'):
                    lines[-1] += '\n'
                # tab-separated cell delimiters; close the last cell
                if idx < last:
                    lines.append('T}' + self._tab_char + 'T{\n')
                else:
                    lines.append('T}\n')
        lines.append('_\n')
        lines.append('.TE\n')
        return lines
class Translator(nodes.NodeVisitor):

    """Visitor that translates a docutils document tree into man-page
    (troff) source.  Output is collected in ``self.head``, ``self.body``
    and ``self.foot`` and assembled by `astext`.
    """

    words_and_spaces = re.compile(r'\S+| +|\n')
    document_start = """Man page generated from reStructeredText."""

    def __init__(self, document):
        nodes.NodeVisitor.__init__(self, document)
        self.settings = settings = document.settings
        lcode = settings.language_code
        self.language = languages.get_language(lcode)
        self.head = []
        self.body = []
        self.foot = []
        self.section_level = 0
        self.context = []
        self.topic_class = ''
        self.colspecs = []
        self.compact_p = 1
        self.compact_simple = None
        # the list style "*" bullet or "#" numbered
        self._list_char = []
        # writing the header .TH and .SH NAME is postboned after
        # docinfo.
        self._docinfo = {
                "title" : "", "title_upper": "",
                "subtitle" : "",
                "manual_section" : "", "manual_group" : "",
                "author" : [],
                "date" : "",
                "copyright" : "",
                "version" : "",
                }
        self._docinfo_keys = []   # a list to keep the sequence as in source.
        self._docinfo_names = {}  # to get name from text not normalized.
        self._in_docinfo = None
        self._active_table = None
        self._in_literal = False
        self.header_written = 0
        self._line_block = 0
        self.authors = []
        self.section_level = 0
        # stack of indentation amounts; see `indent`/`dedent`.
        self._indent = [0]
        # central definition of simple processing rules
        # what to output on : visit, depart
        # Do not use paragraph requests ``.PP`` because these set indentation.
        # use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
        #
        # Fonts are put on a stack, the top one is used.
        # ``.ft P`` or ``\\fP`` pop from stack.
        # ``B`` bold, ``I`` italic, ``R`` roman should be available.
        # Hopefully ``C`` courier too.
        self.defs = {
                'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
                'definition_list_item' : ('.TP', ''),
                'field_name' : ('.TP\n.B ', '\n'),
                'literal' : ('\\fB', '\\fP'),
                'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
                'option_list_item' : ('.TP\n', ''),
                'reference' : (r'\fI\%', r'\fP'),
                'emphasis': ('\\fI', '\\fP'),
                'strong' : ('\\fB', '\\fP'),
                'term' : ('\n.B ', '\n'),
                'title_reference' : ('\\fI', '\\fP'),
                'topic-title' : ('.SS ', ),
                'sidebar-title' : ('.SS ', ),
                'problematic' : ('\n.nf\n', '\n.fi\n'),
                }
    # NOTE dont specify the newline before a dot-command, but ensure
    # it is there.

    def comment_begin(self, text):
        """Return commented version of the passed text WITHOUT end of
        line/comment."""
        prefix = '.\\" '
        out_text = ''.join(
            [(prefix + in_line + '\n')
            for in_line in text.split('\n')])
        return out_text

    def comment(self, text):
        """Return commented version of the passed text."""
        return self.comment_begin(text)+'.\n'

    def ensure_eol(self):
        """Ensure the last line in body is terminated by new line."""
        if self.body[-1][-1] != '\n':
            self.body.append('\n')

    def astext(self):
        """Return the final formatted document as a string."""
        if not self.header_written:
            # ensure we get a ".TH" as viewers require it.
            self.head.append(self.header())
        # filter body
        for i in xrange(len(self.body)-1,0,-1):
            # remove superfluous vertical gaps.
            if self.body[i] == '.sp\n':
                if self.body[i-1][:4] in ('.BI ','.IP '):
                    self.body[i] = '.\n'
                elif (self.body[i-1][:3] == '.B ' and
                        self.body[i-2][:4] == '.TP\n'):
                    self.body[i] = '.\n'
                elif (self.body[i-1] == '\n' and
                        self.body[i-2][0] != '.' and
                        (self.body[i-3][:7] == '.TP\n.B '
                            or self.body[i-3][:4] == '\n.B ')
                        ):
                    self.body[i] = '.\n'
        return ''.join(self.head + self.body + self.foot)

    def deunicode(self, text):
        # replace no-break space and dagger by troff escapes.
        text = text.replace(u'\xa0', '\\ ')
        text = text.replace(u'\u2020', '\\(dg')
        return text

    def visit_Text(self, node):
        text = node.astext()
        # escape backslashes first, then characters special to troff.
        text = text.replace('\\','\\e')
        replace_pairs = [
            (u'-', ur'\-'),
            (u'\'', ur'\(aq'),
            (u'´', ur'\''),
            (u'`', ur'\(ga'),
            ]
        for (in_char, out_markup) in replace_pairs:
            text = text.replace(in_char, out_markup)
        # unicode
        text = self.deunicode(text)
        if self._in_literal:
            # prevent interpretation of "." at line start
            if text[0] == '.':
                text = '\\&' + text
            text = text.replace('\n.', '\n\\&.')
        self.body.append(text)

    def depart_Text(self, node):
        pass

    def list_start(self, node):
        # push a marker/counter tracker for this list level, then indent.
        class enum_char:
            enum_style = {
                    'bullet' : '\\(bu',
                    'emdash' : '\\(em',
                    }

            def __init__(self, style):
                self._style = style
                if node.has_key('start'):
                    self._cnt = node['start'] - 1
                else:
                    self._cnt = 0
                self._indent = 2
                if style == 'arabic':
                    # indentation depends on number of childrens
                    # and start value.
                    self._indent = len(str(len(node.children)))
                    self._indent += len(str(self._cnt)) + 1
                elif style == 'loweralpha':
                    self._cnt += ord('a') - 1
                    self._indent = 3
                elif style == 'upperalpha':
                    self._cnt += ord('A') - 1
                    self._indent = 3
                elif style.endswith('roman'):
                    self._indent = 5

            def next(self):
                # return the marker for the next item.
                if self._style == 'bullet':
                    return self.enum_style[self._style]
                elif self._style == 'emdash':
                    return self.enum_style[self._style]
                self._cnt += 1
                # TODO add prefix postfix
                if self._style == 'arabic':
                    return "%d." % self._cnt
                elif self._style in ('loweralpha', 'upperalpha'):
                    return "%c." % self._cnt
                elif self._style.endswith('roman'):
                    res = roman.toRoman(self._cnt) + '.'
                    if self._style.startswith('upper'):
                        return res.upper()
                    return res.lower()
                else:
                    return "%d." % self._cnt

            def get_width(self):
                return self._indent

            def __repr__(self):
                return 'enum_style-%s' % list(self._style)

        if node.has_key('enumtype'):
            self._list_char.append(enum_char(node['enumtype']))
        else:
            self._list_char.append(enum_char('bullet'))
        if len(self._list_char) > 1:
            # indent nested lists
            self.indent(self._list_char[-2].get_width())
        else:
            self.indent(self._list_char[-1].get_width())

    def list_end(self):
        self.dedent()
        self._list_char.pop()

    def header(self):
        """Return the man-page header: ``.TH`` line plus NAME section."""
        tmpl = (".TH %(title_upper)s %(manual_section)s"
                " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
                ".SH NAME\n"
                "%(title)s \- %(subtitle)s\n")
        return tmpl % self._docinfo

    def append_header(self):
        """append header with .TH and .SH NAME"""
        # NOTE before everything
        # .TH title_upper section date source manual
        if self.header_written:
            return
        self.body.append(self.header())
        self.body.append(MACRO_DEF)
        self.header_written = 1

    def visit_address(self, node):
        self.visit_docinfo_item(node, 'address')

    def depart_address(self, node):
        pass

    def visit_admonition(self, node, name=None):
        if name:
            self.body.append('.IP %s\n' %
                        self.language.labels.get(name, name))

    def depart_admonition(self, node):
        self.body.append('.RE\n')

    def visit_attention(self, node):
        self.visit_admonition(node, 'attention')

    depart_attention = depart_admonition

    def visit_docinfo_item(self, node, name):
        # record a docinfo field and skip normal traversal of the node.
        if name == 'author':
            self._docinfo[name].append(node.astext())
        else:
            self._docinfo[name] = node.astext()
        self._docinfo_keys.append(name)
        raise nodes.SkipNode

    def depart_docinfo_item(self, node):
        pass

    def visit_author(self, node):
        self.visit_docinfo_item(node, 'author')

    depart_author = depart_docinfo_item

    def visit_authors(self, node):
        # _author is called anyway.
        pass

    def depart_authors(self, node):
        pass

    def visit_block_quote(self, node):
        # BUG/HACK: indent alway uses the _last_ indention,
        # thus we need two of them.
        self.indent(BLOCKQOUTE_INDENT)
        self.indent(0)

    def depart_block_quote(self, node):
        self.dedent()
        self.dedent()

    def visit_bullet_list(self, node):
        self.list_start(node)

    def depart_bullet_list(self, node):
        self.list_end()

    def visit_caption(self, node):
        pass

    def depart_caption(self, node):
        pass

    def visit_caution(self, node):
        self.visit_admonition(node, 'caution')

    depart_caution = depart_admonition

    def visit_citation(self, node):
        num,text = node.astext().split(None,1)
        num = num.strip()
        self.body.append('.IP [%s] 5\n' % num)

    def depart_citation(self, node):
        pass

    def visit_citation_reference(self, node):
        self.body.append('['+node.astext()+']')
        raise nodes.SkipNode

    def visit_classifier(self, node):
        pass

    def depart_classifier(self, node):
        pass

    def visit_colspec(self, node):
        self.colspecs.append(node)

    def depart_colspec(self, node):
        pass

    def write_colspecs(self):
        self.body.append("%s.\n" % ('L '*len(self.colspecs)))

    def visit_comment(self, node,
                      sub=re.compile('-(?=-)').sub):
        self.body.append(self.comment(node.astext()))
        raise nodes.SkipNode

    def visit_contact(self, node):
        self.visit_docinfo_item(node, 'contact')

    depart_contact = depart_docinfo_item

    def visit_container(self, node):
        pass

    def depart_container(self, node):
        pass

    def visit_compound(self, node):
        pass

    def depart_compound(self, node):
        pass

    def visit_copyright(self, node):
        self.visit_docinfo_item(node, 'copyright')

    def visit_danger(self, node):
        self.visit_admonition(node, 'danger')

    depart_danger = depart_admonition

    def visit_date(self, node):
        self.visit_docinfo_item(node, 'date')

    def visit_decoration(self, node):
        pass

    def depart_decoration(self, node):
        pass

    def visit_definition(self, node):
        pass

    def depart_definition(self, node):
        pass

    def visit_definition_list(self, node):
        self.indent(DEFINITION_LIST_INDENT)

    def depart_definition_list(self, node):
        self.dedent()

    def visit_definition_list_item(self, node):
        self.body.append(self.defs['definition_list_item'][0])

    def depart_definition_list_item(self, node):
        self.body.append(self.defs['definition_list_item'][1])

    def visit_description(self, node):
        pass

    def depart_description(self, node):
        pass

    def visit_docinfo(self, node):
        self._in_docinfo = 1

    def depart_docinfo(self, node):
        self._in_docinfo = None
        # NOTE nothing should be written before this
        self.append_header()

    def visit_doctest_block(self, node):
        self.body.append(self.defs['literal_block'][0])
        self._in_literal = True

    def depart_doctest_block(self, node):
        self._in_literal = False
        self.body.append(self.defs['literal_block'][1])

    def visit_document(self, node):
        # no blank line between comment and header.
        self.body.append(self.comment(self.document_start).rstrip()+'\n')
        # writing header is postboned
        self.header_written = 0

    def depart_document(self, node):
        # emit AUTHOR / COPYRIGHT sections and remaining docinfo fields.
        if self._docinfo['author']:
            self.body.append('.SH AUTHOR\n%s\n'
                    % ', '.join(self._docinfo['author']))
        skip = ('author', 'copyright', 'date',
                'manual_group', 'manual_section',
                'subtitle',
                'title', 'title_upper', 'version')
        for name in self._docinfo_keys:
            if name == 'address':
                self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
                                    self.language.labels.get(name, name),
                                    self.defs['indent'][0] % 0,
                                    self.defs['indent'][0] % BLOCKQOUTE_INDENT,
                                    self._docinfo[name],
                                    self.defs['indent'][1],
                                    self.defs['indent'][1],
                                    ) )
            elif not name in skip:
                if name in self._docinfo_names:
                    label = self._docinfo_names[name]
                else:
                    label = self.language.labels.get(name, name)
                self.body.append("\n%s: %s\n" % (label, self._docinfo[name]) )
        if self._docinfo['copyright']:
            self.body.append('.SH COPYRIGHT\n%s\n'
                    % self._docinfo['copyright'])
        self.body.append( self.comment(
                        'Generated by docutils manpage writer.\n' ) )

    def visit_emphasis(self, node):
        self.body.append(self.defs['emphasis'][0])

    def depart_emphasis(self, node):
        self.body.append(self.defs['emphasis'][1])

    def visit_entry(self, node):
        # a cell in a table row
        if 'morerows' in node:
            self.document.reporter.warning('"table row spanning" not supported',
                    base_node=node)
        if 'morecols' in node:
            self.document.reporter.warning(
                    '"table cell spanning" not supported', base_node=node)
        self.context.append(len(self.body))

    def depart_entry(self, node):
        # move the cell's generated lines from body into the table.
        start = self.context.pop()
        self._active_table.append_cell(self.body[start:])
        del self.body[start:]

    def visit_enumerated_list(self, node):
        self.list_start(node)

    def depart_enumerated_list(self, node):
        self.list_end()

    def visit_error(self, node):
        self.visit_admonition(node, 'error')

    depart_error = depart_admonition

    def visit_field(self, node):
        pass

    def depart_field(self, node):
        pass

    def visit_field_body(self, node):
        if self._in_docinfo:
            name_normalized = self._field_name.lower().replace(" ","_")
            self._docinfo_names[name_normalized] = self._field_name
            self.visit_docinfo_item(node, name_normalized)
            raise nodes.SkipNode

    def depart_field_body(self, node):
        pass

    def visit_field_list(self, node):
        self.indent(FIELD_LIST_INDENT)

    def depart_field_list(self, node):
        self.dedent()

    def visit_field_name(self, node):
        if self._in_docinfo:
            self._field_name = node.astext()
            raise nodes.SkipNode
        else:
            self.body.append(self.defs['field_name'][0])

    def depart_field_name(self, node):
        self.body.append(self.defs['field_name'][1])

    def visit_figure(self, node):
        self.indent(2.5)
        self.indent(0)

    def depart_figure(self, node):
        self.dedent()
        self.dedent()

    def visit_footer(self, node):
        self.document.reporter.warning('"footer" not supported',
                base_node=node)

    def depart_footer(self, node):
        pass

    def visit_footnote(self, node):
        num,text = node.astext().split(None,1)
        num = num.strip()
        self.body.append('.IP [%s] 5\n' % self.deunicode(num))

    def depart_footnote(self, node):
        pass

    def footnote_backrefs(self, node):
        self.document.reporter.warning('"footnote_backrefs" not supported',
                base_node=node)

    def visit_footnote_reference(self, node):
        self.body.append('['+self.deunicode(node.astext())+']')
        raise nodes.SkipNode

    def depart_footnote_reference(self, node):
        pass

    def visit_generated(self, node):
        pass

    def depart_generated(self, node):
        pass

    def visit_header(self, node):
        raise NotImplementedError, node.astext()

    def depart_header(self, node):
        pass

    def visit_hint(self, node):
        self.visit_admonition(node, 'hint')

    depart_hint = depart_admonition

    def visit_subscript(self, node):
        self.body.append('\\s-2\\d')

    def depart_subscript(self, node):
        self.body.append('\\u\\s0')

    def visit_superscript(self, node):
        self.body.append('\\s-2\\u')

    def depart_superscript(self, node):
        self.body.append('\\d\\s0')

    def visit_attribution(self, node):
        self.body.append('\\(em ')

    def depart_attribution(self, node):
        self.body.append('\n')

    def visit_image(self, node):
        # images cannot be rendered; emit alt text/URI as placeholder.
        self.document.reporter.warning('"image" not supported',
                base_node=node)
        text = []
        if 'alt' in node.attributes:
            text.append(node.attributes['alt'])
        if 'uri' in node.attributes:
            text.append(node.attributes['uri'])
        self.body.append('[image: %s]\n' % ('/'.join(text)))
        raise nodes.SkipNode

    def visit_important(self, node):
        self.visit_admonition(node, 'important')

    depart_important = depart_admonition

    def visit_label(self, node):
        # footnote and citation
        if (isinstance(node.parent, nodes.footnote)
            or isinstance(node.parent, nodes.citation)):
            raise nodes.SkipNode
        self.document.reporter.warning('"unsupported "label"',
                base_node=node)
        self.body.append('[')

    def depart_label(self, node):
        self.body.append(']\n')

    def visit_legend(self, node):
        pass

    def depart_legend(self, node):
        pass

    # WHAT should we use .INDENT, .UNINDENT ?
    def visit_line_block(self, node):
        self._line_block += 1
        if self._line_block == 1:
            self.body.append('.nf\n')
        else:
            self.body.append('.in +2\n')

    def depart_line_block(self, node):
        self._line_block -= 1
        if self._line_block == 0:
            self.body.append('.fi\n')
            self.body.append('.sp\n')
        else:
            self.body.append('.in -2\n')

    def visit_line(self, node):
        pass

    def depart_line(self, node):
        self.body.append('\n')

    def visit_list_item(self, node):
        # man 7 man argues to use ".IP" instead of ".TP"
        self.body.append('.IP %s %d\n' % (
                self._list_char[-1].next(),
                self._list_char[-1].get_width(),) )

    def depart_list_item(self, node):
        pass

    def visit_literal(self, node):
        self.body.append(self.defs['literal'][0])

    def depart_literal(self, node):
        self.body.append(self.defs['literal'][1])

    def visit_literal_block(self, node):
        self.body.append(self.defs['literal_block'][0])
        self._in_literal = True

    def depart_literal_block(self, node):
        self._in_literal = False
        self.body.append(self.defs['literal_block'][1])

    def visit_meta(self, node):
        raise NotImplementedError, node.astext()

    def depart_meta(self, node):
        pass

    def visit_note(self, node):
        self.visit_admonition(node, 'note')

    depart_note = depart_admonition

    def indent(self, by=0.5):
        # if we are in a section ".SH" there already is a .RS
        step = self._indent[-1]
        self._indent.append(by)
        self.body.append(self.defs['indent'][0] % step)

    def dedent(self):
        self._indent.pop()
        self.body.append(self.defs['indent'][1])

    def visit_option_list(self, node):
        self.indent(OPTION_LIST_INDENT)

    def depart_option_list(self, node):
        self.dedent()

    def visit_option_list_item(self, node):
        # one item of the list
        self.body.append(self.defs['option_list_item'][0])

    def depart_option_list_item(self, node):
        self.body.append(self.defs['option_list_item'][1])

    def visit_option_group(self, node):
        # as one option could have several forms it is a group
        # options without parameter bold only, .B, -v
        # options with parameter bold italic, .BI, -f file
        #
        # we do not know if .B or .BI
        self.context.append('.B')           # blind guess
        self.context.append(len(self.body)) # to be able to insert later
        self.context.append(0)              # option counter

    def depart_option_group(self, node):
        self.context.pop()  # the counter
        start_position = self.context.pop()
        text = self.body[start_position:]
        del self.body[start_position:]
        self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))

    def visit_option(self, node):
        # each form of the option will be presented separately
        if self.context[-1]>0:
            self.body.append(', ')
        if self.context[-3] == '.BI':
            self.body.append('\\')
        self.body.append(' ')

    def depart_option(self, node):
        self.context[-1] += 1

    def visit_option_string(self, node):
        # do not know if .B or .BI
        pass

    def depart_option_string(self, node):
        pass

    def visit_option_argument(self, node):
        self.context[-3] = '.BI' # bold/italic alternate
        if node['delimiter'] != ' ':
            self.body.append('\\fB%s ' % node['delimiter'] )
        elif self.body[len(self.body)-1].endswith('='):
            # a blank only means no blank in output, just changing font
            self.body.append(' ')
        else:
            # blank backslash blank, switch font then a blank
            self.body.append(' \\ ')

    def depart_option_argument(self, node):
        pass

    def visit_organization(self, node):
        self.visit_docinfo_item(node, 'organization')

    def depart_organization(self, node):
        pass

    def visit_paragraph(self, node):
        # ``.PP`` : Start standard indented paragraph.
        # ``.LP`` : Start block paragraph, all except the first.
        # ``.P [type]`` : Start paragraph type.
        # NOTE dont use paragraph starts because they reset indentation.
        # ``.sp`` is only vertical space
        self.ensure_eol()
        self.body.append('.sp\n')

    def depart_paragraph(self, node):
        self.body.append('\n')

    def visit_problematic(self, node):
        self.body.append(self.defs['problematic'][0])

    def depart_problematic(self, node):
        self.body.append(self.defs['problematic'][1])

    def visit_raw(self, node):
        if node.get('format') == 'manpage':
            self.body.append(node.astext() + "\n")
        # Keep non-manpage raw text out of output:
        raise nodes.SkipNode

    def visit_reference(self, node):
        """E.g. link or email address."""
        self.body.append(self.defs['reference'][0])

    def depart_reference(self, node):
        self.body.append(self.defs['reference'][1])

    def visit_revision(self, node):
        self.visit_docinfo_item(node, 'revision')

    depart_revision = depart_docinfo_item

    def visit_row(self, node):
        self._active_table.new_row()

    def depart_row(self, node):
        pass

    def visit_section(self, node):
        self.section_level += 1

    def depart_section(self, node):
        self.section_level -= 1

    def visit_status(self, node):
        self.visit_docinfo_item(node, 'status')

    depart_status = depart_docinfo_item

    def visit_strong(self, node):
        self.body.append(self.defs['strong'][0])

    def depart_strong(self, node):
        self.body.append(self.defs['strong'][1])

    def visit_substitution_definition(self, node):
        """Internal only."""
        raise nodes.SkipNode

    def visit_substitution_reference(self, node):
        self.document.reporter.warning('"substitution_reference" not supported',
                base_node=node)

    def visit_subtitle(self, node):
        if isinstance(node.parent, nodes.sidebar):
            self.body.append(self.defs['strong'][0])
        elif isinstance(node.parent, nodes.document):
            self.visit_docinfo_item(node, 'subtitle')
        elif isinstance(node.parent, nodes.section):
            self.body.append(self.defs['strong'][0])

    def depart_subtitle(self, node):
        # document subtitle calls SkipNode
        self.body.append(self.defs['strong'][1]+'\n.PP\n')

    def visit_system_message(self, node):
        # TODO add report_level
        #if node['level'] < self.document.reporter['writer'].report_level:
        #    Level is too low to display:
        #    raise nodes.SkipNode
        attr = {}
        backref_text = ''
        if node.hasattr('id'):
            attr['name'] = node['id']
        if node.hasattr('line'):
            line = ', line %s' % node['line']
        else:
            line = ''
        self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
                % (node['type'], node['level'], node['source'], line))

    def depart_system_message(self, node):
        pass

    def visit_table(self, node):
        self._active_table = Table()

    def depart_table(self, node):
        self.ensure_eol()
        self.body.extend(self._active_table.as_list())
        self._active_table = None

    def visit_target(self, node):
        # targets are in-document hyper targets, without any use for man-pages.
        raise nodes.SkipNode

    def visit_tbody(self, node):
        pass

    def depart_tbody(self, node):
        pass

    def visit_term(self, node):
        self.body.append(self.defs['term'][0])

    def depart_term(self, node):
        self.body.append(self.defs['term'][1])

    def visit_tgroup(self, node):
        pass

    def depart_tgroup(self, node):
        pass

    def visit_thead(self, node):
        # MAYBE double line '='
        pass

    def depart_thead(self, node):
        # MAYBE double line '='
        pass

    def visit_tip(self, node):
        self.visit_admonition(node, 'tip')

    depart_tip = depart_admonition

    def visit_title(self, node):
        if isinstance(node.parent, nodes.topic):
            self.body.append(self.defs['topic-title'][0])
        elif isinstance(node.parent, nodes.sidebar):
            self.body.append(self.defs['sidebar-title'][0])
        elif isinstance(node.parent, nodes.admonition):
            self.body.append('.IP "')
        elif self.section_level == 0:
            self._docinfo['title'] = node.astext()
            # document title for .TH
            self._docinfo['title_upper'] = node.astext().upper()
            raise nodes.SkipNode
        elif self.section_level == 1:
            self.body.append('.SH %s\n' % self.deunicode(node.astext().upper()))
            raise nodes.SkipNode
        else:
            self.body.append('.SS ')

    def depart_title(self, node):
        if isinstance(node.parent, nodes.admonition):
            self.body.append('"')
        self.body.append('\n')

    def visit_title_reference(self, node):
        """inline citation reference"""
        self.body.append(self.defs['title_reference'][0])

    def depart_title_reference(self, node):
        self.body.append(self.defs['title_reference'][1])

    def visit_topic(self, node):
        pass

    def depart_topic(self, node):
        pass

    def visit_sidebar(self, node):
        pass

    def depart_sidebar(self, node):
        pass

    def visit_rubric(self, node):
        pass

    def depart_rubric(self, node):
        pass

    def visit_transition(self, node):
        # .PP      Begin a new paragraph and reset prevailing indent.
        # .sp N    leaves N lines of blank space.
        # .ce      centers the next line
        self.body.append('\n.sp\n.ce\n----\n')

    def depart_transition(self, node):
        self.body.append('\n.ce 0\n.sp\n')

    def visit_version(self, node):
        self.visit_docinfo_item(node, 'version')

    def visit_warning(self, node):
        self.visit_admonition(node, 'warning')

    depart_warning = depart_admonition

    def unimplemented_visit(self, node):
        raise NotImplementedError('visiting unimplemented node type: %s'
                % node.__class__.__name__)
# vim: set fileencoding=utf-8 et ts=4 ai :
| Python |
# $Id: __init__.py 5889 2009-04-01 20:00:21Z gbrandl $
# Authors: Chris Liechti <cliechti@gmx.net>;
# David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
S5/HTML Slideshow Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import docutils
from docutils import frontend, nodes, utils
from docutils.writers import html4css1
from docutils.parsers.rst import directives
from docutils._compat import b
# Path (relative to the current working directory) of the 'themes'
# directory that sits next to this module.
themes_dir_path = utils.relative_path(
    os.path.join(os.getcwd(), 'dummy'),
    os.path.join(os.path.dirname(__file__), 'themes'))
def find_theme(name):
    """Return the directory path of installed theme `name`.

    Raise ``docutils.ApplicationError`` if no such theme directory
    exists under `themes_dir_path`.
    """
    # Where else to look for a theme?
    # Check working dir? Destination dir? Config dir? Plugins dir?
    candidate = os.path.join(themes_dir_path, name)
    if os.path.isdir(candidate):
        return candidate
    raise docutils.ApplicationError(
        'Theme directory not found: %r (path: %r)' % (name, candidate))
class Writer(html4css1.Writer):

    """Writer for S5/HTML slideshows: the HTML4/CSS1 writer extended
    with S5-specific command-line settings and translator."""

    # Additional command-line options appended to the base writer's spec.
    settings_spec = html4css1.Writer.settings_spec + (
        'S5 Slideshow Specific Options',
        'For the S5/HTML writer, the --no-toc-backlinks option '
        '(defined in General Docutils Options above) is the default, '
        'and should not be changed.',
        (('Specify an installed S5 theme by name. Overrides --theme-url. '
          'The default theme name is "default". The theme files will be '
          'copied into a "ui/<theme>" directory, in the same directory as the '
          'destination file (output HTML). Note that existing theme files '
          'will not be overwritten (unless --overwrite-theme-files is used).',
          ['--theme'],
          {'default': 'default', 'metavar': '<name>',
           'overrides': 'theme_url'}),
         ('Specify an S5 theme URL. The destination file (output HTML) will '
          'link to this theme; nothing will be copied. Overrides --theme.',
          ['--theme-url'],
          {'metavar': '<URL>', 'overrides': 'theme'}),
         ('Allow existing theme files in the ``ui/<theme>`` directory to be '
          'overwritten. The default is not to overwrite theme files.',
          ['--overwrite-theme-files'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Keep existing theme files in the ``ui/<theme>`` directory; do not '
          'overwrite any. This is the default.',
          ['--keep-theme-files'],
          {'dest': 'overwrite_theme_files', 'action': 'store_false'}),
         ('Set the initial view mode to "slideshow" [default] or "outline".',
          ['--view-mode'],
          {'choices': ['slideshow', 'outline'], 'default': 'slideshow',
           'metavar': '<mode>'}),
         ('Normally hide the presentation controls in slideshow mode. '
          'This is the default.',
          ['--hidden-controls'],
          {'action': 'store_true', 'default': True,
           'validator': frontend.validate_boolean}),
         ('Always show the presentation controls in slideshow mode. '
          'The default is to hide the controls.',
          ['--visible-controls'],
          {'dest': 'hidden_controls', 'action': 'store_false'}),
         ('Enable the current slide indicator ("1 / 15"). '
          'The default is to disable it.',
          ['--current-slide'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Disable the current slide indicator. This is the default.',
          ['--no-current-slide'],
          {'dest': 'current_slide', 'action': 'store_false'}),))

    # --no-toc-backlinks is the default for this writer (see spec above).
    settings_default_overrides = {'toc_backlinks': 0}

    config_section = 's5_html writer'
    config_section_dependencies = ('writers', 'html4css1 writer')

    def __init__(self):
        html4css1.Writer.__init__(self)
        self.translator_class = S5HTMLTranslator
class S5HTMLTranslator(html4css1.HTMLTranslator):
s5_stylesheet_template = """\
<!-- configuration parameters -->
<meta name="defaultView" content="%(view_mode)s" />
<meta name="controlVis" content="%(control_visibility)s" />
<!-- style sheet links -->
<script src="%(path)s/slides.js" type="text/javascript"></script>
<link rel="stylesheet" href="%(path)s/slides.css"
type="text/css" media="projection" id="slideProj" />
<link rel="stylesheet" href="%(path)s/outline.css"
type="text/css" media="screen" id="outlineStyle" />
<link rel="stylesheet" href="%(path)s/print.css"
type="text/css" media="print" id="slidePrint" />
<link rel="stylesheet" href="%(path)s/opera.css"
type="text/css" media="projection" id="operaFix" />\n"""
# The script element must go in front of the link elements to
# avoid a flash of unstyled content (FOUC), reproducible with
# Firefox.
disable_current_slide = """
<style type="text/css">
#currentSlide {display: none;}
</style>\n"""
layout_template = """\
<div class="layout">
<div id="controls"></div>
<div id="currentSlide"></div>
<div id="header">
%(header)s
</div>
<div id="footer">
%(title)s%(footer)s
</div>
</div>\n"""
# <div class="topleft"></div>
# <div class="topright"></div>
# <div class="bottomleft"></div>
# <div class="bottomright"></div>
default_theme = 'default'
"""Name of the default theme."""
base_theme_file = '__base__'
"""Name of the file containing the name of the base theme."""
direct_theme_files = (
'slides.css', 'outline.css', 'print.css', 'opera.css', 'slides.js')
"""Names of theme files directly linked to in the output HTML"""
indirect_theme_files = (
's5-core.css', 'framing.css', 'pretty.css', 'blank.gif', 'iepngfix.htc')
"""Names of files used indirectly; imported or used by files in
`direct_theme_files`."""
required_theme_files = indirect_theme_files + direct_theme_files
"""Names of mandatory theme files."""
def __init__(self, *args):
html4css1.HTMLTranslator.__init__(self, *args)
#insert S5-specific stylesheet and script stuff:
self.theme_file_path = None
self.setup_theme()
view_mode = self.document.settings.view_mode
control_visibility = ('visible', 'hidden')[self.document.settings
.hidden_controls]
self.stylesheet.append(self.s5_stylesheet_template
% {'path': self.theme_file_path,
'view_mode': view_mode,
'control_visibility': control_visibility})
if not self.document.settings.current_slide:
self.stylesheet.append(self.disable_current_slide)
self.add_meta('<meta name="version" content="S5 1.1" />\n')
self.s5_footer = []
self.s5_header = []
self.section_count = 0
self.theme_files_copied = None
def setup_theme(self):
if self.document.settings.theme:
self.copy_theme()
elif self.document.settings.theme_url:
self.theme_file_path = self.document.settings.theme_url
else:
raise docutils.ApplicationError(
'No theme specified for S5/HTML writer.')
def copy_theme(self):
"""
Locate & copy theme files.
A theme may be explicitly based on another theme via a '__base__'
file. The default base theme is 'default'. Files are accumulated
from the specified theme, any base themes, and 'default'.
"""
settings = self.document.settings
path = find_theme(settings.theme)
theme_paths = [path]
self.theme_files_copied = {}
required_files_copied = {}
# This is a link (URL) in HTML, so we use "/", not os.sep:
self.theme_file_path = '%s/%s' % ('ui', settings.theme)
if settings._destination:
dest = os.path.join(
os.path.dirname(settings._destination), 'ui', settings.theme)
if not os.path.isdir(dest):
os.makedirs(dest)
else:
# no destination, so we can't copy the theme
return
default = 0
while path:
for f in os.listdir(path): # copy all files from each theme
if f == self.base_theme_file:
continue # ... except the "__base__" file
if ( self.copy_file(f, path, dest)
and f in self.required_theme_files):
required_files_copied[f] = 1
if default:
break # "default" theme has no base theme
# Find the "__base__" file in theme directory:
base_theme_file = os.path.join(path, self.base_theme_file)
# If it exists, read it and record the theme path:
if os.path.isfile(base_theme_file):
lines = open(base_theme_file).readlines()
for line in lines:
line = line.strip()
if line and not line.startswith('#'):
path = find_theme(line)
if path in theme_paths: # check for duplicates (cycles)
path = None # if found, use default base
else:
theme_paths.append(path)
break
else: # no theme name found
path = None # use default base
else: # no base theme file found
path = None # use default base
if not path:
path = find_theme(self.default_theme)
theme_paths.append(path)
default = 1
if len(required_files_copied) != len(self.required_theme_files):
# Some required files weren't found & couldn't be copied.
required = list(self.required_theme_files)
for f in required_files_copied.keys():
required.remove(f)
raise docutils.ApplicationError(
'Theme files not found: %s'
% ', '.join(['%r' % f for f in required]))
    # Editor backups / VCS metadata that must never be copied into the theme.
    files_to_skip_pattern = re.compile(r'~$|\.bak$|#$|\.cvsignore$')
    def copy_file(self, name, source_dir, dest_dir):
        """
        Copy file `name` from `source_dir` to `dest_dir`.
        Return 1 if the file exists in either `source_dir` or `dest_dir`.

        While copying, every occurrence of the byte string ``ui/default``
        is rewritten to point at the destination theme directory, so the
        copied theme references its own location.
        """
        source = os.path.join(source_dir, name)
        dest = os.path.join(dest_dir, name)
        # Each destination is processed at most once, even when several
        # (base) themes provide the same file name.
        if dest in self.theme_files_copied:
            return 1
        else:
            self.theme_files_copied[dest] = 1
        if os.path.isfile(source):
            if self.files_to_skip_pattern.search(source):
                return None
            settings = self.document.settings
            if os.path.exists(dest) and not settings.overwrite_theme_files:
                # Keep the existing copy; just record it as a dependency.
                settings.record_dependencies.add(dest)
            else:
                src_file = open(source, 'rb')
                src_data = src_file.read()
                src_file.close()
                dest_file = open(dest, 'wb')
                # URL-style path for the replacement, so use "/" not os.sep.
                dest_dir = dest_dir.replace(os.sep, '/')
                # NOTE(review): b() is presumably a bytes-compat helper
                # defined elsewhere in this file -- confirm.
                dest_file.write(src_data.replace(
                    b('ui/default'),
                    dest_dir[dest_dir.rfind('ui/'):].encode(
                        sys.getfilesystemencoding())))
                dest_file.close()
                settings.record_dependencies.add(source)
            return 1
        # Source missing but an earlier run left a copy: still a success.
        if os.path.isfile(dest):
            return 1
    def depart_document(self, node):
        """Assemble the final S5 slide show from the collected fragments."""
        header = ''.join(self.s5_header)
        footer = ''.join(self.s5_footer)
        # The title slide wants a plain <h1>, not docutils' classed one.
        title = ''.join(self.html_title).replace('<h1 class="title">', '<h1>')
        layout = self.layout_template % {'header': header,
                                         'title': title,
                                         'footer': footer}
        self.fragment.extend(self.body)
        self.body_prefix.extend(layout)
        self.body_prefix.append('<div class="presentation">\n')
        self.body_prefix.append(
            self.starttag({'classes': ['slide'], 'ids': ['slide0']}, 'div'))
        if not self.section_count:
            # No sections at all: nothing closed the title slide's div.
            self.body.append('</div>\n')
        self.body_suffix.insert(0, '</div>\n')
        # skip content-type meta tag with interpolated charset value:
        self.html_head.extend(self.head[1:])
        self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
                              + self.docinfo + self.body
                              + self.body_suffix[:-1])
def depart_footer(self, node):
start = self.context.pop()
self.s5_footer.append('<h2>')
self.s5_footer.extend(self.body[start:])
self.s5_footer.append('</h2>')
del self.body[start:]
def depart_header(self, node):
start = self.context.pop()
header = ['<div id="header">\n']
header.extend(self.body[start:])
header.append('\n</div>\n')
del self.body[start:]
self.s5_header.extend(header)
def visit_section(self, node):
if not self.section_count:
self.body.append('\n</div>\n')
self.section_count += 1
self.section_level += 1
if self.section_level > 1:
# dummy for matching div's
self.body.append(self.starttag(node, 'div', CLASS='section'))
else:
self.body.append(self.starttag(node, 'div', CLASS='slide'))
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.section):
level = self.section_level + self.initial_header_level - 1
if level == 1:
level = 2
tag = 'h%s' % level
self.body.append(self.starttag(node, tag, ''))
self.context.append('</%s>\n' % tag)
else:
html4css1.HTMLTranslator.visit_subtitle(self, node)
    def visit_title(self, node):
        # Plain delegation: S5 needs no special title handling here; the
        # title markup is post-processed later in depart_document.
        html4css1.HTMLTranslator.visit_title(self, node)
| Python |
# $Id: pygmentsformatter.py 5853 2009-01-19 21:02:02Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Additional support for Pygments formatter.
"""
import pygments
import pygments.formatter
class OdtPygmentsFormatter(pygments.formatter.Formatter):
    """Base Pygments formatter for the ODT writer.

    Holds the writer-supplied style-name mapper and text-escaping
    callables that subclasses use to turn Pygments tokens into ODF
    ``text:span`` markup.
    """
    def __init__(self, rststyle_function, escape_function):
        pygments.formatter.Formatter.__init__(self)
        self.rststyle_function = rststyle_function
        self.escape_function = escape_function
    def rststyle(self, name, parameters=()):
        """Resolve ``name`` to an output style via the writer's mapper."""
        return self.rststyle_function(name, parameters)
class OdtPygmentsProgFormatter(OdtPygmentsFormatter):
    """Pygments formatter for program-source code blocks.

    The original implementation repeated the ``text:span`` template in an
    eight-way ``if/elif`` chain; the chain is replaced by a token-type to
    style-name mapping.  Lookup is by exact token type (the chain compared
    with ``==`` / tuple membership, not subtype containment), so every
    Number subtype is listed explicitly.
    """
    def format(self, tokensource, outfile):
        """Write ``tokensource`` to ``outfile`` as ODF text:span markup."""
        tokenclass = pygments.token.Token
        style_map = {
            tokenclass.Keyword: 'codeblock-keyword',
            tokenclass.Literal.String: 'codeblock-string',
            tokenclass.Literal.Number.Integer: 'codeblock-number',
            tokenclass.Literal.Number.Integer.Long: 'codeblock-number',
            tokenclass.Literal.Number.Float: 'codeblock-number',
            tokenclass.Literal.Number.Hex: 'codeblock-number',
            tokenclass.Literal.Number.Oct: 'codeblock-number',
            tokenclass.Literal.Number: 'codeblock-number',
            tokenclass.Operator: 'codeblock-operator',
            tokenclass.Comment: 'codeblock-comment',
            tokenclass.Name.Class: 'codeblock-classname',
            tokenclass.Name.Function: 'codeblock-functionname',
            tokenclass.Name: 'codeblock-name',
        }
        for ttype, value in tokensource:
            value = self.escape_function(value)
            stylename = style_map.get(ttype)
            if stylename is None:
                # Unmapped token types pass through unstyled.
                s1 = value
            else:
                s1 = '<text:span text:style-name="%s">%s</text:span>' % (
                    self.rststyle(stylename), value, )
            outfile.write(s1)
class OdtPygmentsLaTeXFormatter(OdtPygmentsFormatter):
    """Pygments formatter for LaTeX source code blocks.

    Same de-duplication as OdtPygmentsProgFormatter: the repeated
    ``text:span`` template chain becomes a token-type to style-name
    mapping.  Exact-equality lookup preserves the original ``==`` /
    tuple-membership semantics.
    """
    def format(self, tokensource, outfile):
        """Write ``tokensource`` to ``outfile`` as ODF text:span markup."""
        tokenclass = pygments.token.Token
        style_map = {
            tokenclass.Keyword: 'codeblock-keyword',
            tokenclass.Literal.String: 'codeblock-string',
            tokenclass.Literal.String.Backtick: 'codeblock-string',
            tokenclass.Name.Attribute: 'codeblock-operator',
            tokenclass.Comment: 'codeblock-comment',
            tokenclass.Name.Builtin: 'codeblock-name',
        }
        for ttype, value in tokensource:
            value = self.escape_function(value)
            stylename = style_map.get(ttype)
            if stylename is None:
                # Unmapped token types pass through unstyled.
                s1 = value
            elif ttype == tokenclass.Comment and value[-1] == '\n':
                # Keep a comment's trailing newline outside the span.
                s1 = '<text:span text:style-name="%s">%s</text:span>\n' % (
                    self.rststyle(stylename), value[:-1], )
            else:
                s1 = '<text:span text:style-name="%s">%s</text:span>' % (
                    self.rststyle(stylename), value, )
            outfile.write(s1)
| Python |
# $Id: __init__.py 6325 2010-05-14 21:40:15Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Open Document Format (ODF) Writer.
"""
VERSION = '1.0a'
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import tempfile
import zipfile
from xml.dom import minidom
import time
import re
import StringIO
import inspect
import imp
import copy
import docutils
from docutils import frontend, nodes, utils, writers, languages
from docutils.parsers import rst
from docutils.readers import standalone
from docutils.transforms import references
# Select an ElementTree implementation.  lxml support is intentionally
# disabled: branch 1 raises ImportError unconditionally, so the standard
# library (or standalone) ElementTree is always used.
WhichElementTree = ''
try:
    # 1. Try to use lxml.
    #from lxml import etree
    #WhichElementTree = 'lxml'
    raise ImportError('Ignoring lxml')
except ImportError, e:
    try:
        # 2. Try to use ElementTree from the Python standard library.
        from xml.etree import ElementTree as etree
        WhichElementTree = 'elementtree'
    except ImportError, e:
        try:
            # 3. Try to use a version of ElementTree installed as a separate
            #    product.
            from elementtree import ElementTree as etree
            WhichElementTree = 'elementtree'
        except ImportError, e:
            s1 = 'Must install either a version of Python containing ' \
                'ElementTree (Python version >=2.5) or install ElementTree.'
            raise ImportError(s1)
#
# Import pygments and odtwriter pygments formatters if possible.
try:
import pygments
import pygments.lexers
from pygmentsformatter import OdtPygmentsProgFormatter, \
OdtPygmentsLaTeXFormatter
except ImportError, exp:
pygments = None
#
# Is the PIL imaging library installed?
try:
import Image
except ImportError, exp:
Image = None
## import warnings
## warnings.warn('importing IPShellEmbed', UserWarning)
## from IPython.Shell import IPShellEmbed
## args = ['-pdb', '-pi1', 'In <\\#>: ', '-pi2', ' .\\D.: ',
## '-po', 'Out<\\#>: ', '-nosep']
## ipshell = IPShellEmbed(args,
## banner = 'Entering IPython. Press Ctrl-D to exit.',
## exit_msg = 'Leaving Interpreter, back to program.')
#
# ElementTree does not support getparent method (lxml does).
# This wrapper class and the following support functions provide
# that support for the ability to get the parent of an element.
#
if WhichElementTree == 'elementtree':
    class _ElementInterfaceWrapper(etree._ElementInterface):
        """ElementTree element extended with lxml-style parent tracking."""
        def __init__(self, tag, attrib=None):
            # Normalize the default *before* handing attrib to the base
            # class.  The original applied the default afterwards, so the
            # base class could be initialized with attrib=None (the check
            # was dead code).
            if attrib is None:
                attrib = {}
            etree._ElementInterface.__init__(self, tag, attrib)
            self.parent = None
        def setparent(self, parent):
            self.parent = parent
        def getparent(self):
            return self.parent
#
# Constants and globals
# Whitespace-handling patterns for literal/code text.
SPACES_PATTERN = re.compile(r'( +)')
TABS_PATTERN = re.compile(r'(\t+)')
FILL_PAT1 = re.compile(r'^ +')
FILL_PAT2 = re.compile(r' {2,}')
# Prefix for generated table style names.
TableStylePrefix = 'Table'
GENERATOR_DESC = 'Docutils.org/odf_odt'
NAME_SPACE_1 = 'urn:oasis:names:tc:opendocument:xmlns:office:1.0'
# Namespace prefix -> URI maps (used as nsmap/nsdict with lxml).
CONTENT_NAMESPACE_DICT = CNSD = {
#    'office:version': '1.0',
    'chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
    'dc': 'http://purl.org/dc/elements/1.1/',
    'dom': 'http://www.w3.org/2001/xml-events',
    'dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
    'draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
    'fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
    'form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
    'math': 'http://www.w3.org/1998/Math/MathML',
    'meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
    'number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
    'office': NAME_SPACE_1,
    'ooo': 'http://openoffice.org/2004/office',
    'oooc': 'http://openoffice.org/2004/calc',
    'ooow': 'http://openoffice.org/2004/writer',
    'presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
    'script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
    'style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
    'svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
    'table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
    'text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
    'xforms': 'http://www.w3.org/2002/xforms',
    'xlink': 'http://www.w3.org/1999/xlink',
    'xsd': 'http://www.w3.org/2001/XMLSchema',
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    }
STYLES_NAMESPACE_DICT = SNSD = {
#    'office:version': '1.0',
    'chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
    'dc': 'http://purl.org/dc/elements/1.1/',
    'dom': 'http://www.w3.org/2001/xml-events',
    'dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
    'draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
    'fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
    'form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
    'math': 'http://www.w3.org/1998/Math/MathML',
    'meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
    'number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
    'office': NAME_SPACE_1,
    'presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
    'ooo': 'http://openoffice.org/2004/office',
    'oooc': 'http://openoffice.org/2004/calc',
    'ooow': 'http://openoffice.org/2004/writer',
    'script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
    'style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
    'svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
    'table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
    'text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
    'xlink': 'http://www.w3.org/1999/xlink',
    }
MANIFEST_NAMESPACE_DICT = MANNSD = {
    'manifest': 'urn:oasis:names:tc:opendocument:xmlns:manifest:1.0',
    }
META_NAMESPACE_DICT = METNSD = {
#    'office:version': '1.0',
    'dc': 'http://purl.org/dc/elements/1.1/',
    'meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
    'office': NAME_SPACE_1,
    'ooo': 'http://openoffice.org/2004/office',
    'xlink': 'http://www.w3.org/1999/xlink',
    }
#
# Attribute dictionaries for use with ElementTree (not lxml), which
# does not support use of nsmap parameter on Element() and SubElement().
CONTENT_NAMESPACE_ATTRIB = {
    'office:version': '1.0',
    'xmlns:chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
    'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
    'xmlns:dom': 'http://www.w3.org/2001/xml-events',
    'xmlns:dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
    'xmlns:draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
    'xmlns:fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
    'xmlns:form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
    'xmlns:math': 'http://www.w3.org/1998/Math/MathML',
    'xmlns:meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
    'xmlns:number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
    'xmlns:office': NAME_SPACE_1,
    'xmlns:presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
    'xmlns:ooo': 'http://openoffice.org/2004/office',
    'xmlns:oooc': 'http://openoffice.org/2004/calc',
    'xmlns:ooow': 'http://openoffice.org/2004/writer',
    'xmlns:script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
    'xmlns:style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
    'xmlns:svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
    'xmlns:table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
    'xmlns:text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
    'xmlns:xforms': 'http://www.w3.org/2002/xforms',
    'xmlns:xlink': 'http://www.w3.org/1999/xlink',
    'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema',
    'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    }
STYLES_NAMESPACE_ATTRIB = {
    'office:version': '1.0',
    'xmlns:chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
    'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
    'xmlns:dom': 'http://www.w3.org/2001/xml-events',
    'xmlns:dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
    'xmlns:draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
    'xmlns:fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
    'xmlns:form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
    'xmlns:math': 'http://www.w3.org/1998/Math/MathML',
    'xmlns:meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
    'xmlns:number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
    'xmlns:office': NAME_SPACE_1,
    'xmlns:presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
    'xmlns:ooo': 'http://openoffice.org/2004/office',
    'xmlns:oooc': 'http://openoffice.org/2004/calc',
    'xmlns:ooow': 'http://openoffice.org/2004/writer',
    'xmlns:script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
    'xmlns:style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
    'xmlns:svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
    'xmlns:table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
    'xmlns:text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
    'xmlns:xlink': 'http://www.w3.org/1999/xlink',
    }
MANIFEST_NAMESPACE_ATTRIB = {
    'xmlns:manifest': 'urn:oasis:names:tc:opendocument:xmlns:manifest:1.0',
    }
META_NAMESPACE_ATTRIB = {
    'office:version': '1.0',
    'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
    'xmlns:meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
    'xmlns:office': NAME_SPACE_1,
    'xmlns:ooo': 'http://openoffice.org/2004/office',
    'xmlns:xlink': 'http://www.w3.org/1999/xlink',
    }
#
# Functions
#
#
# ElementTree support functions.
# In order to be able to get the parent of elements, must use these
# instead of the functions with same name provided by ElementTree.
#
def Element(tag, attrib=None, nsmap=None, nsdict=CNSD):
    """Create a namespace-qualified element.

    With ElementTree, a parent-tracking wrapper element is returned in
    place of a plain element (nsmap is ignored in that case).
    """
    attrib = {} if attrib is None else attrib
    tag, attrib = fix_ns(tag, attrib, nsdict)
    if WhichElementTree == 'lxml':
        return etree.Element(tag, attrib, nsmap=nsmap)
    return _ElementInterfaceWrapper(tag, attrib)
def SubElement(parent, tag, attrib=None, nsmap=None, nsdict=CNSD):
    """Create a namespace-qualified child of ``parent``.

    With ElementTree the child is a parent-tracking wrapper and its
    parent link is recorded explicitly.
    """
    attrib = {} if attrib is None else attrib
    tag, attrib = fix_ns(tag, attrib, nsdict)
    if WhichElementTree == 'lxml':
        return etree.SubElement(parent, tag, attrib, nsmap=nsmap)
    child = _ElementInterfaceWrapper(tag, attrib)
    parent.append(child)
    child.setparent(parent)
    return child
def fix_ns(tag, attrib, nsdict):
    """Return (tag, attrib) with namespace prefixes expanded via add_ns."""
    fixed_attrib = {}
    for key in attrib:
        fixed_attrib[add_ns(key, nsdict)] = attrib[key]
    return add_ns(tag, nsdict), fixed_attrib
def add_ns(tag, nsdict=CNSD):
    """Expand a ``prefix:name`` tag to Clark notation when using lxml.

    With ElementTree the tag is returned unchanged.  Raises RuntimeError
    for an unknown namespace prefix.
    """
    if WhichElementTree == 'lxml':
        nstag, name = tag.split(':')
        ns = nsdict.get(nstag)
        if ns is None:
            # Parenthesized raise works on both Python 2 and 3; the old
            # ``raise E, msg`` form is Python-2-only syntax.
            raise RuntimeError('Invalid namespace prefix: %s' % nstag)
        tag = '{%s}%s' % (ns, name,)
    return tag
def ToString(et):
    """Serialize ElementTree ``et`` to a string via an in-memory buffer."""
    buf = StringIO.StringIO()
    et.write(buf)
    serialized = buf.getvalue()
    buf.close()
    return serialized
def escape_cdata(text):
    """Escape ``text`` for inclusion as XML character data.

    ``&``, ``<`` and ``>`` become the predefined XML entity references,
    and every character at or above U+007F is emitted as a numeric
    character reference, so the result is pure-ASCII XML text.

    The original body had been garbled into no-op replacements
    (``replace("&", "&")``) and accumulated the result with quadratic
    string concatenation; both are fixed here.
    """
    text = text.replace("&", "&amp;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    # Build the ASCII result via a list and join (O(n), not O(n^2)).
    chunks = []
    for char in text:
        if ord(char) >= 0x7f:
            chunks.append("&#x%X;" % (ord(char), ))
        else:
            chunks.append(char)
    return ''.join(chunks)
#
# Classes
#
WORD_SPLIT_PAT1 = re.compile(r'\b(\w*)\b\W*')
def split_words(line):
    """Split ``line`` into a list of words (e.g. for meta keywords)."""
    # A trailing space guarantees the final word is terminated for the
    # regular expression.
    line += ' '
    words = []
    pos = 0
    while True:
        match = WORD_SPLIT_PAT1.search(line, pos)
        if match is None:
            break
        words.append(match.group(1))
        pos = match.end()
    return words
#
# Information about the indentation level for lists nested inside
# other contexts, e.g. dictionary lists.
class ListLevel(object):
    """Indentation bookkeeping for lists nested inside other contexts.

    Tracks the current list level together with two flags controlling
    sibling-level and nested-level indentation behaviour.
    """
    def __init__(self, level, sibling_level=True, nested_level=True):
        self.level = level
        self.sibling_level = sibling_level
        self.nested_level = nested_level
    def set_sibling(self, sibling_level):
        self.sibling_level = sibling_level
    def get_sibling(self):
        return self.sibling_level
    def set_nested(self, nested_level):
        self.nested_level = nested_level
    def get_nested(self):
        return self.nested_level
    def set_level(self, level):
        self.level = level
    def get_level(self):
        return self.level
class Writer(writers.Writer):
    """Docutils writer that assembles a complete ODF text (.odt) zip."""
    MIME_TYPE = 'application/vnd.oasis.opendocument.text'
    EXTENSION = '.odt'
    supported = ('odt', )
    """Formats this writer supports."""
    default_stylesheet = 'styles' + EXTENSION
    default_stylesheet_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_stylesheet))
    default_template = 'template.txt'
    default_template_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_template))
    settings_spec = (
        'ODF-Specific Options',
        None,
        (
        ('Specify a stylesheet. '
            'Default: "%s"' % default_stylesheet_path,
            ['--stylesheet'],
            {
                'default': default_stylesheet_path,
                'dest': 'stylesheet'
                }),
        ('Specify a configuration/mapping file relative to the '
            'current working '
            'directory for additional ODF options. '
            'In particular, this file may contain a section named '
            '"Formats" that maps default style names to '
            'names to be used in the resulting output file allowing for '
            'adhering to external standards. '
            'For more info and the format of the configuration/mapping file, '
            'see the odtwriter doc.',
            ['--odf-config-file'],
            {'metavar': '<file>'}),
        ('Obfuscate email addresses to confuse harvesters while still '
            'keeping email links usable with standards-compliant browsers.',
            ['--cloak-email-addresses'],
            {'default': False,
                'action': 'store_true',
                'dest': 'cloak_email_addresses',
                'validator': frontend.validate_boolean}),
        ('Do not obfuscate email addresses.',
            ['--no-cloak-email-addresses'],
            {'default': False,
                'action': 'store_false',
                'dest': 'cloak_email_addresses',
                'validator': frontend.validate_boolean}),
        ('Specify the thickness of table borders in thousands of a cm. '
            'Default is 35.',
            ['--table-border-thickness'],
            {'default': 35,
                'validator': frontend.validate_nonnegative_int}),
        ('Add syntax highlighting in literal code blocks.',
            ['--add-syntax-highlighting'],
            {'default': False,
                'action': 'store_true',
                'dest': 'add_syntax_highlighting',
                'validator': frontend.validate_boolean}),
        ('Do not add syntax highlighting in literal code blocks. (default)',
            ['--no-syntax-highlighting'],
            {'default': False,
                'action': 'store_false',
                'dest': 'add_syntax_highlighting',
                'validator': frontend.validate_boolean}),
        ('Create sections for headers. (default)',
            ['--create-sections'],
            {'default': True,
                'action': 'store_true',
                'dest': 'create_sections',
                'validator': frontend.validate_boolean}),
        ('Do not create sections for headers.',
            ['--no-sections'],
            {'default': True,
                'action': 'store_false',
                'dest': 'create_sections',
                'validator': frontend.validate_boolean}),
        ('Create links.',
            ['--create-links'],
            {'default': False,
                'action': 'store_true',
                'dest': 'create_links',
                'validator': frontend.validate_boolean}),
        ('Do not create links. (default)',
            ['--no-links'],
            {'default': False,
                'action': 'store_false',
                'dest': 'create_links',
                'validator': frontend.validate_boolean}),
        ('Generate endnotes at end of document, not footnotes '
            'at bottom of page.',
            ['--endnotes-end-doc'],
            {'default': False,
                'action': 'store_true',
                'dest': 'endnotes_end_doc',
                'validator': frontend.validate_boolean}),
        ('Generate footnotes at bottom of page, not endnotes '
            'at end of document. (default)',
            ['--no-endnotes-end-doc'],
            {'default': False,
                'action': 'store_false',
                'dest': 'endnotes_end_doc',
                'validator': frontend.validate_boolean}),
        ('Generate a bullet list table of contents, not '
            'an ODF/oowriter table of contents.',
            ['--generate-list-toc'],
            {'default': True,
                'action': 'store_false',
                'dest': 'generate_oowriter_toc',
                'validator': frontend.validate_boolean}),
        ('Generate an ODF/oowriter table of contents, not '
            'a bullet list. (default)',
            ['--generate-oowriter-toc'],
            {'default': True,
                'action': 'store_true',
                'dest': 'generate_oowriter_toc',
                'validator': frontend.validate_boolean}),
        ('Specify the contents of an custom header line. '
            'See odf_odt writer documentation for details '
            'about special field character sequences.',
            ['--custom-odt-header'],
            { 'default': '',
                'dest': 'custom_header',
                }),
        ('Specify the contents of an custom footer line. '
            'See odf_odt writer documentation for details '
            'about special field character sequences.',
            ['--custom-odt-footer'],
            { 'default': '',
                'dest': 'custom_footer',
                }),
        )
    )
    settings_defaults = {
        'output_encoding_error_handler': 'xmlcharrefreplace',
        }
    relative_path_settings = (
        'stylesheet_path',
        )
    config_section = 'opendocument odf writer'
    config_section_dependencies = (
        'writers',
        )
    def __init__(self):
        writers.Writer.__init__(self)
        self.translator_class = ODFTranslator
    def translate(self):
        """Walk the document tree and assemble the finished .odt bytes."""
        self.settings = self.document.settings
        self.visitor = self.translator_class(self.document)
        self.document.walkabout(self.visitor)
        self.visitor.add_doc_title()
        self.assemble_my_parts()
        self.output = self.parts['whole']
    def assemble_my_parts(self):
        """Assemble the `self.parts` dictionary.  Extend in subclasses.

        Builds the zip archive (content, mimetype, manifest, meta,
        styles, settings, embedded files) in a temporary file and stores
        the whole archive as ``self.parts['whole']``.
        """
        writers.Writer.assemble_parts(self)
        f = tempfile.NamedTemporaryFile()
        zfile = zipfile.ZipFile(f, 'w', zipfile.ZIP_DEFLATED)
        content = self.visitor.content_astext()
        self.write_zip_str(zfile, 'content.xml', content)
        self.write_zip_str(zfile, 'mimetype', self.MIME_TYPE)
        s1 = self.create_manifest()
        self.write_zip_str(zfile, 'META-INF/manifest.xml', s1)
        s1 = self.create_meta()
        self.write_zip_str(zfile, 'meta.xml', s1)
        s1 = self.get_stylesheet()
        self.write_zip_str(zfile, 'styles.xml', s1)
        s1 = self.get_settings()
        self.write_zip_str(zfile, 'settings.xml', s1)
        self.store_embedded_files(zfile)
        zfile.close()
        f.seek(0)
        whole = f.read()
        f.close()
        self.parts['whole'] = whole
        self.parts['encoding'] = self.document.settings.output_encoding
        self.parts['version'] = docutils.__version__
    def write_zip_str(self, zfile, name, bytes):
        """Add ``bytes`` to ``zfile`` as archive member ``name``."""
        localtime = time.localtime(time.time())
        zinfo = zipfile.ZipInfo(name, localtime)
        # Add some standard UNIX file access permissions (-rw-r--r--).
        zinfo.external_attr = (0x81a4 & 0xFFFF) << 16L
        zinfo.compress_type = zipfile.ZIP_DEFLATED
        zfile.writestr(zinfo, bytes)
    def store_embedded_files(self, zfile):
        """Copy images etc. collected by the translator into the archive."""
        embedded_files = self.visitor.get_embedded_file_list()
        for source, destination in embedded_files:
            if source is None:
                continue
            try:
                # encode/decode
                destination1 = destination.decode('latin-1').encode('utf-8')
                zfile.write(source, destination1, zipfile.ZIP_STORED)
            except OSError, e:
                # Best effort: a missing file produces a warning, not a crash.
                self.document.reporter.warning(
                    "Can't open file %s." % (source, ))
    def get_settings(self):
        """Extract settings.xml from the stylesheet .odt archive.

        Modeled after get_stylesheet; the stylesheet is assumed to be a
        zip (.odt) here.
        """
        stylespath = self.settings.stylesheet
        zfile = zipfile.ZipFile(stylespath, 'r')
        s1 = zfile.read('settings.xml')
        zfile.close()
        return s1
    def get_stylesheet(self):
        """Retrieve the stylesheet from either a .xml file or from
        a .odt (zip) file.  Return the content as a string.
        """
        stylespath = self.settings.stylesheet
        ext = os.path.splitext(stylespath)[1]
        if ext == '.xml':
            stylesfile = open(stylespath, 'r')
            s1 = stylesfile.read()
            stylesfile.close()
        elif ext == self.EXTENSION:
            zfile = zipfile.ZipFile(stylespath, 'r')
            s1 = zfile.read('styles.xml')
            zfile.close()
        else:
            raise RuntimeError, 'stylesheet path (%s) must be %s or .xml file' %(stylespath, self.EXTENSION)
        # Let the translator patch in page geometry and headers/footers.
        s1 = self.visitor.setup_page(s1)
        return s1
    def assemble_parts(self):
        # Deliberately disabled: parts are assembled by assemble_my_parts,
        # which this writer calls itself from translate().
        pass
    def create_manifest(self):
        """Return the META-INF/manifest.xml content as a pretty XML string.

        NOTE(review): settings.xml is written into the archive by
        assemble_my_parts but is not listed here -- most consumers
        tolerate this; confirm against the ODF manifest requirements.
        """
        if WhichElementTree == 'lxml':
            root = Element('manifest:manifest',
                nsmap=MANIFEST_NAMESPACE_DICT,
                nsdict=MANIFEST_NAMESPACE_DICT,
                )
        else:
            root = Element('manifest:manifest',
                attrib=MANIFEST_NAMESPACE_ATTRIB,
                nsdict=MANIFEST_NAMESPACE_DICT,
                )
        doc = etree.ElementTree(root)
        SubElement(root, 'manifest:file-entry', attrib={
            'manifest:media-type': self.MIME_TYPE,
            'manifest:full-path': '/',
            }, nsdict=MANNSD)
        SubElement(root, 'manifest:file-entry', attrib={
            'manifest:media-type': 'text/xml',
            'manifest:full-path': 'content.xml',
            }, nsdict=MANNSD)
        SubElement(root, 'manifest:file-entry', attrib={
            'manifest:media-type': 'text/xml',
            'manifest:full-path': 'styles.xml',
            }, nsdict=MANNSD)
        SubElement(root, 'manifest:file-entry', attrib={
            'manifest:media-type': 'text/xml',
            'manifest:full-path': 'meta.xml',
            }, nsdict=MANNSD)
        s1 = ToString(doc)
        doc = minidom.parseString(s1)
        s1 = doc.toprettyxml('  ')
        return s1
    def create_meta(self):
        """Return the meta.xml content (creator, dates, title, keywords)."""
        if WhichElementTree == 'lxml':
            root = Element('office:document-meta',
                nsmap=META_NAMESPACE_DICT,
                nsdict=META_NAMESPACE_DICT,
                )
        else:
            root = Element('office:document-meta',
                attrib=META_NAMESPACE_ATTRIB,
                nsdict=META_NAMESPACE_DICT,
                )
        doc = etree.ElementTree(root)
        root = SubElement(root, 'office:meta', nsdict=METNSD)
        el1 = SubElement(root, 'meta:generator', nsdict=METNSD)
        el1.text = 'Docutils/rst2odf.py/%s' % (VERSION, )
        s1 = os.environ.get('USER', '')
        el1 = SubElement(root, 'meta:initial-creator', nsdict=METNSD)
        el1.text = s1
        s2 = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime())
        el1 = SubElement(root, 'meta:creation-date', nsdict=METNSD)
        el1.text = s2
        el1 = SubElement(root, 'dc:creator', nsdict=METNSD)
        el1.text = s1
        el1 = SubElement(root, 'dc:date', nsdict=METNSD)
        el1.text = s2
        el1 = SubElement(root, 'dc:language', nsdict=METNSD)
        el1.text = 'en-US'
        el1 = SubElement(root, 'meta:editing-cycles', nsdict=METNSD)
        el1.text = '1'
        el1 = SubElement(root, 'meta:editing-duration', nsdict=METNSD)
        el1.text = 'PT00M01S'
        title = self.visitor.get_title()
        el1 = SubElement(root, 'dc:title', nsdict=METNSD)
        if title:
            el1.text = title
        else:
            el1.text = '[no title]'
        # Propagate keywords/description collected from meta directives.
        meta_dict = self.visitor.get_meta_dict()
        keywordstr = meta_dict.get('keywords')
        if keywordstr is not None:
            keywords = split_words(keywordstr)
            for keyword in keywords:
                el1 = SubElement(root, 'meta:keyword', nsdict=METNSD)
                el1.text = keyword
        description = meta_dict.get('description')
        if description is not None:
            el1 = SubElement(root, 'dc:description', nsdict=METNSD)
            el1.text = description
        s1 = ToString(doc)
        #doc = minidom.parseString(s1)
        #s1 = doc.toprettyxml('  ')
        return s1
# class ODFTranslator(nodes.SparseNodeVisitor):
class ODFTranslator(nodes.GenericNodeVisitor):
used_styles = (
'attribution', 'blockindent', 'blockquote', 'blockquote-bulletitem',
'blockquote-bulletlist', 'blockquote-enumitem', 'blockquote-enumlist',
'bulletitem', 'bulletlist',
'caption', 'legend',
'centeredtextbody', 'codeblock',
'codeblock-classname', 'codeblock-comment', 'codeblock-functionname',
'codeblock-keyword', 'codeblock-name', 'codeblock-number',
'codeblock-operator', 'codeblock-string', 'emphasis', 'enumitem',
'enumlist', 'epigraph', 'epigraph-bulletitem', 'epigraph-bulletlist',
'epigraph-enumitem', 'epigraph-enumlist', 'footer',
'footnote', 'citation',
'header', 'highlights', 'highlights-bulletitem',
'highlights-bulletlist', 'highlights-enumitem', 'highlights-enumlist',
'horizontalline', 'inlineliteral', 'quotation', 'rubric',
'strong', 'table-title', 'textbody', 'tocbulletlist', 'tocenumlist',
'title',
'subtitle',
'heading1',
'heading2',
'heading3',
'heading4',
'heading5',
'heading6',
'heading7',
'admon-attention-hdr',
'admon-attention-body',
'admon-caution-hdr',
'admon-caution-body',
'admon-danger-hdr',
'admon-danger-body',
'admon-error-hdr',
'admon-error-body',
'admon-generic-hdr',
'admon-generic-body',
'admon-hint-hdr',
'admon-hint-body',
'admon-important-hdr',
'admon-important-body',
'admon-note-hdr',
'admon-note-body',
'admon-tip-hdr',
'admon-tip-body',
'admon-warning-hdr',
'admon-warning-body',
'tableoption',
'tableoption.%c', 'tableoption.%c%d', 'Table%d', 'Table%d.%c',
'Table%d.%c%d',
'lineblock1',
'lineblock2',
'lineblock3',
'lineblock4',
'lineblock5',
'lineblock6',
'image', 'figureframe',
)
def __init__(self, document):
#nodes.SparseNodeVisitor.__init__(self, document)
nodes.GenericNodeVisitor.__init__(self, document)
self.settings = document.settings
self.format_map = { }
if self.settings.odf_config_file:
from ConfigParser import ConfigParser
parser = ConfigParser()
parser.read(self.settings.odf_config_file)
for rststyle, format in parser.items("Formats"):
if rststyle not in self.used_styles:
self.document.reporter.warning(
'Style "%s" is not a style used by odtwriter.' % (
rststyle, ))
self.format_map[rststyle] = format
self.section_level = 0
self.section_count = 0
# Create ElementTree content and styles documents.
if WhichElementTree == 'lxml':
root = Element(
'office:document-content',
nsmap=CONTENT_NAMESPACE_DICT,
)
else:
root = Element(
'office:document-content',
attrib=CONTENT_NAMESPACE_ATTRIB,
)
self.content_tree = etree.ElementTree(element=root)
self.current_element = root
SubElement(root, 'office:scripts')
SubElement(root, 'office:font-face-decls')
el = SubElement(root, 'office:automatic-styles')
self.automatic_styles = el
el = SubElement(root, 'office:body')
el = self.generate_content_element(el)
self.current_element = el
self.body_text_element = el
self.paragraph_style_stack = [self.rststyle('textbody'), ]
self.list_style_stack = []
self.table_count = 0
self.column_count = ord('A') - 1
self.trace_level = -1
self.optiontablestyles_generated = False
self.field_name = None
self.field_element = None
self.title = None
self.image_count = 0
self.image_style_count = 0
self.image_dict = {}
self.embedded_file_list = []
self.syntaxhighlighting = 1
self.syntaxhighlight_lexer = 'python'
self.header_content = []
self.footer_content = []
self.in_header = False
self.in_footer = False
self.blockstyle = ''
self.in_table_of_contents = False
self.table_of_content_index_body = None
self.list_level = 0
self.footnote_ref_dict = {}
self.footnote_list = []
self.footnote_chars_idx = 0
self.footnote_level = 0
self.pending_ids = [ ]
self.in_paragraph = False
self.found_doc_title = False
self.bumped_list_level_stack = []
self.meta_dict = {}
self.line_block_level = 0
self.line_indent_level = 0
self.citation_id = None
self.style_index = 0 # use to form unique style names
def add_doc_title(self):
text = self.settings.title
if text:
self.title = text
if not self.found_doc_title:
el = Element('text:p', attrib = {
'text:style-name': self.rststyle('title'),
})
el.text = text
self.body_text_element.insert(0, el)
def rststyle(self, name, parameters=( )):
"""
Returns the style name to use for the given style.
If `parameters` is given `name` must contain a matching number of ``%`` and
is used as a format expression with `parameters` as the value.
"""
name1 = name % parameters
stylename = self.format_map.get(name1, 'rststyle-%s' % name1)
return stylename
def generate_content_element(self, root):
return SubElement(root, 'office:text')
def setup_page(self, content):
root_el = etree.fromstring(content)
self.setup_paper(root_el)
if (len(self.header_content) > 0 or len(self.footer_content) > 0 or
self.settings.custom_header or self.settings.custom_footer):
self.add_header_footer(root_el)
new_content = etree.tostring(root_el)
return new_content
def setup_paper(self, root_el):
try:
fin = os.popen("paperconf -s 2> /dev/null")
w, h = map(float, fin.read().split())
fin.close()
except:
w, h = 612, 792 # default to Letter
def walk(el):
if el.tag == "{%s}page-layout-properties" % SNSD["style"] and \
not el.attrib.has_key("{%s}page-width" % SNSD["fo"]):
el.attrib["{%s}page-width" % SNSD["fo"]] = "%.3fpt" % w
el.attrib["{%s}page-height" % SNSD["fo"]] = "%.3fpt" % h
el.attrib["{%s}margin-left" % SNSD["fo"]] = \
el.attrib["{%s}margin-right" % SNSD["fo"]] = \
"%.3fpt" % (.1 * w)
el.attrib["{%s}margin-top" % SNSD["fo"]] = \
el.attrib["{%s}margin-bottom" % SNSD["fo"]] = \
"%.3fpt" % (.1 * h)
else:
for subel in el.getchildren(): walk(subel)
walk(root_el)
def add_header_footer(self, root_el):
automatic_styles = root_el.find(
'{%s}automatic-styles' % SNSD['office'])
path = '{%s}master-styles' % (NAME_SPACE_1, )
master_el = root_el.find(path)
if master_el is None:
return
path = '{%s}master-page' % (SNSD['style'], )
master_el = master_el.find(path)
if master_el is None:
return
el1 = master_el
if self.header_content or self.settings.custom_header:
if WhichElementTree == 'lxml':
el2 = SubElement(el1, 'style:header', nsdict=SNSD)
else:
el2 = SubElement(el1, 'style:header',
attrib=STYLES_NAMESPACE_ATTRIB,
nsdict=STYLES_NAMESPACE_DICT,
)
for el in self.header_content:
attrkey = add_ns('text:style-name', nsdict=SNSD)
el.attrib[attrkey] = self.rststyle('header')
el2.append(el)
if self.settings.custom_header:
elcustom = self.create_custom_headfoot(el2,
self.settings.custom_header, 'header', automatic_styles)
if self.footer_content or self.settings.custom_footer:
if WhichElementTree == 'lxml':
el2 = SubElement(el1, 'style:footer', nsdict=SNSD)
else:
el2 = SubElement(el1, 'style:footer',
attrib=STYLES_NAMESPACE_ATTRIB,
nsdict=STYLES_NAMESPACE_DICT,
)
for el in self.footer_content:
attrkey = add_ns('text:style-name', nsdict=SNSD)
el.attrib[attrkey] = self.rststyle('footer')
el2.append(el)
if self.settings.custom_footer:
elcustom = self.create_custom_headfoot(el2,
self.settings.custom_footer, 'footer', automatic_styles)
code_none, code_field, code_text = range(3)
field_pat = re.compile(r'%(..?)%')
    def create_custom_headfoot(self, parent, text, style_name, automatic_styles):
        """Build header/footer content from a custom header/footer spec.

        `text` is split into literal runs and %xx% field codes; each
        field becomes the matching ODF field element, literals become
        plain text (as element text or as the tail of the preceding
        field element).  Raises RuntimeError on an unknown field code.
        """
        current_element = None
        field_iter = self.split_field_specifiers_iter(text)
        for item in field_iter:
            if item[0] == ODFTranslator.code_field:
                if item[1] not in ('p', 'P',
                    't1', 't2', 't3', 't4',
                    'd1', 'd2', 'd3', 'd4', 'd5',
                    's', 't', 'a'):
                    msg = 'bad field spec: %%%s%%' % (item[1], )
                    raise RuntimeError, msg
                if current_element is None:
                    # First item: open the paragraph that will hold the
                    # header/footer content.
                    parent = SubElement(parent, 'text:p', attrib={
                        'text:style-name': self.rststyle(style_name),
                        })
                el1 = self.make_field_element(parent,
                    item[1], style_name, automatic_styles)
                if el1 is None:
                    msg = 'bad field spec: %%%s%%' % (item[1], )
                    raise RuntimeError, msg
                else:
                    current_element = el1
            else:
                if current_element is None:
                    parent = SubElement(parent, 'text:p', attrib={
                        'text:style-name': self.rststyle(style_name),
                        })
                    parent.text = item[1]
                else:
                    # Literal text after a field goes in its tail.
                    current_element.tail = item[1]
def make_field_element(self, parent, text, style_name, automatic_styles):
if text == 'p':
el1 = SubElement(parent, 'text:page-number', attrib={
'text:style-name': self.rststyle(style_name),
'text:select-page': 'current',
})
elif text == 'P':
el1 = SubElement(parent, 'text:page-count', attrib={
'text:style-name': self.rststyle(style_name),
})
elif text == 't1':
self.style_index += 1
el1 = SubElement(parent, 'text:time', attrib={
'text:style-name': self.rststyle(style_name),
'text:fixed': 'true',
'style:data-style-name': 'rst-time-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:time-style', attrib={
'style:name': 'rst-time-style-%d' % self.style_index,
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:hours', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:minutes', attrib={
'number:style': 'long',
})
elif text == 't2':
self.style_index += 1
el1 = SubElement(parent, 'text:time', attrib={
'text:style-name': self.rststyle(style_name),
'text:fixed': 'true',
'style:data-style-name': 'rst-time-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:time-style', attrib={
'style:name': 'rst-time-style-%d' % self.style_index,
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:hours', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:minutes', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:seconds', attrib={
'number:style': 'long',
})
elif text == 't3':
self.style_index += 1
el1 = SubElement(parent, 'text:time', attrib={
'text:style-name': self.rststyle(style_name),
'text:fixed': 'true',
'style:data-style-name': 'rst-time-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:time-style', attrib={
'style:name': 'rst-time-style-%d' % self.style_index,
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:hours', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:minutes', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ' '
el3 = SubElement(el2, 'number:am-pm')
elif text == 't4':
self.style_index += 1
el1 = SubElement(parent, 'text:time', attrib={
'text:style-name': self.rststyle(style_name),
'text:fixed': 'true',
'style:data-style-name': 'rst-time-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:time-style', attrib={
'style:name': 'rst-time-style-%d' % self.style_index,
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:hours', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:minutes', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:seconds', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ' '
el3 = SubElement(el2, 'number:am-pm')
elif text == 'd1':
self.style_index += 1
el1 = SubElement(parent, 'text:date', attrib={
'text:style-name': self.rststyle(style_name),
'style:data-style-name': 'rst-date-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:date-style', attrib={
'style:name': 'rst-date-style-%d' % self.style_index,
'number:automatic-order': 'true',
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:month', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '/'
el3 = SubElement(el2, 'number:day', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '/'
el3 = SubElement(el2, 'number:year')
elif text == 'd2':
self.style_index += 1
el1 = SubElement(parent, 'text:date', attrib={
'text:style-name': self.rststyle(style_name),
'style:data-style-name': 'rst-date-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:date-style', attrib={
'style:name': 'rst-date-style-%d' % self.style_index,
'number:automatic-order': 'true',
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:month', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '/'
el3 = SubElement(el2, 'number:day', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '/'
el3 = SubElement(el2, 'number:year', attrib={
'number:style': 'long',
})
elif text == 'd3':
self.style_index += 1
el1 = SubElement(parent, 'text:date', attrib={
'text:style-name': self.rststyle(style_name),
'style:data-style-name': 'rst-date-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:date-style', attrib={
'style:name': 'rst-date-style-%d' % self.style_index,
'number:automatic-order': 'true',
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:month', attrib={
'number:textual': 'true',
})
el3 = SubElement(el2, 'number:text')
el3.text = ' '
el3 = SubElement(el2, 'number:day', attrib={
})
el3 = SubElement(el2, 'number:text')
el3.text = ', '
el3 = SubElement(el2, 'number:year', attrib={
'number:style': 'long',
})
elif text == 'd4':
self.style_index += 1
el1 = SubElement(parent, 'text:date', attrib={
'text:style-name': self.rststyle(style_name),
'style:data-style-name': 'rst-date-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:date-style', attrib={
'style:name': 'rst-date-style-%d' % self.style_index,
'number:automatic-order': 'true',
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:month', attrib={
'number:textual': 'true',
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ' '
el3 = SubElement(el2, 'number:day', attrib={
})
el3 = SubElement(el2, 'number:text')
el3.text = ', '
el3 = SubElement(el2, 'number:year', attrib={
'number:style': 'long',
})
elif text == 'd5':
self.style_index += 1
el1 = SubElement(parent, 'text:date', attrib={
'text:style-name': self.rststyle(style_name),
'style:data-style-name': 'rst-date-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:date-style', attrib={
'style:name': 'rst-date-style-%d' % self.style_index,
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:year', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '-'
el3 = SubElement(el2, 'number:month', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '-'
el3 = SubElement(el2, 'number:day', attrib={
'number:style': 'long',
})
elif text == 's':
el1 = SubElement(parent, 'text:subject', attrib={
'text:style-name': self.rststyle(style_name),
})
elif text == 't':
el1 = SubElement(parent, 'text:title', attrib={
'text:style-name': self.rststyle(style_name),
})
elif text == 'a':
el1 = SubElement(parent, 'text:author-name', attrib={
'text:fixed': 'false',
})
else:
el1 = None
return el1
def split_field_specifiers_iter(self, text):
pos1 = 0
pos_end = len(text)
while True:
mo = ODFTranslator.field_pat.search(text, pos1)
if mo:
pos2 = mo.start()
if pos2 > pos1:
yield (ODFTranslator.code_text, text[pos1:pos2])
yield (ODFTranslator.code_field, mo.group(1))
pos1 = mo.end()
else:
break
trailing = text[pos1:]
if trailing:
yield (ODFTranslator.code_text, trailing)
def astext(self):
root = self.content_tree.getroot()
et = etree.ElementTree(root)
s1 = ToString(et)
return s1
    def content_astext(self):
        # Alias: the content document serialization.
        return self.astext()
    # Simple accessors used by the writer after translation.
    def set_title(self, title): self.title = title
    def get_title(self): return self.title
    def set_embedded_file_list(self, embedded_file_list):
        self.embedded_file_list = embedded_file_list
    def get_embedded_file_list(self): return self.embedded_file_list
    def get_meta_dict(self): return self.meta_dict
    def process_footnotes(self):
        """Attach collected footnote bodies to their references.

        For each footnote gathered during the walk, the first back
        reference receives a deep copy of the note body; every later
        reference is rewritten in place into a text:note-ref element
        pointing back at the note.
        """
        for node, el1 in self.footnote_list:
            backrefs = node.attributes.get('backrefs', [])
            first = True
            for ref in backrefs:
                el2 = self.footnote_ref_dict.get(ref)
                if el2 is not None:
                    if first:
                        first = False
                        el3 = copy.deepcopy(el1)
                        el2.append(el3)
                    else:
                        # Subsequent reference: keep only the citation
                        # text and turn the element into a note-ref.
                        children = el2.getchildren()
                        if len(children) > 0: #  and 'id' in el2.attrib:
                            child = children[0]
                            ref1 = child.text
                            attribkey = add_ns('text:id', nsdict=SNSD)
                            id1 = el2.get(attribkey, 'footnote-error')
                            if id1 is None:
                                id1 = ''
                            tag = add_ns('text:note-ref', nsdict=SNSD)
                            el2.tag = tag
                            if self.settings.endnotes_end_doc:
                                note_class = 'endnote'
                            else:
                                note_class = 'footnote'
                            el2.attrib.clear()
                            attribkey = add_ns('text:note-class', nsdict=SNSD)
                            el2.attrib[attribkey] = note_class
                            attribkey = add_ns('text:ref-name', nsdict=SNSD)
                            el2.attrib[attribkey] = id1
                            attribkey = add_ns('text:reference-format', nsdict=SNSD)
                            el2.attrib[attribkey] = 'page'
                            el2.text = ref1
#
# Utility methods
def append_child(self, tag, attrib=None, parent=None):
if parent is None:
parent = self.current_element
if attrib is None:
el = SubElement(parent, tag)
else:
el = SubElement(parent, tag, attrib)
return el
def append_p(self, style, text=None):
result = self.append_child('text:p', attrib={
'text:style-name': self.rststyle(style)})
self.append_pending_ids(result)
if text is not None:
result.text = text
return result
def append_pending_ids(self, el):
if self.settings.create_links:
for id in self.pending_ids:
SubElement(el, 'text:reference-mark', attrib={
'text:name': id})
self.pending_ids = [ ]
    def set_current_element(self, el):
        # Make `el` the insertion point for subsequently generated
        # content.
        self.current_element = el
    def set_to_parent(self):
        # Pop the insertion point up one level.  NOTE(review): relies
        # on elements providing getparent (lxml API, or the etree shim
        # this writer configures) — verify with the non-lxml back-end.
        self.current_element = self.current_element.getparent()
def generate_labeled_block(self, node, label):
el = self.append_p('textbody')
el1 = SubElement(el, 'text:span',
attrib={'text:style-name': self.rststyle('strong')})
el1.text = label
el = self.append_p('blockindent')
return el
def generate_labeled_line(self, node, label):
el = self.append_p('textbody')
el1 = SubElement(el, 'text:span',
attrib={'text:style-name': self.rststyle('strong')})
el1.text = label
el1.tail = node.astext()
return el
def encode(self, text):
text = text.replace(u'\u00a0', " ")
return text
#
# Visitor functions
#
# In alphabetic order, more or less.
# See docutils.docutils.nodes.node_class_names.
#
    def dispatch_visit(self, node):
        """Override to catch basic attributes which many nodes have."""
        self.handle_basic_atts(node)
        nodes.GenericNodeVisitor.dispatch_visit(self, node)
    def handle_basic_atts(self, node):
        # Queue the node's ids; append_pending_ids later emits them as
        # reference marks on the next paragraph.
        if isinstance(node, nodes.Element) and node['ids']:
            self.pending_ids += node['ids']
    def default_visit(self, node):
        # Fallback for node types without a visit_* method.
        self.document.reporter.warning('missing visit_%s' % (node.tagname, ))
    def default_departure(self, node):
        # Fallback for node types without a depart_* method.
        self.document.reporter.warning('missing depart_%s' % (node.tagname, ))
def visit_Text(self, node):
# Skip nodes whose text has been processed in parent nodes.
if isinstance(node.parent, docutils.nodes.literal_block):
return
text = node.astext()
# Are we in mixed content? If so, add the text to the
# etree tail of the previous sibling element.
if len(self.current_element.getchildren()) > 0:
if self.current_element.getchildren()[-1].tail:
self.current_element.getchildren()[-1].tail += text
else:
self.current_element.getchildren()[-1].tail = text
else:
if self.current_element.text:
self.current_element.text += text
else:
self.current_element.text = text
    def depart_Text(self, node):
        # Nothing to do; the text was emitted in visit_Text.
        pass
#
# Pre-defined fields
#
    # Bibliographic ("docinfo") field visitors.  Each field is rendered
    # as a bold label followed by its content, either on the same line
    # (generate_labeled_line) or in an indented block
    # (generate_labeled_block).
    def visit_address(self, node):
        el = self.generate_labeled_block(node, 'Address: ')
        self.set_current_element(el)
    def depart_address(self, node):
        self.set_to_parent()
    def visit_author(self, node):
        # Inside an `authors` group the label was already emitted by
        # visit_authors; just indent each author.
        if isinstance(node.parent, nodes.authors):
            el = self.append_p('blockindent')
        else:
            el = self.generate_labeled_block(node, 'Author: ')
        self.set_current_element(el)
    def depart_author(self, node):
        self.set_to_parent()
    def visit_authors(self, node):
        label = 'Authors:'
        el = self.append_p('textbody')
        el1 = SubElement(el, 'text:span',
            attrib={'text:style-name': self.rststyle('strong')})
        el1.text = label
    def depart_authors(self, node):
        pass
    def visit_contact(self, node):
        el = self.generate_labeled_block(node, 'Contact: ')
        self.set_current_element(el)
    def depart_contact(self, node):
        self.set_to_parent()
    def visit_copyright(self, node):
        el = self.generate_labeled_block(node, 'Copyright: ')
        self.set_current_element(el)
    def depart_copyright(self, node):
        self.set_to_parent()
    def visit_date(self, node):
        self.generate_labeled_line(node, 'Date: ')
    def depart_date(self, node):
        pass
    def visit_organization(self, node):
        el = self.generate_labeled_block(node, 'Organization: ')
        self.set_current_element(el)
    def depart_organization(self, node):
        self.set_to_parent()
    def visit_status(self, node):
        el = self.generate_labeled_block(node, 'Status: ')
        self.set_current_element(el)
    def depart_status(self, node):
        self.set_to_parent()
    def visit_revision(self, node):
        self.generate_labeled_line(node, 'Revision: ')
    def depart_revision(self, node):
        pass
    def visit_version(self, node):
        el = self.generate_labeled_line(node, 'Version: ')
        #self.set_current_element(el)
    def depart_version(self, node):
        #self.set_to_parent()
        pass
    def visit_attribution(self, node):
        el = self.append_p('attribution', node.astext())
    def depart_attribution(self, node):
        pass
def visit_block_quote(self, node):
if 'epigraph' in node.attributes['classes']:
self.paragraph_style_stack.append(self.rststyle('epigraph'))
self.blockstyle = self.rststyle('epigraph')
elif 'highlights' in node.attributes['classes']:
self.paragraph_style_stack.append(self.rststyle('highlights'))
self.blockstyle = self.rststyle('highlights')
else:
self.paragraph_style_stack.append(self.rststyle('blockquote'))
self.blockstyle = self.rststyle('blockquote')
self.line_indent_level += 1
    def depart_block_quote(self, node):
        # Restore the paragraph style pushed by visit_block_quote.
        self.paragraph_style_stack.pop()
        self.blockstyle = ''
        self.line_indent_level -= 1
    def visit_bullet_list(self, node):
        """Open a text:list for a bullet list.

        Inside the table of contents the TOC list styles are used (or
        nothing at all when generating a native OOWriter TOC); inside a
        block-quote variant the matching quoted-list styles are used.
        The item style is pushed for visit_list_item to consume.
        """
        self.list_level +=1
        if self.in_table_of_contents:
            if self.settings.generate_oowriter_toc:
                pass
            else:
                if node.has_key('classes') and \
                    'auto-toc' in node.attributes['classes']:
                    el = SubElement(self.current_element, 'text:list', attrib={
                        'text:style-name': self.rststyle('tocenumlist'),
                        })
                    self.list_style_stack.append(self.rststyle('enumitem'))
                else:
                    el = SubElement(self.current_element, 'text:list', attrib={
                        'text:style-name': self.rststyle('tocbulletlist'),
                        })
                    self.list_style_stack.append(self.rststyle('bulletitem'))
                self.set_current_element(el)
        else:
            if self.blockstyle == self.rststyle('blockquote'):
                el = SubElement(self.current_element, 'text:list', attrib={
                    'text:style-name': self.rststyle('blockquote-bulletlist'),
                    })
                self.list_style_stack.append(
                    self.rststyle('blockquote-bulletitem'))
            elif self.blockstyle == self.rststyle('highlights'):
                el = SubElement(self.current_element, 'text:list', attrib={
                    'text:style-name': self.rststyle('highlights-bulletlist'),
                    })
                self.list_style_stack.append(
                    self.rststyle('highlights-bulletitem'))
            elif self.blockstyle == self.rststyle('epigraph'):
                el = SubElement(self.current_element, 'text:list', attrib={
                    'text:style-name': self.rststyle('epigraph-bulletlist'),
                    })
                self.list_style_stack.append(
                    self.rststyle('epigraph-bulletitem'))
            else:
                el = SubElement(self.current_element, 'text:list', attrib={
                    'text:style-name': self.rststyle('bulletlist'),
                    })
                self.list_style_stack.append(self.rststyle('bulletitem'))
            self.set_current_element(el)
def depart_bullet_list(self, node):
if self.in_table_of_contents:
if self.settings.generate_oowriter_toc:
pass
else:
self.set_to_parent()
self.list_style_stack.pop()
else:
self.set_to_parent()
self.list_style_stack.pop()
self.list_level -=1
def visit_caption(self, node):
raise nodes.SkipChildren()
pass
    def depart_caption(self, node):
        pass
    def visit_comment(self, node):
        # Comments become office:annotation elements (ODF notes).
        el = self.append_p('textbody')
        el1 =  SubElement(el, 'office:annotation', attrib={})
        el2 =  SubElement(el1, 'text:p', attrib={})
        el2.text = node.astext()
    def depart_comment(self, node):
        pass
    def visit_compound(self, node):
        # The compound directive currently receives no special treatment.
        pass
    def depart_compound(self, node):
        pass
    def visit_container(self, node):
        # A container's first class, if any, selects the paragraph
        # style for its content.
        styles = node.attributes.get('classes', ())
        if len(styles) > 0:
            self.paragraph_style_stack.append(self.rststyle(styles[0]))
    def depart_container(self, node):
        styles = node.attributes.get('classes', ())
        if len(styles) > 0:
            self.paragraph_style_stack.pop()
    def visit_decoration(self, node):
        pass
    def depart_decoration(self, node):
        pass
    def visit_definition(self, node):
        # Definition bodies are indented and bump the list level so
        # nested lists indent one step further.
        self.paragraph_style_stack.append(self.rststyle('blockindent'))
        self.bumped_list_level_stack.append(ListLevel(1))
    def depart_definition(self, node):
        self.paragraph_style_stack.pop()
        self.bumped_list_level_stack.pop()
    def visit_definition_list(self, node):
        pass
    def depart_definition_list(self, node):
        pass
    def visit_definition_list_item(self, node):
        pass
    def depart_definition_list_item(self, node):
        pass
    def visit_term(self, node):
        # The defined term is rendered bold inside its own paragraph.
        el = self.append_p('textbody')
        el1 = SubElement(el, 'text:span',
            attrib={'text:style-name': self.rststyle('strong')})
        #el1.text = node.astext()
        self.set_current_element(el1)
    def depart_term(self, node):
        # Pop twice: once out of the span, once out of the paragraph.
        self.set_to_parent()
        self.set_to_parent()
    def visit_classifier(self, node):
        # Append the classifier, italicized and parenthesized, to the
        # most recent child (the term's span).
        els = self.current_element.getchildren()
        if len(els) > 0:
            el = els[-1]
            el1 = SubElement(el, 'text:span',
                attrib={'text:style-name': self.rststyle('emphasis')
                })
            el1.text = ' (%s)' % (node.astext(), )
    def depart_classifier(self, node):
        pass
    def visit_document(self, node):
        pass
    def depart_document(self, node):
        # All nodes visited; wire footnote bodies to their references.
        self.process_footnotes()
    def visit_docinfo(self, node):
        # Optionally wrap the docinfo fields in a named text:section.
        self.section_level += 1
        self.section_count += 1
        if self.settings.create_sections:
            el = self.append_child('text:section', attrib={
                'text:name': 'Section%d' % self.section_count,
                'text:style-name': 'Sect%d' % self.section_level,
                })
            self.set_current_element(el)
    def depart_docinfo(self, node):
        self.section_level -= 1
        if self.settings.create_sections:
            self.set_to_parent()
    def visit_emphasis(self, node):
        # Emphasis becomes a styled text:span; descend into it.
        el = SubElement(self.current_element, 'text:span',
            attrib={'text:style-name': self.rststyle('emphasis')})
        self.set_current_element(el)
    def depart_emphasis(self, node):
        self.set_to_parent()
def visit_enumerated_list(self, node):
el1 = self.current_element
if self.blockstyle == self.rststyle('blockquote'):
el2 = SubElement(el1, 'text:list', attrib={
'text:style-name': self.rststyle('blockquote-enumlist'),
})
self.list_style_stack.append(self.rststyle('blockquote-enumitem'))
elif self.blockstyle == self.rststyle('highlights'):
el2 = SubElement(el1, 'text:list', attrib={
'text:style-name': self.rststyle('highlights-enumlist'),
})
self.list_style_stack.append(self.rststyle('highlights-enumitem'))
elif self.blockstyle == self.rststyle('epigraph'):
el2 = SubElement(el1, 'text:list', attrib={
'text:style-name': self.rststyle('epigraph-enumlist'),
})
self.list_style_stack.append(self.rststyle('epigraph-enumitem'))
else:
liststylename = 'enumlist-%s' % (node.get('enumtype', 'arabic'), )
el2 = SubElement(el1, 'text:list', attrib={
'text:style-name': self.rststyle(liststylename),
})
self.list_style_stack.append(self.rststyle('enumitem'))
self.set_current_element(el2)
    def depart_enumerated_list(self, node):
        # Close the text:list and drop its item style.
        self.set_to_parent()
        self.list_style_stack.pop()
    def visit_list_item(self, node):
        """Open a text:list-item; when a "bumped" list level is active
        (e.g. inside a definition body), wrap the item in extra nested
        text:list/text:list-item pairs to raise the indentation."""
        # If we are in a "bumped" list level, then wrap this
        # list in an outer lists in order to increase the
        # indentation level.
        if self.in_table_of_contents:
            if self.settings.generate_oowriter_toc:
                self.paragraph_style_stack.append(
                    self.rststyle('contents-%d' % (self.list_level, )))
            else:
                el1 = self.append_child('text:list-item')
                self.set_current_element(el1)
        else:
            el1 = self.append_child('text:list-item')
            el3 = el1
            if len(self.bumped_list_level_stack) > 0:
                level_obj = self.bumped_list_level_stack[-1]
                if level_obj.get_sibling():
                    level_obj.set_nested(False)
                    for level_obj1 in self.bumped_list_level_stack:
                        for idx in range(level_obj1.get_level()):
                            el2 = self.append_child('text:list', parent=el3)
                            el3 = self.append_child(
                                'text:list-item', parent=el2)
            self.paragraph_style_stack.append(self.list_style_stack[-1])
            self.set_current_element(el3)
    def depart_list_item(self, node):
        """Close the text:list-item, unwinding any extra nesting that
        visit_list_item added for bumped list levels."""
        if self.in_table_of_contents:
            if self.settings.generate_oowriter_toc:
                self.paragraph_style_stack.pop()
            else:
                self.set_to_parent()
        else:
            if len(self.bumped_list_level_stack) > 0:
                level_obj = self.bumped_list_level_stack[-1]
                if level_obj.get_sibling():
                    level_obj.set_nested(True)
                    for level_obj1 in self.bumped_list_level_stack:
                        for idx in range(level_obj1.get_level()):
                            # One pop for the wrapper list, one for the
                            # wrapper list-item.
                            self.set_to_parent()
                            self.set_to_parent()
            self.paragraph_style_stack.pop()
            self.set_to_parent()
    # Header/footer nodes only toggle the collection flags checked
    # elsewhere; their content is gathered for add_header_footer.
    def visit_header(self, node):
        self.in_header = True
    def depart_header(self, node):
        self.in_header = False
    def visit_footer(self, node):
        self.in_footer = True
    def depart_footer(self, node):
        self.in_footer = False
    def visit_field(self, node):
        pass
    def depart_field(self, node):
        pass
    def visit_field_list(self, node):
        pass
    def depart_field_list(self, node):
        pass
    def visit_field_name(self, node):
        # Field name is rendered bold in its own paragraph.
        el = self.append_p('textbody')
        el1 = SubElement(el, 'text:span',
            attrib={'text:style-name': self.rststyle('strong')})
        el1.text = node.astext()
    def depart_field_name(self, node):
        pass
    def visit_field_body(self, node):
        # Field bodies are block-indented under their name.
        self.paragraph_style_stack.append(self.rststyle('blockindent'))
    def depart_field_body(self, node):
        self.paragraph_style_stack.pop()
    def visit_figure(self, node):
        pass
    def depart_figure(self, node):
        pass
    def visit_footnote(self, node):
        """Start collecting a footnote (or citation) body into a
        detached text:note-body element; process_footnotes attaches it
        to its references after the walk."""
        self.footnote_level += 1
        self.save_footnote_current = self.current_element
        el1 = Element('text:note-body')
        self.current_element = el1
        self.footnote_list.append((node, el1))
        if isinstance(node, docutils.nodes.citation):
            self.paragraph_style_stack.append(self.rststyle('citation'))
        else:
            self.paragraph_style_stack.append(self.rststyle('footnote'))
    def depart_footnote(self, node):
        # Restore the insertion point saved in visit_footnote.
        self.paragraph_style_stack.pop()
        self.current_element = self.save_footnote_current
        self.footnote_level -= 1
footnote_chars = [
'*', '**', '***',
'++', '+++',
'##', '###',
'@@', '@@@',
]
    def visit_footnote_reference(self, node):
        """Emit a text:note for a footnote/citation reference.

        The citation label depends on the reference kind: bracketed
        text for citations, the node text for auto-numbered notes, or
        the next symbol from footnote_chars for auto-symbol notes.  The
        created element is recorded so process_footnotes can attach the
        note body later.  Children are always skipped.
        """
        if self.footnote_level <= 0:
            id = node.attributes['ids'][0]
            refid = node.attributes.get('refid')
            if refid is None:
                refid = ''
            if self.settings.endnotes_end_doc:
                note_class = 'endnote'
            else:
                note_class = 'footnote'
            el1 = self.append_child('text:note', attrib={
                'text:id': '%s' % (refid, ),
                'text:note-class': note_class,
                })
            note_auto = str(node.attributes.get('auto', 1))
            if isinstance(node, docutils.nodes.citation_reference):
                citation = '[%s]' % node.astext()
                el2 = SubElement(el1, 'text:note-citation', attrib={
                    'text:label': citation,
                    })
                el2.text = citation
            elif note_auto == '1':
                el2 = SubElement(el1, 'text:note-citation', attrib={
                    'text:label': node.astext(),
                    })
                el2.text = node.astext()
            elif note_auto == '*':
                if self.footnote_chars_idx >= len(
                    ODFTranslator.footnote_chars):
                    self.footnote_chars_idx = 0
                footnote_char = ODFTranslator.footnote_chars[
                    self.footnote_chars_idx]
                self.footnote_chars_idx += 1
                el2 = SubElement(el1, 'text:note-citation', attrib={
                    'text:label': footnote_char,
                    })
                el2.text = footnote_char
            self.footnote_ref_dict[id] = el1
        raise nodes.SkipChildren()
    def depart_footnote_reference(self, node):
        pass
def visit_citation(self, node):
for id in node.attributes['ids']:
self.citation_id = id
break
self.paragraph_style_stack.append(self.rststyle('blockindent'))
self.bumped_list_level_stack.append(ListLevel(1))
    def depart_citation(self, node):
        # End of the citation: forget its id and restore the styles.
        self.citation_id = None
        self.paragraph_style_stack.pop()
        self.bumped_list_level_stack.pop()
    def visit_citation_reference(self, node):
        """Render a citation reference as bracketed text, optionally
        wrapped in a text:reference-ref when link creation is on."""
        if self.settings.create_links:
            id = node.attributes['refid']
            el = self.append_child('text:reference-ref', attrib={
                'text:ref-name': '%s' % (id, ),
                'text:reference-format': 'text',
                })
            el.text = '['
            self.set_current_element(el)
        elif self.current_element.text is None:
            self.current_element.text = '['
        else:
            self.current_element.text += '['
    def depart_citation_reference(self, node):
        self.current_element.text += ']'
        if self.settings.create_links:
            self.set_to_parent()
    def visit_label(self, node):
        """Handle footnote/citation labels: footnote labels are skipped
        (the note element carries them); citation labels open a
        bracketed paragraph and, optionally, a reference-mark range."""
        if isinstance(node.parent, docutils.nodes.footnote):
            raise nodes.SkipChildren()
        elif self.citation_id is not None:
            el = self.append_p('textbody')
            self.set_current_element(el)
            el.text = '['
            if self.settings.create_links:
                el1 = self.append_child('text:reference-mark-start', attrib={
                    'text:name': '%s' % (self.citation_id, ),
                    })
    def depart_label(self, node):
        if isinstance(node.parent, docutils.nodes.footnote):
            pass
        elif self.citation_id is not None:
            # Close the bracket (and the reference-mark range).
            self.current_element.text += ']'
            if self.settings.create_links:
                el = self.append_child('text:reference-mark-end', attrib={
                    'text:name': '%s' % (self.citation_id, ),
                    })
            self.set_to_parent()
    # docutils "generated" nodes produce no output of their own here.
    def visit_generated(self, node):
        pass
    def depart_generated(self, node):
        pass
def check_file_exists(self, path):
if os.path.exists(path):
return 1
else:
return 0
    def visit_image(self, node):
        """Register the image file for embedding and emit either a
        framed figure (when inside a figure node) or a plain inline
        image."""
        # Capture the image file.
        if 'uri' in node.attributes:
            source = node.attributes['uri']
            if not self.check_file_exists(source):
                self.document.reporter.warning(
                    'Cannot find image file %s.' % (source, ))
                return
        else:
            return
        if source in self.image_dict:
            filename, destination = self.image_dict[source]
        else:
            self.image_count += 1
            filename = os.path.split(source)[1]
            destination = 'Pictures/1%08x%s' % (self.image_count, filename, )
            spec = (os.path.abspath(source), destination,)
            self.embedded_file_list.append(spec)
            # NOTE(review): the cached tuple stores the full `source`
            # path, not the basename computed above; downstream code
            # opens the first item as a path so this works, but the
            # unpacking name `filename` is misleading — verify.
            self.image_dict[source] = (source, destination,)
        # Is this a figure (containing an image) or just a plain image?
        if self.in_paragraph:
            el1 = self.current_element
        else:
            el1 = SubElement(self.current_element, 'text:p',
                attrib={'text:style-name': self.rststyle('textbody')})
        el2 = el1
        if isinstance(node.parent, docutils.nodes.figure):
            el3, el4, el5, caption = self.generate_figure(node, source,
                destination, el2)
            attrib = {}
            el6, width = self.generate_image(node, source, destination,
                el5, attrib)
            if caption is not None:
                el6.tail = caption
        else: #if isinstance(node.parent, docutils.nodes.image):
            el3 = self.generate_image(node, source, destination, el2)
    def depart_image(self, node):
        pass
def get_image_width_height(self, node, attr):
size = None
if attr in node.attributes:
size = node.attributes[attr]
unit = size[-2:]
if unit.isalpha():
size = size[:-2]
else:
unit = 'px'
try:
size = float(size)
except ValueError, e:
self.document.reporter.warning(
'Invalid %s for image: "%s"' % (
attr, node.attributes[attr]))
size = [size, unit]
return size
def get_image_scale(self, node):
if 'scale' in node.attributes:
try:
scale = int(node.attributes['scale'])
if scale < 1: # or scale > 100:
self.document.reporter.warning(
'scale out of range (%s), using 1.' % (scale, ))
scale = 1
scale = scale * 0.01
except ValueError, e:
self.document.reporter.warning(
'Invalid scale for image: "%s"' % (
node.attributes['scale'], ))
else:
scale = 1.0
return scale
    def get_image_scaled_width_height(self, node, source):
        """Return the final (width, height) strings (value + unit, e.g.
        '3.5in') for the image, applying the scale attribute and
        converting pixel sizes to inches via the image's dpi.

        Missing dimensions are read from the image file itself, which
        requires PIL; otherwise a RuntimeError is raised.
        """
        scale = self.get_image_scale(node)
        width = self.get_image_width_height(node, 'width')
        height = self.get_image_width_height(node, 'height')
        dpi = (72, 72)
        if Image is not None and source in self.image_dict:
            filename, destination = self.image_dict[source]
            imageobj = Image.open(filename, 'r')
            dpi = imageobj.info.get('dpi', dpi)
            # dpi information can be (xdpi, ydpi) or xydpi
            try: iter(dpi)
            except: dpi = (dpi, dpi)
        else:
            imageobj = None
        if width is None or height is None:
            if imageobj is None:
                raise RuntimeError(
                    'image size not fully specified and PIL not installed')
            if width is None: width = [imageobj.size[0], 'px']
            if height is None: height = [imageobj.size[1], 'px']
        width[0] *= scale
        height[0] *= scale
        if width[1] == 'px': width = [width[0] / dpi[0], 'in']
        if height[1] == 'px': height = [height[0] / dpi[1], 'in']
        width[0] = str(width[0])
        height[0] = str(height[0])
        return ''.join(width), ''.join(height)
def generate_figure(self, node, source, destination, current_element):
    """Build the ODF frame scaffolding for a figure.

    Creates automatic styles for the optional caption and for the
    graphic frame, then appends a draw:frame containing a draw:text-box
    and a caption paragraph to ``current_element``.  Returns
    ``(frame, text_box, caption_paragraph, caption_text_or_None)``;
    the caller inserts the image itself.
    """
    caption = None
    width, height = self.get_image_scaled_width_height(node, source)
    # A sibling 'caption' node supplies the caption text, if any.
    for node1 in node.parent.children:
        if node1.tagname == 'caption':
            caption = node1.astext()
    self.image_style_count += 1
    #
    # Add the style for the caption.
    if caption is not None:
        attrib = {
            'style:class': 'extra',
            'style:family': 'paragraph',
            'style:name': 'Caption',
            'style:parent-style-name': 'Standard',
            }
        el1 = SubElement(self.automatic_styles, 'style:style',
            attrib=attrib, nsdict=SNSD)
        attrib = {
            'fo:margin-bottom': '0.0835in',
            'fo:margin-top': '0.0835in',
            'text:line-number': '0',
            'text:number-lines': 'false',
            }
        el2 = SubElement(el1, 'style:paragraph-properties',
            attrib=attrib, nsdict=SNSD)
        attrib = {
            'fo:font-size': '12pt',
            'fo:font-style': 'italic',
            'style:font-name': 'Times',
            'style:font-name-complex': 'Lucidasans1',
            'style:font-size-asian': '12pt',
            'style:font-size-complex': '12pt',
            'style:font-style-asian': 'italic',
            'style:font-style-complex': 'italic',
            }
        el2 = SubElement(el1, 'style:text-properties',
            attrib=attrib, nsdict=SNSD)
    style_name = 'rstframestyle%d' % self.image_style_count
    # Add the styles
    attrib = {
        'style:name': style_name,
        'style:family': 'graphic',
        'style:parent-style-name': self.rststyle('figureframe'),
        }
    el1 = SubElement(self.automatic_styles,
        'style:style', attrib=attrib, nsdict=SNSD)
    halign = 'center'
    valign = 'top'
    if 'align' in node.attributes:
        align = node.attributes['align'].split()
        for val in align:
            if val in ('left', 'center', 'right'):
                halign = val
            elif val in ('top', 'middle', 'bottom'):
                valign = val
    # NOTE(review): halign/valign are computed but never applied in this
    # method -- TODO confirm whether alignment is intentionally dropped
    # for figures.
    attrib = {}
    wrap = False
    # A 'wrap' class on the enclosing figure enables dynamic text wrap.
    classes = node.parent.attributes.get('classes')
    if classes and 'wrap' in classes:
        wrap = True
    if wrap:
        attrib['style:wrap'] = 'dynamic'
    else:
        attrib['style:wrap'] = 'none'
    el2 = SubElement(el1,
        'style:graphic-properties', attrib=attrib, nsdict=SNSD)
    attrib = {
        'draw:style-name': style_name,
        'draw:name': 'Frame1',
        'text:anchor-type': 'paragraph',
        'draw:z-index': '0',
        }
    attrib['svg:width'] = width
    # dbg
    #attrib['svg:height'] = height
    el3 = SubElement(current_element, 'draw:frame', attrib=attrib)
    attrib = {}
    el4 = SubElement(el3, 'draw:text-box', attrib=attrib)
    attrib = {
        'text:style-name': self.rststyle('caption'),
        }
    el5 = SubElement(el4, 'text:p', attrib=attrib)
    return el3, el4, el5, caption
def generate_image(self, node, source, destination, current_element,
        frame_attrs=None):
    """Emit a draw:frame/draw:image pair for an image node.

    Creates an automatic graphic style (from ``frame_attrs`` when given,
    otherwise a default property set), honors the node's 'align' and
    'wrap' attributes, then appends the frame to ``current_element``.
    Returns ``(frame_element, width_string)``.
    """
    width, height = self.get_image_scaled_width_height(node, source)
    self.image_style_count += 1
    style_name = 'rstframestyle%d' % self.image_style_count
    # Add the style.
    attrib = {
        'style:name': style_name,
        'style:family': 'graphic',
        'style:parent-style-name': self.rststyle('image'),
        }
    el1 = SubElement(self.automatic_styles,
        'style:style', attrib=attrib, nsdict=SNSD)
    halign = None
    valign = None
    if 'align' in node.attributes:
        align = node.attributes['align'].split()
        for val in align:
            if val in ('left', 'center', 'right'):
                halign = val
            elif val in ('top', 'middle', 'bottom'):
                valign = val
    if frame_attrs is None:
        attrib = {
            'style:vertical-pos': 'top',
            'style:vertical-rel': 'paragraph',
            'style:horizontal-rel': 'paragraph',
            'style:mirror': 'none',
            'fo:clip': 'rect(0cm 0cm 0cm 0cm)',
            'draw:luminance': '0%',
            'draw:contrast': '0%',
            'draw:red': '0%',
            'draw:green': '0%',
            'draw:blue': '0%',
            'draw:gamma': '100%',
            'draw:color-inversion': 'false',
            'draw:image-opacity': '100%',
            'draw:color-mode': 'standard',
            }
    else:
        attrib = frame_attrs
    if halign is not None:
        attrib['style:horizontal-pos'] = halign
    if valign is not None:
        attrib['style:vertical-pos'] = valign
    # If there is a classes/wrap directive or we are
    # inside a table, add a no-wrap style.
    wrap = False
    classes = node.attributes.get('classes')
    if classes and 'wrap' in classes:
        wrap = True
    if wrap:
        attrib['style:wrap'] = 'dynamic'
    else:
        attrib['style:wrap'] = 'none'
    # If we are inside a table, add a no-wrap style.
    if self.is_in_table(node):
        attrib['style:wrap'] = 'none'
    el2 = SubElement(el1,
        'style:graphic-properties', attrib=attrib, nsdict=SNSD)
    # Add the content.
    #el = SubElement(current_element, 'text:p',
    #    attrib={'text:style-name': self.rststyle('textbody')})
    attrib = {
        'draw:style-name': style_name,
        'draw:name': 'graphics2',
        'draw:z-index': '1',
        }
    # Inline images anchor as characters; block images to the paragraph.
    if isinstance(node.parent, nodes.TextElement):
        attrib['text:anchor-type'] = 'as-char' #vds
    else:
        attrib['text:anchor-type'] = 'paragraph'
    attrib['svg:width'] = width
    attrib['svg:height'] = height
    el1 = SubElement(current_element, 'draw:frame', attrib=attrib)
    el2 = SubElement(el1, 'draw:image', attrib={
        'xlink:href': '%s' % (destination, ),
        'xlink:type': 'simple',
        'xlink:show': 'embed',
        'xlink:actuate': 'onLoad',
        })
    return el1, width
def is_in_table(self, node):
    """Return True when any ancestor of ``node`` is a table cell
    (a docutils ``entry`` node)."""
    ancestor = node.parent
    while ancestor:
        if isinstance(ancestor, docutils.nodes.entry):
            return True
        ancestor = ancestor.parent
    return False
def visit_legend(self, node):
    """Descend into the figure's text box to receive the legend text."""
    if isinstance(node.parent, docutils.nodes.figure):
        el1 = self.current_element[-1]
        # Drill down to the caption paragraph built by generate_figure
        # (frame -> text-box -> text:p) -- presumably; structure depends
        # on generate_figure's output.  TODO confirm.
        el1 = el1[0][0]
        self.current_element = el1
        self.paragraph_style_stack.append(self.rststyle('legend'))
def depart_legend(self, node):
    """Restore the paragraph style and climb back out of the frame."""
    if isinstance(node.parent, docutils.nodes.figure):
        self.paragraph_style_stack.pop()
        # Three levels up mirrors the three-deep descent in visit_legend.
        self.set_to_parent()
        self.set_to_parent()
        self.set_to_parent()
def visit_line_block(self, node):
    """Track nesting depth of line blocks (used to pick indent styles)."""
    self.line_indent_level += 1
    self.line_block_level += 1
def depart_line_block(self, node):
    self.line_indent_level -= 1
    self.line_block_level -= 1
def visit_line(self, node):
    """Open a text:p styled for the current line-block indent level."""
    style = 'lineblock%d' % self.line_indent_level
    el1 = SubElement(self.current_element, 'text:p', attrib={
        'text:style-name': self.rststyle(style),
        })
    self.current_element = el1
def depart_line(self, node):
    self.set_to_parent()
def visit_literal(self, node):
    """Render inline literal text in the 'inlineliteral' span style."""
    el = SubElement(self.current_element, 'text:span',
        attrib={'text:style-name': self.rststyle('inlineliteral')})
    self.set_current_element(el)
def depart_literal(self, node):
    self.set_to_parent()
def visit_inline(self, node):
    """Open a text:span styled after the node's first class, if any.

    Records on the node whether a span was actually opened so that
    depart_inline pops only when one was pushed.  Previously depart
    always popped, unbalancing the element stack for inline nodes
    that carry no classes.
    """
    styles = node.attributes.get('classes', ())
    if len(styles) > 0:
        inline_style = styles[0]
        el = SubElement(self.current_element, 'text:span',
            attrib={'text:style-name': self.rststyle(inline_style)})
        self.set_current_element(el)
        node.odf_span_pushed = True
    else:
        # No span opened; make depart_inline a no-op for this node.
        node.odf_span_pushed = False
def depart_inline(self, node):
    # Default True keeps old behavior if visit_inline was bypassed.
    if getattr(node, 'odf_span_pushed', True):
        self.set_to_parent()
def _calculate_code_block_padding(self, line):
    """Return the indentation width of ``line``: the count of leading
    spaces, or 8 columns per leading tab when tab-indented."""
    spaces = SPACES_PATTERN.match(line)
    if spaces:
        return len(spaces.group())
    tabs = TABS_PATTERN.match(line)
    if tabs:
        return len(tabs.group()) * 8
    return 0
def _add_syntax_highlighting(self, insource, language):
    """Return ``insource`` highlighted by pygments for ``language``.

    LaTeX/TeX input uses the LaTeX-specific formatter; everything else
    uses the generic program formatter.  Both map pygments token names
    through self.rststyle and escape output via escape_cdata.
    """
    lexer = pygments.lexers.get_lexer_by_name(language, stripall=True)
    if language in ('latex', 'tex'):
        fmtr = OdtPygmentsLaTeXFormatter(lambda name, parameters=():
            self.rststyle(name, parameters),
            escape_function=escape_cdata)
    else:
        fmtr = OdtPygmentsProgFormatter(lambda name, parameters=():
            self.rststyle(name, parameters),
            escape_function=escape_cdata)
    outsource = pygments.highlight(insource, lexer, fmtr)
    return outsource
def fill_line(self, line):
    """Encode runs of spaces in ``line`` as ODF <text:s> elements
    (FILL_PAT1/FILL_PAT2 select leading vs embedded runs)."""
    line = FILL_PAT1.sub(self.fill_func1, line)
    line = FILL_PAT2.sub(self.fill_func2, line)
    return line
def fill_func1(self, matchobj):
    """Replace a matched run of spaces with one ODF <text:s> element."""
    return '<text:s text:c="%d"/>' % len(matchobj.group(0))
def fill_func2(self, matchobj):
    """Keep the first space literal and encode the remainder of the
    matched run as an ODF <text:s> element."""
    remaining = len(matchobj.group(0)) - 1
    return ' <text:s text:c="%d"/>' % remaining
def visit_literal_block(self, node):
    """Render a literal (code) block as a text:p in the 'codeblock'
    style, optionally syntax-highlighted via pygments, with line breaks
    encoded as <text:line-break/> and space runs via fill_line."""
    wrapper1 = '<text:p text:style-name="%s">%%s</text:p>' % (
        self.rststyle('codeblock'), )
    source = node.astext()
    if (pygments and
        self.settings.add_syntax_highlighting
        #and
        #node.get('hilight', False)
        ):
        language = node.get('language', 'python')
        source = self._add_syntax_highlighting(source, language)
    else:
        source = escape_cdata(source)
    lines = source.split('\n')
    lines1 = ['<wrappertag1 xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0">']
    my_lines = []
    for my_line in lines:
        my_line = self.fill_line(my_line)
        # NOTE(review): replacing every single space with a newline looks
        # suspicious (fill_line just carefully encoded space runs);
        # upstream versions of this writer replace the "&#10;" entity
        # here instead -- TODO confirm intent.
        my_line = my_line.replace(" ", "\n")
        my_lines.append(my_line)
    my_lines_str = '<text:line-break/>'.join(my_lines)
    my_lines_str2 = wrapper1 % (my_lines_str, )
    lines1.append(my_lines_str2)
    lines1.append('</wrappertag1>')
    s1 = ''.join(lines1)
    # lxml accepts unicode directly; other ElementTrees want bytes.
    if WhichElementTree != "lxml":
        s1 = s1.encode("utf-8")
    el1 = etree.fromstring(s1)
    children = el1.getchildren()
    for child in children:
        self.current_element.append(child)
def depart_literal_block(self, node):
    pass
# Doctest blocks are rendered exactly like literal blocks.
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
def visit_meta(self, node):
    """Record a meta directive's name/content pair for document metadata."""
    name = node.attributes.get('name')
    content = node.attributes.get('content')
    if name is None or content is None:
        return
    self.meta_dict[name] = content
def depart_meta(self, node):
    """Nothing to close for a meta node."""
    pass
def visit_option_list(self, node):
    """Render an option list as a two-column table (Option/Description).

    On first use, generates the automatic table/column/cell styles
    (done once per document, guarded by optiontablestyles_generated);
    then emits the table skeleton with a header row.
    """
    table_name = 'tableoption'
    #
    # Generate automatic styles
    if not self.optiontablestyles_generated:
        self.optiontablestyles_generated = True
        el = SubElement(self.automatic_styles, 'style:style', attrib={
            'style:name': self.rststyle(table_name),
            'style:family': 'table'}, nsdict=SNSD)
        el1 = SubElement(el, 'style:table-properties', attrib={
            'style:width': '17.59cm',
            'table:align': 'left',
            'style:shadow': 'none'}, nsdict=SNSD)
        el = SubElement(self.automatic_styles, 'style:style', attrib={
            'style:name': self.rststyle('%s.%%c' % table_name, ( 'A', )),
            'style:family': 'table-column'}, nsdict=SNSD)
        el1 = SubElement(el, 'style:table-column-properties', attrib={
            'style:column-width': '4.999cm'}, nsdict=SNSD)
        el = SubElement(self.automatic_styles, 'style:style', attrib={
            'style:name': self.rststyle('%s.%%c' % table_name, ( 'B', )),
            'style:family': 'table-column'}, nsdict=SNSD)
        el1 = SubElement(el, 'style:table-column-properties', attrib={
            'style:column-width': '12.587cm'}, nsdict=SNSD)
        el = SubElement(self.automatic_styles, 'style:style', attrib={
            'style:name': self.rststyle(
                '%s.%%c%%d' % table_name, ( 'A', 1, )),
            'style:family': 'table-cell'}, nsdict=SNSD)
        el1 = SubElement(el, 'style:table-cell-properties', attrib={
            'fo:background-color': 'transparent',
            'fo:padding': '0.097cm',
            'fo:border-left': '0.035cm solid #000000',
            'fo:border-right': 'none',
            'fo:border-top': '0.035cm solid #000000',
            'fo:border-bottom': '0.035cm solid #000000'}, nsdict=SNSD)
        el2 = SubElement(el1, 'style:background-image', nsdict=SNSD)
        el = SubElement(self.automatic_styles, 'style:style', attrib={
            'style:name': self.rststyle(
                '%s.%%c%%d' % table_name, ( 'B', 1, )),
            'style:family': 'table-cell'}, nsdict=SNSD)
        el1 = SubElement(el, 'style:table-cell-properties', attrib={
            'fo:padding': '0.097cm',
            'fo:border': '0.035cm solid #000000'}, nsdict=SNSD)
        el = SubElement(self.automatic_styles, 'style:style', attrib={
            'style:name': self.rststyle(
                '%s.%%c%%d' % table_name, ( 'A', 2, )),
            'style:family': 'table-cell'}, nsdict=SNSD)
        el1 = SubElement(el, 'style:table-cell-properties', attrib={
            'fo:padding': '0.097cm',
            'fo:border-left': '0.035cm solid #000000',
            'fo:border-right': 'none',
            'fo:border-top': 'none',
            'fo:border-bottom': '0.035cm solid #000000'}, nsdict=SNSD)
        el = SubElement(self.automatic_styles, 'style:style', attrib={
            'style:name': self.rststyle(
                '%s.%%c%%d' % table_name, ( 'B', 2, )),
            'style:family': 'table-cell'}, nsdict=SNSD)
        el1 = SubElement(el, 'style:table-cell-properties', attrib={
            'fo:padding': '0.097cm',
            'fo:border-left': '0.035cm solid #000000',
            'fo:border-right': '0.035cm solid #000000',
            'fo:border-top': 'none',
            'fo:border-bottom': '0.035cm solid #000000'}, nsdict=SNSD)
    #
    # Generate table data
    el = self.append_child('table:table', attrib={
        'table:name': self.rststyle(table_name),
        'table:style-name': self.rststyle(table_name),
        })
    el1 = SubElement(el, 'table:table-column', attrib={
        'table:style-name': self.rststyle(
            '%s.%%c' % table_name, ( 'A', ))})
    el1 = SubElement(el, 'table:table-column', attrib={
        'table:style-name': self.rststyle(
            '%s.%%c' % table_name, ( 'B', ))})
    el1 = SubElement(el, 'table:table-header-rows')
    el2 = SubElement(el1, 'table:table-row')
    el3 = SubElement(el2, 'table:table-cell', attrib={
        'table:style-name': self.rststyle(
            '%s.%%c%%d' % table_name, ( 'A', 1, )),
        'office:value-type': 'string'})
    el4 = SubElement(el3, 'text:p', attrib={
        'text:style-name': 'Table_20_Heading'})
    el4.text= 'Option'
    el3 = SubElement(el2, 'table:table-cell', attrib={
        'table:style-name': self.rststyle(
            '%s.%%c%%d' % table_name, ( 'B', 1, )),
        'office:value-type': 'string'})
    el4 = SubElement(el3, 'text:p', attrib={
        'text:style-name': 'Table_20_Heading'})
    el4.text= 'Description'
    self.set_current_element(el)
def depart_option_list(self, node):
    self.set_to_parent()
def visit_option_list_item(self, node):
    """Each option-list item becomes a table row."""
    el = self.append_child('table:table-row')
    self.set_current_element(el)
def depart_option_list_item(self, node):
    self.set_to_parent()
def visit_option_group(self, node):
    """The option names go in the first (A) column cell."""
    el = self.append_child('table:table-cell', attrib={
        'table:style-name': 'Table%d.A2' % self.table_count,
        'office:value-type': 'string',
        })
    self.set_current_element(el)
def depart_option_group(self, node):
    self.set_to_parent()
def visit_option(self, node):
    """Emit the option text as a table-contents paragraph."""
    el = self.append_child('text:p', attrib={
        'text:style-name': 'Table_20_Contents'})
    el.text = node.astext()
def depart_option(self, node):
    pass
def visit_option_string(self, node):
    # Handled via visit_option's astext(); nothing to do here.
    pass
def depart_option_string(self, node):
    pass
def visit_option_argument(self, node):
    pass
def depart_option_argument(self, node):
    pass
def visit_description(self, node):
    """The description goes in the second (B) column cell; children are
    flattened with astext(), so traversal is skipped."""
    el = self.append_child('table:table-cell', attrib={
        'table:style-name': 'Table%d.B2' % self.table_count,
        'office:value-type': 'string',
        })
    el1 = SubElement(el, 'text:p', attrib={
        'text:style-name': 'Table_20_Contents'})
    el1.text = node.astext()
    raise nodes.SkipChildren()
def depart_description(self, node):
    pass
def visit_paragraph(self, node):
    """Open a text:p styled from the current paragraph-style stack
    (or the header/footer style when inside a running header/footer)."""
    self.in_paragraph = True
    if self.in_header:
        el = self.append_p('header')
    elif self.in_footer:
        el = self.append_p('footer')
    else:
        style_name = self.paragraph_style_stack[-1]
        el = self.append_child('text:p',
            attrib={'text:style-name': style_name})
        self.append_pending_ids(el)
    self.set_current_element(el)
def depart_paragraph(self, node):
    """Close the paragraph; header/footer paragraphs are moved out of
    the body tree into the header/footer content lists."""
    self.in_paragraph = False
    self.set_to_parent()
    if self.in_header:
        self.header_content.append(
            self.current_element.getchildren()[-1])
        self.current_element.remove(
            self.current_element.getchildren()[-1])
    elif self.in_footer:
        self.footer_content.append(
            self.current_element.getchildren()[-1])
        self.current_element.remove(
            self.current_element.getchildren()[-1])
def visit_problematic(self, node):
    """Problematic nodes get no special ODF markup."""
    pass
def depart_problematic(self, node):
    pass
def visit_raw(self, node):
    """Splice raw 'odt'-format content directly into the document.

    The raw text is parsed inside a wrapper element carrying the
    content namespaces; only the first parsed child is inserted.
    Raw content inside headers/footers is currently ignored.
    """
    if 'format' in node.attributes:
        formats = node.attributes['format']
        formatlist = formats.split()
        if 'odt' in formatlist:
            rawstr = node.astext()
            attrstr = ' '.join(['%s="%s"' % (k, v, )
                for k,v in CONTENT_NAMESPACE_ATTRIB.items()])
            contentstr = '<stuff %s>%s</stuff>' % (attrstr, rawstr, )
            if WhichElementTree != "lxml":
                contentstr = contentstr.encode("utf-8")
            content = etree.fromstring(contentstr)
            elements = content.getchildren()
            if len(elements) > 0:
                # NOTE(review): only elements[0] is inserted; additional
                # top-level elements in the raw content are dropped --
                # TODO confirm this is intended.
                el1 = elements[0]
                if self.in_header:
                    pass
                elif self.in_footer:
                    pass
                else:
                    self.current_element.append(el1)
    raise nodes.SkipChildren()
def depart_raw(self, node):
    if self.in_header:
        pass
    elif self.in_footer:
        pass
    else:
        pass
def visit_reference(self, node):
    """Emit a hyperlink (text:a) or cross-reference for a reference node.

    Only acts when the create_links setting is enabled; mailto links
    are optionally cloaked.  Inside a table of contents, a leading
    'generated' child (section number) is stripped.

    Cleanups: removed an unused ``text`` local and a redundant nested
    ``self.settings.create_links`` test inside the already-guarded
    refid branch -- behavior is unchanged.
    """
    if self.settings.create_links:
        if node.has_key('refuri'):
            href = node['refuri']
            if (self.settings.cloak_email_addresses
                    and href.startswith('mailto:')):
                href = self.cloak_mailto(href)
            el = self.append_child('text:a', attrib={
                'xlink:href': '%s' % href,
                'xlink:type': 'simple',
                })
            self.set_current_element(el)
        elif node.has_key('refid'):
            href = node['refid']
            el = self.append_child('text:reference-ref', attrib={
                'text:ref-name': '%s' % href,
                'text:reference-format': 'text',
                })
        else:
            self.document.reporter.warning(
                'References must have "refuri" or "refid" attribute.')
    if (self.in_table_of_contents and
            len(node.children) >= 1 and
            isinstance(node.children[0], docutils.nodes.generated)):
        node.remove(node.children[0])
def depart_reference(self, node):
    # Only refuri references pushed a text:a element in visit_reference.
    if self.settings.create_links:
        if node.has_key('refuri'):
            self.set_to_parent()
def visit_rubric(self, node):
    """Render a rubric as a text:h; the first class, when present,
    overrides the default 'rubric' style name."""
    style_name = self.rststyle('rubric')
    classes = node.get('classes')
    if classes:
        class1 = classes[0]
        if class1:
            style_name = class1
    el = SubElement(self.current_element, 'text:h', attrib = {
        #'text:outline-level': '%d' % section_level,
        #'text:style-name': 'Heading_20_%d' % section_level,
        'text:style-name': style_name,
        })
    text = node.astext()
    el.text = self.encode(text)
def depart_rubric(self, node):
    pass
def visit_section(self, node, move_ids=1):
    """Track section nesting; emit a text:section when the
    create_sections setting is enabled."""
    self.section_level += 1
    self.section_count += 1
    if self.settings.create_sections:
        el = self.append_child('text:section', attrib={
            'text:name': 'Section%d' % self.section_count,
            'text:style-name': 'Sect%d' % self.section_level,
            })
        self.set_current_element(el)
def depart_section(self, node):
    self.section_level -= 1
    if self.settings.create_sections:
        self.set_to_parent()
def visit_strong(self, node):
    """Render strong emphasis as a 'strong'-styled text:span."""
    el = SubElement(self.current_element, 'text:span',
        attrib={'text:style-name': self.rststyle('strong')})
    self.set_current_element(el)
def depart_strong(self, node):
    self.set_to_parent()
def visit_substitution_definition(self, node):
    # Substitution definitions produce no output themselves.
    raise nodes.SkipChildren()
def depart_substitution_definition(self, node):
    pass
def visit_system_message(self, node):
    # System messages are not rendered into the document.
    pass
def depart_system_message(self, node):
    pass
def visit_table(self, node):
    """Open a table: generate its automatic table and cell styles,
    emit an optional title paragraph, and start the table:table.

    The total column width is accumulated in self.table_width (by
    visit_colspec) and written back to the style in depart_table.
    """
    self.table_count += 1
    table_name = '%s%%d' % TableStylePrefix
    el1 = SubElement(self.automatic_styles, 'style:style', attrib={
        'style:name': self.rststyle(
            '%s' % table_name, ( self.table_count, )),
        'style:family': 'table',
        }, nsdict=SNSD)
    el1_1 = SubElement(el1, 'style:table-properties', attrib={
        #'style:width': '17.59cm',
        'table:align': 'margins',
        'fo:margin-top': '0in',
        'fo:margin-bottom': '0.10in',
        }, nsdict=SNSD)
    # We use a single cell style for all cells in this table.
    # That's probably not correct, but seems to work.
    el2 = SubElement(self.automatic_styles, 'style:style', attrib={
        'style:name': self.rststyle(
            '%s.%%c%%d' % table_name, ( self.table_count, 'A', 1, )),
        'style:family': 'table-cell',
        }, nsdict=SNSD)
    line_style1 = '0.%03dcm solid #000000' % (
        self.settings.table_border_thickness, )
    el2_1 = SubElement(el2, 'style:table-cell-properties', attrib={
        'fo:padding': '0.049cm',
        'fo:border-left': line_style1,
        'fo:border-right': line_style1,
        'fo:border-top': line_style1,
        'fo:border-bottom': line_style1,
        }, nsdict=SNSD)
    title = None
    for child in node.children:
        if child.tagname == 'title':
            title = child.astext()
            break
    if title is not None:
        el3 = self.append_p('table-title', title)
    else:
        pass
    el4 = SubElement(self.current_element, 'table:table', attrib={
        'table:name': self.rststyle(
            '%s' % table_name, ( self.table_count, )),
        'table:style-name': self.rststyle(
            '%s' % table_name, ( self.table_count, )),
        })
    self.set_current_element(el4)
    self.current_table_style = el1
    self.table_width = 0
def depart_table(self, node):
    """Record the accumulated column width on the table style and close."""
    attribkey = add_ns('style:width', nsdict=SNSD)
    attribval = '%dcm' % self.table_width
    self.current_table_style.attrib[attribkey] = attribval
    self.set_to_parent()
def visit_tgroup(self, node):
    # Columns are lettered starting at 'A'; each colspec increments.
    self.column_count = ord('A') - 1
def depart_tgroup(self, node):
    pass
def visit_colspec(self, node):
    """Emit a table-column element plus its width style, and add the
    column width to the table's running total."""
    self.column_count += 1
    colspec_name = self.rststyle(
        '%s%%d.%%s' % TableStylePrefix,
        (self.table_count, chr(self.column_count), )
        )
    colwidth = node['colwidth']
    el1 = SubElement(self.automatic_styles, 'style:style', attrib={
        'style:name': colspec_name,
        'style:family': 'table-column',
        }, nsdict=SNSD)
    el1_1 = SubElement(el1, 'style:table-column-properties', attrib={
        'style:column-width': '%dcm' % colwidth }, nsdict=SNSD)
    el2 = self.append_child('table:table-column', attrib={
        'table:style-name': colspec_name,
        })
    self.table_width += colwidth
def depart_colspec(self, node):
    pass
def visit_thead(self, node):
    """Open the header-row group and switch paragraphs to heading style."""
    el = self.append_child('table:table-header-rows')
    self.set_current_element(el)
    self.in_thead = True
    self.paragraph_style_stack.append('Table_20_Heading')
def depart_thead(self, node):
    self.set_to_parent()
    self.in_thead = False
    self.paragraph_style_stack.pop()
def visit_row(self, node):
    # Reset column lettering for each row.
    self.column_count = ord('A') - 1
    el = self.append_child('table:table-row')
    self.set_current_element(el)
def depart_row(self, node):
    self.set_to_parent()
def visit_entry(self, node):
    """Open a table cell, handling column/row spans (morecols/morerows)."""
    self.column_count += 1
    # All cells share the table's single 'A1' cell style.
    cellspec_name = self.rststyle(
        '%s%%d.%%c%%d' % TableStylePrefix,
        (self.table_count, 'A', 1, )
        )
    attrib={
        'table:style-name': cellspec_name,
        'office:value-type': 'string',
        }
    morecols = node.get('morecols', 0)
    if morecols > 0:
        attrib['table:number-columns-spanned'] = '%d' % (morecols + 1,)
        self.column_count += morecols
    morerows = node.get('morerows', 0)
    if morerows > 0:
        attrib['table:number-rows-spanned'] = '%d' % (morerows + 1,)
    el1 = self.append_child('table:table-cell', attrib=attrib)
    self.set_current_element(el1)
def depart_entry(self, node):
    self.set_to_parent()
def visit_tbody(self, node):
    pass
def depart_tbody(self, node):
    pass
def visit_target(self, node):
    """Do nothing: targets are not implemented for ODF output.

    (How would a target be represented in oowriter -- a cross-reference?
    The original distinguished refuri/refid/refname targets from others,
    but both branches were empty.)
    """
    pass
def depart_target(self, node):
    pass
def visit_title(self, node, move_ids=1, title_type='title'):
    """Render a section title as text:h (outline level capped at 7),
    or the document title/subtitle as a styled text:p."""
    if isinstance(node.parent, docutils.nodes.section):
        section_level = self.section_level
        if section_level > 7:
            self.document.reporter.warning(
                'Heading/section levels greater than 7 not supported.')
            self.document.reporter.warning(
                '    Reducing to heading level 7 for heading: "%s"' % (
                    node.astext(), ))
            section_level = 7
        el1 = self.append_child('text:h', attrib = {
            'text:outline-level': '%d' % section_level,
            #'text:style-name': 'Heading_20_%d' % section_level,
            'text:style-name': self.rststyle(
                'heading%d', (section_level, )),
            })
        self.append_pending_ids(el1)
        self.set_current_element(el1)
    elif isinstance(node.parent, docutils.nodes.document):
        #    text = self.settings.title
        #else:
        #    text = node.astext()
        el1 = SubElement(self.current_element, 'text:p', attrib = {
            'text:style-name': self.rststyle(title_type),
            })
        self.append_pending_ids(el1)
        text = node.astext()
        self.title = text
        self.found_doc_title = True
        self.set_current_element(el1)
def depart_title(self, node):
    if (isinstance(node.parent, docutils.nodes.section) or
            isinstance(node.parent, docutils.nodes.document)):
        self.set_to_parent()
def visit_subtitle(self, node, move_ids=1):
    # Subtitles reuse the title machinery with a different style.
    self.visit_title(node, move_ids, title_type='subtitle')
def depart_subtitle(self, node):
    self.depart_title(node)
def visit_title_reference(self, node):
    """Render a title reference as a 'quotation'-styled span; children
    are flattened via astext(), so traversal is skipped."""
    span = self.append_child('text:span', attrib={
        'text:style-name': self.rststyle('quotation')})
    span.text = self.encode(node.astext())
    raise nodes.SkipChildren()
def depart_title_reference(self, node):
    pass
def generate_table_of_content_entry_template(self, el1):
    """Add TOC entry templates for outline levels 1..10: chapter number,
    entry text, a dot-leader tab stop, then the page number."""
    for idx in range(1, 11):
        el2 = SubElement(el1,
            'text:table-of-content-entry-template',
            attrib={
                'text:outline-level': "%d" % (idx, ),
                'text:style-name': self.rststyle('contents-%d' % (idx, )),
                })
        el3 = SubElement(el2, 'text:index-entry-chapter')
        el3 = SubElement(el2, 'text:index-entry-text')
        el3 = SubElement(el2, 'text:index-entry-tab-stop', attrib={
            'style:leader-char': ".",
            'style:type': "right",
            })
        el3 = SubElement(el2, 'text:index-entry-page-number')
def visit_topic(self, node):
    """Handle special topics: a 'contents' topic becomes either a real
    oowriter table-of-content structure (when generate_oowriter_toc is
    set) or a simple 'Contents' heading; an 'abstract' topic becomes
    an 'Abstract' heading."""
    if 'classes' in node.attributes:
        if 'contents' in node.attributes['classes']:
            if self.settings.generate_oowriter_toc:
                el1 = self.append_child('text:table-of-content', attrib={
                    'text:name': 'Table of Contents1',
                    'text:protected': 'true',
                    'text:style-name': 'Sect1',
                    })
                el2 = SubElement(el1,
                    'text:table-of-content-source',
                    attrib={
                        'text:outline-level': '10',
                        })
                el3 =SubElement(el2, 'text:index-title-template', attrib={
                    'text:style-name': 'Contents_20_Heading',
                    })
                el3.text = 'Table of Contents'
                self.generate_table_of_content_entry_template(el2)
                el4 = SubElement(el1, 'text:index-body')
                el5 = SubElement(el4, 'text:index-title')
                el6 = SubElement(el5, 'text:p', attrib={
                    'text:style-name': self.rststyle('contents-heading'),
                    })
                el6.text = 'Table of Contents'
                # Entries render into the index body; restore in depart.
                self.save_current_element = self.current_element
                self.table_of_content_index_body = el4
                self.set_current_element(el4)
            else:
                el = self.append_p('horizontalline')
                el = self.append_p('centeredtextbody')
                el1 = SubElement(el, 'text:span',
                    attrib={'text:style-name': self.rststyle('strong')})
                el1.text = 'Contents'
            self.in_table_of_contents = True
        elif 'abstract' in node.attributes['classes']:
            el = self.append_p('horizontalline')
            el = self.append_p('centeredtextbody')
            el1 = SubElement(el, 'text:span',
                attrib={'text:style-name': self.rststyle('strong')})
            el1.text = 'Abstract'
def depart_topic(self, node):
    """Close a 'contents' topic: patch placeholder page numbers into the
    generated TOC (when oowriter TOC was used) or emit a closing rule."""
    if 'classes' in node.attributes:
        if 'contents' in node.attributes['classes']:
            if self.settings.generate_oowriter_toc:
                self.update_toc_page_numbers(
                    self.table_of_content_index_body)
                self.set_current_element(self.save_current_element)
            else:
                el = self.append_p('horizontalline')
            self.in_table_of_contents = False
def update_toc_page_numbers(self, el):
    """Append placeholder page numbers to the generated TOC entries
    (oowriter recalculates real numbers when the index is updated)."""
    collection = []
    self.update_toc_collect(el, 0, collection)
    self.update_toc_add_numbers(collection)
def update_toc_collect(self, el, level, collection):
    """Recursively gather (level, element) pairs, skipping any nested
    index body."""
    collection.append((level, el))
    level += 1
    for child_el in el.getchildren():
        if child_el.tag != 'text:index-body':
            self.update_toc_collect(child_el, level, collection)
def update_toc_add_numbers(self, collection):
    """Append a tab plus the '9999' placeholder to each entry paragraph
    (the index title itself is left untouched)."""
    for level, el1 in collection:
        if (el1.tag == 'text:p' and
                el1.text != 'Table of Contents'):
            el2 = SubElement(el1, 'text:tab')
            el2.tail = '9999'
def visit_transition(self, node):
    """Render a transition as a horizontal-rule paragraph."""
    self.append_p('horizontalline')
def depart_transition(self, node):
    pass
#
# Admonitions
#
def visit_warning(self, node):
    """Admonition visitors all delegate to generate_admonition with
    their label; departures pop the body style it pushed."""
    self.generate_admonition(node, 'warning')
def depart_warning(self, node):
    self.paragraph_style_stack.pop()
def visit_attention(self, node):
    self.generate_admonition(node, 'attention')
depart_attention = depart_warning
def visit_caution(self, node):
    self.generate_admonition(node, 'caution')
depart_caution = depart_warning
def visit_danger(self, node):
    self.generate_admonition(node, 'danger')
depart_danger = depart_warning
def visit_error(self, node):
    self.generate_admonition(node, 'error')
depart_error = depart_warning
def visit_hint(self, node):
    self.generate_admonition(node, 'hint')
depart_hint = depart_warning
def visit_important(self, node):
    self.generate_admonition(node, 'important')
depart_important = depart_warning
def visit_note(self, node):
    self.generate_admonition(node, 'note')
depart_note = depart_warning
def visit_tip(self, node):
    self.generate_admonition(node, 'tip')
depart_tip = depart_warning
def visit_admonition(self, node):
    """Generic admonition: use its title child (or first class) as the
    header text."""
    #import pdb; pdb.set_trace()
    title = None
    for child in node.children:
        if child.tagname == 'title':
            title = child.astext()
    if title is None:
        classes1 = node.get('classes')
        if classes1:
            title = classes1[0]
    self.generate_admonition(node, 'generic', title)
depart_admonition = depart_warning
def generate_admonition(self, node, label, title=None):
    """Emit the admonition header paragraph and push the matching body
    paragraph style (popped by the depart_* handler)."""
    el1 = SubElement(self.current_element, 'text:p', attrib = {
        'text:style-name': self.rststyle('admon-%s-hdr', ( label, )),
        })
    if title:
        el1.text = title
    else:
        el1.text = '%s!' % (label.capitalize(), )
    s1 = self.rststyle('admon-%s-body', ( label, ))
    self.paragraph_style_stack.append(s1)
#
# Roles (e.g. subscript, superscript, strong, ...
#
def visit_subscript(self, node):
    """Render subscript text in a dedicated span style."""
    # NOTE(review): style name is hard-coded rather than routed through
    # self.rststyle like the other visitors -- TODO confirm intent.
    el = self.append_child('text:span', attrib={
        'text:style-name': 'rststyle-subscript',
        })
    self.set_current_element(el)
def depart_subscript(self, node):
    self.set_to_parent()
def visit_superscript(self, node):
    """Render superscript text in a dedicated span style."""
    el = self.append_child('text:span', attrib={
        'text:style-name': 'rststyle-superscript',
        })
    self.set_current_element(el)
def depart_superscript(self, node):
    self.set_to_parent()
# Use an own reader to modify transformations done.
class Reader(standalone.Reader):
    """Standalone reader that drops the DanglingReferences transform
    when hyperlink generation is disabled (create_links false), so
    unresolved references are not reported for plain-text output."""
    def get_transforms(self):
        transforms = standalone.Reader.get_transforms(self)
        if self.settings.create_links:
            return transforms
        return [transform for transform in transforms
                if transform is not references.DanglingReferences]
| Python |
# $Id: __init__.py 6315 2010-04-28 12:28:33Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Simple HyperText Markup Language document tree Writer.
The output conforms to the XHTML version 1.0 Transitional DTD
(*almost* strict). The output contains a minimum of formatting
information. The cascading style sheet "html4css1.css" is required
for proper viewing with a modern graphical browser.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import time
import re
try:
import Image # check for the Python Imaging Library
except ImportError:
Image = None
import docutils
from docutils import frontend, nodes, utils, writers, languages, io
from docutils.transforms import writer_aux
class Writer(writers.Writer):
supported = ('html', 'html4css1', 'xhtml')
"""Formats this writer supports."""
default_stylesheet = 'html4css1.css'
default_stylesheet_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), default_stylesheet))
default_template = 'template.txt'
default_template_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), default_template))
settings_spec = (
'HTML-Specific Options',
None,
(('Specify the template file (UTF-8 encoded). Default is "%s".'
% default_template_path,
['--template'],
{'default': default_template_path, 'metavar': '<file>'}),
('Specify comma separated list of stylesheet URLs. '
'Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'metavar': '<URL>', 'overrides': 'stylesheet_path'}),
('Specify comma separated list of stylesheet paths. '
'With --link-stylesheet, '
'the path is rewritten relative to the output HTML file. '
'Default: "%s"' % default_stylesheet_path,
['--stylesheet-path'],
{'metavar': '<file>', 'overrides': 'stylesheet',
'default': default_stylesheet_path}),
('Embed the stylesheet(s) in the output HTML file. The stylesheet '
'files must be accessible during processing. This is the default.',
['--embed-stylesheet'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Link to the stylesheet(s) in the output HTML file. '
'Default: embed stylesheets.',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Specify the initial header level. Default is 1 for "<h1>". '
'Does not affect document title & subtitle (see --no-doc-title).',
['--initial-header-level'],
{'choices': '1 2 3 4 5 6'.split(), 'default': '1',
'metavar': '<level>'}),
('Specify the maximum width (in characters) for one-column field '
'names. Longer field names will span an entire row of the table '
'used to render the field list. Default is 14 characters. '
'Use 0 for "no limit".',
['--field-name-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Specify the maximum width (in characters) for options in option '
'lists. Longer options will span an entire row of the table used '
'to render the option list. Default is 14 characters. '
'Use 0 for "no limit".',
['--option-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "brackets".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'brackets',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Remove extra vertical whitespace between items of "simple" bullet '
'lists and enumerated lists. Default: enabled.',
['--compact-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple bullet and enumerated lists.',
['--no-compact-lists'],
{'dest': 'compact_lists', 'action': 'store_false'}),
('Remove extra vertical whitespace between items of simple field '
'lists. Default: enabled.',
['--compact-field-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple field lists.',
['--no-compact-field-lists'],
{'dest': 'compact_field_lists', 'action': 'store_false'}),
('Added to standard table classes. '
'Defined styles: "borderless". Default: ""',
['--table-style'],
{'default': ''}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Obfuscate email addresses to confuse harvesters while still '
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),))
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
relative_path_settings = ('stylesheet_path',)
config_section = 'html4css1 writer'
config_section_dependencies = ('writers',)
visitor_attributes = (
'head_prefix', 'head', 'stylesheet', 'body_prefix',
'body_pre_docinfo', 'docinfo', 'body', 'body_suffix',
'title', 'subtitle', 'header', 'footer', 'meta', 'fragment',
'html_prolog', 'html_head', 'html_title', 'html_subtitle',
'html_body')
def get_transforms(self):
return writers.Writer.get_transforms(self) + [writer_aux.Admonitions]
    def __init__(self):
        writers.Writer.__init__(self)
        # Class instantiated by `translate` to render the document tree.
        self.translator_class = HTMLTranslator
def translate(self):
self.visitor = visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
for attr in self.visitor_attributes:
setattr(self, attr, getattr(visitor, attr))
self.output = self.apply_template()
def apply_template(self):
template_file = open(self.document.settings.template, 'rb')
template = unicode(template_file.read(), 'utf-8')
template_file.close()
subs = self.interpolation_dict()
return template % subs
def interpolation_dict(self):
subs = {}
settings = self.document.settings
for attr in self.visitor_attributes:
subs[attr] = ''.join(getattr(self, attr)).rstrip('\n')
subs['encoding'] = settings.output_encoding
subs['version'] = docutils.__version__
return subs
def assemble_parts(self):
writers.Writer.assemble_parts(self)
for part in self.visitor_attributes:
self.parts[part] = ''.join(getattr(self, part))
class HTMLTranslator(nodes.NodeVisitor):
"""
This HTML writer has been optimized to produce visually compact
lists (less vertical whitespace). HTML's mixed content models
allow list items to contain "<li><p>body elements</p></li>" or
"<li>just text</li>" or even "<li>text<p>and body
elements</p>combined</li>", each with different effects. It would
be best to stick with strict body elements in list items, but they
affect vertical spacing in browsers (although they really
shouldn't).
Here is an outline of the optimization:
- Check for and omit <p> tags in "simple" lists: list items
contain either a single paragraph, a nested simple list, or a
paragraph followed by a nested simple list. This means that
this list can be compact:
- Item 1.
- Item 2.
But this list cannot be compact:
- Item 1.
This second paragraph forces space between list items.
- Item 2.
- In non-list contexts, omit <p> tags on a paragraph if that
paragraph is the only child of its parent (footnotes & citations
are allowed a label first).
- Regardless of the above, in definitions, table cells, field bodies,
option descriptions, and list items, mark the first child with
'class="first"' and the last child with 'class="last"'. The stylesheet
sets the margins (top & bottom respectively) to 0 for these elements.
The ``no_compact_lists`` setting (``--no-compact-lists`` command-line
option) disables list whitespace optimization.
"""
xml_declaration = '<?xml version="1.0" encoding="%s" ?>\n'
doctype = (
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
head_prefix_template = ('<html xmlns="http://www.w3.org/1999/xhtml"'
' xml:lang="%s" lang="%s">\n<head>\n')
content_type = ('<meta http-equiv="Content-Type"'
' content="text/html; charset=%s" />\n')
generator = ('<meta name="generator" content="Docutils %s: '
'http://docutils.sourceforge.net/" />\n')
stylesheet_link = '<link rel="stylesheet" href="%s" type="text/css" />\n'
embedded_stylesheet = '<style type="text/css">\n\n%s\n</style>\n'
words_and_spaces = re.compile(r'\S+| +|\n')
sollbruchstelle = re.compile(r'.+\W\W.+|[-?].+', re.U) # wrap point inside word
    def __init__(self, document):
        """Initialize all output accumulators and traversal state."""
        nodes.NodeVisitor.__init__(self, document)
        self.settings = settings = document.settings
        lcode = settings.language_code
        self.language = languages.get_language(lcode)
        self.meta = [self.content_type % settings.output_encoding,
                     self.generator % docutils.__version__]
        self.head_prefix = []
        self.html_prolog = []
        if settings.xml_declaration:
            self.head_prefix.append(self.xml_declaration
                                    % settings.output_encoding)
            # encoding not interpolated:
            self.html_prolog.append(self.xml_declaration)
        self.head_prefix.extend([self.doctype,
                                 self.head_prefix_template % (lcode, lcode)])
        self.html_prolog.append(self.doctype)
        self.head = self.meta[:]
        # stylesheets
        styles = utils.get_stylesheet_list(settings)
        if settings.stylesheet_path and not(settings.embed_stylesheet):
            # Rewrite stylesheet paths relative to the output destination.
            styles = [utils.relative_path(settings._destination, sheet)
                      for sheet in styles]
        if settings.embed_stylesheet:
            settings.record_dependencies.add(*styles)
            self.stylesheet = [self.embedded_stylesheet %
                               io.FileInput(source_path=sheet, encoding='utf-8').read()
                               for sheet in styles]
        else: # link to stylesheets
            self.stylesheet = [self.stylesheet_link % self.encode(stylesheet)
                               for stylesheet in styles]
        self.body_prefix = ['</head>\n<body>\n']
        # document title, subtitle display
        self.body_pre_docinfo = []
        # author, date, etc.
        self.docinfo = []
        self.body = []
        self.fragment = []
        self.body_suffix = ['</body>\n</html>\n']
        self.section_level = 0
        self.initial_header_level = int(settings.initial_header_level)
        # A heterogeneous stack used in conjunction with the tree traversal.
        # Make sure that the pops correspond to the pushes:
        self.context = []
        self.topic_classes = []
        self.colspecs = []
        self.compact_p = 1
        self.compact_simple = None
        self.compact_field_list = None
        self.in_docinfo = None
        self.in_sidebar = None
        # Per-part accumulators exposed via the writer's visitor_attributes.
        self.title = []
        self.subtitle = []
        self.header = []
        self.footer = []
        self.html_head = [self.content_type] # charset not interpolated
        self.html_title = []
        self.html_subtitle = []
        self.html_body = []
        self.in_document_title = 0
        self.in_mailto = 0
        self.author_in_authors = None
def astext(self):
return ''.join(self.head_prefix + self.head
+ self.stylesheet + self.body_prefix
+ self.body_pre_docinfo + self.docinfo
+ self.body + self.body_suffix)
def encode(self, text):
"""Encode special characters in `text` & return."""
# @@@ A codec to do these and all other HTML entities would be nice.
text = unicode(text)
return text.translate({
ord('&'): u'&',
ord('<'): u'<',
ord('"'): u'"',
ord('>'): u'>',
ord('@'): u'@', # may thwart some address harvesters
# TODO: convert non-breaking space only if needed?
0xa0: u' '}) # non-breaking space
def cloak_mailto(self, uri):
"""Try to hide a mailto: URL from harvesters."""
# Encode "@" using a URL octet reference (see RFC 1738).
# Further cloaking with HTML entities will be done in the
# `attval` function.
return uri.replace('@', '%40')
def cloak_email(self, addr):
"""Try to hide the link text of a email link from harversters."""
# Surround at-signs and periods with <span> tags. ("@" has
# already been encoded to "@" by the `encode` method.)
addr = addr.replace('@', '<span>@</span>')
addr = addr.replace('.', '<span>.</span>')
return addr
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, HTML encode, and return attribute value text."""
encoded = self.encode(whitespace.sub(' ', text))
if self.in_mailto and self.settings.cloak_email_addresses:
# Cloak at-signs ("%40") and periods with HTML entities.
encoded = encoded.replace('%40', '%40')
encoded = encoded.replace('.', '.')
return encoded
    def starttag(self, node, tagname, suffix='\n', empty=0, **attributes):
        """
        Construct and return a start tag given a node (id & class attributes
        are extracted), tag name, and optional attributes.
        """
        tagname = tagname.lower()
        prefix = []
        atts = {}
        ids = []
        # Normalize explicit attribute names to lowercase.
        for (name, value) in attributes.items():
            atts[name.lower()] = value
        # Merge the node's own classes with any 'class' keyword argument.
        classes = node.get('classes', [])
        if 'class' in atts:
            classes.append(atts['class'])
        if classes:
            atts['class'] = ' '.join(classes)
        assert 'id' not in atts
        ids.extend(node.get('ids', []))
        if 'ids' in atts:
            ids.extend(atts['ids'])
            del atts['ids']
        if ids:
            atts['id'] = ids[0]
            for id in ids[1:]:
                # Add empty "span" elements for additional IDs. Note
                # that we cannot use empty "a" elements because there
                # may be targets inside of references, but nested "a"
                # elements aren't allowed in XHTML (even if they do
                # not all have a "href" attribute).
                if empty:
                    # Empty tag. Insert target right in front of element.
                    prefix.append('<span id="%s"></span>' % id)
                else:
                    # Non-empty tag. Place the auxiliary <span> tag
                    # *inside* the element, as the first child.
                    suffix += '<span id="%s"></span>' % id
        # Sort for deterministic attribute order (Python 2: items() is a list).
        attlist = atts.items()
        attlist.sort()
        parts = [tagname]
        for name, value in attlist:
            # value=None was used for boolean attributes without
            # value, but this isn't supported by XHTML.
            assert value is not None
            if isinstance(value, list):
                values = [unicode(v) for v in value]
                parts.append('%s="%s"' % (name.lower(),
                                          self.attval(' '.join(values))))
            else:
                parts.append('%s="%s"' % (name.lower(),
                                          self.attval(unicode(value))))
        if empty:
            infix = ' /'
        else:
            infix = ''
        return ''.join(prefix) + '<%s%s>' % (' '.join(parts), infix) + suffix
    def emptytag(self, node, tagname, suffix='\n', **attributes):
        """Construct and return an XML-compatible empty tag."""
        return self.starttag(node, tagname, suffix, empty=1, **attributes)
    def set_class_on_child(self, node, class_, index=0):
        """
        Set class `class_` on the visible child no. index of `node`.
        Do nothing if node has fewer children than `index`.
        """
        children = [n for n in node if not isinstance(n, nodes.Invisible)]
        try:
            child = children[index]
        except IndexError:
            return
        child['classes'].append(class_)
    def set_first_last(self, node):
        # Mark the first/last visible children; the stylesheet zeroes
        # their top/bottom margins for compact rendering.
        self.set_class_on_child(node, 'first', 0)
        self.set_class_on_child(node, 'last', -1)
    def visit_Text(self, node):
        # All text is HTML-escaped; inside cloaked mailto links the
        # address characters are additionally wrapped by `cloak_email`.
        text = node.astext()
        encoded = self.encode(text)
        if self.in_mailto and self.settings.cloak_email_addresses:
            encoded = self.cloak_email(encoded)
        self.body.append(encoded)
    def depart_Text(self, node):
        pass
    def visit_abbreviation(self, node):
        # @@@ implementation incomplete ("title" attribute)
        self.body.append(self.starttag(node, 'abbr', ''))
    def depart_abbreviation(self, node):
        self.body.append('</abbr>')
    def visit_acronym(self, node):
        # @@@ implementation incomplete ("title" attribute)
        self.body.append(self.starttag(node, 'acronym', ''))
    def depart_acronym(self, node):
        self.body.append('</acronym>')
    def visit_address(self, node):
        # Rendered as a docinfo row whose body is preformatted text.
        self.visit_docinfo_item(node, 'address', meta=None)
        self.body.append(self.starttag(node, 'pre', CLASS='address'))
    def depart_address(self, node):
        self.body.append('\n</pre>\n')
        self.depart_docinfo_item()
    def visit_admonition(self, node):
        self.body.append(self.starttag(node, 'div'))
        self.set_first_last(node)
    def depart_admonition(self, node=None):
        self.body.append('</div>\n')
    # (prefix, suffix) pairs keyed by the --attribution setting value.
    attribution_formats = {'dash': ('—', ''),
                           'parentheses': ('(', ')'),
                           'parens': ('(', ')'),
                           'none': ('', '')}
    def visit_attribution(self, node):
        prefix, suffix = self.attribution_formats[self.settings.attribution]
        self.context.append(suffix)
        self.body.append(
            self.starttag(node, 'p', prefix, CLASS='attribution'))
    def depart_attribution(self, node):
        self.body.append(self.context.pop() + '</p>\n')
    def visit_author(self, node):
        # Inside an "authors" group, authors after the first are separated
        # by <br />; a lone author gets a regular docinfo row.
        if isinstance(node.parent, nodes.authors):
            if self.author_in_authors:
                self.body.append('\n<br />')
        else:
            self.visit_docinfo_item(node, 'author')
    def depart_author(self, node):
        if isinstance(node.parent, nodes.authors):
            self.author_in_authors += 1
        else:
            self.depart_docinfo_item()
    def visit_authors(self, node):
        self.visit_docinfo_item(node, 'authors')
        self.author_in_authors = 0 # initialize counter
    def depart_authors(self, node):
        self.depart_docinfo_item()
        self.author_in_authors = None
    def visit_block_quote(self, node):
        self.body.append(self.starttag(node, 'blockquote'))
    def depart_block_quote(self, node):
        self.body.append('</blockquote>\n')
    def check_simple_list(self, node):
        """Check for a simple list that can be rendered compactly."""
        visitor = SimpleListChecker(self.document)
        try:
            node.walk(visitor)
        except nodes.NodeFound:
            return None
        else:
            return 1
    def is_compactable(self, node):
        # Compact when explicitly marked "compact", or when the setting is
        # on, the list is not marked "open", and it is nested in a compact
        # context (or is a contents topic, or is structurally simple).
        return ('compact' in node['classes']
                or (self.settings.compact_lists
                    and 'open' not in node['classes']
                    and (self.compact_simple
                         or self.topic_classes == ['contents']
                         or self.check_simple_list(node))))
    def visit_bullet_list(self, node):
        atts = {}
        old_compact_simple = self.compact_simple
        # Save compaction state; restored in depart_bullet_list.
        self.context.append((self.compact_simple, self.compact_p))
        self.compact_p = None
        self.compact_simple = self.is_compactable(node)
        if self.compact_simple and not old_compact_simple:
            atts['class'] = 'simple'
        self.body.append(self.starttag(node, 'ul', **atts))
    def depart_bullet_list(self, node):
        self.compact_simple, self.compact_p = self.context.pop()
        self.body.append('</ul>\n')
    def visit_caption(self, node):
        self.body.append(self.starttag(node, 'p', '', CLASS='caption'))
    def depart_caption(self, node):
        self.body.append('</p>\n')
    def visit_citation(self, node):
        # Citations render as a two-column table: label | body.
        self.body.append(self.starttag(node, 'table',
                                       CLASS='docutils citation',
                                       frame="void", rules="none"))
        self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
                         '<tbody valign="top">\n'
                         '<tr>')
        self.footnote_backrefs(node)
    def depart_citation(self, node):
        self.body.append('</td></tr>\n'
                         '</tbody>\n</table>\n')
    def visit_citation_reference(self, node):
        href = '#' + node['refid']
        self.body.append(self.starttag(
            node, 'a', '[', CLASS='citation-reference', href=href))
    def depart_citation_reference(self, node):
        self.body.append(']</a>')
    def visit_classifier(self, node):
        self.body.append(' <span class="classifier-delimiter">:</span> ')
        self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))
    def depart_classifier(self, node):
        self.body.append('</span>')
    def visit_colspec(self, node):
        # Widths are emitted later, in one pass, by `write_colspecs`.
        self.colspecs.append(node)
        # "stubs" list is an attribute of the tgroup element:
        node.parent.stubs.append(node.attributes.get('stub'))
    def depart_colspec(self, node):
        pass
    def write_colspecs(self):
        """Emit <col> tags with percentage widths for collected colspecs."""
        width = 0
        for node in self.colspecs:
            width += node['colwidth']
        for node in self.colspecs:
            colwidth = int(node['colwidth'] * 100.0 / width + 0.5)
            self.body.append(self.emptytag(node, 'col',
                                           width='%i%%' % colwidth))
        self.colspecs = []
    def visit_comment(self, node,
                      sub=re.compile('-(?=-)').sub):
        """Escape double-dashes in comment text."""
        self.body.append('<!-- %s -->\n' % sub('- ', node.astext()))
        # Content already processed:
        raise nodes.SkipNode
    def visit_compound(self, node):
        self.body.append(self.starttag(node, 'div', CLASS='compound'))
        if len(node) > 1:
            node[0]['classes'].append('compound-first')
            node[-1]['classes'].append('compound-last')
            for child in node[1:-1]:
                child['classes'].append('compound-middle')
    def depart_compound(self, node):
        self.body.append('</div>\n')
    def visit_container(self, node):
        self.body.append(self.starttag(node, 'div', CLASS='container'))
    def depart_container(self, node):
        self.body.append('</div>\n')
    def visit_contact(self, node):
        self.visit_docinfo_item(node, 'contact', meta=None)
    def depart_contact(self, node):
        self.depart_docinfo_item()
    def visit_copyright(self, node):
        self.visit_docinfo_item(node, 'copyright')
    def depart_copyright(self, node):
        self.depart_docinfo_item()
    def visit_date(self, node):
        self.visit_docinfo_item(node, 'date')
    def depart_date(self, node):
        self.depart_docinfo_item()
    def visit_decoration(self, node):
        pass
    def depart_decoration(self, node):
        pass
    def visit_definition(self, node):
        # Closes the pending <dt> (opened for the term, presumably by
        # visit_term — not visible here) before opening the <dd>.
        self.body.append('</dt>\n')
        self.body.append(self.starttag(node, 'dd', ''))
        self.set_first_last(node)
    def depart_definition(self, node):
        self.body.append('</dd>\n')
    def visit_definition_list(self, node):
        self.body.append(self.starttag(node, 'dl', CLASS='docutils'))
    def depart_definition_list(self, node):
        self.body.append('</dl>\n')
    def visit_definition_list_item(self, node):
        pass
    def depart_definition_list_item(self, node):
        pass
    def visit_description(self, node):
        self.body.append(self.starttag(node, 'td', ''))
        self.set_first_last(node)
    def depart_description(self, node):
        self.body.append('</td>')
    def visit_docinfo(self, node):
        # Remember where docinfo output starts; depart_docinfo moves the
        # collected fragment out of `body` and into `docinfo`.
        self.context.append(len(self.body))
        self.body.append(self.starttag(node, 'table',
                                       CLASS='docinfo',
                                       frame="void", rules="none"))
        self.body.append('<col class="docinfo-name" />\n'
                         '<col class="docinfo-content" />\n'
                         '<tbody valign="top">\n')
        self.in_docinfo = 1
    def depart_docinfo(self, node):
        self.body.append('</tbody>\n</table>\n')
        self.in_docinfo = None
        start = self.context.pop()
        self.docinfo = self.body[start:]
        self.body = []
    def visit_docinfo_item(self, node, name, meta=1):
        # Shared helper for bibliographic fields: optional <meta> tag plus
        # a two-cell docinfo table row with a language-translated label.
        if meta:
            meta_tag = '<meta name="%s" content="%s" />\n' \
                       % (name, self.attval(node.astext()))
            self.add_meta(meta_tag)
        self.body.append(self.starttag(node, 'tr', ''))
        self.body.append('<th class="docinfo-name">%s:</th>\n<td>'
                         % self.language.labels[name])
        if len(node):
            if isinstance(node[0], nodes.Element):
                node[0]['classes'].append('first')
            if isinstance(node[-1], nodes.Element):
                node[-1]['classes'].append('last')
    def depart_docinfo_item(self):
        self.body.append('</td></tr>\n')
    def visit_doctest_block(self, node):
        self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))
    def depart_doctest_block(self, node):
        self.body.append('\n</pre>\n')
    def visit_document(self, node):
        self.head.append('<title>%s</title>\n'
                         % self.encode(node.get('title', '')))
    def depart_document(self, node):
        # Assemble the html_* document parts from the accumulated pieces.
        self.fragment.extend(self.body)
        self.body_prefix.append(self.starttag(node, 'div', CLASS='document'))
        self.body_suffix.insert(0, '</div>\n')
        # skip content-type meta tag with interpolated charset value:
        self.html_head.extend(self.head[1:])
        self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
                              + self.docinfo + self.body
                              + self.body_suffix[:-1])
        assert not self.context, 'len(context) = %s' % len(self.context)
    def visit_emphasis(self, node):
        self.body.append(self.starttag(node, 'em', ''))
    def depart_emphasis(self, node):
        self.body.append('</em>')
def visit_entry(self, node):
atts = {'class': []}
if isinstance(node.parent.parent, nodes.thead):
atts['class'].append('head')
if node.parent.parent.parent.stubs[node.parent.column]:
# "stubs" list is an attribute of the tgroup element
atts['class'].append('stub')
if atts['class']:
tagname = 'th'
atts['class'] = ' '.join(atts['class'])
else:
tagname = 'td'
del atts['class']
node.parent.column += 1
if 'morerows' in node:
atts['rowspan'] = node['morerows'] + 1
if 'morecols' in node:
atts['colspan'] = node['morecols'] + 1
node.parent.column += node['morecols']
self.body.append(self.starttag(node, tagname, '', **atts))
self.context.append('</%s>\n' % tagname.lower())
if len(node) == 0: # empty cell
self.body.append(' ')
self.set_first_last(node)
    def depart_entry(self, node):
        # Emit the closing tag pushed by visit_entry.
        self.body.append(self.context.pop())
    def visit_enumerated_list(self, node):
        """
        The 'start' attribute does not conform to HTML 4.01's strict.dtd, but
        CSS1 doesn't help. CSS2 isn't widely enough supported yet to be
        usable.
        """
        atts = {}
        if 'start' in node:
            atts['start'] = node['start']
        if 'enumtype' in node:
            atts['class'] = node['enumtype']
        # @@@ To do: prefix, suffix. How? Change prefix/suffix to a
        # single "format" attribute? Use CSS2?
        old_compact_simple = self.compact_simple
        # Save compaction state; restored in depart_enumerated_list.
        self.context.append((self.compact_simple, self.compact_p))
        self.compact_p = None
        self.compact_simple = self.is_compactable(node)
        if self.compact_simple and not old_compact_simple:
            atts['class'] = (atts.get('class', '') + ' simple').strip()
        self.body.append(self.starttag(node, 'ol', **atts))
    def depart_enumerated_list(self, node):
        self.compact_simple, self.compact_p = self.context.pop()
        self.body.append('</ol>\n')
    def visit_field(self, node):
        self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
    def depart_field(self, node):
        self.body.append('</tr>\n')
    def visit_field_body(self, node):
        self.body.append(self.starttag(node, 'td', '', CLASS='field-body'))
        self.set_class_on_child(node, 'first', 0)
        field = node.parent
        if (self.compact_field_list or
            isinstance(field.parent, nodes.docinfo) or
            field.parent.index(field) == len(field.parent) - 1):
            # If we are in a compact list, the docinfo, or if this is
            # the last field of the field list, do not add vertical
            # space after last element.
            self.set_class_on_child(node, 'last', -1)
    def depart_field_body(self, node):
        self.body.append('</td>\n')
    def visit_field_list(self, node):
        # Decide whether the whole field list can render compactly: the
        # "compact" class forces it; otherwise the setting allows it and
        # every field body must be empty or a single paragraph/line block.
        self.context.append((self.compact_field_list, self.compact_p))
        self.compact_p = None
        if 'compact' in node['classes']:
            self.compact_field_list = 1
        elif (self.settings.compact_field_lists
              and 'open' not in node['classes']):
            self.compact_field_list = 1
        if self.compact_field_list:
            for field in node:
                field_body = field[-1]
                assert isinstance(field_body, nodes.field_body)
                children = [n for n in field_body
                            if not isinstance(n, nodes.Invisible)]
                if not (len(children) == 0 or
                        len(children) == 1 and
                        isinstance(children[0],
                                   (nodes.paragraph, nodes.line_block))):
                    self.compact_field_list = 0
                    break
        self.body.append(self.starttag(node, 'table', frame='void',
                                       rules='none',
                                       CLASS='docutils field-list'))
        self.body.append('<col class="field-name" />\n'
                         '<col class="field-body" />\n'
                         '<tbody valign="top">\n')
    def depart_field_list(self, node):
        self.body.append('</tbody>\n</table>\n')
        self.compact_field_list, self.compact_p = self.context.pop()
def visit_field_name(self, node):
atts = {}
if self.in_docinfo:
atts['class'] = 'docinfo-name'
else:
atts['class'] = 'field-name'
if ( self.settings.field_name_limit
and len(node.astext()) > self.settings.field_name_limit):
atts['colspan'] = 2
self.context.append('</tr>\n<tr><td> </td>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'th', '', **atts))
    def depart_field_name(self, node):
        self.body.append(':</th>')
        self.body.append(self.context.pop())
    def visit_figure(self, node):
        atts = {'class': 'figure'}
        if node.get('width'):
            atts['style'] = 'width: %s' % node['width']
        if node.get('align'):
            atts['class'] += " align-" + node['align']
        self.body.append(self.starttag(node, 'div', **atts))
    def depart_figure(self, node):
        self.body.append('</div>\n')
    def visit_footer(self, node):
        # Remember where footer output starts; depart_footer moves it
        # from `body` into `footer` and `body_suffix`.
        self.context.append(len(self.body))
    def depart_footer(self, node):
        start = self.context.pop()
        footer = [self.starttag(node, 'div', CLASS='footer'),
                  '<hr class="footer" />\n']
        footer.extend(self.body[start:])
        footer.append('\n</div>\n')
        self.footer.extend(footer)
        self.body_suffix[:0] = footer
        del self.body[start:]
    def visit_footnote(self, node):
        # Footnotes render as a two-column table: label | body.
        self.body.append(self.starttag(node, 'table',
                                       CLASS='docutils footnote',
                                       frame="void", rules="none"))
        self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
                         '<tbody valign="top">\n'
                         '<tr>')
        self.footnote_backrefs(node)
    def footnote_backrefs(self, node):
        # Pushes three context entries, later consumed by visit_label and
        # depart_label (popped in reverse order).
        backlinks = []
        backrefs = node['backrefs']
        if self.settings.footnote_backlinks and backrefs:
            if len(backrefs) == 1:
                # Single backref: the label itself becomes the backlink.
                self.context.append('')
                self.context.append('</a>')
                self.context.append('<a class="fn-backref" href="#%s">'
                                    % backrefs[0])
            else:
                # Multiple backrefs: numbered links after the label.
                i = 1
                for backref in backrefs:
                    backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
                                     % (backref, i))
                    i += 1
                self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
                self.context += ['', '']
        else:
            self.context.append('')
            self.context += ['', '']
        # If the node does not only consist of a label.
        if len(node) > 1:
            # If there are preceding backlinks, we do not set class
            # 'first', because we need to retain the top-margin.
            if not backlinks:
                node[1]['classes'].append('first')
            node[-1]['classes'].append('last')
    def depart_footnote(self, node):
        self.body.append('</td></tr>\n'
                         '</tbody>\n</table>\n')
    def visit_footnote_reference(self, node):
        href = '#' + node['refid']
        # Render per the --footnote-references setting: "[1]" or <sup>1</sup>.
        format = self.settings.footnote_references
        if format == 'brackets':
            suffix = '['
            self.context.append(']')
        else:
            assert format == 'superscript'
            suffix = '<sup>'
            self.context.append('</sup>')
        self.body.append(self.starttag(node, 'a', suffix,
                                       CLASS='footnote-reference', href=href))
    def depart_footnote_reference(self, node):
        self.body.append(self.context.pop() + '</a>')
    def visit_generated(self, node):
        pass
    def depart_generated(self, node):
        pass
    def visit_header(self, node):
        # Remember where header output starts; depart_header moves it.
        self.context.append(len(self.body))
    def depart_header(self, node):
        start = self.context.pop()
        header = [self.starttag(node, 'div', CLASS='header')]
        header.extend(self.body[start:])
        header.append('\n<hr class="header"/>\n</div>\n')
        self.body_prefix.extend(header)
        self.header.extend(header)
        del self.body[start:]
    def visit_image(self, node):
        atts = {}
        uri = node['uri']
        # place SVG and SWF images in an <object> element
        types = {'.svg': 'image/svg+xml',
                 '.swf': 'application/x-shockwave-flash'}
        ext = os.path.splitext(uri)[1].lower()
        if ext in ('.svg', '.swf'):
            atts['data'] = uri
            atts['type'] = types[ext]
        else:
            atts['src'] = uri
            atts['alt'] = node.get('alt', uri)
        # image size
        if 'width' in node:
            atts['width'] = node['width']
        if 'height' in node:
            atts['height'] = node['height']
        if 'scale' in node:
            if Image and not ('width' in node and 'height' in node):
                # Ask PIL for the natural size so "scale" has a base.
                try:
                    im = Image.open(str(uri))
                except (IOError, # Source image can't be found or opened
                        UnicodeError): # PIL doesn't like Unicode paths.
                    pass
                else:
                    if 'width' not in atts:
                        atts['width'] = str(im.size[0])
                    if 'height' not in atts:
                        atts['height'] = str(im.size[1])
                    del im
            for att_name in 'width', 'height':
                if att_name in atts:
                    match = re.match(r'([0-9.]+)(\S*)$', atts[att_name])
                    assert match
                    atts[att_name] = '%s%s' % (
                        float(match.group(1)) * (float(node['scale']) / 100),
                        match.group(2))
        # Move width/height into an inline style attribute.
        style = []
        for att_name in 'width', 'height':
            if att_name in atts:
                if re.match(r'^[0-9.]+$', atts[att_name]):
                    # Interpret unitless values as pixels.
                    atts[att_name] += 'px'
                style.append('%s: %s;' % (att_name, atts[att_name]))
                del atts[att_name]
        if style:
            atts['style'] = ' '.join(style)
        if (isinstance(node.parent, nodes.TextElement) or
            (isinstance(node.parent, nodes.reference) and
             not isinstance(node.parent.parent, nodes.TextElement))):
            # Inline context or surrounded by <a>...</a>.
            suffix = ''
        else:
            suffix = '\n'
        if 'align' in node:
            atts['class'] = 'align-%s' % node['align']
        self.context.append('')
        if ext in ('.svg', '.swf'): # place in an object element,
            # do NOT use an empty tag: incorrect rendering in browsers
            self.body.append(self.starttag(node, 'object', suffix, **atts) +
                             node.get('alt', uri) + '</object>' + suffix)
        else:
            self.body.append(self.emptytag(node, 'img', suffix, **atts))
    def depart_image(self, node):
        self.body.append(self.context.pop())
    def visit_inline(self, node):
        self.body.append(self.starttag(node, 'span', ''))
    def depart_inline(self, node):
        self.body.append('</span>')
    def visit_label(self, node):
        # Context added in footnote_backrefs.
        self.body.append(self.starttag(node, 'td', '%s[' % self.context.pop(),
                                       CLASS='label'))
    def depart_label(self, node):
        # Context added in footnote_backrefs.
        self.body.append(']%s</td><td>%s' % (self.context.pop(), self.context.pop()))
    def visit_legend(self, node):
        self.body.append(self.starttag(node, 'div', CLASS='legend'))
    def depart_legend(self, node):
        self.body.append('</div>\n')
    def visit_line(self, node):
        self.body.append(self.starttag(node, 'div', suffix='', CLASS='line'))
        if not len(node):
            # Empty line: force a break so the div still takes up height.
            self.body.append('<br />')
    def depart_line(self, node):
        self.body.append('</div>\n')
    def visit_line_block(self, node):
        self.body.append(self.starttag(node, 'div', CLASS='line-block'))
    def depart_line_block(self, node):
        self.body.append('</div>\n')
    def visit_list_item(self, node):
        self.body.append(self.starttag(node, 'li', ''))
        if len(node):
            node[0]['classes'].append('first')
    def depart_list_item(self, node):
        self.body.append('</li>\n')
def visit_literal(self, node):
"""Process text to prevent tokens from wrapping."""
self.body.append(
self.starttag(node, 'tt', '', CLASS='docutils literal'))
text = node.astext()
for token in self.words_and_spaces.findall(text):
if token.strip():
# Protect text like "--an-option" and the regular expression
# ``[+]?(\d+(\.\d*)?|\.\d+)`` from bad line wrapping
if self.sollbruchstelle.search(token):
self.body.append('<span class="pre">%s</span>'
% self.encode(token))
else:
self.body.append(self.encode(token))
elif token in ('\n', ' '):
# Allow breaks at whitespace:
self.body.append(token)
else:
# Protect runs of multiple spaces; the last space can wrap:
self.body.append(' ' * (len(token) - 1) + ' ')
self.body.append('</tt>')
# Content already processed:
raise nodes.SkipNode
    def visit_literal_block(self, node):
        self.body.append(self.starttag(node, 'pre', CLASS='literal-block'))
    def depart_literal_block(self, node):
        self.body.append('\n</pre>\n')
    def visit_meta(self, node):
        meta = self.emptytag(node, 'meta', **node.non_default_attributes())
        self.add_meta(meta)
    def depart_meta(self, node):
        pass
    def add_meta(self, tag):
        # Meta tags go into both the <head> and the "meta" document part.
        self.meta.append(tag)
        self.head.append(tag)
    def visit_option(self, node):
        # Context top holds the option count pushed by visit_option_group;
        # separate options after the first with a comma.
        if self.context[-1]:
            self.body.append(', ')
        self.body.append(self.starttag(node, 'span', '', CLASS='option'))
    def depart_option(self, node):
        self.body.append('</span>')
        self.context[-1] += 1
    def visit_option_argument(self, node):
        self.body.append(node.get('delimiter', ' '))
        self.body.append(self.starttag(node, 'var', ''))
    def depart_option_argument(self, node):
        self.body.append('</var>')
def visit_option_group(self, node):
atts = {}
if ( self.settings.option_limit
and len(node.astext()) > self.settings.option_limit):
atts['colspan'] = 2
self.context.append('</tr>\n<tr><td> </td>')
else:
self.context.append('')
self.body.append(
self.starttag(node, 'td', CLASS='option-group', **atts))
self.body.append('<kbd>')
self.context.append(0) # count number of options
    def depart_option_group(self, node):
        # Pop the option counter, then emit the row-spill fragment (if any).
        self.context.pop()
        self.body.append('</kbd></td>\n')
        self.body.append(self.context.pop())
    def visit_option_list(self, node):
        self.body.append(
            self.starttag(node, 'table', CLASS='docutils option-list',
                          frame="void", rules="none"))
        self.body.append('<col class="option" />\n'
                         '<col class="description" />\n'
                         '<tbody valign="top">\n')
    def depart_option_list(self, node):
        self.body.append('</tbody>\n</table>\n')
    def visit_option_list_item(self, node):
        self.body.append(self.starttag(node, 'tr', ''))
    def depart_option_list_item(self, node):
        self.body.append('</tr>\n')
    def visit_option_string(self, node):
        pass
    def depart_option_string(self, node):
        pass
    def visit_organization(self, node):
        self.visit_docinfo_item(node, 'organization')
    def depart_organization(self, node):
        self.depart_docinfo_item()
    def should_be_compact_paragraph(self, node):
        """
        Determine if the <p> tags around paragraph ``node`` can be omitted.
        """
        if (isinstance(node.parent, nodes.document) or
            isinstance(node.parent, nodes.compound)):
            # Never compact paragraphs in document or compound.
            return 0
        for key, value in node.attlist():
            if (node.is_not_default(key) and
                not (key == 'classes' and value in
                     ([], ['first'], ['last'], ['first', 'last']))):
                # Attribute which needs to survive.
                return 0
        first = isinstance(node.parent[0], nodes.label) # skip label
        for child in node.parent.children[first:]:
            # only first paragraph can be compact
            if isinstance(child, nodes.Invisible):
                continue
            if child is node:
                break
            return 0
        # Count visible, non-label siblings (including this node).
        parent_length = len([n for n in node.parent if not isinstance(
            n, (nodes.Invisible, nodes.label))])
        if ( self.compact_simple
             or self.compact_field_list
             or self.compact_p and parent_length == 1):
            return 1
        return 0
    def visit_paragraph(self, node):
        # Omit <p> tags entirely for compactable paragraphs.
        if self.should_be_compact_paragraph(node):
            self.context.append('')
        else:
            self.body.append(self.starttag(node, 'p', ''))
            self.context.append('</p>\n')
    def depart_paragraph(self, node):
        self.body.append(self.context.pop())
    def visit_problematic(self, node):
        # Link the problematic text to its system message, if any.
        if node.hasattr('refid'):
            self.body.append('<a href="#%s">' % node['refid'])
            self.context.append('</a>')
        else:
            self.context.append('')
        self.body.append(self.starttag(node, 'span', '', CLASS='problematic'))
    def depart_problematic(self, node):
        self.body.append('</span>')
        self.body.append(self.context.pop())
    def visit_raw(self, node):
        if 'html' in node.get('format', '').split():
            # Pass raw HTML through verbatim, wrapped only if it has classes.
            t = isinstance(node.parent, nodes.TextElement) and 'span' or 'div'
            if node['classes']:
                self.body.append(self.starttag(node, t, suffix=''))
            self.body.append(node.astext())
            if node['classes']:
                self.body.append('</%s>' % t)
        # Keep non-HTML raw text out of output:
        raise nodes.SkipNode
def visit_reference(self, node):
atts = {'class': 'reference'}
if 'refuri' in node:
atts['href'] = node['refuri']
if ( self.settings.cloak_email_addresses
and atts['href'].startswith('mailto:')):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = 1
atts['class'] += ' external'
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
atts['href'] = '#' + node['refid']
atts['class'] += ' internal'
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
self.body.append(self.starttag(node, 'a', '', **atts))
    def depart_reference(self, node):
        """Close the anchor opened by `visit_reference`."""
        self.body.append('</a>')
        if not isinstance(node.parent, nodes.TextElement):
            # Stand-alone (image) references: keep the HTML source readable.
            self.body.append('\n')
        self.in_mailto = 0
    def visit_revision(self, node):
        # "Revision" bibliographic field; meta=None -> no <meta> tag emitted.
        self.visit_docinfo_item(node, 'revision', meta=None)
    def depart_revision(self, node):
        self.depart_docinfo_item()
    def visit_row(self, node):
        self.body.append(self.starttag(node, 'tr', ''))
        # Per-row column counter, advanced while processing entries.
        node.column = 0
    def depart_row(self, node):
        self.body.append('</tr>\n')
    def visit_rubric(self, node):
        # A rubric is an informal heading, rendered as a classed paragraph.
        self.body.append(self.starttag(node, 'p', '', CLASS='rubric'))
    def depart_rubric(self, node):
        self.body.append('</p>\n')
    def visit_section(self, node):
        # Track nesting depth; heading levels (h1..h6) are derived from it.
        self.section_level += 1
        self.body.append(
            self.starttag(node, 'div', CLASS='section'))
    def depart_section(self, node):
        self.section_level -= 1
        self.body.append('</div>\n')
    def visit_sidebar(self, node):
        self.body.append(
            self.starttag(node, 'div', CLASS='sidebar'))
        # Mark first/last children so CSS can tighten their margins.
        self.set_first_last(node)
        self.in_sidebar = 1
    def depart_sidebar(self, node):
        self.body.append('</div>\n')
        self.in_sidebar = None
    def visit_status(self, node):
        # "Status" bibliographic field; meta=None -> no <meta> tag emitted.
        self.visit_docinfo_item(node, 'status', meta=None)
    def depart_status(self, node):
        self.depart_docinfo_item()
    def visit_strong(self, node):
        # Strong emphasis -> <strong>.
        self.body.append(self.starttag(node, 'strong', ''))
    def depart_strong(self, node):
        self.body.append('</strong>')
    def visit_subscript(self, node):
        self.body.append(self.starttag(node, 'sub', ''))
    def depart_subscript(self, node):
        self.body.append('</sub>')
    def visit_substitution_definition(self, node):
        """Internal only."""
        # Definitions produce no output; skip the whole subtree.
        raise nodes.SkipNode
    def visit_substitution_reference(self, node):
        # Unresolved substitution references should never reach the writer.
        self.unimplemented_visit(node)
    def visit_subtitle(self, node):
        """Open a subtitle; the markup depends on the parent node type."""
        if isinstance(node.parent, nodes.sidebar):
            self.body.append(self.starttag(node, 'p', '',
                                           CLASS='sidebar-subtitle'))
            self.context.append('</p>\n')
        elif isinstance(node.parent, nodes.document):
            self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
            self.context.append('</h2>\n')
            # Remember where the subtitle starts in self.body so
            # depart_subtitle can move it out of the document body.
            self.in_document_title = len(self.body)
        elif isinstance(node.parent, nodes.section):
            tag = 'h%s' % (self.section_level + self.initial_header_level - 1)
            # Inner <span> allows styling section subtitles distinctly.
            self.body.append(
                self.starttag(node, tag, '', CLASS='section-subtitle') +
                self.starttag({}, 'span', '', CLASS='section-subtitle'))
            self.context.append('</span></%s>\n' % tag)
def depart_subtitle(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.subtitle = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_subtitle.extend(self.body)
del self.body[:]
    # Superscript text: plain <sup> wrapper.
    def visit_superscript(self, node):
        self.body.append(self.starttag(node, 'sup', ''))
    def depart_superscript(self, node):
        self.body.append('</sup>')
def visit_system_message(self, node):
self.body.append(self.starttag(node, 'div', CLASS='system-message'))
self.body.append('<p class="system-message-title">')
backref_text = ''
if len(node['backrefs']):
backrefs = node['backrefs']
if len(backrefs) == 1:
backref_text = ('; <em><a href="#%s">backlink</a></em>'
% backrefs[0])
else:
i = 1
backlinks = []
for backref in backrefs:
backlinks.append('<a href="#%s">%s</a>' % (backref, i))
i += 1
backref_text = ('; <em>backlinks: %s</em>'
% ', '.join(backlinks))
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('System Message: %s/%s '
'(<tt class="docutils">%s</tt>%s)%s</p>\n'
% (node['type'], node['level'],
self.encode(node['source']), line, backref_text))
def depart_system_message(self, node):
self.body.append('</div>\n')
    def visit_table(self, node):
        # Combine the default 'docutils' class with the user-configured
        # table style (may be empty, hence the strip()).
        classes = ' '.join(['docutils', self.settings.table_style]).strip()
        self.body.append(
            self.starttag(node, 'table', CLASS=classes, border="1"))
    def depart_table(self, node):
        self.body.append('</table>\n')
    def visit_target(self, node):
        # Only render inline targets that are not mere pointers (i.e. have
        # no refuri/refid/refname): those carry visible content.
        if not ('refuri' in node or 'refid' in node
                or 'refname' in node):
            self.body.append(self.starttag(node, 'span', '', CLASS='target'))
            self.context.append('</span>')
        else:
            self.context.append('')
    def depart_target(self, node):
        self.body.append(self.context.pop())
    def visit_tbody(self, node):
        # Flush pending column specs before the body starts.
        self.write_colspecs()
        self.body.append(self.context.pop()) # '</colgroup>\n' or ''
        self.body.append(self.starttag(node, 'tbody', valign='top'))
    def depart_tbody(self, node):
        self.body.append('</tbody>\n')
    def visit_term(self, node):
        # Definition-list term -> <dt>.
        self.body.append(self.starttag(node, 'dt', ''))
    def depart_term(self, node):
        """
        Leave the end tag to `self.visit_definition()`, in case there's a
        classifier.
        """
        pass
    def visit_tgroup(self, node):
        # Mozilla needs <colgroup>:
        self.body.append(self.starttag(node, 'colgroup'))
        # Appended by thead or tbody:
        self.context.append('</colgroup>\n')
        # Collects indices of stub (header) columns for entry rendering.
        node.stubs = []
    def depart_tgroup(self, node):
        pass
    def visit_thead(self, node):
        self.write_colspecs()
        self.body.append(self.context.pop()) # '</colgroup>\n'
        # There may or may not be a <thead>; this is for <tbody> to use:
        self.context.append('')
        self.body.append(self.starttag(node, 'thead', valign='bottom'))
    def depart_thead(self, node):
        self.body.append('</thead>\n')
    def visit_title(self, node):
        """Only 6 section levels are supported by HTML."""
        # The matching close tag is pushed on self.context for depart_title.
        check_id = 0
        close_tag = '</p>\n'
        if isinstance(node.parent, nodes.topic):
            # 'first' class lets CSS suppress the top margin.
            self.body.append(
                  self.starttag(node, 'p', '', CLASS='topic-title first'))
        elif isinstance(node.parent, nodes.sidebar):
            self.body.append(
                  self.starttag(node, 'p', '', CLASS='sidebar-title'))
        elif isinstance(node.parent, nodes.Admonition):
            self.body.append(
                  self.starttag(node, 'p', '', CLASS='admonition-title'))
        elif isinstance(node.parent, nodes.table):
            # Table titles become <caption> elements.
            self.body.append(
                  self.starttag(node, 'caption', ''))
            close_tag = '</caption>\n'
        elif isinstance(node.parent, nodes.document):
            self.body.append(self.starttag(node, 'h1', '', CLASS='title'))
            close_tag = '</h1>\n'
            # Remember where the title starts so depart_title can move it
            # out of self.body.
            self.in_document_title = len(self.body)
        else:
            # Section title: heading level derives from nesting depth.
            assert isinstance(node.parent, nodes.section)
            h_level = self.section_level + self.initial_header_level - 1
            atts = {}
            if (len(node.parent) >= 2 and
                isinstance(node.parent[1], nodes.subtitle)):
                atts['CLASS'] = 'with-subtitle'
            self.body.append(
                  self.starttag(node, 'h%s' % h_level, '', **atts))
            atts = {}
            if node.hasattr('refid'):
                # Link the heading back to the table of contents.
                atts['class'] = 'toc-backref'
                atts['href'] = '#' + node['refid']
            if atts:
                self.body.append(self.starttag({}, 'a', '', **atts))
                close_tag = '</a></h%s>\n' % (h_level)
            else:
                close_tag = '</h%s>\n' % (h_level)
        self.context.append(close_tag)
def depart_title(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.title = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_title.extend(self.body)
del self.body[:]
    def visit_title_reference(self, node):
        # Title references (e.g. book titles) -> <cite>.
        self.body.append(self.starttag(node, 'cite', ''))
    def depart_title_reference(self, node):
        self.body.append('</cite>')
    def visit_topic(self, node):
        self.body.append(self.starttag(node, 'div', CLASS='topic'))
        # Remembered so contained elements can adapt (e.g. contents lists).
        self.topic_classes = node['classes']
    def depart_topic(self, node):
        self.body.append('</div>\n')
        self.topic_classes = []
    def visit_transition(self, node):
        # Transitions render as horizontal rules.
        self.body.append(self.emptytag(node, 'hr', CLASS='docutils'))
    def depart_transition(self, node):
        pass
    def visit_version(self, node):
        # "Version" bibliographic field; meta=None -> no <meta> tag emitted.
        self.visit_docinfo_item(node, 'version', meta=None)
    def depart_version(self, node):
        self.depart_docinfo_item()
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
class SimpleListChecker(nodes.GenericNodeVisitor):

    """
    Raise `nodes.NodeFound` if non-simple list item is encountered.

    Here "simple" means a list item containing nothing other than a single
    paragraph, a simple list, or a paragraph followed by a simple list.
    """

    def default_visit(self, node):
        # Any node without an explicit handler makes the list non-simple.
        raise nodes.NodeFound

    def visit_bullet_list(self, node):
        # Nested bullet lists are allowed; descend into them.
        pass

    def visit_enumerated_list(self, node):
        # Nested enumerated lists are allowed; descend into them.
        pass

    def visit_list_item(self, node):
        # Consider only visible children.
        visible = [child for child in node.children
                   if not isinstance(child, nodes.Invisible)]
        # A trailing sublist after a leading paragraph doesn't count
        # against simplicity.
        if (visible and isinstance(visible[0], nodes.paragraph)
            and isinstance(visible[-1],
                           (nodes.bullet_list, nodes.enumerated_list))):
            visible.pop()
        if len(visible) > 1:
            raise nodes.NodeFound

    def visit_paragraph(self, node):
        # Paragraph contents are irrelevant; don't visit its children.
        raise nodes.SkipNode

    def invisible_visit(self, node):
        """Invisible nodes should be ignored."""
        raise nodes.SkipNode

    visit_comment = invisible_visit
    visit_substitution_definition = invisible_visit
    visit_target = invisible_visit
    visit_pending = invisible_visit
| Python |
# $Id: null.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A do-nothing Writer.
"""
from docutils import writers
class Writer(writers.UnfilteredWriter):

    # Formats this writer supports.
    supported = ('null',)
    """Formats this writer supports."""

    config_section = 'null writer'
    config_section_dependencies = ('writers',)

    def translate(self):
        # Deliberately produce no output at all.
        pass
| Python |
# $Id$
# Author: Lea Wiemann <LeWiemann@gmail.com>
# Copyright: This file has been placed in the public domain.
# This is a mapping of Unicode characters to LaTeX equivalents.
# The information has been extracted from
# <http://www.w3.org/2003/entities/xml/unicode.xml>, written by
# David Carlisle and Sebastian Rahtz.
#
# The extraction has been done by the "create_unimap.py" script
# located at <http://docutils.sf.net/tools/dev/create_unimap.py>.
unicode_map = {u'\xa0': '$~$',
u'\xa1': '{\\textexclamdown}',
u'\xa2': '{\\textcent}',
u'\xa3': '{\\textsterling}',
u'\xa4': '{\\textcurrency}',
u'\xa5': '{\\textyen}',
u'\xa6': '{\\textbrokenbar}',
u'\xa7': '{\\textsection}',
u'\xa8': '{\\textasciidieresis}',
u'\xa9': '{\\textcopyright}',
u'\xaa': '{\\textordfeminine}',
u'\xab': '{\\guillemotleft}',
u'\xac': '$\\lnot$',
u'\xad': '$\\-$',
u'\xae': '{\\textregistered}',
u'\xaf': '{\\textasciimacron}',
u'\xb0': '{\\textdegree}',
u'\xb1': '$\\pm$',
u'\xb2': '${^2}$',
u'\xb3': '${^3}$',
u'\xb4': '{\\textasciiacute}',
u'\xb5': '$\\mathrm{\\mu}$',
u'\xb6': '{\\textparagraph}',
u'\xb7': '$\\cdot$',
u'\xb8': '{\\c{}}',
u'\xb9': '${^1}$',
u'\xba': '{\\textordmasculine}',
u'\xbb': '{\\guillemotright}',
u'\xbc': '{\\textonequarter}',
u'\xbd': '{\\textonehalf}',
u'\xbe': '{\\textthreequarters}',
u'\xbf': '{\\textquestiondown}',
u'\xc0': '{\\`{A}}',
u'\xc1': "{\\'{A}}",
u'\xc2': '{\\^{A}}',
u'\xc3': '{\\~{A}}',
u'\xc4': '{\\"{A}}',
u'\xc5': '{\\AA}',
u'\xc6': '{\\AE}',
u'\xc7': '{\\c{C}}',
u'\xc8': '{\\`{E}}',
u'\xc9': "{\\'{E}}",
u'\xca': '{\\^{E}}',
u'\xcb': '{\\"{E}}',
u'\xcc': '{\\`{I}}',
u'\xcd': "{\\'{I}}",
u'\xce': '{\\^{I}}',
u'\xcf': '{\\"{I}}',
u'\xd0': '{\\DH}',
u'\xd1': '{\\~{N}}',
u'\xd2': '{\\`{O}}',
u'\xd3': "{\\'{O}}",
u'\xd4': '{\\^{O}}',
u'\xd5': '{\\~{O}}',
u'\xd6': '{\\"{O}}',
u'\xd7': '{\\texttimes}',
u'\xd8': '{\\O}',
u'\xd9': '{\\`{U}}',
u'\xda': "{\\'{U}}",
u'\xdb': '{\\^{U}}',
u'\xdc': '{\\"{U}}',
u'\xdd': "{\\'{Y}}",
u'\xde': '{\\TH}',
u'\xdf': '{\\ss}',
u'\xe0': '{\\`{a}}',
u'\xe1': "{\\'{a}}",
u'\xe2': '{\\^{a}}',
u'\xe3': '{\\~{a}}',
u'\xe4': '{\\"{a}}',
u'\xe5': '{\\aa}',
u'\xe6': '{\\ae}',
u'\xe7': '{\\c{c}}',
u'\xe8': '{\\`{e}}',
u'\xe9': "{\\'{e}}",
u'\xea': '{\\^{e}}',
u'\xeb': '{\\"{e}}',
u'\xec': '{\\`{\\i}}',
u'\xed': "{\\'{\\i}}",
u'\xee': '{\\^{\\i}}',
u'\xef': '{\\"{\\i}}',
u'\xf0': '{\\dh}',
u'\xf1': '{\\~{n}}',
u'\xf2': '{\\`{o}}',
u'\xf3': "{\\'{o}}",
u'\xf4': '{\\^{o}}',
u'\xf5': '{\\~{o}}',
u'\xf6': '{\\"{o}}',
u'\xf7': '$\\div$',
u'\xf8': '{\\o}',
u'\xf9': '{\\`{u}}',
u'\xfa': "{\\'{u}}",
u'\xfb': '{\\^{u}}',
u'\xfc': '{\\"{u}}',
u'\xfd': "{\\'{y}}",
u'\xfe': '{\\th}',
u'\xff': '{\\"{y}}',
u'\u0100': '{\\={A}}',
u'\u0101': '{\\={a}}',
u'\u0102': '{\\u{A}}',
u'\u0103': '{\\u{a}}',
u'\u0104': '{\\k{A}}',
u'\u0105': '{\\k{a}}',
u'\u0106': "{\\'{C}}",
u'\u0107': "{\\'{c}}",
u'\u0108': '{\\^{C}}',
u'\u0109': '{\\^{c}}',
u'\u010a': '{\\.{C}}',
u'\u010b': '{\\.{c}}',
u'\u010c': '{\\v{C}}',
u'\u010d': '{\\v{c}}',
u'\u010e': '{\\v{D}}',
u'\u010f': '{\\v{d}}',
u'\u0110': '{\\DJ}',
u'\u0111': '{\\dj}',
u'\u0112': '{\\={E}}',
u'\u0113': '{\\={e}}',
u'\u0114': '{\\u{E}}',
u'\u0115': '{\\u{e}}',
u'\u0116': '{\\.{E}}',
u'\u0117': '{\\.{e}}',
u'\u0118': '{\\k{E}}',
u'\u0119': '{\\k{e}}',
u'\u011a': '{\\v{E}}',
u'\u011b': '{\\v{e}}',
u'\u011c': '{\\^{G}}',
u'\u011d': '{\\^{g}}',
u'\u011e': '{\\u{G}}',
u'\u011f': '{\\u{g}}',
u'\u0120': '{\\.{G}}',
u'\u0121': '{\\.{g}}',
u'\u0122': '{\\c{G}}',
u'\u0123': '{\\c{g}}',
u'\u0124': '{\\^{H}}',
u'\u0125': '{\\^{h}}',
u'\u0126': '{{\\fontencoding{LELA}\\selectfont\\char40}}',
u'\u0127': '$\\Elzxh$',
u'\u0128': '{\\~{I}}',
u'\u0129': '{\\~{\\i}}',
u'\u012a': '{\\={I}}',
u'\u012b': '{\\={\\i}}',
u'\u012c': '{\\u{I}}',
u'\u012d': '{\\u{\\i}}',
u'\u012e': '{\\k{I}}',
u'\u012f': '{\\k{i}}',
u'\u0130': '{\\.{I}}',
u'\u0131': '{\\i}',
u'\u0132': '{IJ}',
u'\u0133': '{ij}',
u'\u0134': '{\\^{J}}',
u'\u0135': '{\\^{\\j}}',
u'\u0136': '{\\c{K}}',
u'\u0137': '{\\c{k}}',
u'\u0138': '{{\\fontencoding{LELA}\\selectfont\\char91}}',
u'\u0139': "{\\'{L}}",
u'\u013a': "{\\'{l}}",
u'\u013b': '{\\c{L}}',
u'\u013c': '{\\c{l}}',
u'\u013d': '{\\v{L}}',
u'\u013e': '{\\v{l}}',
u'\u013f': '{{\\fontencoding{LELA}\\selectfont\\char201}}',
u'\u0140': '{{\\fontencoding{LELA}\\selectfont\\char202}}',
u'\u0141': '{\\L}',
u'\u0142': '{\\l}',
u'\u0143': "{\\'{N}}",
u'\u0144': "{\\'{n}}",
u'\u0145': '{\\c{N}}',
u'\u0146': '{\\c{n}}',
u'\u0147': '{\\v{N}}',
u'\u0148': '{\\v{n}}',
u'\u0149': "{'n}",
u'\u014a': '{\\NG}',
u'\u014b': '{\\ng}',
u'\u014c': '{\\={O}}',
u'\u014d': '{\\={o}}',
u'\u014e': '{\\u{O}}',
u'\u014f': '{\\u{o}}',
u'\u0150': '{\\H{O}}',
u'\u0151': '{\\H{o}}',
u'\u0152': '{\\OE}',
u'\u0153': '{\\oe}',
u'\u0154': "{\\'{R}}",
u'\u0155': "{\\'{r}}",
u'\u0156': '{\\c{R}}',
u'\u0157': '{\\c{r}}',
u'\u0158': '{\\v{R}}',
u'\u0159': '{\\v{r}}',
u'\u015a': "{\\'{S}}",
u'\u015b': "{\\'{s}}",
u'\u015c': '{\\^{S}}',
u'\u015d': '{\\^{s}}',
u'\u015e': '{\\c{S}}',
u'\u015f': '{\\c{s}}',
u'\u0160': '{\\v{S}}',
u'\u0161': '{\\v{s}}',
u'\u0162': '{\\c{T}}',
u'\u0163': '{\\c{t}}',
u'\u0164': '{\\v{T}}',
u'\u0165': '{\\v{t}}',
u'\u0166': '{{\\fontencoding{LELA}\\selectfont\\char47}}',
u'\u0167': '{{\\fontencoding{LELA}\\selectfont\\char63}}',
u'\u0168': '{\\~{U}}',
u'\u0169': '{\\~{u}}',
u'\u016a': '{\\={U}}',
u'\u016b': '{\\={u}}',
u'\u016c': '{\\u{U}}',
u'\u016d': '{\\u{u}}',
u'\u016e': '{\\r{U}}',
u'\u016f': '{\\r{u}}',
u'\u0170': '{\\H{U}}',
u'\u0171': '{\\H{u}}',
u'\u0172': '{\\k{U}}',
u'\u0173': '{\\k{u}}',
u'\u0174': '{\\^{W}}',
u'\u0175': '{\\^{w}}',
u'\u0176': '{\\^{Y}}',
u'\u0177': '{\\^{y}}',
u'\u0178': '{\\"{Y}}',
u'\u0179': "{\\'{Z}}",
u'\u017a': "{\\'{z}}",
u'\u017b': '{\\.{Z}}',
u'\u017c': '{\\.{z}}',
u'\u017d': '{\\v{Z}}',
u'\u017e': '{\\v{z}}',
u'\u0192': '$f$',
u'\u0195': '{\\texthvlig}',
u'\u019e': '{\\textnrleg}',
u'\u01aa': '$\\eth$',
u'\u01ba': '{{\\fontencoding{LELA}\\selectfont\\char195}}',
u'\u01c2': '{\\textdoublepipe}',
u'\u01f5': "{\\'{g}}",
u'\u0250': '$\\Elztrna$',
u'\u0252': '$\\Elztrnsa$',
u'\u0254': '$\\Elzopeno$',
u'\u0256': '$\\Elzrtld$',
u'\u0258': '{{\\fontencoding{LEIP}\\selectfont\\char61}}',
u'\u0259': '$\\Elzschwa$',
u'\u025b': '$\\varepsilon$',
u'\u0261': '{g}',
u'\u0263': '$\\Elzpgamma$',
u'\u0264': '$\\Elzpbgam$',
u'\u0265': '$\\Elztrnh$',
u'\u026c': '$\\Elzbtdl$',
u'\u026d': '$\\Elzrtll$',
u'\u026f': '$\\Elztrnm$',
u'\u0270': '$\\Elztrnmlr$',
u'\u0271': '$\\Elzltlmr$',
u'\u0272': '{\\Elzltln}',
u'\u0273': '$\\Elzrtln$',
u'\u0277': '$\\Elzclomeg$',
u'\u0278': '{\\textphi}',
u'\u0279': '$\\Elztrnr$',
u'\u027a': '$\\Elztrnrl$',
u'\u027b': '$\\Elzrttrnr$',
u'\u027c': '$\\Elzrl$',
u'\u027d': '$\\Elzrtlr$',
u'\u027e': '$\\Elzfhr$',
u'\u027f': '{{\\fontencoding{LEIP}\\selectfont\\char202}}',
u'\u0282': '$\\Elzrtls$',
u'\u0283': '$\\Elzesh$',
u'\u0287': '$\\Elztrnt$',
u'\u0288': '$\\Elzrtlt$',
u'\u028a': '$\\Elzpupsil$',
u'\u028b': '$\\Elzpscrv$',
u'\u028c': '$\\Elzinvv$',
u'\u028d': '$\\Elzinvw$',
u'\u028e': '$\\Elztrny$',
u'\u0290': '$\\Elzrtlz$',
u'\u0292': '$\\Elzyogh$',
u'\u0294': '$\\Elzglst$',
u'\u0295': '$\\Elzreglst$',
u'\u0296': '$\\Elzinglst$',
u'\u029e': '{\\textturnk}',
u'\u02a4': '$\\Elzdyogh$',
u'\u02a7': '$\\Elztesh$',
u'\u02bc': "{'}",
u'\u02c7': '{\\textasciicaron}',
u'\u02c8': '$\\Elzverts$',
u'\u02cc': '$\\Elzverti$',
u'\u02d0': '$\\Elzlmrk$',
u'\u02d1': '$\\Elzhlmrk$',
u'\u02d2': '$\\Elzsbrhr$',
u'\u02d3': '$\\Elzsblhr$',
u'\u02d4': '$\\Elzrais$',
u'\u02d5': '$\\Elzlow$',
u'\u02d8': '{\\textasciibreve}',
u'\u02d9': '{\\textperiodcentered}',
u'\u02da': '{\\r{}}',
u'\u02db': '{\\k{}}',
u'\u02dc': '{\\texttildelow}',
u'\u02dd': '{\\H{}}',
u'\u02e5': '{\\tone{55}}',
u'\u02e6': '{\\tone{44}}',
u'\u02e7': '{\\tone{33}}',
u'\u02e8': '{\\tone{22}}',
u'\u02e9': '{\\tone{11}}',
u'\u0300': '{\\`}',
u'\u0301': "{\\'}",
u'\u0302': '{\\^}',
u'\u0303': '{\\~}',
u'\u0304': '{\\=}',
u'\u0306': '{\\u}',
u'\u0307': '{\\.}',
u'\u0308': '{\\"}',
u'\u030a': '{\\r}',
u'\u030b': '{\\H}',
u'\u030c': '{\\v}',
u'\u030f': '{\\cyrchar\\C}',
u'\u0311': '{{\\fontencoding{LECO}\\selectfont\\char177}}',
u'\u0318': '{{\\fontencoding{LECO}\\selectfont\\char184}}',
u'\u0319': '{{\\fontencoding{LECO}\\selectfont\\char185}}',
u'\u0321': '$\\Elzpalh$',
u'\u0322': '{\\Elzrh}',
u'\u0327': '{\\c}',
u'\u0328': '{\\k}',
u'\u032a': '$\\Elzsbbrg$',
u'\u032b': '{{\\fontencoding{LECO}\\selectfont\\char203}}',
u'\u032f': '{{\\fontencoding{LECO}\\selectfont\\char207}}',
u'\u0335': '{\\Elzxl}',
u'\u0336': '{\\Elzbar}',
u'\u0337': '{{\\fontencoding{LECO}\\selectfont\\char215}}',
u'\u0338': '{{\\fontencoding{LECO}\\selectfont\\char216}}',
u'\u033a': '{{\\fontencoding{LECO}\\selectfont\\char218}}',
u'\u033b': '{{\\fontencoding{LECO}\\selectfont\\char219}}',
u'\u033c': '{{\\fontencoding{LECO}\\selectfont\\char220}}',
u'\u033d': '{{\\fontencoding{LECO}\\selectfont\\char221}}',
u'\u0361': '{{\\fontencoding{LECO}\\selectfont\\char225}}',
u'\u0386': "{\\'{A}}",
u'\u0388': "{\\'{E}}",
u'\u0389': "{\\'{H}}",
u'\u038a': "{\\'{}{I}}",
u'\u038c': "{\\'{}O}",
u'\u038e': "$\\mathrm{'Y}$",
u'\u038f': "$\\mathrm{'\\Omega}$",
u'\u0390': '$\\acute{\\ddot{\\iota}}$',
u'\u0391': '$\\Alpha$',
u'\u0392': '$\\Beta$',
u'\u0393': '$\\Gamma$',
u'\u0394': '$\\Delta$',
u'\u0395': '$\\Epsilon$',
u'\u0396': '$\\Zeta$',
u'\u0397': '$\\Eta$',
u'\u0398': '$\\Theta$',
u'\u0399': '$\\Iota$',
u'\u039a': '$\\Kappa$',
u'\u039b': '$\\Lambda$',
u'\u039c': '$M$',
u'\u039d': '$N$',
u'\u039e': '$\\Xi$',
u'\u039f': '$O$',
u'\u03a0': '$\\Pi$',
u'\u03a1': '$\\Rho$',
u'\u03a3': '$\\Sigma$',
u'\u03a4': '$\\Tau$',
u'\u03a5': '$\\Upsilon$',
u'\u03a6': '$\\Phi$',
u'\u03a7': '$\\Chi$',
u'\u03a8': '$\\Psi$',
u'\u03a9': '$\\Omega$',
u'\u03aa': '$\\mathrm{\\ddot{I}}$',
u'\u03ab': '$\\mathrm{\\ddot{Y}}$',
u'\u03ac': "{\\'{$\\alpha$}}",
u'\u03ad': '$\\acute{\\epsilon}$',
u'\u03ae': '$\\acute{\\eta}$',
u'\u03af': '$\\acute{\\iota}$',
u'\u03b0': '$\\acute{\\ddot{\\upsilon}}$',
u'\u03b1': '$\\alpha$',
u'\u03b2': '$\\beta$',
u'\u03b3': '$\\gamma$',
u'\u03b4': '$\\delta$',
u'\u03b5': '$\\epsilon$',
u'\u03b6': '$\\zeta$',
u'\u03b7': '$\\eta$',
u'\u03b8': '{\\texttheta}',
u'\u03b9': '$\\iota$',
u'\u03ba': '$\\kappa$',
u'\u03bb': '$\\lambda$',
u'\u03bc': '$\\mu$',
u'\u03bd': '$\\nu$',
u'\u03be': '$\\xi$',
u'\u03bf': '$o$',
u'\u03c0': '$\\pi$',
u'\u03c1': '$\\rho$',
u'\u03c2': '$\\varsigma$',
u'\u03c3': '$\\sigma$',
u'\u03c4': '$\\tau$',
u'\u03c5': '$\\upsilon$',
u'\u03c6': '$\\varphi$',
u'\u03c7': '$\\chi$',
u'\u03c8': '$\\psi$',
u'\u03c9': '$\\omega$',
u'\u03ca': '$\\ddot{\\iota}$',
u'\u03cb': '$\\ddot{\\upsilon}$',
u'\u03cc': "{\\'{o}}",
u'\u03cd': '$\\acute{\\upsilon}$',
u'\u03ce': '$\\acute{\\omega}$',
u'\u03d0': '{\\Pisymbol{ppi022}{87}}',
u'\u03d1': '{\\textvartheta}',
u'\u03d2': '$\\Upsilon$',
u'\u03d5': '$\\phi$',
u'\u03d6': '$\\varpi$',
u'\u03da': '$\\Stigma$',
u'\u03dc': '$\\Digamma$',
u'\u03dd': '$\\digamma$',
u'\u03de': '$\\Koppa$',
u'\u03e0': '$\\Sampi$',
u'\u03f0': '$\\varkappa$',
u'\u03f1': '$\\varrho$',
u'\u03f4': '{\\textTheta}',
u'\u03f6': '$\\backepsilon$',
u'\u0401': '{\\cyrchar\\CYRYO}',
u'\u0402': '{\\cyrchar\\CYRDJE}',
u'\u0403': "{\\cyrchar{\\'\\CYRG}}",
u'\u0404': '{\\cyrchar\\CYRIE}',
u'\u0405': '{\\cyrchar\\CYRDZE}',
u'\u0406': '{\\cyrchar\\CYRII}',
u'\u0407': '{\\cyrchar\\CYRYI}',
u'\u0408': '{\\cyrchar\\CYRJE}',
u'\u0409': '{\\cyrchar\\CYRLJE}',
u'\u040a': '{\\cyrchar\\CYRNJE}',
u'\u040b': '{\\cyrchar\\CYRTSHE}',
u'\u040c': "{\\cyrchar{\\'\\CYRK}}",
u'\u040e': '{\\cyrchar\\CYRUSHRT}',
u'\u040f': '{\\cyrchar\\CYRDZHE}',
u'\u0410': '{\\cyrchar\\CYRA}',
u'\u0411': '{\\cyrchar\\CYRB}',
u'\u0412': '{\\cyrchar\\CYRV}',
u'\u0413': '{\\cyrchar\\CYRG}',
u'\u0414': '{\\cyrchar\\CYRD}',
u'\u0415': '{\\cyrchar\\CYRE}',
u'\u0416': '{\\cyrchar\\CYRZH}',
u'\u0417': '{\\cyrchar\\CYRZ}',
u'\u0418': '{\\cyrchar\\CYRI}',
u'\u0419': '{\\cyrchar\\CYRISHRT}',
u'\u041a': '{\\cyrchar\\CYRK}',
u'\u041b': '{\\cyrchar\\CYRL}',
u'\u041c': '{\\cyrchar\\CYRM}',
u'\u041d': '{\\cyrchar\\CYRN}',
u'\u041e': '{\\cyrchar\\CYRO}',
u'\u041f': '{\\cyrchar\\CYRP}',
u'\u0420': '{\\cyrchar\\CYRR}',
u'\u0421': '{\\cyrchar\\CYRS}',
u'\u0422': '{\\cyrchar\\CYRT}',
u'\u0423': '{\\cyrchar\\CYRU}',
u'\u0424': '{\\cyrchar\\CYRF}',
u'\u0425': '{\\cyrchar\\CYRH}',
u'\u0426': '{\\cyrchar\\CYRC}',
u'\u0427': '{\\cyrchar\\CYRCH}',
u'\u0428': '{\\cyrchar\\CYRSH}',
u'\u0429': '{\\cyrchar\\CYRSHCH}',
u'\u042a': '{\\cyrchar\\CYRHRDSN}',
u'\u042b': '{\\cyrchar\\CYRERY}',
u'\u042c': '{\\cyrchar\\CYRSFTSN}',
u'\u042d': '{\\cyrchar\\CYREREV}',
u'\u042e': '{\\cyrchar\\CYRYU}',
u'\u042f': '{\\cyrchar\\CYRYA}',
u'\u0430': '{\\cyrchar\\cyra}',
u'\u0431': '{\\cyrchar\\cyrb}',
u'\u0432': '{\\cyrchar\\cyrv}',
u'\u0433': '{\\cyrchar\\cyrg}',
u'\u0434': '{\\cyrchar\\cyrd}',
u'\u0435': '{\\cyrchar\\cyre}',
u'\u0436': '{\\cyrchar\\cyrzh}',
u'\u0437': '{\\cyrchar\\cyrz}',
u'\u0438': '{\\cyrchar\\cyri}',
u'\u0439': '{\\cyrchar\\cyrishrt}',
u'\u043a': '{\\cyrchar\\cyrk}',
u'\u043b': '{\\cyrchar\\cyrl}',
u'\u043c': '{\\cyrchar\\cyrm}',
u'\u043d': '{\\cyrchar\\cyrn}',
u'\u043e': '{\\cyrchar\\cyro}',
u'\u043f': '{\\cyrchar\\cyrp}',
u'\u0440': '{\\cyrchar\\cyrr}',
u'\u0441': '{\\cyrchar\\cyrs}',
u'\u0442': '{\\cyrchar\\cyrt}',
u'\u0443': '{\\cyrchar\\cyru}',
u'\u0444': '{\\cyrchar\\cyrf}',
u'\u0445': '{\\cyrchar\\cyrh}',
u'\u0446': '{\\cyrchar\\cyrc}',
u'\u0447': '{\\cyrchar\\cyrch}',
u'\u0448': '{\\cyrchar\\cyrsh}',
u'\u0449': '{\\cyrchar\\cyrshch}',
u'\u044a': '{\\cyrchar\\cyrhrdsn}',
u'\u044b': '{\\cyrchar\\cyrery}',
u'\u044c': '{\\cyrchar\\cyrsftsn}',
u'\u044d': '{\\cyrchar\\cyrerev}',
u'\u044e': '{\\cyrchar\\cyryu}',
u'\u044f': '{\\cyrchar\\cyrya}',
u'\u0451': '{\\cyrchar\\cyryo}',
u'\u0452': '{\\cyrchar\\cyrdje}',
u'\u0453': "{\\cyrchar{\\'\\cyrg}}",
u'\u0454': '{\\cyrchar\\cyrie}',
u'\u0455': '{\\cyrchar\\cyrdze}',
u'\u0456': '{\\cyrchar\\cyrii}',
u'\u0457': '{\\cyrchar\\cyryi}',
u'\u0458': '{\\cyrchar\\cyrje}',
u'\u0459': '{\\cyrchar\\cyrlje}',
u'\u045a': '{\\cyrchar\\cyrnje}',
u'\u045b': '{\\cyrchar\\cyrtshe}',
u'\u045c': "{\\cyrchar{\\'\\cyrk}}",
u'\u045e': '{\\cyrchar\\cyrushrt}',
u'\u045f': '{\\cyrchar\\cyrdzhe}',
u'\u0460': '{\\cyrchar\\CYROMEGA}',
u'\u0461': '{\\cyrchar\\cyromega}',
u'\u0462': '{\\cyrchar\\CYRYAT}',
u'\u0464': '{\\cyrchar\\CYRIOTE}',
u'\u0465': '{\\cyrchar\\cyriote}',
u'\u0466': '{\\cyrchar\\CYRLYUS}',
u'\u0467': '{\\cyrchar\\cyrlyus}',
u'\u0468': '{\\cyrchar\\CYRIOTLYUS}',
u'\u0469': '{\\cyrchar\\cyriotlyus}',
u'\u046a': '{\\cyrchar\\CYRBYUS}',
u'\u046c': '{\\cyrchar\\CYRIOTBYUS}',
u'\u046d': '{\\cyrchar\\cyriotbyus}',
u'\u046e': '{\\cyrchar\\CYRKSI}',
u'\u046f': '{\\cyrchar\\cyrksi}',
u'\u0470': '{\\cyrchar\\CYRPSI}',
u'\u0471': '{\\cyrchar\\cyrpsi}',
u'\u0472': '{\\cyrchar\\CYRFITA}',
u'\u0474': '{\\cyrchar\\CYRIZH}',
u'\u0478': '{\\cyrchar\\CYRUK}',
u'\u0479': '{\\cyrchar\\cyruk}',
u'\u047a': '{\\cyrchar\\CYROMEGARND}',
u'\u047b': '{\\cyrchar\\cyromegarnd}',
u'\u047c': '{\\cyrchar\\CYROMEGATITLO}',
u'\u047d': '{\\cyrchar\\cyromegatitlo}',
u'\u047e': '{\\cyrchar\\CYROT}',
u'\u047f': '{\\cyrchar\\cyrot}',
u'\u0480': '{\\cyrchar\\CYRKOPPA}',
u'\u0481': '{\\cyrchar\\cyrkoppa}',
u'\u0482': '{\\cyrchar\\cyrthousands}',
u'\u0488': '{\\cyrchar\\cyrhundredthousands}',
u'\u0489': '{\\cyrchar\\cyrmillions}',
u'\u048c': '{\\cyrchar\\CYRSEMISFTSN}',
u'\u048d': '{\\cyrchar\\cyrsemisftsn}',
u'\u048e': '{\\cyrchar\\CYRRTICK}',
u'\u048f': '{\\cyrchar\\cyrrtick}',
u'\u0490': '{\\cyrchar\\CYRGUP}',
u'\u0491': '{\\cyrchar\\cyrgup}',
u'\u0492': '{\\cyrchar\\CYRGHCRS}',
u'\u0493': '{\\cyrchar\\cyrghcrs}',
u'\u0494': '{\\cyrchar\\CYRGHK}',
u'\u0495': '{\\cyrchar\\cyrghk}',
u'\u0496': '{\\cyrchar\\CYRZHDSC}',
u'\u0497': '{\\cyrchar\\cyrzhdsc}',
u'\u0498': '{\\cyrchar\\CYRZDSC}',
u'\u0499': '{\\cyrchar\\cyrzdsc}',
u'\u049a': '{\\cyrchar\\CYRKDSC}',
u'\u049b': '{\\cyrchar\\cyrkdsc}',
u'\u049c': '{\\cyrchar\\CYRKVCRS}',
u'\u049d': '{\\cyrchar\\cyrkvcrs}',
u'\u049e': '{\\cyrchar\\CYRKHCRS}',
u'\u049f': '{\\cyrchar\\cyrkhcrs}',
u'\u04a0': '{\\cyrchar\\CYRKBEAK}',
u'\u04a1': '{\\cyrchar\\cyrkbeak}',
u'\u04a2': '{\\cyrchar\\CYRNDSC}',
u'\u04a3': '{\\cyrchar\\cyrndsc}',
u'\u04a4': '{\\cyrchar\\CYRNG}',
u'\u04a5': '{\\cyrchar\\cyrng}',
u'\u04a6': '{\\cyrchar\\CYRPHK}',
u'\u04a7': '{\\cyrchar\\cyrphk}',
u'\u04a8': '{\\cyrchar\\CYRABHHA}',
u'\u04a9': '{\\cyrchar\\cyrabhha}',
u'\u04aa': '{\\cyrchar\\CYRSDSC}',
u'\u04ab': '{\\cyrchar\\cyrsdsc}',
u'\u04ac': '{\\cyrchar\\CYRTDSC}',
u'\u04ad': '{\\cyrchar\\cyrtdsc}',
u'\u04ae': '{\\cyrchar\\CYRY}',
u'\u04af': '{\\cyrchar\\cyry}',
u'\u04b0': '{\\cyrchar\\CYRYHCRS}',
u'\u04b1': '{\\cyrchar\\cyryhcrs}',
u'\u04b2': '{\\cyrchar\\CYRHDSC}',
u'\u04b3': '{\\cyrchar\\cyrhdsc}',
u'\u04b4': '{\\cyrchar\\CYRTETSE}',
u'\u04b5': '{\\cyrchar\\cyrtetse}',
u'\u04b6': '{\\cyrchar\\CYRCHRDSC}',
u'\u04b7': '{\\cyrchar\\cyrchrdsc}',
u'\u04b8': '{\\cyrchar\\CYRCHVCRS}',
u'\u04b9': '{\\cyrchar\\cyrchvcrs}',
u'\u04ba': '{\\cyrchar\\CYRSHHA}',
u'\u04bb': '{\\cyrchar\\cyrshha}',
u'\u04bc': '{\\cyrchar\\CYRABHCH}',
u'\u04bd': '{\\cyrchar\\cyrabhch}',
u'\u04be': '{\\cyrchar\\CYRABHCHDSC}',
u'\u04bf': '{\\cyrchar\\cyrabhchdsc}',
u'\u04c0': '{\\cyrchar\\CYRpalochka}',
u'\u04c3': '{\\cyrchar\\CYRKHK}',
u'\u04c4': '{\\cyrchar\\cyrkhk}',
u'\u04c7': '{\\cyrchar\\CYRNHK}',
u'\u04c8': '{\\cyrchar\\cyrnhk}',
u'\u04cb': '{\\cyrchar\\CYRCHLDSC}',
u'\u04cc': '{\\cyrchar\\cyrchldsc}',
u'\u04d4': '{\\cyrchar\\CYRAE}',
u'\u04d5': '{\\cyrchar\\cyrae}',
u'\u04d8': '{\\cyrchar\\CYRSCHWA}',
u'\u04d9': '{\\cyrchar\\cyrschwa}',
u'\u04e0': '{\\cyrchar\\CYRABHDZE}',
u'\u04e1': '{\\cyrchar\\cyrabhdze}',
u'\u04e8': '{\\cyrchar\\CYROTLD}',
u'\u04e9': '{\\cyrchar\\cyrotld}',
u'\u2002': '{\\hspace{0.6em}}',
u'\u2003': '{\\hspace{1em}}',
u'\u2004': '{\\hspace{0.33em}}',
u'\u2005': '{\\hspace{0.25em}}',
u'\u2006': '{\\hspace{0.166em}}',
u'\u2007': '{\\hphantom{0}}',
u'\u2008': '{\\hphantom{,}}',
u'\u2009': '{\\hspace{0.167em}}',
u'\u200a': '$\\mkern1mu$',
u'\u2010': '{-}',
u'\u2013': '{\\textendash}',
u'\u2014': '{\\textemdash}',
u'\u2015': '{\\rule{1em}{1pt}}',
u'\u2016': '$\\Vert$',
u'\u2018': '{`}',
u'\u2019': "{'}",
u'\u201a': '{,}',
u'\u201b': '$\\Elzreapos$',
u'\u201c': '{\\textquotedblleft}',
u'\u201d': '{\\textquotedblright}',
u'\u201e': '{,,}',
u'\u2020': '{\\textdagger}',
u'\u2021': '{\\textdaggerdbl}',
u'\u2022': '{\\textbullet}',
u'\u2024': '{.}',
u'\u2025': '{..}',
u'\u2026': '{\\ldots}',
u'\u2030': '{\\textperthousand}',
u'\u2031': '{\\textpertenthousand}',
u'\u2032': "${'}$",
u'\u2033': "${''}$",
u'\u2034': "${'''}$",
u'\u2035': '$\\backprime$',
u'\u2039': '{\\guilsinglleft}',
u'\u203a': '{\\guilsinglright}',
u'\u2057': "$''''$",
u'\u205f': '{\\mkern4mu}',
u'\u2060': '{\\nolinebreak}',
u'\u20a7': '{\\ensuremath{\\Elzpes}}',
u'\u20ac': '{\\mbox{\\texteuro}}',
u'\u20db': '$\\dddot$',
u'\u20dc': '$\\ddddot$',
u'\u2102': '$\\mathbb{C}$',
u'\u210a': '{\\mathscr{g}}',
u'\u210b': '$\\mathscr{H}$',
u'\u210c': '$\\mathfrak{H}$',
u'\u210d': '$\\mathbb{H}$',
u'\u210f': '$\\hslash$',
u'\u2110': '$\\mathscr{I}$',
u'\u2111': '$\\mathfrak{I}$',
u'\u2112': '$\\mathscr{L}$',
u'\u2113': '$\\mathscr{l}$',
u'\u2115': '$\\mathbb{N}$',
u'\u2116': '{\\cyrchar\\textnumero}',
u'\u2118': '$\\wp$',
u'\u2119': '$\\mathbb{P}$',
u'\u211a': '$\\mathbb{Q}$',
u'\u211b': '$\\mathscr{R}$',
u'\u211c': '$\\mathfrak{R}$',
u'\u211d': '$\\mathbb{R}$',
u'\u211e': '$\\Elzxrat$',
u'\u2122': '{\\texttrademark}',
u'\u2124': '$\\mathbb{Z}$',
u'\u2126': '$\\Omega$',
u'\u2127': '$\\mho$',
u'\u2128': '$\\mathfrak{Z}$',
u'\u2129': '$\\ElsevierGlyph{2129}$',
u'\u212b': '{\\AA}',
u'\u212c': '$\\mathscr{B}$',
u'\u212d': '$\\mathfrak{C}$',
u'\u212f': '$\\mathscr{e}$',
u'\u2130': '$\\mathscr{E}$',
u'\u2131': '$\\mathscr{F}$',
u'\u2133': '$\\mathscr{M}$',
u'\u2134': '$\\mathscr{o}$',
u'\u2135': '$\\aleph$',
u'\u2136': '$\\beth$',
u'\u2137': '$\\gimel$',
u'\u2138': '$\\daleth$',
u'\u2153': '$\\textfrac{1}{3}$',
u'\u2154': '$\\textfrac{2}{3}$',
u'\u2155': '$\\textfrac{1}{5}$',
u'\u2156': '$\\textfrac{2}{5}$',
u'\u2157': '$\\textfrac{3}{5}$',
u'\u2158': '$\\textfrac{4}{5}$',
u'\u2159': '$\\textfrac{1}{6}$',
u'\u215a': '$\\textfrac{5}{6}$',
u'\u215b': '$\\textfrac{1}{8}$',
u'\u215c': '$\\textfrac{3}{8}$',
u'\u215d': '$\\textfrac{5}{8}$',
u'\u215e': '$\\textfrac{7}{8}$',
u'\u2190': '$\\leftarrow$',
u'\u2191': '$\\uparrow$',
u'\u2192': '$\\rightarrow$',
u'\u2193': '$\\downarrow$',
u'\u2194': '$\\leftrightarrow$',
u'\u2195': '$\\updownarrow$',
u'\u2196': '$\\nwarrow$',
u'\u2197': '$\\nearrow$',
u'\u2198': '$\\searrow$',
u'\u2199': '$\\swarrow$',
u'\u219a': '$\\nleftarrow$',
u'\u219b': '$\\nrightarrow$',
u'\u219c': '$\\arrowwaveright$',
u'\u219d': '$\\arrowwaveright$',
u'\u219e': '$\\twoheadleftarrow$',
u'\u21a0': '$\\twoheadrightarrow$',
u'\u21a2': '$\\leftarrowtail$',
u'\u21a3': '$\\rightarrowtail$',
u'\u21a6': '$\\mapsto$',
u'\u21a9': '$\\hookleftarrow$',
u'\u21aa': '$\\hookrightarrow$',
u'\u21ab': '$\\looparrowleft$',
u'\u21ac': '$\\looparrowright$',
u'\u21ad': '$\\leftrightsquigarrow$',
u'\u21ae': '$\\nleftrightarrow$',
u'\u21b0': '$\\Lsh$',
u'\u21b1': '$\\Rsh$',
u'\u21b3': '$\\ElsevierGlyph{21B3}$',
u'\u21b6': '$\\curvearrowleft$',
u'\u21b7': '$\\curvearrowright$',
u'\u21ba': '$\\circlearrowleft$',
u'\u21bb': '$\\circlearrowright$',
u'\u21bc': '$\\leftharpoonup$',
u'\u21bd': '$\\leftharpoondown$',
u'\u21be': '$\\upharpoonright$',
u'\u21bf': '$\\upharpoonleft$',
u'\u21c0': '$\\rightharpoonup$',
u'\u21c1': '$\\rightharpoondown$',
u'\u21c2': '$\\downharpoonright$',
u'\u21c3': '$\\downharpoonleft$',
u'\u21c4': '$\\rightleftarrows$',
u'\u21c5': '$\\dblarrowupdown$',
u'\u21c6': '$\\leftrightarrows$',
u'\u21c7': '$\\leftleftarrows$',
u'\u21c8': '$\\upuparrows$',
u'\u21c9': '$\\rightrightarrows$',
u'\u21ca': '$\\downdownarrows$',
u'\u21cb': '$\\leftrightharpoons$',
u'\u21cc': '$\\rightleftharpoons$',
u'\u21cd': '$\\nLeftarrow$',
u'\u21ce': '$\\nLeftrightarrow$',
u'\u21cf': '$\\nRightarrow$',
u'\u21d0': '$\\Leftarrow$',
u'\u21d1': '$\\Uparrow$',
u'\u21d2': '$\\Rightarrow$',
u'\u21d3': '$\\Downarrow$',
u'\u21d4': '$\\Leftrightarrow$',
u'\u21d5': '$\\Updownarrow$',
u'\u21da': '$\\Lleftarrow$',
u'\u21db': '$\\Rrightarrow$',
u'\u21dd': '$\\rightsquigarrow$',
u'\u21f5': '$\\DownArrowUpArrow$',
u'\u2200': '$\\forall$',
u'\u2201': '$\\complement$',
u'\u2202': '$\\partial$',
u'\u2203': '$\\exists$',
u'\u2204': '$\\nexists$',
u'\u2205': '$\\varnothing$',
u'\u2207': '$\\nabla$',
u'\u2208': '$\\in$',
u'\u2209': '$\\not\\in$',
u'\u220b': '$\\ni$',
u'\u220c': '$\\not\\ni$',
u'\u220f': '$\\prod$',
u'\u2210': '$\\coprod$',
u'\u2211': '$\\sum$',
u'\u2212': '{-}',
u'\u2213': '$\\mp$',
u'\u2214': '$\\dotplus$',
u'\u2216': '$\\setminus$',
u'\u2217': '${_\\ast}$',
u'\u2218': '$\\circ$',
u'\u2219': '$\\bullet$',
u'\u221a': '$\\surd$',
u'\u221d': '$\\propto$',
u'\u221e': '$\\infty$',
u'\u221f': '$\\rightangle$',
u'\u2220': '$\\angle$',
u'\u2221': '$\\measuredangle$',
u'\u2222': '$\\sphericalangle$',
u'\u2223': '$\\mid$',
u'\u2224': '$\\nmid$',
u'\u2225': '$\\parallel$',
u'\u2226': '$\\nparallel$',
u'\u2227': '$\\wedge$',
u'\u2228': '$\\vee$',
u'\u2229': '$\\cap$',
u'\u222a': '$\\cup$',
u'\u222b': '$\\int$',
u'\u222c': '$\\int\\!\\int$',
u'\u222d': '$\\int\\!\\int\\!\\int$',
u'\u222e': '$\\oint$',
u'\u222f': '$\\surfintegral$',
u'\u2230': '$\\volintegral$',
u'\u2231': '$\\clwintegral$',
u'\u2232': '$\\ElsevierGlyph{2232}$',
u'\u2233': '$\\ElsevierGlyph{2233}$',
u'\u2234': '$\\therefore$',
u'\u2235': '$\\because$',
u'\u2237': '$\\Colon$',
u'\u2238': '$\\ElsevierGlyph{2238}$',
u'\u223a': '$\\mathbin{{:}\\!\\!{-}\\!\\!{:}}$',
u'\u223b': '$\\homothetic$',
u'\u223c': '$\\sim$',
u'\u223d': '$\\backsim$',
u'\u223e': '$\\lazysinv$',
u'\u2240': '$\\wr$',
u'\u2241': '$\\not\\sim$',
u'\u2242': '$\\ElsevierGlyph{2242}$',
u'\u2243': '$\\simeq$',
u'\u2244': '$\\not\\simeq$',
u'\u2245': '$\\cong$',
u'\u2246': '$\\approxnotequal$',
u'\u2247': '$\\not\\cong$',
u'\u2248': '$\\approx$',
u'\u2249': '$\\not\\approx$',
u'\u224a': '$\\approxeq$',
u'\u224b': '$\\tildetrpl$',
u'\u224c': '$\\allequal$',
u'\u224d': '$\\asymp$',
u'\u224e': '$\\Bumpeq$',
u'\u224f': '$\\bumpeq$',
u'\u2250': '$\\doteq$',
u'\u2251': '$\\doteqdot$',
u'\u2252': '$\\fallingdotseq$',
u'\u2253': '$\\risingdotseq$',
u'\u2254': '{:=}',
u'\u2255': '$=:$',
u'\u2256': '$\\eqcirc$',
u'\u2257': '$\\circeq$',
u'\u2259': '$\\estimates$',
u'\u225a': '$\\ElsevierGlyph{225A}$',
u'\u225b': '$\\starequal$',
u'\u225c': '$\\triangleq$',
u'\u225f': '$\\ElsevierGlyph{225F}$',
u'\u2260': '$\\not =$',
u'\u2261': '$\\equiv$',
u'\u2262': '$\\not\\equiv$',
u'\u2264': '$\\leq$',
u'\u2265': '$\\geq$',
u'\u2266': '$\\leqq$',
u'\u2267': '$\\geqq$',
u'\u2268': '$\\lneqq$',
u'\u2269': '$\\gneqq$',
u'\u226a': '$\\ll$',
u'\u226b': '$\\gg$',
u'\u226c': '$\\between$',
u'\u226d': '$\\not\\kern-0.3em\\times$',
u'\u226e': '$\\not<$',
u'\u226f': '$\\not>$',
u'\u2270': '$\\not\\leq$',
u'\u2271': '$\\not\\geq$',
u'\u2272': '$\\lessequivlnt$',
u'\u2273': '$\\greaterequivlnt$',
u'\u2274': '$\\ElsevierGlyph{2274}$',
u'\u2275': '$\\ElsevierGlyph{2275}$',
u'\u2276': '$\\lessgtr$',
u'\u2277': '$\\gtrless$',
u'\u2278': '$\\notlessgreater$',
u'\u2279': '$\\notgreaterless$',
u'\u227a': '$\\prec$',
u'\u227b': '$\\succ$',
u'\u227c': '$\\preccurlyeq$',
u'\u227d': '$\\succcurlyeq$',
u'\u227e': '$\\precapprox$',
u'\u227f': '$\\succapprox$',
u'\u2280': '$\\not\\prec$',
u'\u2281': '$\\not\\succ$',
u'\u2282': '$\\subset$',
u'\u2283': '$\\supset$',
u'\u2284': '$\\not\\subset$',
u'\u2285': '$\\not\\supset$',
u'\u2286': '$\\subseteq$',
u'\u2287': '$\\supseteq$',
u'\u2288': '$\\not\\subseteq$',
u'\u2289': '$\\not\\supseteq$',
u'\u228a': '$\\subsetneq$',
u'\u228b': '$\\supsetneq$',
u'\u228e': '$\\uplus$',
u'\u228f': '$\\sqsubset$',
u'\u2290': '$\\sqsupset$',
u'\u2291': '$\\sqsubseteq$',
u'\u2292': '$\\sqsupseteq$',
u'\u2293': '$\\sqcap$',
u'\u2294': '$\\sqcup$',
u'\u2295': '$\\oplus$',
u'\u2296': '$\\ominus$',
u'\u2297': '$\\otimes$',
u'\u2298': '$\\oslash$',
u'\u2299': '$\\odot$',
u'\u229a': '$\\circledcirc$',
u'\u229b': '$\\circledast$',
u'\u229d': '$\\circleddash$',
u'\u229e': '$\\boxplus$',
u'\u229f': '$\\boxminus$',
u'\u22a0': '$\\boxtimes$',
u'\u22a1': '$\\boxdot$',
u'\u22a2': '$\\vdash$',
u'\u22a3': '$\\dashv$',
u'\u22a4': '$\\top$',
u'\u22a5': '$\\perp$',
u'\u22a7': '$\\truestate$',
u'\u22a8': '$\\forcesextra$',
u'\u22a9': '$\\Vdash$',
u'\u22aa': '$\\Vvdash$',
u'\u22ab': '$\\VDash$',
u'\u22ac': '$\\nvdash$',
u'\u22ad': '$\\nvDash$',
u'\u22ae': '$\\nVdash$',
u'\u22af': '$\\nVDash$',
u'\u22b2': '$\\vartriangleleft$',
u'\u22b3': '$\\vartriangleright$',
u'\u22b4': '$\\trianglelefteq$',
u'\u22b5': '$\\trianglerighteq$',
u'\u22b6': '$\\original$',
u'\u22b7': '$\\image$',
u'\u22b8': '$\\multimap$',
u'\u22b9': '$\\hermitconjmatrix$',
u'\u22ba': '$\\intercal$',
u'\u22bb': '$\\veebar$',
u'\u22be': '$\\rightanglearc$',
u'\u22c0': '$\\ElsevierGlyph{22C0}$',
u'\u22c1': '$\\ElsevierGlyph{22C1}$',
u'\u22c2': '$\\bigcap$',
u'\u22c3': '$\\bigcup$',
u'\u22c4': '$\\diamond$',
u'\u22c5': '$\\cdot$',
u'\u22c6': '$\\star$',
u'\u22c7': '$\\divideontimes$',
u'\u22c8': '$\\bowtie$',
u'\u22c9': '$\\ltimes$',
u'\u22ca': '$\\rtimes$',
u'\u22cb': '$\\leftthreetimes$',
u'\u22cc': '$\\rightthreetimes$',
u'\u22cd': '$\\backsimeq$',
u'\u22ce': '$\\curlyvee$',
u'\u22cf': '$\\curlywedge$',
u'\u22d0': '$\\Subset$',
u'\u22d1': '$\\Supset$',
u'\u22d2': '$\\Cap$',
u'\u22d3': '$\\Cup$',
u'\u22d4': '$\\pitchfork$',
u'\u22d6': '$\\lessdot$',
u'\u22d7': '$\\gtrdot$',
u'\u22d8': '$\\verymuchless$',
u'\u22d9': '$\\verymuchgreater$',
u'\u22da': '$\\lesseqgtr$',
u'\u22db': '$\\gtreqless$',
u'\u22de': '$\\curlyeqprec$',
u'\u22df': '$\\curlyeqsucc$',
u'\u22e2': '$\\not\\sqsubseteq$',
u'\u22e3': '$\\not\\sqsupseteq$',
u'\u22e5': '$\\Elzsqspne$',
u'\u22e6': '$\\lnsim$',
u'\u22e7': '$\\gnsim$',
u'\u22e8': '$\\precedesnotsimilar$',
u'\u22e9': '$\\succnsim$',
u'\u22ea': '$\\ntriangleleft$',
u'\u22eb': '$\\ntriangleright$',
u'\u22ec': '$\\ntrianglelefteq$',
u'\u22ed': '$\\ntrianglerighteq$',
u'\u22ee': '$\\vdots$',
u'\u22ef': '$\\cdots$',
u'\u22f0': '$\\upslopeellipsis$',
u'\u22f1': '$\\downslopeellipsis$',
u'\u2305': '{\\barwedge}',
u'\u2306': '$\\perspcorrespond$',
u'\u2308': '$\\lceil$',
u'\u2309': '$\\rceil$',
u'\u230a': '$\\lfloor$',
u'\u230b': '$\\rfloor$',
u'\u2315': '$\\recorder$',
u'\u2316': '$\\mathchar"2208$',
u'\u231c': '$\\ulcorner$',
u'\u231d': '$\\urcorner$',
u'\u231e': '$\\llcorner$',
u'\u231f': '$\\lrcorner$',
u'\u2322': '$\\frown$',
u'\u2323': '$\\smile$',
u'\u2329': '$\\langle$',
u'\u232a': '$\\rangle$',
u'\u233d': '$\\ElsevierGlyph{E838}$',
u'\u23a3': '$\\Elzdlcorn$',
u'\u23b0': '$\\lmoustache$',
u'\u23b1': '$\\rmoustache$',
u'\u2423': '{\\textvisiblespace}',
u'\u2460': '{\\ding{172}}',
u'\u2461': '{\\ding{173}}',
u'\u2462': '{\\ding{174}}',
u'\u2463': '{\\ding{175}}',
u'\u2464': '{\\ding{176}}',
u'\u2465': '{\\ding{177}}',
u'\u2466': '{\\ding{178}}',
u'\u2467': '{\\ding{179}}',
u'\u2468': '{\\ding{180}}',
u'\u2469': '{\\ding{181}}',
u'\u24c8': '$\\circledS$',
u'\u2506': '$\\Elzdshfnc$',
u'\u2519': '$\\Elzsqfnw$',
u'\u2571': '$\\diagup$',
u'\u25a0': '{\\ding{110}}',
u'\u25a1': '$\\square$',
u'\u25aa': '$\\blacksquare$',
u'\u25ad': '$\\fbox{~~}$',
u'\u25af': '$\\Elzvrecto$',
u'\u25b1': '$\\ElsevierGlyph{E381}$',
u'\u25b2': '{\\ding{115}}',
u'\u25b3': '$\\bigtriangleup$',
u'\u25b4': '$\\blacktriangle$',
u'\u25b5': '$\\vartriangle$',
u'\u25b8': '$\\blacktriangleright$',
u'\u25b9': '$\\triangleright$',
u'\u25bc': '{\\ding{116}}',
u'\u25bd': '$\\bigtriangledown$',
u'\u25be': '$\\blacktriangledown$',
u'\u25bf': '$\\triangledown$',
u'\u25c2': '$\\blacktriangleleft$',
u'\u25c3': '$\\triangleleft$',
u'\u25c6': '{\\ding{117}}',
u'\u25ca': '$\\lozenge$',
u'\u25cb': '$\\bigcirc$',
u'\u25cf': '{\\ding{108}}',
u'\u25d0': '$\\Elzcirfl$',
u'\u25d1': '$\\Elzcirfr$',
u'\u25d2': '$\\Elzcirfb$',
u'\u25d7': '{\\ding{119}}',
u'\u25d8': '$\\Elzrvbull$',
u'\u25e7': '$\\Elzsqfl$',
u'\u25e8': '$\\Elzsqfr$',
u'\u25ea': '$\\Elzsqfse$',
u'\u25ef': '$\\bigcirc$',
u'\u2605': '{\\ding{72}}',
u'\u2606': '{\\ding{73}}',
u'\u260e': '{\\ding{37}}',
u'\u261b': '{\\ding{42}}',
u'\u261e': '{\\ding{43}}',
u'\u263e': '{\\rightmoon}',
u'\u263f': '{\\mercury}',
u'\u2640': '{\\venus}',
u'\u2642': '{\\male}',
u'\u2643': '{\\jupiter}',
u'\u2644': '{\\saturn}',
u'\u2645': '{\\uranus}',
u'\u2646': '{\\neptune}',
u'\u2647': '{\\pluto}',
u'\u2648': '{\\aries}',
u'\u2649': '{\\taurus}',
u'\u264a': '{\\gemini}',
u'\u264b': '{\\cancer}',
u'\u264c': '{\\leo}',
u'\u264d': '{\\virgo}',
u'\u264e': '{\\libra}',
u'\u264f': '{\\scorpio}',
u'\u2650': '{\\sagittarius}',
u'\u2651': '{\\capricornus}',
u'\u2652': '{\\aquarius}',
u'\u2653': '{\\pisces}',
u'\u2660': '{\\ding{171}}',
u'\u2662': '$\\diamond$',
u'\u2663': '{\\ding{168}}',
u'\u2665': '{\\ding{170}}',
u'\u2666': '{\\ding{169}}',
u'\u2669': '{\\quarternote}',
u'\u266a': '{\\eighthnote}',
u'\u266d': '$\\flat$',
u'\u266e': '$\\natural$',
u'\u266f': '$\\sharp$',
u'\u2701': '{\\ding{33}}',
u'\u2702': '{\\ding{34}}',
u'\u2703': '{\\ding{35}}',
u'\u2704': '{\\ding{36}}',
u'\u2706': '{\\ding{38}}',
u'\u2707': '{\\ding{39}}',
u'\u2708': '{\\ding{40}}',
u'\u2709': '{\\ding{41}}',
u'\u270c': '{\\ding{44}}',
u'\u270d': '{\\ding{45}}',
u'\u270e': '{\\ding{46}}',
u'\u270f': '{\\ding{47}}',
u'\u2710': '{\\ding{48}}',
u'\u2711': '{\\ding{49}}',
u'\u2712': '{\\ding{50}}',
u'\u2713': '{\\ding{51}}',
u'\u2714': '{\\ding{52}}',
u'\u2715': '{\\ding{53}}',
u'\u2716': '{\\ding{54}}',
u'\u2717': '{\\ding{55}}',
u'\u2718': '{\\ding{56}}',
u'\u2719': '{\\ding{57}}',
u'\u271a': '{\\ding{58}}',
u'\u271b': '{\\ding{59}}',
u'\u271c': '{\\ding{60}}',
u'\u271d': '{\\ding{61}}',
u'\u271e': '{\\ding{62}}',
u'\u271f': '{\\ding{63}}',
u'\u2720': '{\\ding{64}}',
u'\u2721': '{\\ding{65}}',
u'\u2722': '{\\ding{66}}',
u'\u2723': '{\\ding{67}}',
u'\u2724': '{\\ding{68}}',
u'\u2725': '{\\ding{69}}',
u'\u2726': '{\\ding{70}}',
u'\u2727': '{\\ding{71}}',
u'\u2729': '{\\ding{73}}',
u'\u272a': '{\\ding{74}}',
u'\u272b': '{\\ding{75}}',
u'\u272c': '{\\ding{76}}',
u'\u272d': '{\\ding{77}}',
u'\u272e': '{\\ding{78}}',
u'\u272f': '{\\ding{79}}',
u'\u2730': '{\\ding{80}}',
u'\u2731': '{\\ding{81}}',
u'\u2732': '{\\ding{82}}',
u'\u2733': '{\\ding{83}}',
u'\u2734': '{\\ding{84}}',
u'\u2735': '{\\ding{85}}',
u'\u2736': '{\\ding{86}}',
u'\u2737': '{\\ding{87}}',
u'\u2738': '{\\ding{88}}',
u'\u2739': '{\\ding{89}}',
u'\u273a': '{\\ding{90}}',
u'\u273b': '{\\ding{91}}',
u'\u273c': '{\\ding{92}}',
u'\u273d': '{\\ding{93}}',
u'\u273e': '{\\ding{94}}',
u'\u273f': '{\\ding{95}}',
u'\u2740': '{\\ding{96}}',
u'\u2741': '{\\ding{97}}',
u'\u2742': '{\\ding{98}}',
u'\u2743': '{\\ding{99}}',
u'\u2744': '{\\ding{100}}',
u'\u2745': '{\\ding{101}}',
u'\u2746': '{\\ding{102}}',
u'\u2747': '{\\ding{103}}',
u'\u2748': '{\\ding{104}}',
u'\u2749': '{\\ding{105}}',
u'\u274a': '{\\ding{106}}',
u'\u274b': '{\\ding{107}}',
u'\u274d': '{\\ding{109}}',
u'\u274f': '{\\ding{111}}',
u'\u2750': '{\\ding{112}}',
u'\u2751': '{\\ding{113}}',
u'\u2752': '{\\ding{114}}',
u'\u2756': '{\\ding{118}}',
u'\u2758': '{\\ding{120}}',
u'\u2759': '{\\ding{121}}',
u'\u275a': '{\\ding{122}}',
u'\u275b': '{\\ding{123}}',
u'\u275c': '{\\ding{124}}',
u'\u275d': '{\\ding{125}}',
u'\u275e': '{\\ding{126}}',
u'\u2761': '{\\ding{161}}',
u'\u2762': '{\\ding{162}}',
u'\u2763': '{\\ding{163}}',
u'\u2764': '{\\ding{164}}',
u'\u2765': '{\\ding{165}}',
u'\u2766': '{\\ding{166}}',
u'\u2767': '{\\ding{167}}',
u'\u2776': '{\\ding{182}}',
u'\u2777': '{\\ding{183}}',
u'\u2778': '{\\ding{184}}',
u'\u2779': '{\\ding{185}}',
u'\u277a': '{\\ding{186}}',
u'\u277b': '{\\ding{187}}',
u'\u277c': '{\\ding{188}}',
u'\u277d': '{\\ding{189}}',
u'\u277e': '{\\ding{190}}',
u'\u277f': '{\\ding{191}}',
u'\u2780': '{\\ding{192}}',
u'\u2781': '{\\ding{193}}',
u'\u2782': '{\\ding{194}}',
u'\u2783': '{\\ding{195}}',
u'\u2784': '{\\ding{196}}',
u'\u2785': '{\\ding{197}}',
u'\u2786': '{\\ding{198}}',
u'\u2787': '{\\ding{199}}',
u'\u2788': '{\\ding{200}}',
u'\u2789': '{\\ding{201}}',
u'\u278a': '{\\ding{202}}',
u'\u278b': '{\\ding{203}}',
u'\u278c': '{\\ding{204}}',
u'\u278d': '{\\ding{205}}',
u'\u278e': '{\\ding{206}}',
u'\u278f': '{\\ding{207}}',
u'\u2790': '{\\ding{208}}',
u'\u2791': '{\\ding{209}}',
u'\u2792': '{\\ding{210}}',
u'\u2793': '{\\ding{211}}',
u'\u2794': '{\\ding{212}}',
u'\u2798': '{\\ding{216}}',
u'\u2799': '{\\ding{217}}',
u'\u279a': '{\\ding{218}}',
u'\u279b': '{\\ding{219}}',
u'\u279c': '{\\ding{220}}',
u'\u279d': '{\\ding{221}}',
u'\u279e': '{\\ding{222}}',
u'\u279f': '{\\ding{223}}',
u'\u27a0': '{\\ding{224}}',
u'\u27a1': '{\\ding{225}}',
u'\u27a2': '{\\ding{226}}',
u'\u27a3': '{\\ding{227}}',
u'\u27a4': '{\\ding{228}}',
u'\u27a5': '{\\ding{229}}',
u'\u27a6': '{\\ding{230}}',
u'\u27a7': '{\\ding{231}}',
u'\u27a8': '{\\ding{232}}',
u'\u27a9': '{\\ding{233}}',
u'\u27aa': '{\\ding{234}}',
u'\u27ab': '{\\ding{235}}',
u'\u27ac': '{\\ding{236}}',
u'\u27ad': '{\\ding{237}}',
u'\u27ae': '{\\ding{238}}',
u'\u27af': '{\\ding{239}}',
u'\u27b1': '{\\ding{241}}',
u'\u27b2': '{\\ding{242}}',
u'\u27b3': '{\\ding{243}}',
u'\u27b4': '{\\ding{244}}',
u'\u27b5': '{\\ding{245}}',
u'\u27b6': '{\\ding{246}}',
u'\u27b7': '{\\ding{247}}',
u'\u27b8': '{\\ding{248}}',
u'\u27b9': '{\\ding{249}}',
u'\u27ba': '{\\ding{250}}',
u'\u27bb': '{\\ding{251}}',
u'\u27bc': '{\\ding{252}}',
u'\u27bd': '{\\ding{253}}',
u'\u27be': '{\\ding{254}}',
u'\u27f5': '$\\longleftarrow$',
u'\u27f6': '$\\longrightarrow$',
u'\u27f7': '$\\longleftrightarrow$',
u'\u27f8': '$\\Longleftarrow$',
u'\u27f9': '$\\Longrightarrow$',
u'\u27fa': '$\\Longleftrightarrow$',
u'\u27fc': '$\\longmapsto$',
u'\u27ff': '$\\sim\\joinrel\\leadsto$',
u'\u2905': '$\\ElsevierGlyph{E212}$',
u'\u2912': '$\\UpArrowBar$',
u'\u2913': '$\\DownArrowBar$',
u'\u2923': '$\\ElsevierGlyph{E20C}$',
u'\u2924': '$\\ElsevierGlyph{E20D}$',
u'\u2925': '$\\ElsevierGlyph{E20B}$',
u'\u2926': '$\\ElsevierGlyph{E20A}$',
u'\u2927': '$\\ElsevierGlyph{E211}$',
u'\u2928': '$\\ElsevierGlyph{E20E}$',
u'\u2929': '$\\ElsevierGlyph{E20F}$',
u'\u292a': '$\\ElsevierGlyph{E210}$',
u'\u2933': '$\\ElsevierGlyph{E21C}$',
u'\u2936': '$\\ElsevierGlyph{E21A}$',
u'\u2937': '$\\ElsevierGlyph{E219}$',
u'\u2940': '$\\Elolarr$',
u'\u2941': '$\\Elorarr$',
u'\u2942': '$\\ElzRlarr$',
u'\u2944': '$\\ElzrLarr$',
u'\u2947': '$\\Elzrarrx$',
u'\u294e': '$\\LeftRightVector$',
u'\u294f': '$\\RightUpDownVector$',
u'\u2950': '$\\DownLeftRightVector$',
u'\u2951': '$\\LeftUpDownVector$',
u'\u2952': '$\\LeftVectorBar$',
u'\u2953': '$\\RightVectorBar$',
u'\u2954': '$\\RightUpVectorBar$',
u'\u2955': '$\\RightDownVectorBar$',
u'\u2956': '$\\DownLeftVectorBar$',
u'\u2957': '$\\DownRightVectorBar$',
u'\u2958': '$\\LeftUpVectorBar$',
u'\u2959': '$\\LeftDownVectorBar$',
u'\u295a': '$\\LeftTeeVector$',
u'\u295b': '$\\RightTeeVector$',
u'\u295c': '$\\RightUpTeeVector$',
u'\u295d': '$\\RightDownTeeVector$',
u'\u295e': '$\\DownLeftTeeVector$',
u'\u295f': '$\\DownRightTeeVector$',
u'\u2960': '$\\LeftUpTeeVector$',
u'\u2961': '$\\LeftDownTeeVector$',
u'\u296e': '$\\UpEquilibrium$',
u'\u296f': '$\\ReverseUpEquilibrium$',
u'\u2970': '$\\RoundImplies$',
u'\u297c': '$\\ElsevierGlyph{E214}$',
u'\u297d': '$\\ElsevierGlyph{E215}$',
u'\u2980': '$\\Elztfnc$',
u'\u2985': '$\\ElsevierGlyph{3018}$',
u'\u2986': '$\\Elroang$',
u'\u2993': '$<\\kern-0.58em($',
u'\u2994': '$\\ElsevierGlyph{E291}$',
u'\u2999': '$\\Elzddfnc$',
u'\u299c': '$\\Angle$',
u'\u29a0': '$\\Elzlpargt$',
u'\u29b5': '$\\ElsevierGlyph{E260}$',
u'\u29b6': '$\\ElsevierGlyph{E61B}$',
u'\u29ca': '$\\ElzLap$',
u'\u29cb': '$\\Elzdefas$',
u'\u29cf': '$\\LeftTriangleBar$',
u'\u29d0': '$\\RightTriangleBar$',
u'\u29dc': '$\\ElsevierGlyph{E372}$',
u'\u29eb': '$\\blacklozenge$',
u'\u29f4': '$\\RuleDelayed$',
u'\u2a04': '$\\Elxuplus$',
u'\u2a05': '$\\ElzThr$',
u'\u2a06': '$\\Elxsqcup$',
u'\u2a07': '$\\ElzInf$',
u'\u2a08': '$\\ElzSup$',
u'\u2a0d': '$\\ElzCint$',
u'\u2a0f': '$\\clockoint$',
u'\u2a10': '$\\ElsevierGlyph{E395}$',
u'\u2a16': '$\\sqrint$',
u'\u2a25': '$\\ElsevierGlyph{E25A}$',
u'\u2a2a': '$\\ElsevierGlyph{E25B}$',
u'\u2a2d': '$\\ElsevierGlyph{E25C}$',
u'\u2a2e': '$\\ElsevierGlyph{E25D}$',
u'\u2a2f': '$\\ElzTimes$',
u'\u2a34': '$\\ElsevierGlyph{E25E}$',
u'\u2a35': '$\\ElsevierGlyph{E25E}$',
u'\u2a3c': '$\\ElsevierGlyph{E259}$',
u'\u2a3f': '$\\amalg$',
u'\u2a53': '$\\ElzAnd$',
u'\u2a54': '$\\ElzOr$',
u'\u2a55': '$\\ElsevierGlyph{E36E}$',
u'\u2a56': '$\\ElOr$',
u'\u2a5e': '$\\perspcorrespond$',
u'\u2a5f': '$\\Elzminhat$',
u'\u2a63': '$\\ElsevierGlyph{225A}$',
u'\u2a6e': '$\\stackrel{*}{=}$',
u'\u2a75': '$\\Equal$',
u'\u2a7d': '$\\leqslant$',
u'\u2a7e': '$\\geqslant$',
u'\u2a85': '$\\lessapprox$',
u'\u2a86': '$\\gtrapprox$',
u'\u2a87': '$\\lneq$',
u'\u2a88': '$\\gneq$',
u'\u2a89': '$\\lnapprox$',
u'\u2a8a': '$\\gnapprox$',
u'\u2a8b': '$\\lesseqqgtr$',
u'\u2a8c': '$\\gtreqqless$',
u'\u2a95': '$\\eqslantless$',
u'\u2a96': '$\\eqslantgtr$',
u'\u2a9d': '$\\Pisymbol{ppi020}{117}$',
u'\u2a9e': '$\\Pisymbol{ppi020}{105}$',
u'\u2aa1': '$\\NestedLessLess$',
u'\u2aa2': '$\\NestedGreaterGreater$',
u'\u2aaf': '$\\preceq$',
u'\u2ab0': '$\\succeq$',
u'\u2ab5': '$\\precneqq$',
u'\u2ab6': '$\\succneqq$',
u'\u2ab7': '$\\precapprox$',
u'\u2ab8': '$\\succapprox$',
u'\u2ab9': '$\\precnapprox$',
u'\u2aba': '$\\succnapprox$',
u'\u2ac5': '$\\subseteqq$',
u'\u2ac6': '$\\supseteqq$',
u'\u2acb': '$\\subsetneqq$',
u'\u2acc': '$\\supsetneqq$',
u'\u2aeb': '$\\ElsevierGlyph{E30D}$',
u'\u2af6': '$\\Elztdcol$',
u'\u2afd': '${{/}\\!\\!{/}}$',
u'\u300a': '$\\ElsevierGlyph{300A}$',
u'\u300b': '$\\ElsevierGlyph{300B}$',
u'\u3018': '$\\ElsevierGlyph{3018}$',
u'\u3019': '$\\ElsevierGlyph{3019}$',
u'\u301a': '$\\openbracketleft$',
u'\u301b': '$\\openbracketright$',
u'\ufb00': '{ff}',
u'\ufb01': '{fi}',
u'\ufb02': '{fl}',
u'\ufb03': '{ffi}',
u'\ufb04': '{ffl}',
u'\U0001d400': '$\\mathbf{A}$',
u'\U0001d401': '$\\mathbf{B}$',
u'\U0001d402': '$\\mathbf{C}$',
u'\U0001d403': '$\\mathbf{D}$',
u'\U0001d404': '$\\mathbf{E}$',
u'\U0001d405': '$\\mathbf{F}$',
u'\U0001d406': '$\\mathbf{G}$',
u'\U0001d407': '$\\mathbf{H}$',
u'\U0001d408': '$\\mathbf{I}$',
u'\U0001d409': '$\\mathbf{J}$',
u'\U0001d40a': '$\\mathbf{K}$',
u'\U0001d40b': '$\\mathbf{L}$',
u'\U0001d40c': '$\\mathbf{M}$',
u'\U0001d40d': '$\\mathbf{N}$',
u'\U0001d40e': '$\\mathbf{O}$',
u'\U0001d40f': '$\\mathbf{P}$',
u'\U0001d410': '$\\mathbf{Q}$',
u'\U0001d411': '$\\mathbf{R}$',
u'\U0001d412': '$\\mathbf{S}$',
u'\U0001d413': '$\\mathbf{T}$',
u'\U0001d414': '$\\mathbf{U}$',
u'\U0001d415': '$\\mathbf{V}$',
u'\U0001d416': '$\\mathbf{W}$',
u'\U0001d417': '$\\mathbf{X}$',
u'\U0001d418': '$\\mathbf{Y}$',
u'\U0001d419': '$\\mathbf{Z}$',
u'\U0001d41a': '$\\mathbf{a}$',
u'\U0001d41b': '$\\mathbf{b}$',
u'\U0001d41c': '$\\mathbf{c}$',
u'\U0001d41d': '$\\mathbf{d}$',
u'\U0001d41e': '$\\mathbf{e}$',
u'\U0001d41f': '$\\mathbf{f}$',
u'\U0001d420': '$\\mathbf{g}$',
u'\U0001d421': '$\\mathbf{h}$',
u'\U0001d422': '$\\mathbf{i}$',
u'\U0001d423': '$\\mathbf{j}$',
u'\U0001d424': '$\\mathbf{k}$',
u'\U0001d425': '$\\mathbf{l}$',
u'\U0001d426': '$\\mathbf{m}$',
u'\U0001d427': '$\\mathbf{n}$',
u'\U0001d428': '$\\mathbf{o}$',
u'\U0001d429': '$\\mathbf{p}$',
u'\U0001d42a': '$\\mathbf{q}$',
u'\U0001d42b': '$\\mathbf{r}$',
u'\U0001d42c': '$\\mathbf{s}$',
u'\U0001d42d': '$\\mathbf{t}$',
u'\U0001d42e': '$\\mathbf{u}$',
u'\U0001d42f': '$\\mathbf{v}$',
u'\U0001d430': '$\\mathbf{w}$',
u'\U0001d431': '$\\mathbf{x}$',
u'\U0001d432': '$\\mathbf{y}$',
u'\U0001d433': '$\\mathbf{z}$',
u'\U0001d434': '$\\mathsl{A}$',
u'\U0001d435': '$\\mathsl{B}$',
u'\U0001d436': '$\\mathsl{C}$',
u'\U0001d437': '$\\mathsl{D}$',
u'\U0001d438': '$\\mathsl{E}$',
u'\U0001d439': '$\\mathsl{F}$',
u'\U0001d43a': '$\\mathsl{G}$',
u'\U0001d43b': '$\\mathsl{H}$',
u'\U0001d43c': '$\\mathsl{I}$',
u'\U0001d43d': '$\\mathsl{J}$',
u'\U0001d43e': '$\\mathsl{K}$',
u'\U0001d43f': '$\\mathsl{L}$',
u'\U0001d440': '$\\mathsl{M}$',
u'\U0001d441': '$\\mathsl{N}$',
u'\U0001d442': '$\\mathsl{O}$',
u'\U0001d443': '$\\mathsl{P}$',
u'\U0001d444': '$\\mathsl{Q}$',
u'\U0001d445': '$\\mathsl{R}$',
u'\U0001d446': '$\\mathsl{S}$',
u'\U0001d447': '$\\mathsl{T}$',
u'\U0001d448': '$\\mathsl{U}$',
u'\U0001d449': '$\\mathsl{V}$',
u'\U0001d44a': '$\\mathsl{W}$',
u'\U0001d44b': '$\\mathsl{X}$',
u'\U0001d44c': '$\\mathsl{Y}$',
u'\U0001d44d': '$\\mathsl{Z}$',
u'\U0001d44e': '$\\mathsl{a}$',
u'\U0001d44f': '$\\mathsl{b}$',
u'\U0001d450': '$\\mathsl{c}$',
u'\U0001d451': '$\\mathsl{d}$',
u'\U0001d452': '$\\mathsl{e}$',
u'\U0001d453': '$\\mathsl{f}$',
u'\U0001d454': '$\\mathsl{g}$',
u'\U0001d456': '$\\mathsl{i}$',
u'\U0001d457': '$\\mathsl{j}$',
u'\U0001d458': '$\\mathsl{k}$',
u'\U0001d459': '$\\mathsl{l}$',
u'\U0001d45a': '$\\mathsl{m}$',
u'\U0001d45b': '$\\mathsl{n}$',
u'\U0001d45c': '$\\mathsl{o}$',
u'\U0001d45d': '$\\mathsl{p}$',
u'\U0001d45e': '$\\mathsl{q}$',
u'\U0001d45f': '$\\mathsl{r}$',
u'\U0001d460': '$\\mathsl{s}$',
u'\U0001d461': '$\\mathsl{t}$',
u'\U0001d462': '$\\mathsl{u}$',
u'\U0001d463': '$\\mathsl{v}$',
u'\U0001d464': '$\\mathsl{w}$',
u'\U0001d465': '$\\mathsl{x}$',
u'\U0001d466': '$\\mathsl{y}$',
u'\U0001d467': '$\\mathsl{z}$',
u'\U0001d468': '$\\mathbit{A}$',
u'\U0001d469': '$\\mathbit{B}$',
u'\U0001d46a': '$\\mathbit{C}$',
u'\U0001d46b': '$\\mathbit{D}$',
u'\U0001d46c': '$\\mathbit{E}$',
u'\U0001d46d': '$\\mathbit{F}$',
u'\U0001d46e': '$\\mathbit{G}$',
u'\U0001d46f': '$\\mathbit{H}$',
u'\U0001d470': '$\\mathbit{I}$',
u'\U0001d471': '$\\mathbit{J}$',
u'\U0001d472': '$\\mathbit{K}$',
u'\U0001d473': '$\\mathbit{L}$',
u'\U0001d474': '$\\mathbit{M}$',
u'\U0001d475': '$\\mathbit{N}$',
u'\U0001d476': '$\\mathbit{O}$',
u'\U0001d477': '$\\mathbit{P}$',
u'\U0001d478': '$\\mathbit{Q}$',
u'\U0001d479': '$\\mathbit{R}$',
u'\U0001d47a': '$\\mathbit{S}$',
u'\U0001d47b': '$\\mathbit{T}$',
u'\U0001d47c': '$\\mathbit{U}$',
u'\U0001d47d': '$\\mathbit{V}$',
u'\U0001d47e': '$\\mathbit{W}$',
u'\U0001d47f': '$\\mathbit{X}$',
u'\U0001d480': '$\\mathbit{Y}$',
u'\U0001d481': '$\\mathbit{Z}$',
u'\U0001d482': '$\\mathbit{a}$',
u'\U0001d483': '$\\mathbit{b}$',
u'\U0001d484': '$\\mathbit{c}$',
u'\U0001d485': '$\\mathbit{d}$',
u'\U0001d486': '$\\mathbit{e}$',
u'\U0001d487': '$\\mathbit{f}$',
u'\U0001d488': '$\\mathbit{g}$',
u'\U0001d489': '$\\mathbit{h}$',
u'\U0001d48a': '$\\mathbit{i}$',
u'\U0001d48b': '$\\mathbit{j}$',
u'\U0001d48c': '$\\mathbit{k}$',
u'\U0001d48d': '$\\mathbit{l}$',
u'\U0001d48e': '$\\mathbit{m}$',
u'\U0001d48f': '$\\mathbit{n}$',
u'\U0001d490': '$\\mathbit{o}$',
u'\U0001d491': '$\\mathbit{p}$',
u'\U0001d492': '$\\mathbit{q}$',
u'\U0001d493': '$\\mathbit{r}$',
u'\U0001d494': '$\\mathbit{s}$',
u'\U0001d495': '$\\mathbit{t}$',
u'\U0001d496': '$\\mathbit{u}$',
u'\U0001d497': '$\\mathbit{v}$',
u'\U0001d498': '$\\mathbit{w}$',
u'\U0001d499': '$\\mathbit{x}$',
u'\U0001d49a': '$\\mathbit{y}$',
u'\U0001d49b': '$\\mathbit{z}$',
u'\U0001d49c': '$\\mathscr{A}$',
u'\U0001d49e': '$\\mathscr{C}$',
u'\U0001d49f': '$\\mathscr{D}$',
u'\U0001d4a2': '$\\mathscr{G}$',
u'\U0001d4a5': '$\\mathscr{J}$',
u'\U0001d4a6': '$\\mathscr{K}$',
u'\U0001d4a9': '$\\mathscr{N}$',
u'\U0001d4aa': '$\\mathscr{O}$',
u'\U0001d4ab': '$\\mathscr{P}$',
u'\U0001d4ac': '$\\mathscr{Q}$',
u'\U0001d4ae': '$\\mathscr{S}$',
u'\U0001d4af': '$\\mathscr{T}$',
u'\U0001d4b0': '$\\mathscr{U}$',
u'\U0001d4b1': '$\\mathscr{V}$',
u'\U0001d4b2': '$\\mathscr{W}$',
u'\U0001d4b3': '$\\mathscr{X}$',
u'\U0001d4b4': '$\\mathscr{Y}$',
u'\U0001d4b5': '$\\mathscr{Z}$',
u'\U0001d4b6': '$\\mathscr{a}$',
u'\U0001d4b7': '$\\mathscr{b}$',
u'\U0001d4b8': '$\\mathscr{c}$',
u'\U0001d4b9': '$\\mathscr{d}$',
u'\U0001d4bb': '$\\mathscr{f}$',
u'\U0001d4bd': '$\\mathscr{h}$',
u'\U0001d4be': '$\\mathscr{i}$',
u'\U0001d4bf': '$\\mathscr{j}$',
u'\U0001d4c0': '$\\mathscr{k}$',
u'\U0001d4c1': '$\\mathscr{l}$',
u'\U0001d4c2': '$\\mathscr{m}$',
u'\U0001d4c3': '$\\mathscr{n}$',
u'\U0001d4c5': '$\\mathscr{p}$',
u'\U0001d4c6': '$\\mathscr{q}$',
u'\U0001d4c7': '$\\mathscr{r}$',
u'\U0001d4c8': '$\\mathscr{s}$',
u'\U0001d4c9': '$\\mathscr{t}$',
u'\U0001d4ca': '$\\mathscr{u}$',
u'\U0001d4cb': '$\\mathscr{v}$',
u'\U0001d4cc': '$\\mathscr{w}$',
u'\U0001d4cd': '$\\mathscr{x}$',
u'\U0001d4ce': '$\\mathscr{y}$',
u'\U0001d4cf': '$\\mathscr{z}$',
u'\U0001d4d0': '$\\mathmit{A}$',
u'\U0001d4d1': '$\\mathmit{B}$',
u'\U0001d4d2': '$\\mathmit{C}$',
u'\U0001d4d3': '$\\mathmit{D}$',
u'\U0001d4d4': '$\\mathmit{E}$',
u'\U0001d4d5': '$\\mathmit{F}$',
u'\U0001d4d6': '$\\mathmit{G}$',
u'\U0001d4d7': '$\\mathmit{H}$',
u'\U0001d4d8': '$\\mathmit{I}$',
u'\U0001d4d9': '$\\mathmit{J}$',
u'\U0001d4da': '$\\mathmit{K}$',
u'\U0001d4db': '$\\mathmit{L}$',
u'\U0001d4dc': '$\\mathmit{M}$',
u'\U0001d4dd': '$\\mathmit{N}$',
u'\U0001d4de': '$\\mathmit{O}$',
u'\U0001d4df': '$\\mathmit{P}$',
u'\U0001d4e0': '$\\mathmit{Q}$',
u'\U0001d4e1': '$\\mathmit{R}$',
u'\U0001d4e2': '$\\mathmit{S}$',
u'\U0001d4e3': '$\\mathmit{T}$',
u'\U0001d4e4': '$\\mathmit{U}$',
u'\U0001d4e5': '$\\mathmit{V}$',
u'\U0001d4e6': '$\\mathmit{W}$',
u'\U0001d4e7': '$\\mathmit{X}$',
u'\U0001d4e8': '$\\mathmit{Y}$',
u'\U0001d4e9': '$\\mathmit{Z}$',
u'\U0001d4ea': '$\\mathmit{a}$',
u'\U0001d4eb': '$\\mathmit{b}$',
u'\U0001d4ec': '$\\mathmit{c}$',
u'\U0001d4ed': '$\\mathmit{d}$',
u'\U0001d4ee': '$\\mathmit{e}$',
u'\U0001d4ef': '$\\mathmit{f}$',
u'\U0001d4f0': '$\\mathmit{g}$',
u'\U0001d4f1': '$\\mathmit{h}$',
u'\U0001d4f2': '$\\mathmit{i}$',
u'\U0001d4f3': '$\\mathmit{j}$',
u'\U0001d4f4': '$\\mathmit{k}$',
u'\U0001d4f5': '$\\mathmit{l}$',
u'\U0001d4f6': '$\\mathmit{m}$',
u'\U0001d4f7': '$\\mathmit{n}$',
u'\U0001d4f8': '$\\mathmit{o}$',
u'\U0001d4f9': '$\\mathmit{p}$',
u'\U0001d4fa': '$\\mathmit{q}$',
u'\U0001d4fb': '$\\mathmit{r}$',
u'\U0001d4fc': '$\\mathmit{s}$',
u'\U0001d4fd': '$\\mathmit{t}$',
u'\U0001d4fe': '$\\mathmit{u}$',
u'\U0001d4ff': '$\\mathmit{v}$',
u'\U0001d500': '$\\mathmit{w}$',
u'\U0001d501': '$\\mathmit{x}$',
u'\U0001d502': '$\\mathmit{y}$',
u'\U0001d503': '$\\mathmit{z}$',
u'\U0001d504': '$\\mathfrak{A}$',
u'\U0001d505': '$\\mathfrak{B}$',
u'\U0001d507': '$\\mathfrak{D}$',
u'\U0001d508': '$\\mathfrak{E}$',
u'\U0001d509': '$\\mathfrak{F}$',
u'\U0001d50a': '$\\mathfrak{G}$',
u'\U0001d50d': '$\\mathfrak{J}$',
u'\U0001d50e': '$\\mathfrak{K}$',
u'\U0001d50f': '$\\mathfrak{L}$',
u'\U0001d510': '$\\mathfrak{M}$',
u'\U0001d511': '$\\mathfrak{N}$',
u'\U0001d512': '$\\mathfrak{O}$',
u'\U0001d513': '$\\mathfrak{P}$',
u'\U0001d514': '$\\mathfrak{Q}$',
u'\U0001d516': '$\\mathfrak{S}$',
u'\U0001d517': '$\\mathfrak{T}$',
u'\U0001d518': '$\\mathfrak{U}$',
u'\U0001d519': '$\\mathfrak{V}$',
u'\U0001d51a': '$\\mathfrak{W}$',
u'\U0001d51b': '$\\mathfrak{X}$',
u'\U0001d51c': '$\\mathfrak{Y}$',
u'\U0001d51e': '$\\mathfrak{a}$',
u'\U0001d51f': '$\\mathfrak{b}$',
u'\U0001d520': '$\\mathfrak{c}$',
u'\U0001d521': '$\\mathfrak{d}$',
u'\U0001d522': '$\\mathfrak{e}$',
u'\U0001d523': '$\\mathfrak{f}$',
u'\U0001d524': '$\\mathfrak{g}$',
u'\U0001d525': '$\\mathfrak{h}$',
u'\U0001d526': '$\\mathfrak{i}$',
u'\U0001d527': '$\\mathfrak{j}$',
u'\U0001d528': '$\\mathfrak{k}$',
u'\U0001d529': '$\\mathfrak{l}$',
u'\U0001d52a': '$\\mathfrak{m}$',
u'\U0001d52b': '$\\mathfrak{n}$',
u'\U0001d52c': '$\\mathfrak{o}$',
u'\U0001d52d': '$\\mathfrak{p}$',
u'\U0001d52e': '$\\mathfrak{q}$',
u'\U0001d52f': '$\\mathfrak{r}$',
u'\U0001d530': '$\\mathfrak{s}$',
u'\U0001d531': '$\\mathfrak{t}$',
u'\U0001d532': '$\\mathfrak{u}$',
u'\U0001d533': '$\\mathfrak{v}$',
u'\U0001d534': '$\\mathfrak{w}$',
u'\U0001d535': '$\\mathfrak{x}$',
u'\U0001d536': '$\\mathfrak{y}$',
u'\U0001d537': '$\\mathfrak{z}$',
u'\U0001d538': '$\\mathbb{A}$',
u'\U0001d539': '$\\mathbb{B}$',
u'\U0001d53b': '$\\mathbb{D}$',
u'\U0001d53c': '$\\mathbb{E}$',
u'\U0001d53d': '$\\mathbb{F}$',
u'\U0001d53e': '$\\mathbb{G}$',
u'\U0001d540': '$\\mathbb{I}$',
u'\U0001d541': '$\\mathbb{J}$',
u'\U0001d542': '$\\mathbb{K}$',
u'\U0001d543': '$\\mathbb{L}$',
u'\U0001d544': '$\\mathbb{M}$',
u'\U0001d546': '$\\mathbb{O}$',
u'\U0001d54a': '$\\mathbb{S}$',
u'\U0001d54b': '$\\mathbb{T}$',
u'\U0001d54c': '$\\mathbb{U}$',
u'\U0001d54d': '$\\mathbb{V}$',
u'\U0001d54e': '$\\mathbb{W}$',
u'\U0001d54f': '$\\mathbb{X}$',
u'\U0001d550': '$\\mathbb{Y}$',
u'\U0001d552': '$\\mathbb{a}$',
u'\U0001d553': '$\\mathbb{b}$',
u'\U0001d554': '$\\mathbb{c}$',
u'\U0001d555': '$\\mathbb{d}$',
u'\U0001d556': '$\\mathbb{e}$',
u'\U0001d557': '$\\mathbb{f}$',
u'\U0001d558': '$\\mathbb{g}$',
u'\U0001d559': '$\\mathbb{h}$',
u'\U0001d55a': '$\\mathbb{i}$',
u'\U0001d55b': '$\\mathbb{j}$',
u'\U0001d55c': '$\\mathbb{k}$',
u'\U0001d55d': '$\\mathbb{l}$',
u'\U0001d55e': '$\\mathbb{m}$',
u'\U0001d55f': '$\\mathbb{n}$',
u'\U0001d560': '$\\mathbb{o}$',
u'\U0001d561': '$\\mathbb{p}$',
u'\U0001d562': '$\\mathbb{q}$',
u'\U0001d563': '$\\mathbb{r}$',
u'\U0001d564': '$\\mathbb{s}$',
u'\U0001d565': '$\\mathbb{t}$',
u'\U0001d566': '$\\mathbb{u}$',
u'\U0001d567': '$\\mathbb{v}$',
u'\U0001d568': '$\\mathbb{w}$',
u'\U0001d569': '$\\mathbb{x}$',
u'\U0001d56a': '$\\mathbb{y}$',
u'\U0001d56b': '$\\mathbb{z}$',
u'\U0001d56c': '$\\mathslbb{A}$',
u'\U0001d56d': '$\\mathslbb{B}$',
u'\U0001d56e': '$\\mathslbb{C}$',
u'\U0001d56f': '$\\mathslbb{D}$',
u'\U0001d570': '$\\mathslbb{E}$',
u'\U0001d571': '$\\mathslbb{F}$',
u'\U0001d572': '$\\mathslbb{G}$',
u'\U0001d573': '$\\mathslbb{H}$',
u'\U0001d574': '$\\mathslbb{I}$',
u'\U0001d575': '$\\mathslbb{J}$',
u'\U0001d576': '$\\mathslbb{K}$',
u'\U0001d577': '$\\mathslbb{L}$',
u'\U0001d578': '$\\mathslbb{M}$',
u'\U0001d579': '$\\mathslbb{N}$',
u'\U0001d57a': '$\\mathslbb{O}$',
u'\U0001d57b': '$\\mathslbb{P}$',
u'\U0001d57c': '$\\mathslbb{Q}$',
u'\U0001d57d': '$\\mathslbb{R}$',
u'\U0001d57e': '$\\mathslbb{S}$',
u'\U0001d57f': '$\\mathslbb{T}$',
u'\U0001d580': '$\\mathslbb{U}$',
u'\U0001d581': '$\\mathslbb{V}$',
u'\U0001d582': '$\\mathslbb{W}$',
u'\U0001d583': '$\\mathslbb{X}$',
u'\U0001d584': '$\\mathslbb{Y}$',
u'\U0001d585': '$\\mathslbb{Z}$',
u'\U0001d586': '$\\mathslbb{a}$',
u'\U0001d587': '$\\mathslbb{b}$',
u'\U0001d588': '$\\mathslbb{c}$',
u'\U0001d589': '$\\mathslbb{d}$',
u'\U0001d58a': '$\\mathslbb{e}$',
u'\U0001d58b': '$\\mathslbb{f}$',
u'\U0001d58c': '$\\mathslbb{g}$',
u'\U0001d58d': '$\\mathslbb{h}$',
u'\U0001d58e': '$\\mathslbb{i}$',
u'\U0001d58f': '$\\mathslbb{j}$',
u'\U0001d590': '$\\mathslbb{k}$',
u'\U0001d591': '$\\mathslbb{l}$',
u'\U0001d592': '$\\mathslbb{m}$',
u'\U0001d593': '$\\mathslbb{n}$',
u'\U0001d594': '$\\mathslbb{o}$',
u'\U0001d595': '$\\mathslbb{p}$',
u'\U0001d596': '$\\mathslbb{q}$',
u'\U0001d597': '$\\mathslbb{r}$',
u'\U0001d598': '$\\mathslbb{s}$',
u'\U0001d599': '$\\mathslbb{t}$',
u'\U0001d59a': '$\\mathslbb{u}$',
u'\U0001d59b': '$\\mathslbb{v}$',
u'\U0001d59c': '$\\mathslbb{w}$',
u'\U0001d59d': '$\\mathslbb{x}$',
u'\U0001d59e': '$\\mathslbb{y}$',
u'\U0001d59f': '$\\mathslbb{z}$',
u'\U0001d5a0': '$\\mathsf{A}$',
u'\U0001d5a1': '$\\mathsf{B}$',
u'\U0001d5a2': '$\\mathsf{C}$',
u'\U0001d5a3': '$\\mathsf{D}$',
u'\U0001d5a4': '$\\mathsf{E}$',
u'\U0001d5a5': '$\\mathsf{F}$',
u'\U0001d5a6': '$\\mathsf{G}$',
u'\U0001d5a7': '$\\mathsf{H}$',
u'\U0001d5a8': '$\\mathsf{I}$',
u'\U0001d5a9': '$\\mathsf{J}$',
u'\U0001d5aa': '$\\mathsf{K}$',
u'\U0001d5ab': '$\\mathsf{L}$',
u'\U0001d5ac': '$\\mathsf{M}$',
u'\U0001d5ad': '$\\mathsf{N}$',
u'\U0001d5ae': '$\\mathsf{O}$',
u'\U0001d5af': '$\\mathsf{P}$',
u'\U0001d5b0': '$\\mathsf{Q}$',
u'\U0001d5b1': '$\\mathsf{R}$',
u'\U0001d5b2': '$\\mathsf{S}$',
u'\U0001d5b3': '$\\mathsf{T}$',
u'\U0001d5b4': '$\\mathsf{U}$',
u'\U0001d5b5': '$\\mathsf{V}$',
u'\U0001d5b6': '$\\mathsf{W}$',
u'\U0001d5b7': '$\\mathsf{X}$',
u'\U0001d5b8': '$\\mathsf{Y}$',
u'\U0001d5b9': '$\\mathsf{Z}$',
u'\U0001d5ba': '$\\mathsf{a}$',
u'\U0001d5bb': '$\\mathsf{b}$',
u'\U0001d5bc': '$\\mathsf{c}$',
u'\U0001d5bd': '$\\mathsf{d}$',
u'\U0001d5be': '$\\mathsf{e}$',
u'\U0001d5bf': '$\\mathsf{f}$',
u'\U0001d5c0': '$\\mathsf{g}$',
u'\U0001d5c1': '$\\mathsf{h}$',
u'\U0001d5c2': '$\\mathsf{i}$',
u'\U0001d5c3': '$\\mathsf{j}$',
u'\U0001d5c4': '$\\mathsf{k}$',
u'\U0001d5c5': '$\\mathsf{l}$',
u'\U0001d5c6': '$\\mathsf{m}$',
u'\U0001d5c7': '$\\mathsf{n}$',
u'\U0001d5c8': '$\\mathsf{o}$',
u'\U0001d5c9': '$\\mathsf{p}$',
u'\U0001d5ca': '$\\mathsf{q}$',
u'\U0001d5cb': '$\\mathsf{r}$',
u'\U0001d5cc': '$\\mathsf{s}$',
u'\U0001d5cd': '$\\mathsf{t}$',
u'\U0001d5ce': '$\\mathsf{u}$',
u'\U0001d5cf': '$\\mathsf{v}$',
u'\U0001d5d0': '$\\mathsf{w}$',
u'\U0001d5d1': '$\\mathsf{x}$',
u'\U0001d5d2': '$\\mathsf{y}$',
u'\U0001d5d3': '$\\mathsf{z}$',
u'\U0001d5d4': '$\\mathsfbf{A}$',
u'\U0001d5d5': '$\\mathsfbf{B}$',
u'\U0001d5d6': '$\\mathsfbf{C}$',
u'\U0001d5d7': '$\\mathsfbf{D}$',
u'\U0001d5d8': '$\\mathsfbf{E}$',
u'\U0001d5d9': '$\\mathsfbf{F}$',
u'\U0001d5da': '$\\mathsfbf{G}$',
u'\U0001d5db': '$\\mathsfbf{H}$',
u'\U0001d5dc': '$\\mathsfbf{I}$',
u'\U0001d5dd': '$\\mathsfbf{J}$',
u'\U0001d5de': '$\\mathsfbf{K}$',
u'\U0001d5df': '$\\mathsfbf{L}$',
u'\U0001d5e0': '$\\mathsfbf{M}$',
u'\U0001d5e1': '$\\mathsfbf{N}$',
u'\U0001d5e2': '$\\mathsfbf{O}$',
u'\U0001d5e3': '$\\mathsfbf{P}$',
u'\U0001d5e4': '$\\mathsfbf{Q}$',
u'\U0001d5e5': '$\\mathsfbf{R}$',
u'\U0001d5e6': '$\\mathsfbf{S}$',
u'\U0001d5e7': '$\\mathsfbf{T}$',
u'\U0001d5e8': '$\\mathsfbf{U}$',
u'\U0001d5e9': '$\\mathsfbf{V}$',
u'\U0001d5ea': '$\\mathsfbf{W}$',
u'\U0001d5eb': '$\\mathsfbf{X}$',
u'\U0001d5ec': '$\\mathsfbf{Y}$',
u'\U0001d5ed': '$\\mathsfbf{Z}$',
u'\U0001d5ee': '$\\mathsfbf{a}$',
u'\U0001d5ef': '$\\mathsfbf{b}$',
u'\U0001d5f0': '$\\mathsfbf{c}$',
u'\U0001d5f1': '$\\mathsfbf{d}$',
u'\U0001d5f2': '$\\mathsfbf{e}$',
u'\U0001d5f3': '$\\mathsfbf{f}$',
u'\U0001d5f4': '$\\mathsfbf{g}$',
u'\U0001d5f5': '$\\mathsfbf{h}$',
u'\U0001d5f6': '$\\mathsfbf{i}$',
u'\U0001d5f7': '$\\mathsfbf{j}$',
u'\U0001d5f8': '$\\mathsfbf{k}$',
u'\U0001d5f9': '$\\mathsfbf{l}$',
u'\U0001d5fa': '$\\mathsfbf{m}$',
u'\U0001d5fb': '$\\mathsfbf{n}$',
u'\U0001d5fc': '$\\mathsfbf{o}$',
u'\U0001d5fd': '$\\mathsfbf{p}$',
u'\U0001d5fe': '$\\mathsfbf{q}$',
u'\U0001d5ff': '$\\mathsfbf{r}$',
u'\U0001d600': '$\\mathsfbf{s}$',
u'\U0001d601': '$\\mathsfbf{t}$',
u'\U0001d602': '$\\mathsfbf{u}$',
u'\U0001d603': '$\\mathsfbf{v}$',
u'\U0001d604': '$\\mathsfbf{w}$',
u'\U0001d605': '$\\mathsfbf{x}$',
u'\U0001d606': '$\\mathsfbf{y}$',
u'\U0001d607': '$\\mathsfbf{z}$',
u'\U0001d608': '$\\mathsfsl{A}$',
u'\U0001d609': '$\\mathsfsl{B}$',
u'\U0001d60a': '$\\mathsfsl{C}$',
u'\U0001d60b': '$\\mathsfsl{D}$',
u'\U0001d60c': '$\\mathsfsl{E}$',
u'\U0001d60d': '$\\mathsfsl{F}$',
u'\U0001d60e': '$\\mathsfsl{G}$',
u'\U0001d60f': '$\\mathsfsl{H}$',
u'\U0001d610': '$\\mathsfsl{I}$',
u'\U0001d611': '$\\mathsfsl{J}$',
u'\U0001d612': '$\\mathsfsl{K}$',
u'\U0001d613': '$\\mathsfsl{L}$',
u'\U0001d614': '$\\mathsfsl{M}$',
u'\U0001d615': '$\\mathsfsl{N}$',
u'\U0001d616': '$\\mathsfsl{O}$',
u'\U0001d617': '$\\mathsfsl{P}$',
u'\U0001d618': '$\\mathsfsl{Q}$',
u'\U0001d619': '$\\mathsfsl{R}$',
u'\U0001d61a': '$\\mathsfsl{S}$',
u'\U0001d61b': '$\\mathsfsl{T}$',
u'\U0001d61c': '$\\mathsfsl{U}$',
u'\U0001d61d': '$\\mathsfsl{V}$',
u'\U0001d61e': '$\\mathsfsl{W}$',
u'\U0001d61f': '$\\mathsfsl{X}$',
u'\U0001d620': '$\\mathsfsl{Y}$',
u'\U0001d621': '$\\mathsfsl{Z}$',
u'\U0001d622': '$\\mathsfsl{a}$',
u'\U0001d623': '$\\mathsfsl{b}$',
u'\U0001d624': '$\\mathsfsl{c}$',
u'\U0001d625': '$\\mathsfsl{d}$',
u'\U0001d626': '$\\mathsfsl{e}$',
u'\U0001d627': '$\\mathsfsl{f}$',
u'\U0001d628': '$\\mathsfsl{g}$',
u'\U0001d629': '$\\mathsfsl{h}$',
u'\U0001d62a': '$\\mathsfsl{i}$',
u'\U0001d62b': '$\\mathsfsl{j}$',
u'\U0001d62c': '$\\mathsfsl{k}$',
u'\U0001d62d': '$\\mathsfsl{l}$',
u'\U0001d62e': '$\\mathsfsl{m}$',
u'\U0001d62f': '$\\mathsfsl{n}$',
u'\U0001d630': '$\\mathsfsl{o}$',
u'\U0001d631': '$\\mathsfsl{p}$',
u'\U0001d632': '$\\mathsfsl{q}$',
u'\U0001d633': '$\\mathsfsl{r}$',
u'\U0001d634': '$\\mathsfsl{s}$',
u'\U0001d635': '$\\mathsfsl{t}$',
u'\U0001d636': '$\\mathsfsl{u}$',
u'\U0001d637': '$\\mathsfsl{v}$',
u'\U0001d638': '$\\mathsfsl{w}$',
u'\U0001d639': '$\\mathsfsl{x}$',
u'\U0001d63a': '$\\mathsfsl{y}$',
u'\U0001d63b': '$\\mathsfsl{z}$',
u'\U0001d63c': '$\\mathsfbfsl{A}$',
u'\U0001d63d': '$\\mathsfbfsl{B}$',
u'\U0001d63e': '$\\mathsfbfsl{C}$',
u'\U0001d63f': '$\\mathsfbfsl{D}$',
u'\U0001d640': '$\\mathsfbfsl{E}$',
u'\U0001d641': '$\\mathsfbfsl{F}$',
u'\U0001d642': '$\\mathsfbfsl{G}$',
u'\U0001d643': '$\\mathsfbfsl{H}$',
u'\U0001d644': '$\\mathsfbfsl{I}$',
u'\U0001d645': '$\\mathsfbfsl{J}$',
u'\U0001d646': '$\\mathsfbfsl{K}$',
u'\U0001d647': '$\\mathsfbfsl{L}$',
u'\U0001d648': '$\\mathsfbfsl{M}$',
u'\U0001d649': '$\\mathsfbfsl{N}$',
u'\U0001d64a': '$\\mathsfbfsl{O}$',
u'\U0001d64b': '$\\mathsfbfsl{P}$',
u'\U0001d64c': '$\\mathsfbfsl{Q}$',
u'\U0001d64d': '$\\mathsfbfsl{R}$',
u'\U0001d64e': '$\\mathsfbfsl{S}$',
u'\U0001d64f': '$\\mathsfbfsl{T}$',
u'\U0001d650': '$\\mathsfbfsl{U}$',
u'\U0001d651': '$\\mathsfbfsl{V}$',
u'\U0001d652': '$\\mathsfbfsl{W}$',
u'\U0001d653': '$\\mathsfbfsl{X}$',
u'\U0001d654': '$\\mathsfbfsl{Y}$',
u'\U0001d655': '$\\mathsfbfsl{Z}$',
u'\U0001d656': '$\\mathsfbfsl{a}$',
u'\U0001d657': '$\\mathsfbfsl{b}$',
u'\U0001d658': '$\\mathsfbfsl{c}$',
u'\U0001d659': '$\\mathsfbfsl{d}$',
u'\U0001d65a': '$\\mathsfbfsl{e}$',
u'\U0001d65b': '$\\mathsfbfsl{f}$',
u'\U0001d65c': '$\\mathsfbfsl{g}$',
u'\U0001d65d': '$\\mathsfbfsl{h}$',
u'\U0001d65e': '$\\mathsfbfsl{i}$',
u'\U0001d65f': '$\\mathsfbfsl{j}$',
u'\U0001d660': '$\\mathsfbfsl{k}$',
u'\U0001d661': '$\\mathsfbfsl{l}$',
u'\U0001d662': '$\\mathsfbfsl{m}$',
u'\U0001d663': '$\\mathsfbfsl{n}$',
u'\U0001d664': '$\\mathsfbfsl{o}$',
u'\U0001d665': '$\\mathsfbfsl{p}$',
u'\U0001d666': '$\\mathsfbfsl{q}$',
u'\U0001d667': '$\\mathsfbfsl{r}$',
u'\U0001d668': '$\\mathsfbfsl{s}$',
u'\U0001d669': '$\\mathsfbfsl{t}$',
u'\U0001d66a': '$\\mathsfbfsl{u}$',
u'\U0001d66b': '$\\mathsfbfsl{v}$',
u'\U0001d66c': '$\\mathsfbfsl{w}$',
u'\U0001d66d': '$\\mathsfbfsl{x}$',
u'\U0001d66e': '$\\mathsfbfsl{y}$',
u'\U0001d66f': '$\\mathsfbfsl{z}$',
u'\U0001d670': '$\\mathtt{A}$',
u'\U0001d671': '$\\mathtt{B}$',
u'\U0001d672': '$\\mathtt{C}$',
u'\U0001d673': '$\\mathtt{D}$',
u'\U0001d674': '$\\mathtt{E}$',
u'\U0001d675': '$\\mathtt{F}$',
u'\U0001d676': '$\\mathtt{G}$',
u'\U0001d677': '$\\mathtt{H}$',
u'\U0001d678': '$\\mathtt{I}$',
u'\U0001d679': '$\\mathtt{J}$',
u'\U0001d67a': '$\\mathtt{K}$',
u'\U0001d67b': '$\\mathtt{L}$',
u'\U0001d67c': '$\\mathtt{M}$',
u'\U0001d67d': '$\\mathtt{N}$',
u'\U0001d67e': '$\\mathtt{O}$',
u'\U0001d67f': '$\\mathtt{P}$',
u'\U0001d680': '$\\mathtt{Q}$',
u'\U0001d681': '$\\mathtt{R}$',
u'\U0001d682': '$\\mathtt{S}$',
u'\U0001d683': '$\\mathtt{T}$',
u'\U0001d684': '$\\mathtt{U}$',
u'\U0001d685': '$\\mathtt{V}$',
u'\U0001d686': '$\\mathtt{W}$',
u'\U0001d687': '$\\mathtt{X}$',
u'\U0001d688': '$\\mathtt{Y}$',
u'\U0001d689': '$\\mathtt{Z}$',
u'\U0001d68a': '$\\mathtt{a}$',
u'\U0001d68b': '$\\mathtt{b}$',
u'\U0001d68c': '$\\mathtt{c}$',
u'\U0001d68d': '$\\mathtt{d}$',
u'\U0001d68e': '$\\mathtt{e}$',
u'\U0001d68f': '$\\mathtt{f}$',
u'\U0001d690': '$\\mathtt{g}$',
u'\U0001d691': '$\\mathtt{h}$',
u'\U0001d692': '$\\mathtt{i}$',
u'\U0001d693': '$\\mathtt{j}$',
u'\U0001d694': '$\\mathtt{k}$',
u'\U0001d695': '$\\mathtt{l}$',
u'\U0001d696': '$\\mathtt{m}$',
u'\U0001d697': '$\\mathtt{n}$',
u'\U0001d698': '$\\mathtt{o}$',
u'\U0001d699': '$\\mathtt{p}$',
u'\U0001d69a': '$\\mathtt{q}$',
u'\U0001d69b': '$\\mathtt{r}$',
u'\U0001d69c': '$\\mathtt{s}$',
u'\U0001d69d': '$\\mathtt{t}$',
u'\U0001d69e': '$\\mathtt{u}$',
u'\U0001d69f': '$\\mathtt{v}$',
u'\U0001d6a0': '$\\mathtt{w}$',
u'\U0001d6a1': '$\\mathtt{x}$',
u'\U0001d6a2': '$\\mathtt{y}$',
u'\U0001d6a3': '$\\mathtt{z}$',
u'\U0001d6a8': '$\\mathbf{\\Alpha}$',
u'\U0001d6a9': '$\\mathbf{\\Beta}$',
u'\U0001d6aa': '$\\mathbf{\\Gamma}$',
u'\U0001d6ab': '$\\mathbf{\\Delta}$',
u'\U0001d6ac': '$\\mathbf{\\Epsilon}$',
u'\U0001d6ad': '$\\mathbf{\\Zeta}$',
u'\U0001d6ae': '$\\mathbf{\\Eta}$',
u'\U0001d6af': '$\\mathbf{\\Theta}$',
u'\U0001d6b0': '$\\mathbf{\\Iota}$',
u'\U0001d6b1': '$\\mathbf{\\Kappa}$',
u'\U0001d6b2': '$\\mathbf{\\Lambda}$',
u'\U0001d6b3': '$M$',
u'\U0001d6b4': '$N$',
u'\U0001d6b5': '$\\mathbf{\\Xi}$',
u'\U0001d6b6': '$O$',
u'\U0001d6b7': '$\\mathbf{\\Pi}$',
u'\U0001d6b8': '$\\mathbf{\\Rho}$',
u'\U0001d6b9': '{\\mathbf{\\vartheta}}',
u'\U0001d6ba': '$\\mathbf{\\Sigma}$',
u'\U0001d6bb': '$\\mathbf{\\Tau}$',
u'\U0001d6bc': '$\\mathbf{\\Upsilon}$',
u'\U0001d6bd': '$\\mathbf{\\Phi}$',
u'\U0001d6be': '$\\mathbf{\\Chi}$',
u'\U0001d6bf': '$\\mathbf{\\Psi}$',
u'\U0001d6c0': '$\\mathbf{\\Omega}$',
u'\U0001d6c1': '$\\mathbf{\\nabla}$',
u'\U0001d6c2': '$\\mathbf{\\Alpha}$',
u'\U0001d6c3': '$\\mathbf{\\Beta}$',
u'\U0001d6c4': '$\\mathbf{\\Gamma}$',
u'\U0001d6c5': '$\\mathbf{\\Delta}$',
u'\U0001d6c6': '$\\mathbf{\\Epsilon}$',
u'\U0001d6c7': '$\\mathbf{\\Zeta}$',
u'\U0001d6c8': '$\\mathbf{\\Eta}$',
u'\U0001d6c9': '$\\mathbf{\\theta}$',
u'\U0001d6ca': '$\\mathbf{\\Iota}$',
u'\U0001d6cb': '$\\mathbf{\\Kappa}$',
u'\U0001d6cc': '$\\mathbf{\\Lambda}$',
u'\U0001d6cd': '$M$',
u'\U0001d6ce': '$N$',
u'\U0001d6cf': '$\\mathbf{\\Xi}$',
u'\U0001d6d0': '$O$',
u'\U0001d6d1': '$\\mathbf{\\Pi}$',
u'\U0001d6d2': '$\\mathbf{\\Rho}$',
u'\U0001d6d3': '$\\mathbf{\\varsigma}$',
u'\U0001d6d4': '$\\mathbf{\\Sigma}$',
u'\U0001d6d5': '$\\mathbf{\\Tau}$',
u'\U0001d6d6': '$\\mathbf{\\Upsilon}$',
u'\U0001d6d7': '$\\mathbf{\\Phi}$',
u'\U0001d6d8': '$\\mathbf{\\Chi}$',
u'\U0001d6d9': '$\\mathbf{\\Psi}$',
u'\U0001d6da': '$\\mathbf{\\Omega}$',
u'\U0001d6db': '$\\partial$',
u'\U0001d6dc': '$\\in$',
u'\U0001d6dd': '{\\mathbf{\\vartheta}}',
u'\U0001d6de': '{\\mathbf{\\varkappa}}',
u'\U0001d6df': '{\\mathbf{\\phi}}',
u'\U0001d6e0': '{\\mathbf{\\varrho}}',
u'\U0001d6e1': '{\\mathbf{\\varpi}}',
u'\U0001d6e2': '$\\mathsl{\\Alpha}$',
u'\U0001d6e3': '$\\mathsl{\\Beta}$',
u'\U0001d6e4': '$\\mathsl{\\Gamma}$',
u'\U0001d6e5': '$\\mathsl{\\Delta}$',
u'\U0001d6e6': '$\\mathsl{\\Epsilon}$',
u'\U0001d6e7': '$\\mathsl{\\Zeta}$',
u'\U0001d6e8': '$\\mathsl{\\Eta}$',
u'\U0001d6e9': '$\\mathsl{\\Theta}$',
u'\U0001d6ea': '$\\mathsl{\\Iota}$',
u'\U0001d6eb': '$\\mathsl{\\Kappa}$',
u'\U0001d6ec': '$\\mathsl{\\Lambda}$',
u'\U0001d6ed': '$M$',
u'\U0001d6ee': '$N$',
u'\U0001d6ef': '$\\mathsl{\\Xi}$',
u'\U0001d6f0': '$O$',
u'\U0001d6f1': '$\\mathsl{\\Pi}$',
u'\U0001d6f2': '$\\mathsl{\\Rho}$',
u'\U0001d6f3': '{\\mathsl{\\vartheta}}',
u'\U0001d6f4': '$\\mathsl{\\Sigma}$',
u'\U0001d6f5': '$\\mathsl{\\Tau}$',
u'\U0001d6f6': '$\\mathsl{\\Upsilon}$',
u'\U0001d6f7': '$\\mathsl{\\Phi}$',
u'\U0001d6f8': '$\\mathsl{\\Chi}$',
u'\U0001d6f9': '$\\mathsl{\\Psi}$',
u'\U0001d6fa': '$\\mathsl{\\Omega}$',
u'\U0001d6fb': '$\\mathsl{\\nabla}$',
u'\U0001d6fc': '$\\mathsl{\\Alpha}$',
u'\U0001d6fd': '$\\mathsl{\\Beta}$',
u'\U0001d6fe': '$\\mathsl{\\Gamma}$',
u'\U0001d6ff': '$\\mathsl{\\Delta}$',
u'\U0001d700': '$\\mathsl{\\Epsilon}$',
u'\U0001d701': '$\\mathsl{\\Zeta}$',
u'\U0001d702': '$\\mathsl{\\Eta}$',
u'\U0001d703': '$\\mathsl{\\Theta}$',
u'\U0001d704': '$\\mathsl{\\Iota}$',
u'\U0001d705': '$\\mathsl{\\Kappa}$',
u'\U0001d706': '$\\mathsl{\\Lambda}$',
u'\U0001d707': '$M$',
u'\U0001d708': '$N$',
u'\U0001d709': '$\\mathsl{\\Xi}$',
u'\U0001d70a': '$O$',
u'\U0001d70b': '$\\mathsl{\\Pi}$',
u'\U0001d70c': '$\\mathsl{\\Rho}$',
u'\U0001d70d': '$\\mathsl{\\varsigma}$',
u'\U0001d70e': '$\\mathsl{\\Sigma}$',
u'\U0001d70f': '$\\mathsl{\\Tau}$',
u'\U0001d710': '$\\mathsl{\\Upsilon}$',
u'\U0001d711': '$\\mathsl{\\Phi}$',
u'\U0001d712': '$\\mathsl{\\Chi}$',
u'\U0001d713': '$\\mathsl{\\Psi}$',
u'\U0001d714': '$\\mathsl{\\Omega}$',
u'\U0001d715': '$\\partial$',
u'\U0001d716': '$\\in$',
u'\U0001d717': '{\\mathsl{\\vartheta}}',
u'\U0001d718': '{\\mathsl{\\varkappa}}',
u'\U0001d719': '{\\mathsl{\\phi}}',
u'\U0001d71a': '{\\mathsl{\\varrho}}',
u'\U0001d71b': '{\\mathsl{\\varpi}}',
u'\U0001d71c': '$\\mathbit{\\Alpha}$',
u'\U0001d71d': '$\\mathbit{\\Beta}$',
u'\U0001d71e': '$\\mathbit{\\Gamma}$',
u'\U0001d71f': '$\\mathbit{\\Delta}$',
u'\U0001d720': '$\\mathbit{\\Epsilon}$',
u'\U0001d721': '$\\mathbit{\\Zeta}$',
u'\U0001d722': '$\\mathbit{\\Eta}$',
u'\U0001d723': '$\\mathbit{\\Theta}$',
u'\U0001d724': '$\\mathbit{\\Iota}$',
u'\U0001d725': '$\\mathbit{\\Kappa}$',
u'\U0001d726': '$\\mathbit{\\Lambda}$',
u'\U0001d727': '$M$',
u'\U0001d728': '$N$',
u'\U0001d729': '$\\mathbit{\\Xi}$',
u'\U0001d72a': '$O$',
u'\U0001d72b': '$\\mathbit{\\Pi}$',
u'\U0001d72c': '$\\mathbit{\\Rho}$',
u'\U0001d72d': '{\\mathbit{O}}',
u'\U0001d72e': '$\\mathbit{\\Sigma}$',
u'\U0001d72f': '$\\mathbit{\\Tau}$',
u'\U0001d730': '$\\mathbit{\\Upsilon}$',
u'\U0001d731': '$\\mathbit{\\Phi}$',
u'\U0001d732': '$\\mathbit{\\Chi}$',
u'\U0001d733': '$\\mathbit{\\Psi}$',
u'\U0001d734': '$\\mathbit{\\Omega}$',
u'\U0001d735': '$\\mathbit{\\nabla}$',
u'\U0001d736': '$\\mathbit{\\Alpha}$',
u'\U0001d737': '$\\mathbit{\\Beta}$',
u'\U0001d738': '$\\mathbit{\\Gamma}$',
u'\U0001d739': '$\\mathbit{\\Delta}$',
u'\U0001d73a': '$\\mathbit{\\Epsilon}$',
u'\U0001d73b': '$\\mathbit{\\Zeta}$',
u'\U0001d73c': '$\\mathbit{\\Eta}$',
u'\U0001d73d': '$\\mathbit{\\Theta}$',
u'\U0001d73e': '$\\mathbit{\\Iota}$',
u'\U0001d73f': '$\\mathbit{\\Kappa}$',
u'\U0001d740': '$\\mathbit{\\Lambda}$',
u'\U0001d741': '$M$',
u'\U0001d742': '$N$',
u'\U0001d743': '$\\mathbit{\\Xi}$',
u'\U0001d744': '$O$',
u'\U0001d745': '$\\mathbit{\\Pi}$',
u'\U0001d746': '$\\mathbit{\\Rho}$',
u'\U0001d747': '$\\mathbit{\\varsigma}$',
u'\U0001d748': '$\\mathbit{\\Sigma}$',
u'\U0001d749': '$\\mathbit{\\Tau}$',
u'\U0001d74a': '$\\mathbit{\\Upsilon}$',
u'\U0001d74b': '$\\mathbit{\\Phi}$',
u'\U0001d74c': '$\\mathbit{\\Chi}$',
u'\U0001d74d': '$\\mathbit{\\Psi}$',
u'\U0001d74e': '$\\mathbit{\\Omega}$',
u'\U0001d74f': '$\\partial$',
u'\U0001d750': '$\\in$',
u'\U0001d751': '{\\mathbit{\\vartheta}}',
u'\U0001d752': '{\\mathbit{\\varkappa}}',
u'\U0001d753': '{\\mathbit{\\phi}}',
u'\U0001d754': '{\\mathbit{\\varrho}}',
u'\U0001d755': '{\\mathbit{\\varpi}}',
u'\U0001d756': '$\\mathsfbf{\\Alpha}$',
u'\U0001d757': '$\\mathsfbf{\\Beta}$',
u'\U0001d758': '$\\mathsfbf{\\Gamma}$',
u'\U0001d759': '$\\mathsfbf{\\Delta}$',
u'\U0001d75a': '$\\mathsfbf{\\Epsilon}$',
u'\U0001d75b': '$\\mathsfbf{\\Zeta}$',
u'\U0001d75c': '$\\mathsfbf{\\Eta}$',
u'\U0001d75d': '$\\mathsfbf{\\Theta}$',
u'\U0001d75e': '$\\mathsfbf{\\Iota}$',
u'\U0001d75f': '$\\mathsfbf{\\Kappa}$',
u'\U0001d760': '$\\mathsfbf{\\Lambda}$',
u'\U0001d761': '$M$',
u'\U0001d762': '$N$',
u'\U0001d763': '$\\mathsfbf{\\Xi}$',
u'\U0001d764': '$O$',
u'\U0001d765': '$\\mathsfbf{\\Pi}$',
u'\U0001d766': '$\\mathsfbf{\\Rho}$',
u'\U0001d767': '{\\mathsfbf{\\vartheta}}',
u'\U0001d768': '$\\mathsfbf{\\Sigma}$',
u'\U0001d769': '$\\mathsfbf{\\Tau}$',
u'\U0001d76a': '$\\mathsfbf{\\Upsilon}$',
u'\U0001d76b': '$\\mathsfbf{\\Phi}$',
u'\U0001d76c': '$\\mathsfbf{\\Chi}$',
u'\U0001d76d': '$\\mathsfbf{\\Psi}$',
u'\U0001d76e': '$\\mathsfbf{\\Omega}$',
u'\U0001d76f': '$\\mathsfbf{\\nabla}$',
u'\U0001d770': '$\\mathsfbf{\\Alpha}$',
u'\U0001d771': '$\\mathsfbf{\\Beta}$',
u'\U0001d772': '$\\mathsfbf{\\Gamma}$',
u'\U0001d773': '$\\mathsfbf{\\Delta}$',
u'\U0001d774': '$\\mathsfbf{\\Epsilon}$',
u'\U0001d775': '$\\mathsfbf{\\Zeta}$',
u'\U0001d776': '$\\mathsfbf{\\Eta}$',
u'\U0001d777': '$\\mathsfbf{\\Theta}$',
u'\U0001d778': '$\\mathsfbf{\\Iota}$',
u'\U0001d779': '$\\mathsfbf{\\Kappa}$',
u'\U0001d77a': '$\\mathsfbf{\\Lambda}$',
u'\U0001d77b': '$M$',
u'\U0001d77c': '$N$',
u'\U0001d77d': '$\\mathsfbf{\\Xi}$',
u'\U0001d77e': '$O$',
u'\U0001d77f': '$\\mathsfbf{\\Pi}$',
u'\U0001d780': '$\\mathsfbf{\\Rho}$',
u'\U0001d781': '$\\mathsfbf{\\varsigma}$',
u'\U0001d782': '$\\mathsfbf{\\Sigma}$',
u'\U0001d783': '$\\mathsfbf{\\Tau}$',
u'\U0001d784': '$\\mathsfbf{\\Upsilon}$',
u'\U0001d785': '$\\mathsfbf{\\Phi}$',
u'\U0001d786': '$\\mathsfbf{\\Chi}$',
u'\U0001d787': '$\\mathsfbf{\\Psi}$',
u'\U0001d788': '$\\mathsfbf{\\Omega}$',
u'\U0001d789': '$\\partial$',
u'\U0001d78a': '$\\in$',
u'\U0001d78b': '{\\mathsfbf{\\vartheta}}',
u'\U0001d78c': '{\\mathsfbf{\\varkappa}}',
u'\U0001d78d': '{\\mathsfbf{\\phi}}',
u'\U0001d78e': '{\\mathsfbf{\\varrho}}',
u'\U0001d78f': '{\\mathsfbf{\\varpi}}',
u'\U0001d790': '$\\mathsfbfsl{\\Alpha}$',
u'\U0001d791': '$\\mathsfbfsl{\\Beta}$',
u'\U0001d792': '$\\mathsfbfsl{\\Gamma}$',
u'\U0001d793': '$\\mathsfbfsl{\\Delta}$',
u'\U0001d794': '$\\mathsfbfsl{\\Epsilon}$',
u'\U0001d795': '$\\mathsfbfsl{\\Zeta}$',
u'\U0001d796': '$\\mathsfbfsl{\\Eta}$',
u'\U0001d797': '$\\mathsfbfsl{\\vartheta}$',
u'\U0001d798': '$\\mathsfbfsl{\\Iota}$',
u'\U0001d799': '$\\mathsfbfsl{\\Kappa}$',
u'\U0001d79a': '$\\mathsfbfsl{\\Lambda}$',
u'\U0001d79b': '$M$',
u'\U0001d79c': '$N$',
u'\U0001d79d': '$\\mathsfbfsl{\\Xi}$',
u'\U0001d79e': '$O$',
u'\U0001d79f': '$\\mathsfbfsl{\\Pi}$',
u'\U0001d7a0': '$\\mathsfbfsl{\\Rho}$',
u'\U0001d7a1': '{\\mathsfbfsl{\\vartheta}}',
u'\U0001d7a2': '$\\mathsfbfsl{\\Sigma}$',
u'\U0001d7a3': '$\\mathsfbfsl{\\Tau}$',
u'\U0001d7a4': '$\\mathsfbfsl{\\Upsilon}$',
u'\U0001d7a5': '$\\mathsfbfsl{\\Phi}$',
u'\U0001d7a6': '$\\mathsfbfsl{\\Chi}$',
u'\U0001d7a7': '$\\mathsfbfsl{\\Psi}$',
u'\U0001d7a8': '$\\mathsfbfsl{\\Omega}$',
u'\U0001d7a9': '$\\mathsfbfsl{\\nabla}$',
u'\U0001d7aa': '$\\mathsfbfsl{\\Alpha}$',
u'\U0001d7ab': '$\\mathsfbfsl{\\Beta}$',
u'\U0001d7ac': '$\\mathsfbfsl{\\Gamma}$',
u'\U0001d7ad': '$\\mathsfbfsl{\\Delta}$',
u'\U0001d7ae': '$\\mathsfbfsl{\\Epsilon}$',
u'\U0001d7af': '$\\mathsfbfsl{\\Zeta}$',
u'\U0001d7b0': '$\\mathsfbfsl{\\Eta}$',
u'\U0001d7b1': '$\\mathsfbfsl{\\vartheta}$',
u'\U0001d7b2': '$\\mathsfbfsl{\\Iota}$',
u'\U0001d7b3': '$\\mathsfbfsl{\\Kappa}$',
u'\U0001d7b4': '$\\mathsfbfsl{\\Lambda}$',
u'\U0001d7b5': '$M$',
u'\U0001d7b6': '$N$',
u'\U0001d7b7': '$\\mathsfbfsl{\\Xi}$',
u'\U0001d7b8': '$O$',
u'\U0001d7b9': '$\\mathsfbfsl{\\Pi}$',
u'\U0001d7ba': '$\\mathsfbfsl{\\Rho}$',
u'\U0001d7bb': '$\\mathsfbfsl{\\varsigma}$',
u'\U0001d7bc': '$\\mathsfbfsl{\\Sigma}$',
u'\U0001d7bd': '$\\mathsfbfsl{\\Tau}$',
u'\U0001d7be': '$\\mathsfbfsl{\\Upsilon}$',
u'\U0001d7bf': '$\\mathsfbfsl{\\Phi}$',
u'\U0001d7c0': '$\\mathsfbfsl{\\Chi}$',
u'\U0001d7c1': '$\\mathsfbfsl{\\Psi}$',
u'\U0001d7c2': '$\\mathsfbfsl{\\Omega}$',
u'\U0001d7c3': '$\\partial$',
u'\U0001d7c4': '$\\in$',
u'\U0001d7c5': '{\\mathsfbfsl{\\vartheta}}',
u'\U0001d7c6': '{\\mathsfbfsl{\\varkappa}}',
u'\U0001d7c7': '{\\mathsfbfsl{\\phi}}',
u'\U0001d7c8': '{\\mathsfbfsl{\\varrho}}',
u'\U0001d7c9': '{\\mathsfbfsl{\\varpi}}',
u'\U0001d7ce': '$\\mathbf{0}$',
u'\U0001d7cf': '$\\mathbf{1}$',
u'\U0001d7d0': '$\\mathbf{2}$',
u'\U0001d7d1': '$\\mathbf{3}$',
u'\U0001d7d2': '$\\mathbf{4}$',
u'\U0001d7d3': '$\\mathbf{5}$',
u'\U0001d7d4': '$\\mathbf{6}$',
u'\U0001d7d5': '$\\mathbf{7}$',
u'\U0001d7d6': '$\\mathbf{8}$',
u'\U0001d7d7': '$\\mathbf{9}$',
u'\U0001d7d8': '$\\mathbb{0}$',
u'\U0001d7d9': '$\\mathbb{1}$',
u'\U0001d7da': '$\\mathbb{2}$',
u'\U0001d7db': '$\\mathbb{3}$',
u'\U0001d7dc': '$\\mathbb{4}$',
u'\U0001d7dd': '$\\mathbb{5}$',
u'\U0001d7de': '$\\mathbb{6}$',
u'\U0001d7df': '$\\mathbb{7}$',
u'\U0001d7e0': '$\\mathbb{8}$',
u'\U0001d7e1': '$\\mathbb{9}$',
u'\U0001d7e2': '$\\mathsf{0}$',
u'\U0001d7e3': '$\\mathsf{1}$',
u'\U0001d7e4': '$\\mathsf{2}$',
u'\U0001d7e5': '$\\mathsf{3}$',
u'\U0001d7e6': '$\\mathsf{4}$',
u'\U0001d7e7': '$\\mathsf{5}$',
u'\U0001d7e8': '$\\mathsf{6}$',
u'\U0001d7e9': '$\\mathsf{7}$',
u'\U0001d7ea': '$\\mathsf{8}$',
u'\U0001d7eb': '$\\mathsf{9}$',
u'\U0001d7ec': '$\\mathsfbf{0}$',
u'\U0001d7ed': '$\\mathsfbf{1}$',
u'\U0001d7ee': '$\\mathsfbf{2}$',
u'\U0001d7ef': '$\\mathsfbf{3}$',
u'\U0001d7f0': '$\\mathsfbf{4}$',
u'\U0001d7f1': '$\\mathsfbf{5}$',
u'\U0001d7f2': '$\\mathsfbf{6}$',
u'\U0001d7f3': '$\\mathsfbf{7}$',
u'\U0001d7f4': '$\\mathsfbf{8}$',
u'\U0001d7f5': '$\\mathsfbf{9}$',
u'\U0001d7f6': '$\\mathtt{0}$',
u'\U0001d7f7': '$\\mathtt{1}$',
u'\U0001d7f8': '$\\mathtt{2}$',
u'\U0001d7f9': '$\\mathtt{3}$',
u'\U0001d7fa': '$\\mathtt{4}$',
u'\U0001d7fb': '$\\mathtt{5}$',
u'\U0001d7fc': '$\\mathtt{6}$',
u'\U0001d7fd': '$\\mathtt{7}$',
u'\U0001d7fe': '$\\mathtt{8}$',
u'\U0001d7ff': '$\\mathtt{9}$'}
| Python |
# $Id: __init__.py 5738 2008-11-30 08:59:04Z grubert $
# Author: Lea Wiemann <LeWiemann@gmail.com>
# Copyright: This module has been placed in the public domain.
"""
LaTeX2e document tree Writer.
"""
# Thanks to Engelbert Gruber and various contributors for the original
# LaTeX writer, some code and many ideas of which have been used for
# this writer.
__docformat__ = 'reStructuredText'
import re
import os.path
import docutils
from docutils import nodes, writers, utils
from docutils.writers.newlatex2e import unicode_map
from docutils.transforms import writer_aux
class Writer(writers.Writer):
    """Docutils writer that renders the document tree as LaTeX2e."""

    supported = ('newlatex', 'newlatex2e')
    """Formats this writer supports."""

    # File name of the stylesheet that ships with this writer.
    default_stylesheet = 'base.tex'

    # Path of the default stylesheet, expressed relative to the current
    # working directory (the 'dummy' file name only anchors the cwd).
    default_stylesheet_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_stylesheet))

    # Command-line/config options specific to this writer; the --stylesheet
    # and --stylesheet-path pairs override each other (see 'overrides').
    settings_spec = (
        'LaTeX-Specific Options',
        'Note that this LaTeX writer is still EXPERIMENTAL and not '
        'feature-complete. ',
        (('Specify a stylesheet file. The path is used verbatim to include '
          'the file. Overrides --stylesheet-path.',
          ['--stylesheet'],
          {'default': '', 'metavar': '<file>',
           'overrides': 'stylesheet_path'}),
         ('Specify a stylesheet file, relative to the current working '
          'directory. Overrides --stylesheet. Default: "%s"'
          % default_stylesheet_path,
          ['--stylesheet-path'],
          {'metavar': '<file>', 'overrides': 'stylesheet',
           'default': default_stylesheet_path}),
         ('Specify a user stylesheet file. See --stylesheet.',
          ['--user-stylesheet'],
          {'default': '', 'metavar': '<file>',
           'overrides': 'user_stylesheet_path'}),
         ('Specify a user stylesheet file. See --stylesheet-path.',
          ['--user-stylesheet-path'],
          {'metavar': '<file>', 'overrides': 'user_stylesheet'})
         ),)

    settings_defaults = {
        # Many Unicode characters are provided by unicode_map.py, so
        # we can default to latin-1.
        'output_encoding': 'latin-1',
        'output_encoding_error_handler': 'strict',
        # Since we are using superscript footnotes, it is necessary to
        # trim whitespace in front of footnote references.
        'trim_footnote_reference_space': 1,
        # Currently unsupported:
        'docinfo_xform': 0,
        # During development:
        'traceback': 1
        }

    # Settings that are interpreted as paths relative to the config file.
    relative_path_settings = ('stylesheet_path', 'user_stylesheet_path')

    config_section = 'newlatex2e writer'
    config_section_dependencies = ('writers',)

    output = None
    """Final translated form of `document`."""

    def get_transforms(self):
        # Run the shared writer-auxiliary transforms in addition to the
        # base writer's transforms.
        return writers.Writer.get_transforms(self) + [
            writer_aux.Compound, writer_aux.Admonitions]

    def __init__(self):
        writers.Writer.__init__(self)
        self.translator_class = LaTeXTranslator

    def translate(self):
        # Walk the document tree with a LaTeXTranslator, then collect
        # the generated preamble (head) and body.
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        # The visitor's context stack must be balanced after the walk.
        assert not visitor.context, 'context not empty: %s' % visitor.context
        self.output = visitor.astext()
        self.head = visitor.header
        self.body = visitor.body
class LaTeXException(Exception):
    """
    Base class for exceptions which influence the automatic
    generation of LaTeX code.
    """
class SkipAttrParentLaTeX(LaTeXException):
    r"""
    Do not generate ``\DECattr`` and ``\renewcommand{\DEVparent}{...}`` for this
    node.

    To be raised from ``before_...`` methods.
    """
class SkipParentLaTeX(LaTeXException):
    r"""
    Do not generate ``\renewcommand{\DEVparent}{...}`` for this node.

    To be raised from ``before_...`` methods.
    """
class LaTeXTranslator(nodes.SparseNodeVisitor):
# Country code by a.schlock.
# Partly manually converted from iso and babel stuff.
iso639_to_babel = {
'no': 'norsk', # added by hand
'gd': 'scottish', # added by hand
'sl': 'slovenian',
'af': 'afrikaans',
'bg': 'bulgarian',
'br': 'breton',
'ca': 'catalan',
'cs': 'czech',
'cy': 'welsh',
'da': 'danish',
'fr': 'french',
# french, francais, canadien, acadian
'de': 'ngerman',
# ngerman, naustrian, german, germanb, austrian
'el': 'greek',
'en': 'english',
# english, USenglish, american, UKenglish, british, canadian
'eo': 'esperanto',
'es': 'spanish',
'et': 'estonian',
'eu': 'basque',
'fi': 'finnish',
'ga': 'irish',
'gl': 'galician',
'he': 'hebrew',
'hr': 'croatian',
'hu': 'hungarian',
'is': 'icelandic',
'it': 'italian',
'la': 'latin',
'nl': 'dutch',
'pl': 'polish',
'pt': 'portuguese',
'ro': 'romanian',
'ru': 'russian',
'sk': 'slovak',
'sr': 'serbian',
'sv': 'swedish',
'tr': 'turkish',
'uk': 'ukrainian'
}
# Start with left double quote.
left_quote = 1
    def __init__(self, document):
        """Set up translator state and emit the document header."""
        nodes.NodeVisitor.__init__(self, document)
        self.settings = document.settings
        # Preamble lines (filled by `write_header`).
        self.header = []
        # Body chunks; joined in `astext`.
        self.body = []
        # Context stack; asserted empty by Writer.translate after the walk.
        self.context = []
        self.stylesheet_path = utils.get_stylesheet_reference(
            self.settings, os.path.join(os.getcwd(), 'dummy'))
        if self.stylesheet_path:
            self.settings.record_dependencies.add(self.stylesheet_path)
        # This ugly hack will be cleaned up when refactoring the
        # stylesheet mess.
        self.settings.stylesheet = self.settings.user_stylesheet
        self.settings.stylesheet_path = self.settings.user_stylesheet_path
        self.user_stylesheet_path = utils.get_stylesheet_reference(
            self.settings, os.path.join(os.getcwd(), 'dummy'))
        if self.user_stylesheet_path:
            self.settings.record_dependencies.add(self.user_stylesheet_path)
        lang = self.settings.language_code or ''
        # Language-specific replacement for '"' in literal text (read by
        # `encode`).  NOTE(review): the attribute name keeps the
        # historical misspelling "replacment"; `encode` reads it under
        # exactly that name, so it must not be renamed in isolation.
        if lang.startswith('de'):
            self.double_quote_replacment = "{\\dq}"
        elif lang.startswith('it'):
            self.double_quote_replacment = r'{\char`\"}'
        else:
            self.double_quote_replacment = None
        self.write_header()
def write_header(self):
a = self.header.append
a('%% Generated by Docutils %s <http://docutils.sourceforge.net>.'
% docutils.__version__)
a('')
a('% Docutils settings:')
lang = self.settings.language_code or ''
a(r'\providecommand{\DEVlanguageiso}{%s}' % lang)
a(r'\providecommand{\DEVlanguagebabel}{%s}' % self.iso639_to_babel.get(
lang, self.iso639_to_babel.get(lang.split('_')[0], '')))
a('')
if self.user_stylesheet_path:
a('% User stylesheet:')
a(r'\input{%s}' % self.user_stylesheet_path)
a('% Docutils stylesheet:')
a(r'\input{%s}' % self.stylesheet_path)
a('')
a('% Default definitions for Docutils nodes:')
for node_name in nodes.node_class_names:
a(r'\providecommand{\DN%s}[1]{#1}' % node_name.replace('_', ''))
a('')
a('% Auxiliary definitions:')
for attr in (r'\DEVparent \DEVattrlen \DEVtitleastext '
r'\DEVsinglebackref \DEVmultiplebackrefs'
).split():
# Later set using \renewcommand.
a(r'\providecommand{%s}{DOCUTILSUNINITIALIZEDVARIABLE}' % attr)
for attr in (r'\DEVparagraphindented \DEVhassubtitle').split():
# Initialize as boolean variables.
a(r'\providecommand{%s}{false}' % attr)
a('\n\n')
unicode_map = unicode_map.unicode_map # comprehensive Unicode map
# Fix problems with unimap.py.
unicode_map.update({
# We have AE or T1 encoding, so "``" etc. work. The macros
# from unimap.py may *not* work.
u'\u201C': '{``}',
u'\u201D': "{''}",
u'\u201E': '{,,}',
})
character_map = {
'\\': r'{\textbackslash}',
'{': r'{\{}',
'}': r'{\}}',
'$': r'{\$}',
'&': r'{\&}',
'%': r'{\%}',
'#': r'{\#}',
'[': r'{[}',
']': r'{]}',
'-': r'{-}',
'`': r'{`}',
"'": r"{'}",
',': r'{,}',
'"': r'{"}',
'|': r'{\textbar}',
'<': r'{\textless}',
'>': r'{\textgreater}',
'^': r'{\textasciicircum}',
'~': r'{\textasciitilde}',
'_': r'{\DECtextunderscore}',
}
character_map.update(unicode_map)
#character_map.update(special_map)
# `att_map` is for encoding attributes. According to
# <http://www-h.eng.cam.ac.uk/help/tpl/textprocessing/teTeX/latex/latex2e-html/ltx-164.html>,
# the following characters are special: # $ % & ~ _ ^ \ { }
# These work without special treatment in macro parameters:
# $, &, ~, _, ^
att_map = {'#': '\\#',
'%': '\\%',
# We cannot do anything about backslashes.
'\\': '',
'{': '\\{',
'}': '\\}',
# The quotation mark may be redefined by babel.
'"': '"{}',
}
att_map.update(unicode_map)
def encode(self, text, attval=None):
"""
Encode special characters in ``text`` and return it.
If attval is true, preserve as much as possible verbatim (used
in attribute value encoding). If attval is 'width' or
'height', `text` is interpreted as a length value.
"""
if attval in ('width', 'height'):
match = re.match(r'([0-9.]+)(\S*)$', text)
assert match, '%s="%s" must be a length' % (attval, text)
value, unit = match.groups()
if unit == '%':
value = str(float(value) / 100)
unit = r'\DECrelativeunit'
elif unit in ('', 'px'):
# If \DECpixelunit is "pt", this gives the same notion
# of pixels as graphicx. This is a bit of a hack.
value = str(float(value) * 0.75)
unit = '\DECpixelunit'
return '%s%s' % (value, unit)
if attval:
get = self.att_map.get
else:
get = self.character_map.get
text = ''.join([get(c, c) for c in text])
if (self.literal_block or self.inline_literal) and not attval:
# NB: We can have inline literals within literal blocks.
# Shrink '\r\n'.
text = text.replace('\r\n', '\n')
# Convert space. If "{ }~~~~~" is wrapped (at the
# brace-enclosed space "{ }"), the following non-breaking
# spaces ("~~~~") do *not* wind up at the beginning of the
# next line. Also note that no hyphenation is done if the
# breaking space ("{ }") comes *after* the non-breaking
# spaces.
if self.literal_block:
# Replace newlines with real newlines.
text = text.replace('\n', '\mbox{}\\\\{}')
replace_fn = self.encode_replace_for_literal_block_spaces
else:
replace_fn = self.encode_replace_for_inline_literal_spaces
text = re.sub(r'\s+', replace_fn, text)
# Protect hyphens; if we don't, line breaks will be
# possible at the hyphens and even the \textnhtt macro
# from the hyphenat package won't change that.
text = text.replace('-', r'\mbox{-}')
text = text.replace("'", r'{\DECtextliteralsinglequote}')
if self.double_quote_replacment is not None:
text = text.replace('"', self.double_quote_replacment)
return text
else:
if not attval:
# Replace space with single protected space.
text = re.sub(r'\s+', '{ }', text)
# Replace double quotes with macro calls.
L = []
for part in text.split(self.character_map['"']):
if L:
# Insert quote.
L.append(self.left_quote and r'{\DECtextleftdblquote}'
or r'{\DECtextrightdblquote}')
self.left_quote = not self.left_quote
L.append(part)
return ''.join(L)
else:
return text
def encode_replace_for_literal_block_spaces(self, match):
return '~' * len(match.group())
def encode_replace_for_inline_literal_spaces(self, match):
return '{ }' + '~' * (len(match.group()) - 1)
def astext(self):
return '\n'.join(self.header) + (''.join(self.body))
def append(self, text, newline='%\n'):
"""
Append text, stripping newlines, producing nice LaTeX code.
"""
lines = [' ' * self.indentation_level + line + newline
for line in text.splitlines(0)]
self.body.append(''.join(lines))
    def visit_Text(self, node):
        # All document text passes through `encode` before being emitted.
        self.append(self.encode(node.astext()))

    def depart_Text(self, node):
        pass
    def is_indented(self, paragraph):
        """Return true if `paragraph` should be first-line-indented."""
        assert isinstance(paragraph, nodes.paragraph)
        # Visible, non-title siblings (including `paragraph` itself).
        siblings = [n for n in paragraph.parent if
                    self.is_visible(n) and not isinstance(n, nodes.Titular)]
        index = siblings.index(paragraph)
        # No indent for "continued" paragraphs or right after a transition.
        # (Note: `or` binds looser than the `and` chain on the next line.)
        if ('continued' in paragraph['classes'] or
            index > 0 and isinstance(siblings[index-1], nodes.transition)):
            return 0
        # Indent all but the first paragraphs.
        return index > 0
def before_paragraph(self, node):
self.append(r'\renewcommand{\DEVparagraphindented}{%s}'
% (self.is_indented(node) and 'true' or 'false'))
def before_title(self, node):
self.append(r'\renewcommand{\DEVtitleastext}{%s}'
% self.encode(node.astext()))
self.append(r'\renewcommand{\DEVhassubtitle}{%s}'
% ((len(node.parent) > 2 and
isinstance(node.parent[1], nodes.subtitle))
and 'true' or 'false'))
    def before_generated(self, node):
        # Strip surrounding whitespace from auto-generated section
        # numbers; spacing is presumably handled by the stylesheet.
        if 'sectnum' in node['classes']:
            node[0] = node[0].strip()
    # Nonzero while inside a literal (or doctest) block; `encode`
    # switches to verbatim-style escaping when this is set.
    literal_block = 0

    def visit_literal_block(self, node):
        self.literal_block = 1

    def depart_literal_block(self, node):
        self.literal_block = 0

    # Doctest blocks are escaped exactly like literal blocks.
    visit_doctest_block = visit_literal_block
    depart_doctest_block = depart_literal_block

    # Nesting depth of inline literals; any depth > 0 triggers the same
    # verbatim-style escaping in `encode`.
    inline_literal = 0

    def visit_literal(self, node):
        self.inline_literal += 1

    def depart_literal(self, node):
        self.inline_literal -= 1
def _make_encodable(self, text):
"""
Return text (a unicode object) with all unencodable characters
replaced with '?'.
Thus, the returned unicode string is guaranteed to be encodable.
"""
encoding = self.settings.output_encoding
return text.encode(encoding, 'replace').decode(encoding)
def visit_comment(self, node):
"""
Insert the comment unchanged into the document, replacing
unencodable characters with '?'.
(This is done in order not to fail if comments contain unencodable
characters, because our default encoding is not UTF-8.)
"""
self.append('\n'.join(['% ' + self._make_encodable(line) for line
in node.astext().splitlines(0)]), newline='\n')
raise nodes.SkipChildren
    def before_topic(self, node):
        # Restructure table-of-contents topics in place before rendering.
        if 'contents' in node['classes']:
            # Un-nest bullet lists: hoist each nested list out of its
            # list item to become that item's following sibling.
            for bullet_list in list(node.traverse(nodes.bullet_list)):
                p = bullet_list.parent
                if isinstance(p, nodes.list_item):
                    p.parent.insert(p.parent.index(p) + 1, bullet_list)
                    del p[1]
            # Unwrap each paragraph's single child (the TOC reference):
            # copy its attributes onto the paragraph, replace the
            # paragraph's children with the child's children, and record
            # the link target id on the parent for the stylesheet.
            for paragraph in node.traverse(nodes.paragraph):
                paragraph.attributes.update(paragraph[0].attributes)
                paragraph[:] = paragraph[0]
                paragraph.parent['tocrefid'] = paragraph['refid']
            node['contents'] = 1
        else:
            node['contents'] = 0
bullet_list_level = 0
def visit_bullet_list(self, node):
self.append(r'\DECsetbullet{\labelitem%s}' %
['i', 'ii', 'iii', 'iv'][min(self.bullet_list_level, 3)])
self.bullet_list_level += 1
def depart_bullet_list(self, node):
self.bullet_list_level -= 1
    # Docutils enumeration type -> LaTeX counter style name.
    enum_styles = {'arabic': 'arabic', 'loweralpha': 'alph', 'upperalpha':
                   'Alph', 'lowerroman': 'roman', 'upperroman': 'Roman'}

    # Serial number used to generate unique counter names per list.
    enum_counter = 0

    def visit_enumerated_list(self, node):
        # We create our own enumeration list environment.  This allows
        # to set the style and starting value and unlimited nesting.
        # Maybe the actual creation (\DEC) can be moved to the
        # stylesheet?
        self.enum_counter += 1
        enum_prefix = self.encode(node['prefix'])
        enum_suffix = self.encode(node['suffix'])
        enum_type = '\\' + self.enum_styles.get(node['enumtype'], r'arabic')
        # LaTeX counters are zero-based here; Docutils 'start' is one-based.
        start = node.get('start', 1) - 1
        counter = 'Denumcounter%d' % self.enum_counter
        self.append(r'\DECmakeenumeratedlist{%s}{%s}{%s}{%s}{%s}{'
                    % (enum_prefix, enum_type, enum_suffix, counter, start))
        # for Emacs: }

    def depart_enumerated_list(self, node):
        self.append('}') # for Emacs: {
    def before_list_item(self, node):
        # XXX needs cleanup.
        # Mark the final text-bearing item of a list so the stylesheet
        # can treat it specially.
        if (len(node) and (isinstance(node[-1], nodes.TextElement) or
                           isinstance(node[-1], nodes.Text)) and
                node.parent.index(node) == len(node.parent) - 1):
            node['lastitem'] = 'true'

    # Lines get the same last-item marking as list items.
    before_line = before_list_item

    def before_raw(self, node):
        # Only raw LaTeX is passed through; other formats are dropped.
        if 'latex' in node.get('format', '').split():
            # We're inserting the text in before_raw and thus outside
            # of \DN... and \DECattr in order to make grouping with
            # curly brackets work.
            self.append(node.astext())
        raise nodes.SkipChildren
    def process_backlinks(self, node, type):
        """
        Add LaTeX handling code for backlinks of footnote or citation
        node `node`.  `type` is either 'footnote' or 'citation'.
        """
        # Reset both backref macros, then redefine whichever applies.
        self.append(r'\renewcommand{\DEVsinglebackref}{}')
        self.append(r'\renewcommand{\DEVmultiplebackrefs}{}')
        if len(node['backrefs']) > 1:
            refs = []
            for i in range(len(node['backrefs'])):
                # \DECmulticitationbacklink or \DECmultifootnotebacklink.
                refs.append(r'\DECmulti%sbacklink{%s}{%s}'
                            % (type, node['backrefs'][i], i + 1))
            self.append(r'\renewcommand{\DEVmultiplebackrefs}{(%s){ }}'
                        % ', '.join(refs))
        elif len(node['backrefs']) == 1:
            self.append(r'\renewcommand{\DEVsinglebackref}{%s}'
                        % node['backrefs'][0])

    def visit_footnote(self, node):
        self.process_backlinks(node, 'footnote')

    def visit_citation(self, node):
        self.process_backlinks(node, 'citation')

    def before_table(self, node):
        # A table contains exactly one tgroup.  See before_tgroup.
        pass
    def before_tgroup(self, node):
        # Build the LaTeX column specification from the colspec
        # children, then delete them (they are rendered only through
        # the tablespec).
        widths = []
        total_width = 0
        for i in range(int(node['cols'])):
            assert isinstance(node[i], nodes.colspec)
            # +1 leaves a little room beyond the declared column width.
            widths.append(int(node[i]['colwidth']) + 1)
            total_width += widths[-1]
        del node[:len(widths)]
        tablespec = '|'
        for w in widths:
            # 0.93 is probably wrong in many cases.  XXX Find a
            # solution which works *always*.
            tablespec += r'p{%s\textwidth}|' % (0.93 * w /
                                                max(total_width, 60))
        self.append(r'\DECmaketable{%s}{' % tablespec)
        self.context.append('}')
        raise SkipAttrParentLaTeX

    def depart_tgroup(self, node):
        self.append(self.context.pop())

    # Rows and row containers contribute no attribute/parent handling
    # of their own.
    def before_row(self, node):
        raise SkipAttrParentLaTeX

    def before_thead(self, node):
        raise SkipAttrParentLaTeX

    def before_tbody(self, node):
        raise SkipAttrParentLaTeX

    def is_simply_entry(self, node):
        # An entry is "simple" when it is empty or holds exactly one
        # paragraph; only such entries may span columns.
        return (len(node) == 1 and isinstance(node[0], nodes.paragraph) or
                len(node) == 0)
    def before_entry(self, node):
        # XXX needs cleanup.
        is_leftmost = 0
        if node.hasattr('morerows'):
            self.document.reporter.severe('Rowspans are not supported.')
            # Todo: Add empty cells below rowspanning cell and issue
            # warning instead of severe.
        if node.hasattr('morecols'):
            # The author got a headache trying to implement
            # multicolumn support.
            if not self.is_simply_entry(node):
                self.document.reporter.severe(
                    'Colspanning table cells may only contain one paragraph.')
                # Todo: Same as above.
            # The number of columns this entry spans (as a string).
            colspan = int(node['morecols']) + 1
            del node['morecols']
        else:
            colspan = 1
        # Macro to call -- \DECcolspan or \DECcolspanleft.
        macro_name = r'\DECcolspan'
        if node.parent.index(node) == 0:
            # Leftmost column.
            macro_name += 'left'
            is_leftmost = 1
        if colspan > 1:
            self.append('%s{%s}{' % (macro_name, colspan))
            self.context.append('}')
        else:
            # Do not add a multicolumn with colspan 1 because we need
            # at least one non-multicolumn cell per column to get the
            # desired column widths, and we can only do colspans with
            # cells consisting of only one paragraph.
            if not is_leftmost:
                self.append(r'\DECsubsequententry{')
                self.context.append('}')
            else:
                self.context.append('')
        if isinstance(node.parent.parent, nodes.thead):
            # Mark header cells for the stylesheet.
            node['tableheaderentry'] = 'true'
        # Don't add \renewcommand{\DEVparent}{...} because there must
        # not be any non-expandable commands in front of \multicolumn.
        raise SkipParentLaTeX

    def depart_entry(self, node):
        self.append(self.context.pop())

    def before_substitution_definition(self, node):
        # Substitution definitions produce no output at all.
        raise nodes.SkipNode
    # Element nesting depth; maintained by dispatch_visit and
    # dispatch_departure.
    indentation_level = 0

    def node_name(self, node):
        # LaTeX macro names cannot contain underscores, so strip them
        # from the docutils class name.
        return node.__class__.__name__.replace('_', '')

    # Attribute propagation order.
    attribute_order = ['align', 'classes', 'ids']

    def attribute_cmp(self, a1, a2):
        """
        Compare attribute names `a1` and `a2`.  Used in
        propagate_attributes to determine propagation order.

        See built-in function `cmp` for return value.
        """
        if a1 in self.attribute_order and a2 in self.attribute_order:
            return cmp(self.attribute_order.index(a1),
                       self.attribute_order.index(a2))
        if (a1 in self.attribute_order) != (a2 in self.attribute_order):
            # Attributes not in self.attribute_order come last.
            return a1 in self.attribute_order and -1 or 1
        else:
            # Neither name is listed; fall back to alphabetical order.
            return cmp(a1, a2)
    def propagate_attributes(self, node):
        # Propagate attributes using \DECattr macros.
        node_name = self.node_name(node)
        attlist = []
        if isinstance(node, nodes.Element):
            attlist = node.attlist()
        attlist.sort(lambda pair1, pair2: self.attribute_cmp(pair1[0],
                                                             pair2[0]))
        # `numatts` may be greater than len(attlist) due to list
        # attributes.
        numatts = 0
        pass_contents = self.pass_contents(node)
        for key, value in attlist:
            if isinstance(value, list):
                # One \DECattr per list element, announcing the list
                # length first.
                self.append(r'\renewcommand{\DEVattrlen}{%s}' % len(value))
                for i in range(len(value)):
                    self.append(r'\DECattr{%s}{%s}{%s}{%s}{' %
                                (i+1, key, self.encode(value[i], attval=key),
                                 node_name))
                    if not pass_contents:
                        # Close immediately; contents are not nested.
                        self.append('}')
                numatts += len(value)
            else:
                self.append(r'\DECattr{}{%s}{%s}{%s}{' %
                            (key, self.encode(unicode(value), attval=key),
                             node_name))
                if not pass_contents:
                    self.append('}')
                numatts += 1
        if pass_contents:
            # Close all \DECattr groups after the node contents.
            self.context.append('}' * numatts) # for Emacs: {
        else:
            self.context.append('')
    def visit_docinfo(self, node):
        raise NotImplementedError('Docinfo not yet implemented.')

    def visit_document(self, node):
        document = node
        # Move IDs into TextElements.  This won't work for images.
        # Need to review this.
        for node in document.traverse(nodes.Element):
            if 'ids' in node and not isinstance(node,
                                                nodes.TextElement):
                next_text_element = node.next_node(nodes.TextElement)
                if next_text_element:
                    next_text_element['ids'].extend(node['ids'])
                node['ids'] = []

    def pass_contents(self, node):
        r"""
        Return True if the node contents should be passed in
        \DN<nodename>{<contents>} and \DECattr{}{}{}{}{<contents>}.
        Return False if the node contents should be passed in
        \DECvisit<nodename> <contents> \DECdepart<nodename>, and no
        attribute handler should be called.
        """
        # Passing the whole document or whole sections as parameters
        # to \DN... or \DECattr causes LaTeX to run out of memory.
        return not isinstance(node, (nodes.document, nodes.section))
    def dispatch_visit(self, node):
        skip_attr = skip_parent = 0
        # TreePruningException to be propagated.
        tree_pruning_exception = None
        # Run the before_<nodename> handler first (if any); it
        # communicates with us via the Skip... exceptions.
        if hasattr(self, 'before_' + node.__class__.__name__):
            try:
                getattr(self, 'before_' + node.__class__.__name__)(node)
            except SkipParentLaTeX:
                skip_parent = 1
            except SkipAttrParentLaTeX:
                skip_attr = 1
                skip_parent = 1
            except nodes.SkipNode:
                raise
            except (nodes.SkipChildren, nodes.SkipSiblings), instance:
                # Re-raised at the end, after the node handler is open.
                tree_pruning_exception = instance
            except nodes.SkipDeparture:
                raise NotImplementedError(
                    'SkipDeparture not usable in LaTeX writer')
        if not isinstance(node, nodes.Text):
            node_name = self.node_name(node)
            # attribute_deleters will be appended to self.context.
            attribute_deleters = []
            if not skip_parent and not isinstance(node, nodes.document):
                self.append(r'\renewcommand{\DEVparent}{%s}'
                            % self.node_name(node.parent))
                for name, value in node.attlist():
                    if not isinstance(value, list) and not ':' in name:
                        # For non-list and non-special (like
                        # 'xml:preserve') attributes, set
                        # \DEVcurrentN<nodename>A<attribute> to the
                        # attribute value, so that the value of the
                        # attribute is available in the node handler
                        # and all children.
                        macro = r'\DEVcurrentN%sA%s' % (node_name, name)
                        self.append(r'\def%s{%s}' % (
                            macro, self.encode(unicode(value), attval=name)))
                        # Make the attribute undefined afterwards.
                        attribute_deleters.append(r'\let%s=\relax' % macro)
            self.context.append('\n'.join(attribute_deleters))
            if self.pass_contents(node):
                # Call \DN<nodename>{<contents>}.
                self.append(r'\DN%s{' % node_name)
                self.context.append('}')
            else:
                # Call \DECvisit<nodename> <contents>
                # \DECdepart<nodename>.  (Maybe we should use LaTeX
                # environments for this?)
                self.append(r'\DECvisit%s' % node_name)
                self.context.append(r'\DECdepart%s' % node_name)
            self.indentation_level += 1
            if not skip_attr:
                self.propagate_attributes(node)
            else:
                self.context.append('')
        if (isinstance(node, nodes.TextElement) and
                not isinstance(node.parent, nodes.TextElement)):
            # Reset current quote to left.
            self.left_quote = 1
        # Call visit_... method.
        try:
            nodes.SparseNodeVisitor.dispatch_visit(self, node)
        except LaTeXException:
            raise NotImplementedError(
                'visit_... methods must not raise LaTeXExceptions')
        if tree_pruning_exception:
            # Propagate TreePruningException raised in before_... method.
            raise tree_pruning_exception
def is_invisible(self, node):
# Return true if node is invisible or moved away in the LaTeX
# rendering.
return (not isinstance(node, nodes.Text) and
(isinstance(node, nodes.Invisible) or
isinstance(node, nodes.footnote) or
isinstance(node, nodes.citation) or
# Assume raw nodes to be invisible.
isinstance(node, nodes.raw) or
# Floating image or figure.
node.get('align') in ('left', 'right')))
def is_visible(self, node):
return not self.is_invisible(node)
    def needs_space(self, node):
        """Two nodes for which `needs_space` is true need auxiliary space."""
        # Return true if node is a visible block-level element.
        return ((isinstance(node, nodes.Body) or
                 isinstance(node, nodes.topic)) and
                not (self.is_invisible(node) or
                     isinstance(node.parent, nodes.TextElement)))

    def always_needs_space(self, node):
        """
        Always add space around nodes for which `always_needs_space()`
        is true, regardless of whether the other node needs space as
        well.  (E.g. transition next to section.)
        """
        return isinstance(node, nodes.transition)
    def dispatch_departure(self, node):
        # Call departure method.
        nodes.SparseNodeVisitor.dispatch_departure(self, node)
        if not isinstance(node, nodes.Text):
            # Close attribute and node handler call (\DN...{...}).
            self.indentation_level -= 1
            self.append(self.context.pop() + self.context.pop())
            # Delete \DECcurrentN... attribute macros.
            self.append(self.context.pop())
            # Get next sibling.
            next_node = node.next_node(
                ascend=0, siblings=1, descend=0,
                condition=self.is_visible)
            # Insert space if necessary.
            if (self.needs_space(node) and self.needs_space(next_node) or
                    self.always_needs_space(node) or
                    self.always_needs_space(next_node)):
                if isinstance(node, nodes.paragraph) and isinstance(next_node, nodes.paragraph):
                    # Space between paragraphs.
                    self.append(r'\DECparagraphspace')
                else:
                    # One of the elements is not a paragraph.
                    self.append(r'\DECauxiliaryspace')
| Python |
# $Id: __init__.py 6328 2010-05-23 21:20:29Z gbrandl $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
PEP HTML Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import codecs
import docutils
from docutils import frontend, nodes, utils, writers
from docutils.writers import html4css1
class Writer(html4css1.Writer):

    """HTML writer specialized for PEP documents."""

    default_stylesheet = 'pep.css'

    # 'dummy' is a placeholder file name so relative_path() yields a
    # path relative to the current working directory itself.
    default_stylesheet_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_stylesheet))

    default_template = 'template.txt'

    default_template_path = utils.relative_path(
        os.path.join(os.getcwd(), 'dummy'),
        os.path.join(os.path.dirname(__file__), default_template))

    settings_spec = html4css1.Writer.settings_spec + (
        'PEP/HTML-Specific Options',
        'For the PEP/HTML writer, the default value for the --stylesheet-path '
        'option is "%s", and the default value for --template is "%s". '
        'See HTML-Specific Options above.'
        % (default_stylesheet_path, default_template_path),
        (('Python\'s home URL. Default is "http://www.python.org".',
          ['--python-home'],
          {'default': 'http://www.python.org', 'metavar': '<URL>'}),
         ('Home URL prefix for PEPs. Default is "." (current directory).',
          ['--pep-home'],
          {'default': '.', 'metavar': '<URL>'}),
         # For testing.
         (frontend.SUPPRESS_HELP,
          ['--no-random'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),))

    settings_default_overrides = {'stylesheet_path': default_stylesheet_path,
                                  'template': default_template_path,}

    relative_path_settings = (html4css1.Writer.relative_path_settings
                              + ('template',))

    config_section = 'pep_html writer'
    config_section_dependencies = ('writers', 'html4css1 writer')

    def __init__(self):
        html4css1.Writer.__init__(self)
        self.translator_class = HTMLTranslator

    def interpolation_dict(self):
        """Return the substitution mapping used to fill the PEP template."""
        subs = html4css1.Writer.interpolation_dict(self)
        settings = self.document.settings
        pyhome = settings.python_home
        subs['pyhome'] = pyhome
        subs['pephome'] = settings.pep_home
        if pyhome == '..':
            # Relative deployment: the PEP index is the current directory.
            subs['pepindex'] = '.'
        else:
            subs['pepindex'] = pyhome + '/dev/peps'
        # The first field list holds the PEP headers; field 0 is the
        # PEP number, field 1 the title.
        index = self.document.first_child_matching_class(nodes.field_list)
        header = self.document[index]
        self.pepnum = header[0][1].astext()
        subs['pep'] = self.pepnum
        if settings.no_random:
            # Deterministic banner for testing (see --no-random).
            subs['banner'] = 0
        else:
            import random
            subs['banner'] = random.randrange(64)
        try:
            # Zero-pad numeric PEP numbers to four digits.
            subs['pepnum'] = '%04i' % int(self.pepnum)
        except ValueError:
            # Non-numeric PEP "numbers" are used verbatim.
            subs['pepnum'] = self.pepnum
        self.title = header[1][1].astext()
        subs['title'] = self.title
        subs['body'] = ''.join(
            self.body_pre_docinfo + self.docinfo + self.body)
        return subs

    def assemble_parts(self):
        html4css1.Writer.assemble_parts(self)
        # Expose the PEP title and number as separate document parts.
        self.parts['title'] = [self.title]
        self.parts['pepnum'] = self.pepnum
class HTMLTranslator(html4css1.HTMLTranslator):

    def depart_field_list(self, node):
        """Add a horizontal rule after the RFC 2822 header field list."""
        html4css1.HTMLTranslator.depart_field_list(self, node)
        if 'rfc2822' in node['classes']:
            self.body.append('<hr />\n')
| Python |
# $Id: pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Simple internal document tree Writer, writes indented pseudo-XML.
"""
__docformat__ = 'reStructuredText'
from docutils import writers
class Writer(writers.Writer):

    supported = ('pprint', 'pformat', 'pseudoxml')
    """Formats this writer supports."""

    config_section = 'pseudoxml writer'
    config_section_dependencies = ('writers',)

    output = None
    """Final translated form of `document`."""

    def translate(self):
        # pformat() renders the doctree as indented pseudo-XML text.
        self.output = self.document.pformat()

    def supports(self, format):
        """This writer supports all format-specific elements."""
        return 1
| Python |
# $Id: __init__.py 6111 2009-09-02 21:36:05Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils Writer modules.
"""
__docformat__ = 'reStructuredText'
import os.path
import docutils
from docutils import languages, Component
from docutils.transforms import universal
class Writer(Component):

    """
    Abstract base class for docutils Writers.

    Each writer module or package must export a subclass also called 'Writer'.
    Each writer must support all standard node types listed in
    `docutils.nodes.node_class_names`.

    The `write()` method is the main entry point.
    """

    component_type = 'writer'
    config_section = 'writers'

    def get_transforms(self):
        # In addition to the transforms inherited from Component,
        # writers filter out messages and stripped classes/elements.
        return Component.get_transforms(self) + [
            universal.Messages,
            universal.FilterMessages,
            universal.StripClassesAndElements,]

    document = None
    """The document to write (Docutils doctree); set by `write`."""

    output = None
    """Final translated form of `document` (Unicode string for text, binary
    string for other forms); set by `translate`."""

    language = None
    """Language module for the document; set by `write`."""

    destination = None
    """`docutils.io` Output object; where to write the document.
    Set by `write`."""

    def __init__(self):

        # Used by HTML and LaTex writer for output fragments:
        self.parts = {}
        """Mapping of document part names to fragments of `self.output`.
        Values are Unicode strings; encoding is up to the client.  The
        'whole' key should contain the entire document output.
        """

    def write(self, document, destination):
        """
        Process a document into its final form.

        Translate `document` (a Docutils document tree) into the Writer's
        native format, and write it out to its `destination` (a
        `docutils.io.Output` subclass object).

        Normally not overridden or extended in subclasses.
        """
        self.document = document
        self.language = languages.get_language(
            document.settings.language_code)
        self.destination = destination
        self.translate()
        # Return whatever the Output object produced (possibly encoded).
        output = self.destination.write(self.output)
        return output

    def translate(self):
        """
        Do final translation of `self.document` into `self.output`.  Called
        from `write`.  Override in subclasses.

        Usually done with a `docutils.nodes.NodeVisitor` subclass, in
        combination with a call to `docutils.nodes.Node.walk()` or
        `docutils.nodes.Node.walkabout()`.  The ``NodeVisitor`` subclass must
        support all standard elements (listed in
        `docutils.nodes.node_class_names`) and possibly non-standard elements
        used by the current Reader as well.
        """
        raise NotImplementedError('subclass must override this method')

    def assemble_parts(self):
        """Assemble the `self.parts` dictionary.  Extend in subclasses."""
        self.parts['whole'] = self.output
        self.parts['encoding'] = self.document.settings.output_encoding
        self.parts['version'] = docutils.__version__
class UnfilteredWriter(Writer):

    """
    A writer that passes the document tree on unchanged (e.g. a
    serializer.)

    Documents written by UnfilteredWriters are typically reused at a
    later date using a subclass of `readers.ReReader`.
    """

    def get_transforms(self):
        # Do not add any transforms.  When the document is reused
        # later, the then-used writer will add the appropriate
        # transforms.
        return Component.get_transforms(self)
# Convenience aliases mapping short writer names to module names.
_writer_aliases = {
      'html': 'html4css1',
      'latex': 'latex2e',
      'pprint': 'pseudoxml',
      'pformat': 'pseudoxml',
      'pdf': 'rlpdf',
      'xml': 'docutils_xml',
      's5': 's5_html'}
def get_writer_class(writer_name):
    """Return the Writer class from the `writer_name` module."""
    # Normalize case and resolve short aliases such as 'html' or 'latex'.
    name = writer_name.lower()
    name = _writer_aliases.get(name, name)
    module = __import__(name, globals(), locals())
    return module.Writer
| Python |
# $Id: docutils_xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Simple internal document tree Writer, writes Docutils XML.
"""
__docformat__ = 'reStructuredText'
import docutils
from docutils import frontend, writers
class Writer(writers.Writer):

    supported = ('xml',)
    """Formats this writer supports."""

    settings_spec = (
        '"Docutils XML" Writer Options',
        'Warning: the --newlines and --indents options may adversely affect '
        'whitespace; use them only for reading convenience.',
        (('Generate XML with newlines before and after tags.',
          ['--newlines'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Generate XML with indents and newlines.',
          ['--indents'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Omit the XML declaration. Use with caution.',
          ['--no-xml-declaration'],
          {'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Omit the DOCTYPE declaration.',
          ['--no-doctype'],
          {'dest': 'doctype_declaration', 'default': 1,
           'action': 'store_false', 'validator': frontend.validate_boolean}),))

    # Characters outside the output encoding become numeric character
    # references, keeping the XML well-formed.
    settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}

    config_section = 'docutils_xml writer'
    config_section_dependencies = ('writers',)

    output = None
    """Final translated form of `document`."""

    # Boilerplate emitted ahead of the document element.
    xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
    #xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
    doctype = (
        '<!DOCTYPE document PUBLIC'
        ' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
        ' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
    generator = '<!-- Generated by Docutils %s -->\n'

    def translate(self):
        settings = self.document.settings
        indent = newline = ''
        if settings.newlines:
            newline = '\n'
        if settings.indents:
            # --indents implies newlines.
            newline = '\n'
            indent = '    '
        output_prefix = []
        if settings.xml_declaration:
            output_prefix.append(
                self.xml_declaration % settings.output_encoding)
        if settings.doctype_declaration:
            output_prefix.append(self.doctype)
        output_prefix.append(self.generator % docutils.__version__)
        # Serialize through the DOM form of the document tree.
        docnode = self.document.asdom().childNodes[0]
        self.output = (''.join(output_prefix)
                       + docnode.toprettyxml(indent, newline))
| Python |
# $Id: urischemes.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
`schemes` is a dictionary with lowercase URI addressing schemes as
keys and descriptions as values. It was compiled from the index at
http://www.iana.org/assignments/uri-schemes (revised 2005-11-28)
and an older list at http://www.w3.org/Addressing/schemes.html.
"""
# Many values are blank and should be filled in with useful descriptions.
# Maps lowercase URI scheme names to short human-readable descriptions,
# citing the defining RFC where known.
schemes = {
      'about': 'provides information on Navigator',
      'acap': 'Application Configuration Access Protocol; RFC 2244',
      'addbook': "To add vCard entries to Communicator's Address Book",
      'afp': 'Apple Filing Protocol',
      'afs': 'Andrew File System global file names',
      'aim': 'AOL Instant Messenger',
      'callto': 'for NetMeeting links',
      'castanet': 'Castanet Tuner URLs for Netcaster',
      'chttp': 'cached HTTP supported by RealPlayer',
      'cid': 'content identifier; RFC 2392',
      'crid': 'TV-Anytime Content Reference Identifier; RFC 4078',
      'data': ('allows inclusion of small data items as "immediate" data; '
               'RFC 2397'),
      'dav': 'Distributed Authoring and Versioning Protocol; RFC 2518',
      'dict': 'dictionary service protocol; RFC 2229',
      'dns': 'Domain Name System resources',
      'eid': ('External ID; non-URL data; general escape mechanism to allow '
              'access to information for applications that are too '
              'specialized to justify their own schemes'),
      'fax': ('a connection to a terminal that can handle telefaxes '
              '(facsimiles); RFC 2806'),
      'feed' : 'NetNewsWire feed',
      'file': 'Host-specific file names; RFC 1738',
      'finger': '',
      'freenet': '',
      'ftp': 'File Transfer Protocol; RFC 1738',
      'go': 'go; RFC 3368',
      'gopher': 'The Gopher Protocol',
      'gsm-sms': ('Global System for Mobile Communications Short Message '
                  'Service'),
      'h323': ('video (audiovisual) communication on local area networks; '
               'RFC 3508'),
      'h324': ('video and audio communications over low bitrate connections '
               'such as POTS modem connections'),
      'hdl': 'CNRI handle system',
      'hnews': 'an HTTP-tunneling variant of the NNTP news protocol',
      'http': 'Hypertext Transfer Protocol; RFC 2616',
      'https': 'HTTP over SSL; RFC 2818',
      'hydra': 'SubEthaEdit URI. See http://www.codingmonkeys.de/subethaedit.',
      'iioploc': 'Internet Inter-ORB Protocol Location?',
      'ilu': 'Inter-Language Unification',
      'im': 'Instant Messaging; RFC 3860',
      'imap': 'Internet Message Access Protocol; RFC 2192',
      'info': 'Information Assets with Identifiers in Public Namespaces',
      'ior': 'CORBA interoperable object reference',
      'ipp': 'Internet Printing Protocol; RFC 3510',
      'irc': 'Internet Relay Chat',
      'iris.beep': 'iris.beep; RFC 3983',
      'iseek' : 'See www.ambrosiasw.com; a little util for OS X.',
      'jar': 'Java archive',
      'javascript': ('JavaScript code; evaluates the expression after the '
                     'colon'),
      'jdbc': 'JDBC connection URI.',
      'ldap': 'Lightweight Directory Access Protocol',
      'lifn': '',
      'livescript': '',
      'lrq': '',
      'mailbox': 'Mail folder access',
      'mailserver': 'Access to data available from mail servers',
      'mailto': 'Electronic mail address; RFC 2368',
      'md5': '',
      'mid': 'message identifier; RFC 2392',
      'mocha': '',
      'modem': ('a connection to a terminal that can handle incoming data '
                'calls; RFC 2806'),
      'mtqp': 'Message Tracking Query Protocol; RFC 3887',
      'mupdate': 'Mailbox Update (MUPDATE) Protocol; RFC 3656',
      'news': 'USENET news; RFC 1738',
      'nfs': 'Network File System protocol; RFC 2224',
      'nntp': 'USENET news using NNTP access; RFC 1738',
      'opaquelocktoken': 'RFC 2518',
      'phone': '',
      'pop': 'Post Office Protocol; RFC 2384',
      'pop3': 'Post Office Protocol v3',
      'pres': 'Presence; RFC 3859',
      'printer': '',
      'prospero': 'Prospero Directory Service; RFC 4157',
      'rdar' : ('URLs found in Darwin source '
                '(http://www.opensource.apple.com/darwinsource/).'),
      'res': '',
      'rtsp': 'real time streaming protocol; RFC 2326',
      'rvp': '',
      'rwhois': '',
      'rx': 'Remote Execution',
      'sdp': '',
      'service': 'service location; RFC 2609',
      'shttp': 'secure hypertext transfer protocol',
      'sip': 'Session Initiation Protocol; RFC 3261',
      # Fixed typo: was "intitiaion".
      'sips': 'secure session initiation protocol; RFC 3261',
      'smb': 'SAMBA filesystems.',
      'snews': 'For NNTP postings via SSL',
      'snmp': 'Simple Network Management Protocol; RFC 4088',
      'soap.beep': 'RFC 3288',
      'soap.beeps': 'RFC 3288',
      'ssh': 'Reference to interactive sessions via ssh.',
      't120': 'real time data conferencing (audiographics)',
      'tag': 'RFC 4151',
      'tcp': '',
      'tel': ('a connection to a terminal that handles normal voice '
              'telephone calls, a voice mailbox or another voice messaging '
              'system or a service that can be operated using DTMF tones; '
              'RFC 2806.'),
      'telephone': 'telephone',
      'telnet': 'Reference to interactive sessions; RFC 4248',
      'tftp': 'Trivial File Transfer Protocol; RFC 3617',
      'tip': 'Transaction Internet Protocol; RFC 2371',
      'tn3270': 'Interactive 3270 emulation sessions',
      'tv': '',
      'urn': 'Uniform Resource Name; RFC 2141',
      'uuid': '',
      'vemmi': 'versatile multimedia interface; RFC 2122',
      'videotex': '',
      'view-source': 'displays HTML code that was generated with JavaScript',
      'wais': 'Wide Area Information Servers; RFC 4156',
      'whodp': '',
      'whois++': 'Distributed directory service.',
      'x-man-page': ('Opens man page in Terminal.app on OS X '
                     '(see macosxhints.com)'),
      'xmlrpc.beep': 'RFC 3529',
      'xmlrpc.beeps': 'RFC 3529',
      'z39.50r': 'Z39.50 Retrieval; RFC 2056',
      'z39.50s': 'Z39.50 Session; RFC 2056',}
| Python |
# $Id: examples.py 4800 2006-11-12 18:02:01Z goodger $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This module contains practical examples of Docutils client code.
Importing this module from client code is not recommended; its contents are
subject to change in future Docutils releases. Instead, it is recommended
that you copy and paste the parts you need into your own code, modifying as
necessary.
"""
from docutils import core, io
def html_parts(input_string, source_path=None, destination_path=None,
               input_encoding='unicode', doctitle=1, initial_header_level=1):
    """
    Given an input string, returns a dictionary of HTML document parts.

    Dictionary keys are the names of parts, and values are Unicode strings;
    encoding is up to the client.

    Parameters:

    - `input_string`: A multi-line text string; required.
    - `source_path`: Path to the source file or object.  Optional, but useful
      for diagnostic output (system messages).
    - `destination_path`: Path to the file or object which will receive the
      output; optional.  Used for determining relative paths (stylesheets,
      source links, etc.).
    - `input_encoding`: The encoding of `input_string`.  If it is an encoded
      8-bit string, provide the correct encoding.  If it is a Unicode string,
      use "unicode", the default.
    - `doctitle`: Disable the promotion of a lone top-level section title to
      document title (and subsequent section title to document subtitle
      promotion); enabled by default.
    - `initial_header_level`: The initial level for header elements (e.g. 1
      for "<h1>").
    """
    overrides = {'input_encoding': input_encoding,
                 'doctitle_xform': doctitle,
                 'initial_header_level': initial_header_level}
    # Delegate to the publisher machinery; 'html' selects the default
    # HTML writer.
    parts = core.publish_parts(
        source=input_string, source_path=source_path,
        destination_path=destination_path,
        writer_name='html', settings_overrides=overrides)
    return parts
def html_body(input_string, source_path=None, destination_path=None,
              input_encoding='unicode', output_encoding='unicode',
              doctitle=1, initial_header_level=1):
    """
    Given an input string, returns an HTML fragment as a string.

    The return value is the contents of the <body> element.

    Parameters (see `html_parts()` for the remainder):

    - `output_encoding`: The desired encoding of the output.  If a Unicode
      string is desired, use the default value of "unicode" .
    """
    parts = html_parts(
        input_string=input_string, source_path=source_path,
        destination_path=destination_path,
        input_encoding=input_encoding, doctitle=doctitle,
        initial_header_level=initial_header_level)
    body = parts['html_body']
    # Encode only when a concrete character encoding was requested.
    if output_encoding == 'unicode':
        return body
    return body.encode(output_encoding)
def internals(input_string, source_path=None, destination_path=None,
              input_encoding='unicode', settings_overrides=None):
    """
    Return the document tree and publisher, for exploring Docutils internals.

    Parameters: see `html_parts()`.
    """
    # Copy the caller's overrides so their dict is never mutated.
    if settings_overrides:
        overrides = settings_overrides.copy()
    else:
        overrides = {}
    overrides['input_encoding'] = input_encoding
    # The 'null' writer parses and transforms but emits no output.
    output, pub = core.publish_programmatically(
        source_class=io.StringInput, source=input_string,
        source_path=source_path,
        destination_class=io.NullOutput, destination=None,
        destination_path=destination_path,
        reader=None, reader_name='standalone',
        parser=None, parser_name='restructuredtext',
        writer=None, writer_name='null',
        settings=None, settings_spec=None, settings_overrides=overrides,
        config_section=None, enable_exit_status=None)
    return pub.writer.document, pub
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# string_template_compat.py: string.Template for Python <= 2.4
# =====================================================
# This is just an excerpt of the standard string module to provide backwards
# compatibility.
import re as _re
class _multimap:
    """Helper class for combining multiple mappings.

    Used by .{safe_,}substitute() to combine the mapping and keyword
    arguments.
    """

    def __init__(self, primary, secondary):
        self._primary = primary
        self._secondary = secondary

    def __getitem__(self, key):
        # The primary mapping wins; fall back to the secondary one.
        # A key missing from both raises KeyError (from the secondary).
        if key in self._primary:
            return self._primary[key]
        return self._secondary[key]
class _TemplateMetaclass(type):
    # Skeleton of the placeholder-matching regex; %(delim)s and %(id)s
    # are filled in from the class's `delimiter` and `idpattern`.
    pattern = r"""
    %(delim)s(?:
      (?P<escaped>%(delim)s) |   # Escape sequence of two delimiters
      (?P<named>%(id)s)      |   # delimiter and a Python identifier
      {(?P<braced>%(id)s)}   |   # delimiter and a braced identifier
      (?P<invalid>)              # Other ill-formed delimiter exprs
    )
    """

    def __init__(cls, name, bases, dct):
        super(_TemplateMetaclass, cls).__init__(name, bases, dct)
        if 'pattern' in dct:
            # The class supplied a complete pattern of its own.
            pattern = cls.pattern
        else:
            pattern = _TemplateMetaclass.pattern % {
                'delim' : _re.escape(cls.delimiter),
                'id' : cls.idpattern,
                }
        # VERBOSE makes the whitespace/comments in the pattern inert.
        cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
class Template:
    """A string class for supporting $-substitutions."""
    __metaclass__ = _TemplateMetaclass

    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'

    def __init__(self, template):
        self.template = template

    # Search for $$, $identifier, ${identifier}, and any bare $'s

    def _invalid(self, mo):
        # Compute and report the line/column of an ill-formed '$'
        # placeholder (the 'invalid' group of the pattern).
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))

    def substitute(self, *args, **kws):
        # At most one positional mapping is allowed; keyword arguments
        # take precedence over it (via _multimap).
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % (val,)
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)

    def safe_substitute(self, *args, **kws):
        # Like substitute(), but unknown or malformed placeholders are
        # left in place instead of raising.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % (mapping[named],)
                except KeyError:
                    return self.delimiter + named
            braced = mo.group('braced')
            if braced is not None:
                try:
                    return '%s' % (mapping[braced],)
                except KeyError:
                    return self.delimiter + '{' + braced + '}'
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return self.delimiter
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.