code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder@gmail.com (Jeff Scudder)'
import sys
import unittest
import getopt
import getpass
import module_test_runner
import run_data_tests
import run_service_tests
# Script entry point: run the full data-model test suite, then the service
# tests (which prompt for credentials/settings before running).
if __name__ == '__main__':
  run_data_tests.RunAllTests()
  run_service_tests.GetValuesForTestSettingsAndRunAllTests()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import coverage
import all_tests
import atom.core
import atom.http_core
import atom.mock_http_core
import atom.auth
import atom.client
import gdata.gauth
import gdata.client
import gdata.data
import gdata.blogger.data
import gdata.blogger.client
import gdata.spreadsheets.data
from gdata.test_config import settings
# Ensure that coverage tests execute the live requests to the servers, but
# allow use of cached server responses to speed up repeated runs.
settings.RUN_LIVE_TESTS = True
settings.CLEAR_CACHE = False


def suite():
  """Build a suite from the atom core tests only.

  NOTE(review): `atom_tests` is never imported in this module, so calling
  this function would raise NameError. The __main__ block below runs
  all_tests.suite() directly instead — confirm whether this function is
  vestigial and should be removed or repointed at all_tests.
  """
  return unittest.TestSuite((atom_tests.core_test.suite(),))
# Script entry point: run every test under the coverage tracer, then print
# a per-module coverage report.
if __name__ == '__main__':
  coverage.erase()
  coverage.start()
  unittest.TextTestRunner().run(all_tests.suite())
  coverage.stop()
  # NOTE(review): atom.data and gdata.core are not imported above; these
  # attribute lookups only work if another import has loaded those
  # submodules as a side effect — verify before relying on this report.
  coverage.report([atom.core, atom.http_core, atom.auth, atom.data,
      atom.mock_http_core, atom.client, gdata.gauth, gdata.client,
      gdata.core, gdata.data, gdata.blogger.data, gdata.blogger.client,
      gdata.spreadsheets.data])
| Python |
#!/usr/bin/python
# -*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import sys
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
from gdata import test_data
import gdata.test_config as conf
class AuthorTest(unittest.TestCase):
  """Round-trip serialization and extension-handling tests for atom.Author."""

  def setUp(self):
    self.author = atom.Author()

  def testEmptyAuthorShouldHaveEmptyExtensionsList(self):
    self.assert_(isinstance(self.author.extension_elements, list))
    self.assert_(len(self.author.extension_elements) == 0)

  def testNormalAuthorShouldHaveNoExtensionElements(self):
    self.author.name = atom.Name(text='Jeff Scudder')
    self.assert_(self.author.name.text == 'Jeff Scudder')
    self.assert_(len(self.author.extension_elements) == 0)
    new_author = atom.AuthorFromString(self.author.ToString())
    # NOTE(review): this length check (and the one after the second parse
    # below) inspects self.author rather than the freshly parsed new_author;
    # possibly new_author was intended — confirm before changing.
    self.assert_(len(self.author.extension_elements) == 0)
    # Unknown (non-Atom) child elements should survive a round trip.
    self.author.extension_elements.append(atom.ExtensionElement(
        'foo', text='bar'))
    self.assert_(len(self.author.extension_elements) == 1)
    self.assert_(self.author.name.text == 'Jeff Scudder')
    new_author = atom.AuthorFromString(self.author.ToString())
    self.assert_(len(self.author.extension_elements) == 1)
    self.assert_(new_author.name.text == 'Jeff Scudder')

  def testEmptyAuthorToAndFromStringShouldMatch(self):
    string_from_author = self.author.ToString()
    new_author = atom.AuthorFromString(string_from_author)
    string_from_new_author = new_author.ToString()
    self.assert_(string_from_author == string_from_new_author)

  def testAuthorWithNameToAndFromStringShouldMatch(self):
    self.author.name = atom.Name()
    self.author.name.text = 'Jeff Scudder'
    string_from_author = self.author.ToString()
    new_author = atom.AuthorFromString(string_from_author)
    string_from_new_author = new_author.ToString()
    self.assert_(string_from_author == string_from_new_author)
    self.assert_(self.author.name.text == new_author.name.text)

  def testExtensionElements(self):
    # Arbitrary XML attributes must also survive a round trip.
    self.author.extension_attributes['foo1'] = 'bar'
    self.author.extension_attributes['foo2'] = 'rab'
    self.assert_(self.author.extension_attributes['foo1'] == 'bar')
    self.assert_(self.author.extension_attributes['foo2'] == 'rab')
    new_author = atom.AuthorFromString(self.author.ToString())
    self.assert_(new_author.extension_attributes['foo1'] == 'bar')
    self.assert_(new_author.extension_attributes['foo2'] == 'rab')

  def testConvertFullAuthorToAndFromString(self):
    # Parses a canned author document from the shared test_data module.
    author = atom.AuthorFromString(test_data.TEST_AUTHOR)
    self.assert_(author.name.text == 'John Doe')
    self.assert_(author.email.text == 'johndoes@someemailadress.com')
    self.assert_(author.uri.text == 'http://www.google.com')
class EmailTest(unittest.TestCase):
  """Round-trip serialization checks for atom.Email."""

  def setUp(self):
    self.email = atom.Email()

  def testEmailToAndFromString(self):
    body = 'This is a test'
    self.email.text = body
    reparsed = atom.EmailFromString(self.email.ToString())
    # Text and any extension elements must survive the round trip.
    self.assert_(reparsed.text == self.email.text)
    self.assert_(reparsed.extension_elements ==
        self.email.extension_elements)
class NameTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Name."""

  def setUp(self):
    self.name = atom.Name()

  def testEmptyNameToAndFromStringShouldMatch(self):
    string_from_name = self.name.ToString()
    new_name = atom.NameFromString(string_from_name)
    string_from_new_name = new_name.ToString()
    self.assert_(string_from_name == string_from_new_name)

  def testText(self):
    # text defaults to None on a freshly constructed Name.
    self.assert_(self.name.text is None)
    self.name.text = 'Jeff Scudder'
    self.assert_(self.name.text == 'Jeff Scudder')
    new_name = atom.NameFromString(self.name.ToString())
    self.assert_(new_name.text == self.name.text)

  def testExtensionElements(self):
    # Unknown XML attributes should survive a serialization round trip.
    self.name.extension_attributes['foo'] = 'bar'
    self.assert_(self.name.extension_attributes['foo'] == 'bar')
    new_name = atom.NameFromString(self.name.ToString())
    self.assert_(new_name.extension_attributes['foo'] == 'bar')
class ExtensionElementTest(unittest.TestCase):
  """Tests parsing and round-tripping of arbitrary (non-Atom) XML trees."""

  def setUp(self):
    self.ee = atom.ExtensionElement('foo')

  def testEmptyEEShouldProduceEmptyString(self):
    # TODO(review): unimplemented — the name promises a check that an empty
    # ExtensionElement serializes to an empty string, but nothing is asserted.
    pass

  def testEEParsesTreeCorrectly(self):
    # EXTENSION_TREE is a nested multi-namespace document from test_data.
    deep_tree = atom.ExtensionElementFromString(test_data.EXTENSION_TREE)
    self.assert_(deep_tree.tag == 'feed')
    self.assert_(deep_tree.namespace == 'http://www.w3.org/2005/Atom')
    self.assert_(deep_tree.children[0].tag == 'author')
    self.assert_(deep_tree.children[0].namespace == 'http://www.google.com')
    self.assert_(deep_tree.children[0].children[0].tag == 'name')
    self.assert_(deep_tree.children[0].children[0].namespace ==
        'http://www.google.com')
    self.assert_(deep_tree.children[0].children[0].text.strip() == 'John Doe')
    self.assert_(deep_tree.children[0].children[0].children[0].text.strip() ==
        'Bar')
    foo = deep_tree.children[0].children[0].children[0]
    self.assert_(foo.tag == 'foo')
    self.assert_(foo.namespace == 'http://www.google.com')
    self.assert_(foo.attributes['up'] == 'down')
    self.assert_(foo.attributes['yes'] == 'no')
    self.assert_(foo.children == [])

  def testEEToAndFromStringShouldMatch(self):
    string_from_ee = self.ee.ToString()
    new_ee = atom.ExtensionElementFromString(string_from_ee)
    string_from_new_ee = new_ee.ToString()
    self.assert_(string_from_ee == string_from_new_ee)
    deep_tree = atom.ExtensionElementFromString(test_data.EXTENSION_TREE)
    string_from_deep_tree = deep_tree.ToString()
    new_deep_tree = atom.ExtensionElementFromString(string_from_deep_tree)
    string_from_new_deep_tree = new_deep_tree.ToString()
    self.assert_(string_from_deep_tree == string_from_new_deep_tree)
class LinkTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Link."""

  def setUp(self):
    self.link = atom.Link()

  def testLinkToAndFromString(self):
    self.link.href = 'test href'
    self.link.hreflang = 'english'
    self.link.type = 'text/html'
    self.link.extension_attributes['foo'] = 'bar'
    self.assert_(self.link.href == 'test href')
    self.assert_(self.link.hreflang == 'english')
    self.assert_(self.link.type == 'text/html')
    self.assert_(self.link.extension_attributes['foo'] == 'bar')
    new_link = atom.LinkFromString(self.link.ToString())
    self.assert_(self.link.href == new_link.href)
    self.assert_(self.link.type == new_link.type)
    self.assert_(self.link.hreflang == new_link.hreflang)
    self.assert_(self.link.extension_attributes['foo'] ==
        new_link.extension_attributes['foo'])

  def testLinkType(self):
    # The constructor exposes the XML 'type' attribute via the link_type
    # keyword, which is then readable as .type.
    test_link = atom.Link(link_type='text/html')
    self.assert_(test_link.type == 'text/html')
class GeneratorTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Generator."""

  def setUp(self):
    self.generator = atom.Generator()

  def testGeneratorToAndFromString(self):
    self.generator.uri = 'www.google.com'
    self.generator.version = '1.0'
    self.generator.extension_attributes['foo'] = 'bar'
    self.assert_(self.generator.uri == 'www.google.com')
    self.assert_(self.generator.version == '1.0')
    self.assert_(self.generator.extension_attributes['foo'] == 'bar')
    new_generator = atom.GeneratorFromString(self.generator.ToString())
    self.assert_(self.generator.uri == new_generator.uri)
    self.assert_(self.generator.version == new_generator.version)
    self.assert_(self.generator.extension_attributes['foo'] ==
        new_generator.extension_attributes['foo'])
class TitleTest(unittest.TestCase):
  """Round-trip serialization checks for atom.Title."""

  def setUp(self):
    self.title = atom.Title()

  def testTitleToAndFromString(self):
    # Use text that requires XML escaping to exercise the serializer.
    self.title.type = 'text'
    self.title.text = 'Less: <'
    self.assert_(self.title.type == 'text')
    self.assert_(self.title.text == 'Less: <')
    reparsed = atom.TitleFromString(self.title.ToString())
    self.assert_(reparsed.type == self.title.type)
    self.assert_(reparsed.text == self.title.text)
class SubtitleTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Subtitle."""

  def setUp(self):
    self.subtitle = atom.Subtitle()

  def testSubtitleToAndFromString(self):
    # Renamed from testTitleToAndFromString (copy-paste from TitleTest) so
    # the test name matches the class under test; unittest still discovers
    # it via the test* prefix. Text requires XML escaping of '&'.
    self.subtitle.type = 'text'
    self.subtitle.text = 'sub & title'
    self.assert_(self.subtitle.type == 'text')
    self.assert_(self.subtitle.text == 'sub & title')
    new_subtitle = atom.SubtitleFromString(self.subtitle.ToString())
    self.assert_(self.subtitle.type == new_subtitle.type)
    self.assert_(self.subtitle.text == new_subtitle.text)
class SummaryTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Summary."""

  def setUp(self):
    self.summary = atom.Summary()

  def testSummaryToAndFromString(self):
    # Renamed from testTitleToAndFromString (copy-paste from TitleTest) so
    # the test name matches the class under test; unittest still discovers
    # it via the test* prefix.
    self.summary.type = 'text'
    self.summary.text = 'Less: <'
    self.assert_(self.summary.type == 'text')
    self.assert_(self.summary.text == 'Less: <')
    new_summary = atom.SummaryFromString(self.summary.ToString())
    self.assert_(self.summary.type == new_summary.type)
    self.assert_(self.summary.text == new_summary.text)
class CategoryTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Category."""

  def setUp(self):
    self.category = atom.Category()

  def testCategoryToAndFromString(self):
    # term, scheme and label are the three Atom category attributes.
    self.category.term = 'x'
    self.category.scheme = 'y'
    self.category.label = 'z'
    self.assert_(self.category.term == 'x')
    self.assert_(self.category.scheme == 'y')
    self.assert_(self.category.label == 'z')
    new_category = atom.CategoryFromString(self.category.ToString())
    self.assert_(self.category.term == new_category.term)
    self.assert_(self.category.scheme == new_category.scheme)
    self.assert_(self.category.label == new_category.label)
class ContributorTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Contributor."""

  def setUp(self):
    self.contributor = atom.Contributor()

  def testContributorToAndFromString(self):
    self.contributor.name = atom.Name(text='J Scud')
    self.contributor.email = atom.Email(text='nobody@nowhere')
    self.contributor.uri = atom.Uri(text='http://www.google.com')
    self.assert_(self.contributor.name.text == 'J Scud')
    self.assert_(self.contributor.email.text == 'nobody@nowhere')
    self.assert_(self.contributor.uri.text == 'http://www.google.com')
    new_contributor = atom.ContributorFromString(self.contributor.ToString())
    self.assert_(self.contributor.name.text == new_contributor.name.text)
    self.assert_(self.contributor.email.text == new_contributor.email.text)
    self.assert_(self.contributor.uri.text == new_contributor.uri.text)
class IdTest(unittest.TestCase):
  """Round-trip serialization checks for atom.Id."""

  def setUp(self):
    self.my_id = atom.Id()

  def testIdToAndFromString(self):
    self.my_id.text = 'my nifty id'
    self.assert_(self.my_id.text == 'my nifty id')
    round_tripped = atom.IdFromString(self.my_id.ToString())
    self.assert_(round_tripped.text == self.my_id.text)
class IconTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Icon."""

  def setUp(self):
    self.icon = atom.Icon()

  def testIconToAndFromString(self):
    self.icon.text = 'my picture'
    self.assert_(self.icon.text == 'my picture')
    # Uses str() rather than ToString() — exercises the __str__ path.
    new_icon = atom.IconFromString(str(self.icon))
    self.assert_(self.icon.text == new_icon.text)
class LogoTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Logo."""

  def setUp(self):
    self.logo = atom.Logo()

  def testLogoToAndFromString(self):
    self.logo.text = 'my logo'
    self.assert_(self.logo.text == 'my logo')
    new_logo = atom.LogoFromString(self.logo.ToString())
    self.assert_(self.logo.text == new_logo.text)
class RightsTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Rights."""

  def setUp(self):
    self.rights = atom.Rights()

  def testRightsToAndFromString(self):
    # Renamed from testContributorToAndFromString (copy-paste from
    # ContributorTest) so the test name matches the class under test;
    # unittest still discovers it via the test* prefix.
    self.rights.text = 'you have the right to remain silent'
    self.rights.type = 'text'
    self.assert_(self.rights.text == 'you have the right to remain silent')
    self.assert_(self.rights.type == 'text')
    new_rights = atom.RightsFromString(self.rights.ToString())
    self.assert_(self.rights.text == new_rights.text)
    self.assert_(self.rights.type == new_rights.type)
class UpdatedTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Updated."""

  def setUp(self):
    self.updated = atom.Updated()

  def testUpdatedToAndFromString(self):
    # The text is treated as opaque — no timestamp validation is asserted.
    self.updated.text = 'my time'
    self.assert_(self.updated.text == 'my time')
    new_updated = atom.UpdatedFromString(self.updated.ToString())
    self.assert_(self.updated.text == new_updated.text)
class PublishedTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Published."""

  def setUp(self):
    self.published = atom.Published()

  def testPublishedToAndFromString(self):
    self.published.text = 'pub time'
    self.assert_(self.published.text == 'pub time')
    new_published = atom.PublishedFromString(self.published.ToString())
    self.assert_(self.published.text == new_published.text)
class FeedEntryParentTest(unittest.TestCase):
  """The test accesses hidden methods in atom.FeedEntryParent"""

  def testConvertToAndFromElementTree(self):
    # Use entry because FeedEntryParent doesn't have a tag or namespace.
    original = atom.Entry()
    # NOTE(review): local name `copy` shadows the stdlib module name.
    copy = atom.FeedEntryParent()
    original.author.append(atom.Author(name=atom.Name(text='J Scud')))
    self.assert_(original.author[0].name.text == 'J Scud')
    self.assert_(copy.author == [])
    original.id = atom.Id(text='test id')
    self.assert_(original.id.text == 'test id')
    self.assert_(copy.id is None)
    # _HarvestElementTree populates members from the tree produced by
    # _ToElementTree (both private atom helpers under test here).
    copy._HarvestElementTree(original._ToElementTree())
    self.assert_(original.author[0].name.text == copy.author[0].name.text)
    self.assert_(original.id.text == copy.id.text)
class EntryTest(unittest.TestCase):
  """Serialization and parsing tests for atom.Entry."""

  def testConvertToAndFromString(self):
    entry = atom.Entry()
    entry.author.append(atom.Author(name=atom.Name(text='js')))
    entry.title = atom.Title(text='my test entry')
    self.assert_(entry.author[0].name.text == 'js')
    self.assert_(entry.title.text == 'my test entry')
    new_entry = atom.EntryFromString(entry.ToString())
    self.assert_(new_entry.author[0].name.text == 'js')
    self.assert_(new_entry.title.text == 'my test entry')

  def testEntryCorrectlyConvertsActualData(self):
    entry = atom.EntryFromString(test_data.XML_ENTRY_1)
    self.assert_(entry.category[0].scheme ==
        'http://base.google.com/categories/itemtypes')
    self.assert_(entry.category[0].term == 'products')
    # Whitespace inside the id element is expected to be preserved verbatim.
    self.assert_(entry.id.text == ' http://www.google.com/test/id/url ')
    self.assert_(entry.title.text == 'Testing 2000 series laptop')
    self.assert_(entry.title.type == 'text')
    self.assert_(entry.content.type == 'xhtml')
    #TODO check all other values for the test entry

  def testAppControl(self):
    # The draft flag and extra child elements on the control element must
    # both be captured when parsing.
    entry = atom.EntryFromString(test_data.TEST_BASE_ENTRY)
    self.assertEquals(entry.control.draft.text, 'yes')
    self.assertEquals(len(entry.control.extension_elements), 1)
    self.assertEquals(entry.control.extension_elements[0].tag, 'disapproved')
class ControlTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Control."""

  def testConvertToAndFromString(self):
    control = atom.Control()
    control.text = 'some text'
    control.draft = atom.Draft(text='yes')
    self.assertEquals(control.draft.text, 'yes')
    self.assertEquals(control.text, 'some text')
    self.assertEquals(isinstance(control.draft, atom.Draft), True)
    new_control = atom.ControlFromString(str(control))
    self.assertEquals(control.draft.text, new_control.draft.text)
    self.assertEquals(control.text, new_control.text)
    self.assertEquals(isinstance(new_control.draft, atom.Draft), True)
class DraftTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Draft."""

  def testConvertToAndFromString(self):
    draft = atom.Draft()
    draft.text = 'maybe'
    draft.extension_attributes['foo'] = 'bar'
    self.assertEquals(draft.text, 'maybe')
    self.assertEquals(draft.extension_attributes['foo'], 'bar')
    new_draft = atom.DraftFromString(str(draft))
    self.assertEquals(draft.text, new_draft.text)
    self.assertEquals(draft.extension_attributes['foo'],
        new_draft.extension_attributes['foo'])
class SourceTest(unittest.TestCase):
  """Round-trip serialization tests for atom.Source."""

  def testConvertToAndFromString(self):
    source = atom.Source()
    source.author.append(atom.Author(name=atom.Name(text='js')))
    source.title = atom.Title(text='my test source')
    source.generator = atom.Generator(text='gen')
    self.assert_(source.author[0].name.text == 'js')
    self.assert_(source.title.text == 'my test source')
    self.assert_(source.generator.text == 'gen')
    new_source = atom.SourceFromString(source.ToString())
    self.assert_(new_source.author[0].name.text == 'js')
    self.assert_(new_source.title.text == 'my test source')
    self.assert_(new_source.generator.text == 'gen')
class FeedTest(unittest.TestCase):
  """Tests atom.Feed serialization and entry document-order preservation."""

  def testConvertToAndFromString(self):
    feed = atom.Feed()
    feed.author.append(atom.Author(name=atom.Name(text='js')))
    feed.title = atom.Title(text='my test source')
    feed.generator = atom.Generator(text='gen')
    feed.entry.append(atom.Entry(author=[atom.Author(name=atom.Name(
        text='entry author'))]))
    self.assert_(feed.author[0].name.text == 'js')
    self.assert_(feed.title.text == 'my test source')
    self.assert_(feed.generator.text == 'gen')
    self.assert_(feed.entry[0].author[0].name.text == 'entry author')
    new_feed = atom.FeedFromString(feed.ToString())
    self.assert_(new_feed.author[0].name.text == 'js')
    self.assert_(new_feed.title.text == 'my test source')
    self.assert_(new_feed.generator.text == 'gen')
    self.assert_(new_feed.entry[0].author[0].name.text == 'entry author')

  def testPreserveEntryOrder(self):
    # Entries are interleaved with other feed children to prove that their
    # relative order survives parsing and re-serialization.
    test_xml = (
        '<feed xmlns="http://www.w3.org/2005/Atom">'
        '<entry><id>0</id></entry>'
        '<entry><id>1</id></entry>'
        '<title>Testing Order</title>'
        '<entry><id>2</id></entry>'
        '<entry><id>3</id></entry>'
        '<entry><id>4</id></entry>'
        '<entry><id>5</id></entry>'
        '<entry><id>6</id></entry>'
        '<entry><id>7</id></entry>'
        '<author/>'
        '<entry><id>8</id></entry>'
        '<id>feed_id</id>'
        '<entry><id>9</id></entry>'
        '</feed>')
    feed = atom.FeedFromString(test_xml)
    # xrange: this module targets Python 2.
    for i in xrange(10):
      self.assert_(feed.entry[i].id.text == str(i))
    feed = atom.FeedFromString(feed.ToString())
    for i in xrange(10):
      self.assert_(feed.entry[i].id.text == str(i))
    # Swapping entries in the list should be reflected in the output order.
    temp = feed.entry[3]
    feed.entry[3] = feed.entry[4]
    feed.entry[4] = temp
    self.assert_(feed.entry[2].id.text == '2')
    self.assert_(feed.entry[3].id.text == '4')
    self.assert_(feed.entry[4].id.text == '3')
    self.assert_(feed.entry[5].id.text == '5')
    feed = atom.FeedFromString(feed.ToString())
    self.assert_(feed.entry[2].id.text == '2')
    self.assert_(feed.entry[3].id.text == '4')
    self.assert_(feed.entry[4].id.text == '3')
    self.assert_(feed.entry[5].id.text == '5')
class ContentEntryParentTest(unittest.TestCase):
  """Round-trip tests for atom.Content.

  (The original docstring mentioned atom.FeedEntryParent — apparently
  copied from the class above; this class only exercises atom.Content.)
  """

  def setUp(self):
    self.content = atom.Content()

  def testConvertToAndFromElementTree(self):
    self.content.text = 'my content'
    self.content.type = 'text'
    self.content.src = 'my source'
    self.assert_(self.content.text == 'my content')
    self.assert_(self.content.type == 'text')
    self.assert_(self.content.src == 'my source')
    new_content = atom.ContentFromString(self.content.ToString())
    self.assert_(self.content.text == new_content.text)
    self.assert_(self.content.type == new_content.type)
    self.assert_(self.content.src == new_content.src)

  def testContentConstructorSetsSrc(self):
    new_content = atom.Content(src='abcd')
    self.assertEquals(new_content.src, 'abcd')
class PreserveUnkownElementTest(unittest.TestCase):
  """Tests correct preservation of XML elements which are non Atom.

  (The historical 'Unkown' spelling in the class name is kept because
  other code, e.g. suite(), refers to this class by name.)
  """

  def setUp(self):
    self.feed = atom.FeedFromString(test_data.GBASE_ATTRIBUTE_FEED)

  def testCaptureOpenSearchElements(self):
    # OpenSearch elements live outside the Atom namespace and must be
    # retrievable via FindExtensions by tag or by namespace.
    self.assertEquals(self.feed.FindExtensions('totalResults')[0].tag,
        'totalResults')
    self.assertEquals(self.feed.FindExtensions('totalResults')[0].namespace,
        'http://a9.com/-/spec/opensearchrss/1.0/')
    open_search_extensions = self.feed.FindExtensions(
        namespace='http://a9.com/-/spec/opensearchrss/1.0/')
    self.assertEquals(len(open_search_extensions), 3)
    for element in open_search_extensions:
      self.assertEquals(element.namespace,
          'http://a9.com/-/spec/opensearchrss/1.0/')

  def testCaptureMetaElements(self):
    meta_elements = self.feed.entry[0].FindExtensions(
        namespace='http://base.google.com/ns-metadata/1.0')
    self.assertEquals(len(meta_elements), 1)
    self.assertEquals(meta_elements[0].attributes['count'], '4416629')
    self.assertEquals(len(meta_elements[0].children), 10)

  def testCaptureMetaChildElements(self):
    meta_elements = self.feed.entry[0].FindExtensions(
        namespace='http://base.google.com/ns-metadata/1.0')
    meta_children = meta_elements[0].FindChildren(
        namespace='http://base.google.com/ns-metadata/1.0')
    self.assertEquals(len(meta_children), 10)
    for child in meta_children:
      self.assertEquals(child.tag, 'value')
class LinkFinderTest(unittest.TestCase):
  """Tests the rel-based link lookup helpers on a parsed entry."""

  def setUp(self):
    self.entry = atom.EntryFromString(test_data.XML_ENTRY_1)

  def testLinkFinderGetsLicenseLink(self):
    self.assertEquals(isinstance(self.entry.GetLicenseLink(), atom.Link),
        True)
    self.assertEquals(self.entry.GetLicenseLink().href,
        'http://creativecommons.org/licenses/by-nc/2.5/rdf')
    self.assertEquals(self.entry.GetLicenseLink().rel, 'license')

  def testLinkFinderGetsAlternateLink(self):
    self.assertEquals(isinstance(self.entry.GetAlternateLink(), atom.Link),
        True)
    self.assertEquals(self.entry.GetAlternateLink().href,
        'http://www.provider-host.com/123456789')
    self.assertEquals(self.entry.GetAlternateLink().rel, 'alternate')
class AtomBaseTest(unittest.TestCase):
  """Tests extension-element conversion provided by the Atom base class."""

  def testAtomBaseConvertsExtensions(self):
    # Using Id because it adds no additional members.
    atom_base = atom.Id()
    extension_child = atom.ExtensionElement('foo', namespace='http://ns0.com')
    extension_grandchild = atom.ExtensionElement('bar',
        namespace='http://ns0.com')
    extension_child.children.append(extension_grandchild)
    atom_base.extension_elements.append(extension_child)
    self.assertEquals(len(atom_base.extension_elements), 1)
    self.assertEquals(len(atom_base.extension_elements[0].children), 1)
    self.assertEquals(atom_base.extension_elements[0].tag, 'foo')
    self.assertEquals(atom_base.extension_elements[0].children[0].tag, 'bar')
    # ElementTree addresses namespaced tags with {namespace}tag syntax.
    element_tree = atom_base._ToElementTree()
    self.assert_(element_tree.find('{http://ns0.com}foo') is not None)
    self.assert_(element_tree.find('{http://ns0.com}foo').find(
        '{http://ns0.com}bar') is not None)
class UtfParsingTest(unittest.TestCase):
  """Tests string-encoding behavior when parsing non-ASCII entry data."""

  def setUp(self):
    # Entry whose title (text and type attribute) is Greek "alpha lambda
    # phi alpha" in unicode escapes.
    self.test_xml = u"""<?xml version="1.0" encoding="utf-8"?>
<entry xmlns='http://www.w3.org/2005/Atom'>
<id>http://www.google.com/test/id/url</id>
<title type='\u03B1\u03BB\u03C6\u03B1'>\u03B1\u03BB\u03C6\u03B1</title>
</entry>"""

  def testMemberStringEncoding(self):
    atom_entry = atom.EntryFromString(self.test_xml)
    #self.assertEqual(atom_entry.title.type.encode('utf-8'),
    # u'\u03B1\u03BB\u03C6\u03B1'.encode('utf-8'))
    #self.assertEqual(atom_entry.title.text.encode('utf-8'),
    # u'\u03B1\u03BB\u03C6\u03B1'.encode('utf-8'))
    # Setting object members to unicode strings is supported even if
    # MEMBER_STRING_ENCODING is set 'utf-8' (should it be?)
    atom_entry.title.type = u'\u03B1\u03BB\u03C6\u03B1'
    xml = atom_entry.ToString()
    self.assert_(u'\u03B1\u03BB\u03C6\u03B1'.encode('utf-8') in xml)
    # Make sure that we can use plain text when MEMBER_STRING_ENCODING is utf8
    atom_entry.title.type = "plain text"
    atom_entry.title.text = "more text"
    xml = atom_entry.ToString()
    self.assert_("plain text" in xml)
    self.assert_("more text" in xml)
    # Test something else than utf-8.
    # NOTE(review): mutating the module-global MEMBER_STRING_ENCODING is
    # never undone, so it leaks into any test that runs afterwards.
    atom.MEMBER_STRING_ENCODING = 'iso8859_7'
    atom_entry = atom.EntryFromString(self.test_xml)
    self.assert_(atom_entry.title.type == u'\u03B1\u03BB\u03C6\u03B1'.encode(
        'iso8859_7'))
    self.assert_(atom_entry.title.text == u'\u03B1\u03BB\u03C6\u03B1'.encode(
        'iso8859_7'))
    # Test using unicode strings directly for object members.
    # (`unicode` is the Python 2 builtin.)
    atom.MEMBER_STRING_ENCODING = unicode
    atom_entry = atom.EntryFromString(self.test_xml)
    self.assert_(atom_entry.title.type == u'\u03B1\u03BB\u03C6\u03B1')
    self.assert_(atom_entry.title.text == u'\u03B1\u03BB\u03C6\u03B1')
    # Make sure that we can use plain text when MEMBER_STRING_ENCODING is
    # unicode
    atom_entry.title.type = "plain text"
    atom_entry.title.text = "more text"
    xml = atom_entry.ToString()
    self.assert_("plain text" in xml)
    self.assert_("more text" in xml)

  def testConvertExampleXML(self):
    try:
      entry = atom.CreateClassFromXMLString(atom.Entry,
          test_data.GBASE_STRING_ENCODING_ENTRY)
    except UnicodeDecodeError:
      self.fail('Error when converting XML')
class DeprecationDecoratorTest(unittest.TestCase):
  """Tests atom.deprecated, which wraps callables with a warning."""

  def testDeprecationWarning(self):
    def to_deprecate():
      return 5
    # func_name is the Python 2 spelling of a function's __name__.
    self.assertEqual(to_deprecate.func_name, 'to_deprecate')
    deprecated = atom.deprecated('test')(to_deprecate)
    self.assertNotEqual(to_deprecate, deprecated)
    # After decorating a function as deprecated, the function name should
    # still be the name of the original function.
    self.assertEqual(deprecated.func_name, 'to_deprecate')
    #@atom.deprecated()
    def also_deprecated():
      return 6
    # Applied manually (instead of with @) to keep the undecorated original
    # unavailable, mirroring real decorator usage.
    also_deprecated = atom.deprecated()(also_deprecated)
    self.assertEqual(also_deprecated.func_name, 'also_deprecated')
def suite():
  """Build a suite containing every TestCase defined in this module.

  Fixed: CategoryTest, ContributorTest, ControlTest, DraftTest and
  SourceTest are defined above but were missing from the original list,
  so their tests silently never ran under the shared runner.
  """
  return conf.build_suite([AuthorTest, EmailTest, NameTest,
      ExtensionElementTest, LinkTest, GeneratorTest, TitleTest, SubtitleTest,
      SummaryTest, CategoryTest, ContributorTest, IdTest, IconTest, LogoTest,
      RightsTest, UpdatedTest, PublishedTest, FeedEntryParentTest, EntryTest,
      ControlTest, DraftTest, SourceTest, ContentEntryParentTest,
      PreserveUnkownElementTest, FeedTest, LinkFinderTest, AtomBaseTest,
      UtfParsingTest, DeprecationDecoratorTest])
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.url
import gdata.test_config as conf
class UrlTest(unittest.TestCase):
  """Tests parsing, serialization, and comparison for atom.url.Url."""

  def testParseUrl(self):
    # Full absolute URL with no explicit port.
    url = atom.url.parse_url('http://www.google.com/calendar/feeds')
    self.assert_(url.protocol == 'http')
    self.assert_(url.port is None)
    self.assert_(url.host == 'www.google.com')
    self.assert_(url.path == '/calendar/feeds')
    self.assert_(url.params == {})
    # An explicit port is kept as a string.
    url = atom.url.parse_url('http://example.com:6091/calendar/feeds')
    self.assert_(url.protocol == 'http')
    self.assert_(url.host == 'example.com')
    self.assert_(url.port == '6091')
    self.assert_(url.path == '/calendar/feeds')
    self.assert_(url.params == {})
    # Relative URL with a query parameter.
    url = atom.url.parse_url('/calendar/feeds?foo=bar')
    self.assert_(url.protocol is None)
    self.assert_(url.host is None)
    self.assert_(url.path == '/calendar/feeds')
    self.assert_(len(url.params.keys()) == 1)
    self.assert_('foo' in url.params)
    self.assert_(url.params['foo'] == 'bar')
    # Parameter names and values are URL-decoded ('+' -> space, %3D -> '=').
    url = atom.url.parse_url('/calendar/feeds?my+foo=bar%3Dx')
    self.assert_(len(url.params.keys()) == 1)
    self.assert_('my foo' in url.params)
    self.assert_(url.params['my foo'] == 'bar=x')

  def testUrlToString(self):
    url = atom.url.Url(port=80)
    url.host = 'example.com'
    # Bug fix: this used to read self.assert_(str(url), '//example.com:80'),
    # which passed the expected string as the assertion *message*, so the
    # check could never fail. Assert the equality explicitly; if this now
    # fails, the old expectation never actually held.
    self.assertEquals(str(url), '//example.com:80')
    url = atom.url.Url(protocol='http', host='example.com', path='/feed')
    url.params['has spaces'] = 'sneaky=values?&!'
    self.assert_(url.to_string() == (
        'http://example.com/feed?has+spaces=sneaky%3Dvalues%3F%26%21'))

  def testGetRequestUri(self):
    url = atom.url.Url(protocol='http', host='example.com', path='/feed')
    url.params['has spaces'] = 'sneaky=values?&!'
    # The request URI omits protocol and host.
    self.assert_(url.get_request_uri() == (
        '/feed?has+spaces=sneaky%3Dvalues%3F%26%21'))
    self.assert_(url.get_param_string() == (
        'has+spaces=sneaky%3Dvalues%3F%26%21'))

  def testComparisons(self):
    # Renamed from testComparistons (typo). url1 and url2 spell the same
    # http URL differently (param order, default port) yet compare equal.
    url1 = atom.url.Url(protocol='http', host='example.com', path='/feed',
        params={'x': '1', 'y': '2'})
    url2 = atom.url.Url(host='example.com', port=80, path='/feed',
        params={'y': '2', 'x': '1'})
    self.assertEquals(url1, url2)
    url3 = atom.url.Url(host='example.com', port=81, path='/feed',
        params={'x': '1', 'y': '2'})
    self.assert_(url1 != url3)
    self.assert_(url2 != url3)
    url4 = atom.url.Url(protocol='ftp', host='example.com', path='/feed',
        params={'x': '1', 'y': '2'})
    self.assert_(url1 != url4)
    self.assert_(url2 != url4)
    self.assert_(url3 != url4)
def suite():
  """Return the suite of URL tests for the shared test runner."""
  test_classes = [UrlTest]
  return conf.build_suite(test_classes)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.mock_http
import atom.http
class MockHttpClientUnitTest(unittest.TestCase):
  """Tests record/replay behavior of atom.mock_http.MockHttpClient."""

  def setUp(self):
    self.client = atom.mock_http.MockHttpClient()

  def testRespondToGet(self):
    # Renamed from testRepondToGet (typo); unittest still discovers it via
    # the test* prefix.
    # NOTE(review): atom.http_interface is not imported here directly —
    # this relies on `import atom.http` loading it as a side effect;
    # confirm, or add an explicit `import atom.http_interface`.
    mock_response = atom.http_interface.HttpResponse(body='Hooray!',
        status=200, reason='OK')
    self.client.add_response(mock_response, 'GET',
        'http://example.com/hooray')
    response = self.client.request('GET', 'http://example.com/hooray')
    self.assertEquals(len(self.client.recordings), 1)
    self.assertEquals(response.status, 200)
    self.assertEquals(response.read(), 'Hooray!')

  def testRecordResponse(self):
    # Turn on pass-through record mode.
    # NOTE(review): this performs a live network request to google.com.
    self.client.real_client = atom.http.ProxiedHttpClient()
    live_response = self.client.request('GET',
        'http://www.google.com/base/feeds/snippets?max-results=1')
    live_response_body = live_response.read()
    self.assertEquals(live_response.status, 200)
    self.assertEquals(live_response_body.startswith('<?xml'), True)
    # Requery for the now canned data.
    self.client.real_client = None
    canned_response = self.client.request('GET',
        'http://www.google.com/base/feeds/snippets?max-results=1')
    # The canned response should be the stored response.
    canned_response_body = canned_response.read()
    self.assertEquals(canned_response.status, 200)
    self.assertEquals(canned_response_body, live_response_body)

  def testUnrecordedRequest(self):
    # A request with no recording and no real client must raise.
    try:
      self.client.request('POST', 'http://example.org')
      self.fail()
    except atom.mock_http.NoRecordingFound:
      pass
def suite():
  """Build a TestSuite holding every test case in this module."""
  mock_client_tests = unittest.makeSuite(MockHttpClientUnitTest, 'test')
  return unittest.TestSuite((mock_client_tests,))
# Allow this test module to be run directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom.core
import gdata.test_config as conf
# Fixture documents used by the parser tests below.
# SAMPLE_XML mixes a default namespace (.../xml/1) with a prefixed one
# (.../xml/2) so namespace-aware lookups can be exercised.
SAMPLE_XML = ('<outer xmlns="http://example.com/xml/1" '
    'xmlns:two="http://example.com/xml/2">'
    '<inner x="123"/>'
    '<inner x="234" y="abc"/>'
    '<inner>'
    '<two:nested>Some Test</two:nested>'
    '<nested>Different Namespace</nested>'
    '</inner>'
    '<other two:z="true"></other>'
    '</outer>')
# Document with no namespace declarations at all.
NO_NAMESPACE_XML = ('<foo bar="123"><baz>Baz Text!</baz></foo>')
# Version 1 and version 2 flavors of an equivalent document.
# NOTE(review): the <same> element in both strings is opened but never
# closed, so V1_XML and V2_XML are not well-formed XML — confirm whether
# any test relies on that before repairing the fixtures.
V1_XML = ('<able xmlns="http://example.com/1" '
    'xmlns:ex="http://example.com/ex/1">'
    '<baker foo="42"/>'
    '<ex:charlie>Greetings!</ex:charlie>'
    '<same xmlns="http://example.com/s" x="true">'
    '</able>')
V2_XML = ('<alpha xmlns="http://example.com/2" '
    'xmlns:ex="http://example.com/ex/2">'
    '<bravo bar="42"/>'
    '<ex:charlie>Greetings!</ex:charlie>'
    '<same xmlns="http://example.com/s" x="true">'
    '</alpha>')
class Child(atom.core.XmlElement):
  """Element whose qname differs between API versions 1 and 2."""
  _qname = ('{http://example.com/1}child', '{http://example.com/2}child')
class Foo(atom.core.XmlElement):
  """Element with an unqualified (namespace-free) tag."""
  _qname = 'foo'
class Example(atom.core.XmlElement):
  """Element declaring a single child, a repeated child, and attributes."""
  _qname = '{http://example.com}foo'
  child = Child  # single nested Child element
  foos = [Foo]  # repeated Foo elements (list declaration)
  tag = 'tag'  # plain XML attribute
  versioned_attr = ('attr', '{http://new_ns}attr')  # attribute qname varies by version
# Example XmlElement subclass declarations.
class Inner(atom.core.XmlElement):
  """Maps <inner> from SAMPLE_XML; exposes its 'x' attribute as my_x."""
  _qname = '{http://example.com/xml/1}inner'
  my_x = 'x'
class Outer(atom.core.XmlElement):
  """Maps <outer> from SAMPLE_XML; collects the repeated Inner children."""
  _qname = '{http://example.com/xml/1}outer'
  innards = [Inner]
class XmlElementTest(unittest.TestCase):
  """Tests qname resolution, parse rules, and round-trip serialization
  for atom.core.XmlElement and its declarative subclasses above."""

  def testGetQName(self):
    """_get_qname returns the version-appropriate qname for a class."""
    class Unversioned(atom.core.XmlElement):
      _qname = '{http://example.com}foo'
    class Versioned(atom.core.XmlElement):
      _qname = ('{http://example.com/1}foo', '{http://example.com/2}foo')
    self.assert_(
        atom.core._get_qname(Unversioned, 1) == '{http://example.com}foo')
    self.assert_(
        atom.core._get_qname(Unversioned, 2) == '{http://example.com}foo')
    self.assert_(
        atom.core._get_qname(Versioned, 1) == '{http://example.com/1}foo')
    self.assert_(
        atom.core._get_qname(Versioned, 2) == '{http://example.com/2}foo')

  def testConstructor(self):
    """A bare Example starts with None scalars and an empty repeated list."""
    e = Example()
    self.assert_(e.child is None)
    self.assert_(e.tag is None)
    self.assert_(e.versioned_attr is None)
    self.assert_(e.foos == [])
    self.assert_(e.text is None)

  def testGetRules(self):
    """_get_rules builds (qname, element-map, attribute-map) per version."""
    rules1 = Example._get_rules(1)
    self.assert_(rules1[0] == '{http://example.com}foo')
    self.assert_(rules1[1]['{http://example.com/1}child'] == ('child', Child,
        False))
    self.assert_(rules1[1]['foo'] == ('foos', Foo, True))
    self.assert_(rules1[2]['tag'] == 'tag')
    self.assert_(rules1[2]['attr'] == 'versioned_attr')
    # Check to make sure we don't recalculate the rules.
    self.assert_(rules1 == Example._get_rules(1))
    rules2 = Example._get_rules(2)
    self.assert_(rules2[0] == '{http://example.com}foo')
    self.assert_(rules2[1]['{http://example.com/2}child'] == ('child', Child,
        False))
    self.assert_(rules2[1]['foo'] == ('foos', Foo, True))
    self.assert_(rules2[2]['tag'] == 'tag')
    self.assert_(rules2[2]['{http://new_ns}attr'] == 'versioned_attr')

  def testGetElements(self):
    """get_elements filters by tag, namespace, and version; declared and
    undeclared (_other_elements) children are both searched."""
    e = Example()
    e.child = Child()
    e.child.text = 'child text'
    e.foos.append(Foo())
    e.foos[0].text = 'foo1'
    e.foos.append(Foo())
    e.foos[1].text = 'foo2'
    e._other_elements.append(atom.core.XmlElement())
    e._other_elements[0]._qname = 'bar'
    e._other_elements[0].text = 'other1'
    e._other_elements.append(atom.core.XmlElement())
    e._other_elements[1]._qname = 'child'
    e._other_elements[1].text = 'other2'
    self.contains_expected_elements(e.get_elements(),
        ['foo1', 'foo2', 'child text', 'other1', 'other2'])
    self.contains_expected_elements(e.get_elements('child'),
        ['child text', 'other2'])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/1'), ['child text'])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/2'), [])
    # The same lookup succeeds under version 2, where Child's qname uses
    # the .../2 namespace.
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/2', 2), ['child text'])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/1', 2), [])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/2', 3), ['child text'])
    self.contains_expected_elements(e.get_elements('bar'), ['other1'])
    self.contains_expected_elements(e.get_elements('bar', version=2),
        ['other1'])
    self.contains_expected_elements(e.get_elements('bar', version=3),
        ['other1'])

  def contains_expected_elements(self, elements, expected_texts):
    """Helper: elements and expected_texts must agree in size, and every
    element's text must be one of the expected strings."""
    self.assert_(len(elements) == len(expected_texts))
    for element in elements:
      self.assert_(element.text in expected_texts)

  def testConstructorKwargs(self):
    """Keyword args populate declared members; unknown kwargs are dropped."""
    e = Example('hello', child=Child('world'), versioned_attr='1')
    self.assert_(e.text == 'hello')
    self.assert_(e.child.text == 'world')
    self.assert_(e.versioned_attr == '1')
    self.assert_(e.foos == [])
    self.assert_(e.tag is None)
    e = Example(foos=[Foo('1', ignored=1), Foo(text='2')], tag='ok')
    self.assert_(e.text is None)
    self.assert_(e.child is None)
    self.assert_(e.versioned_attr is None)
    self.assert_(len(e.foos) == 2)
    self.assert_(e.foos[0].text == '1')
    self.assert_(e.foos[1].text == '2')
    # The unrecognized 'ignored' kwarg must not become an instance member.
    self.assert_('ignored' not in e.foos[0].__dict__)
    self.assert_(e.tag == 'ok')

  def testParseBasicXmlElement(self):
    """Parsing into the generic XmlElement preserves children/attributes."""
    element = atom.core.xml_element_from_string(SAMPLE_XML,
        atom.core.XmlElement)
    inners = element.get_elements('inner')
    self.assert_(len(inners) == 3)
    self.assert_(inners[0].get_attributes('x')[0].value == '123')
    self.assert_(inners[0].get_attributes('y') == [])
    self.assert_(inners[1].get_attributes('x')[0].value == '234')
    self.assert_(inners[1].get_attributes('y')[0].value == 'abc')
    self.assert_(inners[2].get_attributes('x') == [])
    inners = element.get_elements('inner', 'http://example.com/xml/1')
    self.assert_(len(inners) == 3)
    inners = element.get_elements(None, 'http://example.com/xml/1')
    self.assert_(len(inners) == 4)
    inners = element.get_elements()
    self.assert_(len(inners) == 4)
    inners = element.get_elements('other')
    self.assert_(len(inners) == 1)
    self.assert_(inners[0].get_attributes(
        'z', 'http://example.com/xml/2')[0].value == 'true')
    inners = element.get_elements('missing')
    self.assert_(len(inners) == 0)

  def testBasicXmlElementPreservesMarkup(self):
    """Generic parse + to_string must round-trip to a similar tree."""
    element = atom.core.xml_element_from_string(SAMPLE_XML,
        atom.core.XmlElement)
    tree1 = ElementTree.fromstring(SAMPLE_XML)
    tree2 = ElementTree.fromstring(element.to_string())
    self.assert_trees_similar(tree1, tree2)

  def testSchemaParse(self):
    """Declared repeated members collect matching children on parse."""
    outer = atom.core.xml_element_from_string(SAMPLE_XML, Outer)
    self.assert_(isinstance(outer.innards, list))
    self.assert_(len(outer.innards) == 3)
    self.assert_(outer.innards[0].my_x == '123')

  def testSchemaParsePreservesMarkup(self):
    """Schema-based parse + to_string also round-trips the attributes."""
    outer = atom.core.xml_element_from_string(SAMPLE_XML, Outer)
    tree1 = ElementTree.fromstring(SAMPLE_XML)
    tree2 = ElementTree.fromstring(outer.to_string())
    self.assert_trees_similar(tree1, tree2)
    found_x_and_y = False
    found_x_123 = False
    child = tree1.find('{http://example.com/xml/1}inner')
    matching_children = tree2.findall(child.tag)
    for match in matching_children:
      if 'y' in match.attrib and match.attrib['y'] == 'abc':
        if match.attrib['x'] == '234':
          found_x_and_y = True
        self.assert_(match.attrib['x'] == '234')
      if 'x' in match.attrib and match.attrib['x'] == '123':
        self.assert_('y' not in match.attrib)
        found_x_123 = True
    self.assert_(found_x_and_y)
    self.assert_(found_x_123)

  def testGenericTagAndNamespace(self):
    """tag and namespace setters update _qname in either order."""
    element = atom.core.XmlElement(text='content')
    # Try setting tag then namespace.
    element.tag = 'foo'
    self.assert_(element._qname == 'foo')
    element.namespace = 'http://example.com/ns'
    self.assert_(element._qname == '{http://example.com/ns}foo')
    element = atom.core.XmlElement()
    # Try setting namespace then tag.
    element.namespace = 'http://example.com/ns'
    self.assert_(element._qname == '{http://example.com/ns}')
    element.tag = 'foo'
    self.assert_(element._qname == '{http://example.com/ns}foo')

  def assert_trees_similar(self, a, b):
    """Compares two XML trees for approximate matching."""
    for child in a:
      self.assert_(len(a.findall(child.tag)) == len(b.findall(child.tag)))
    for child in b:
      self.assert_(len(a.findall(child.tag)) == len(b.findall(child.tag)))
    self.assert_(len(a) == len(b))
    self.assert_(a.text == b.text)
    self.assert_(a.attrib == b.attrib)
class UtilityFunctionTest(unittest.TestCase):
  """Tests the module-level qname matching helper in atom.core."""

  def testMatchQnames(self):
    """_qname_matches accepts matching triples and rejects mismatches."""
    # (tag, namespace, qname) triples which must match.
    matching_cases = [
        ('foo', 'http://example.com', '{http://example.com}foo'),
        (None, None, '{http://example.com}foo'),
        (None, None, 'foo'),
        (None, None, None),
        (None, None, '{http://example.com}'),
        ('foo', None, '{http://example.com}foo'),
        (None, 'http://example.com', '{http://example.com}foo'),
        (None, '', 'foo'),
        ('foo', '', 'foo'),
        ('foo', '', 'foo'),
    ]
    for tag, namespace, qname in matching_cases:
      self.assert_(atom.core._qname_matches(tag, namespace, qname))
    # Triples which must be rejected.
    non_matching_cases = [
        ('foo', 'http://google.com', '{http://example.com}foo'),
        ('foo', 'http://example.com', '{http://example.com}bar'),
        ('foo', 'http://example.com', '{http://google.com}foo'),
        ('bar', 'http://example.com', '{http://google.com}foo'),
        ('foo', None, '{http://example.com}bar'),
        (None, 'http://google.com', '{http://example.com}foo'),
        (None, '', '{http://example.com}foo'),
        ('foo', '', 'bar'),
    ]
    for tag, namespace, qname in non_matching_cases:
      self.assert_(
          atom.core._qname_matches(tag, namespace, qname) == False)
class Chars(atom.core.XmlElement):
  """Element with a unicode qname and two plain attributes."""
  _qname = u'{http://example.com/}chars'
  y = 'y'
  alpha = 'a'
class Strs(atom.core.XmlElement):
  """Element containing repeated Chars children and a unicode attribute."""
  _qname = '{http://example.com/}strs'
  chars = [Chars]
  delta = u'd'
def parse(string):
  """Shorthand: parse XML text into a generic atom.core.XmlElement."""
  target_class = atom.core.XmlElement
  return atom.core.xml_element_from_string(string, target_class)
def create(tag, string):
  """Build a generic XmlElement with the given qname and text content."""
  built = atom.core.XmlElement(text=string)
  built._qname = tag
  return built
class CharacterEncodingTest(unittest.TestCase):
  """Checks parsing and serializing unicode, UTF-8, and UTF-16 content.

  NOTE(review): these tests depend on Python 2 str/unicode semantics
  (the `unicode` builtin, byte-string literals, and str.decode).
  """

  def testUnicodeInputString(self):
    # Test parsing the inner text.
    self.assertEqual(parse(u'<x>δ</x>').text, u'\u03b4')
    self.assertEqual(parse(u'<x>\u03b4</x>').text, u'\u03b4')
    # Test output valid XML.
    self.assertEqual(parse(u'<x>δ</x>').to_string(), '<x>δ</x>')
    self.assertEqual(parse(u'<x>\u03b4</x>').to_string(), '<x>δ</x>')
    # Test setting the inner text and output valid XML.
    e = create(u'x', u'\u03b4')
    self.assertEqual(e.to_string(), '<x>δ</x>')
    self.assertEqual(e.text, u'\u03b4')
    self.assert_(isinstance(e.text, unicode))
    self.assertEqual(create(u'x', '\xce\xb4'.decode('utf-8')).to_string(),
        '<x>δ</x>')

  def testUnicodeTagsAndAttributes(self):
    # Begin with test to show underlying ElementTree behavior.
    t = ElementTree.fromstring(u'<del\u03b4ta>test</del\u03b4ta>'.encode('utf-8'))
    self.assertEqual(t.tag, u'del\u03b4ta')
    self.assertEqual(parse(u'<\u03b4elta>test</\u03b4elta>')._qname,
        u'\u03b4elta')
    # Test unicode attribute names and values.
    t = ElementTree.fromstring(u'<x \u03b4a="\u03b4b" />'.encode('utf-8'))
    self.assertEqual(t.attrib, {u'\u03b4a': u'\u03b4b'})
    self.assertEqual(parse(u'<x \u03b4a="\u03b4b" />').get_attributes(
        u'\u03b4a')[0].value, u'\u03b4b')
    x = create('x', None)
    x._other_attributes[u'a'] = u'\u03b4elta'
    self.assert_(x.to_string().startswith('<x a="δelta"'))

  def testUtf8InputString(self):
    # Test parsing inner text.
    self.assertEqual(parse('<x>δ</x>').text, u'\u03b4')
    self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-8')).text, u'\u03b4')
    self.assertEqual(parse('<x>\xce\xb4</x>').text, u'\u03b4')
    # Test output valid XML.
    self.assertEqual(parse('<x>δ</x>').to_string(), '<x>δ</x>')
    self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-8')).to_string(),
        '<x>δ</x>')
    self.assertEqual(parse('<x>\xce\xb4</x>').to_string(), '<x>δ</x>')
    # Test setting the inner text and output valid XML.
    e = create('x', '\xce\xb4')
    self.assertEqual(e.to_string(), '<x>δ</x>')
    # Don't change the encoding until the we convert to an XML string.
    self.assertEqual(e.text, '\xce\xb4')
    self.assert_(isinstance(e.text, str))
    self.assert_(isinstance(e.to_string(), str))
    self.assertEqual(create('x', u'\u03b4'.encode('utf-8')).to_string(),
        '<x>δ</x>')
    # Test attributes and values with UTF-8 inputs.
    self.assertEqual(parse('<x \xce\xb4a="\xce\xb4b" />').get_attributes(
        u'\u03b4a')[0].value, u'\u03b4b')

  def testUtf8TagsAndAttributes(self):
    self.assertEqual(
        parse(u'<\u03b4elta>test</\u03b4elta>'.encode('utf-8'))._qname,
        u'\u03b4elta')
    self.assertEqual(parse('<\xce\xb4elta>test</\xce\xb4elta>')._qname,
        u'\u03b4elta')
    # Test an element with UTF-8 in the attribute value.
    x = create('x', None)
    x._other_attributes[u'a'] = '\xce\xb4'
    self.assert_(x.to_string(encoding='UTF-8').startswith('<x a="δ"'))
    self.assert_(x.to_string().startswith('<x a="δ"'))

  def testOtherEncodingOnInputString(self):
    BIG_ENDIAN = 0
    LITTLE_ENDIAN = 1
    # Test parsing inner text.
    self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-16')).text, u'\u03b4')
    # Test output valid XML.
    self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-16')).to_string(),
        '<x>δ</x>')
    # Test setting the inner text and output valid XML.
    e = create('x', u'\u03b4'.encode('utf-16'))
    self.assertEqual(e.to_string(encoding='utf-16'), '<x>δ</x>')
    # Don't change the encoding until the we convert to an XML string.
    # Allow either little-endian or big-endian byte orderings.
    self.assert_(e.text in ['\xff\xfe\xb4\x03', '\xfe\xff\x03\xb4'])
    endianness = LITTLE_ENDIAN
    if e.text == '\xfe\xff\x03\xb4':
      endianness = BIG_ENDIAN
    self.assert_(isinstance(e.text, str))
    self.assert_(isinstance(e.to_string(encoding='utf-16'), str))
    if endianness == LITTLE_ENDIAN:
      self.assertEqual(
          create('x', '\xff\xfe\xb4\x03').to_string(encoding='utf-16'),
          '<x>δ</x>')
    else:
      self.assertEqual(
          create('x', '\xfe\xff\x03\xb4').to_string(encoding='utf-16'),
          '<x>δ</x>')

  def testOtherEncodingInTagsAndAttributes(self):
    self.assertEqual(
        parse(u'<\u03b4elta>test</\u03b4elta>'.encode('utf-16'))._qname,
        u'\u03b4elta')
    # Test an element with UTF-16 in the attribute value.
    x = create('x', None)
    x._other_attributes[u'a'] = u'\u03b4'.encode('utf-16')
    self.assert_(x.to_string(encoding='UTF-16').startswith('<x a="δ"'))
def suite():
  """Collect every test case in this module into one suite."""
  cases = [XmlElementTest, UtilityFunctionTest, CharacterEncodingTest]
  return conf.build_suite(cases)
# Allow this test module to be run directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import unittest
import atom.http_interface
import StringIO
class HttpResponseTest(unittest.TestCase):
  """Exercises atom.http_interface.HttpResponse built from plain strings."""

  def testConstructorWithStrings(self):
    response = atom.http_interface.HttpResponse(
        body='Hi there!', status=200, reason='OK',
        headers={'Content-Length':'9'})
    # Sequential reads consume the body like a file object.
    for expected, amount in (('H', 1), ('i ', 2)):
      self.assertEqual(response.read(amt=amount), expected)
    self.assertEqual(response.read(), 'there!')
    # A fully drained body yields the empty string.
    self.assertEqual(response.read(), '')
    self.assertEqual(response.reason, 'OK')
    self.assertEqual(response.status, 200)
    self.assertEqual(response.getheader('Content-Length'), '9')
    # Missing headers return None, or the caller-supplied default.
    self.assert_(response.getheader('Missing') is None)
    self.assertEqual(response.getheader('Missing', default='yes'), 'yes')
def suite():
  """Return a suite holding every test in this module."""
  response_tests = unittest.makeSuite(HttpResponseTest, 'test')
  return unittest.TestSuite((response_tests,))
# Allow this test module to be run directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# This test may make an actual HTTP request.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import StringIO
import os.path
import atom.mock_http_core
import atom.http_core
class EchoClientTest(unittest.TestCase):
  """Tests atom.mock_http_core.EchoHttpClient, which reflects each request
  back in Echo-* response headers and echoes the request body."""

  def test_echo_response(self):
    client = atom.mock_http_core.EchoHttpClient()
    # Send a bare-bones POST request.
    request = atom.http_core.HttpRequest(method='POST',
        uri=atom.http_core.Uri(host='www.jeffscudder.com', path='/'))
    request.add_body_part('hello world!', 'text/plain')
    response = client.request(request)
    # An unset port is echoed as the literal string 'None'.
    self.assert_(response.getheader('Echo-Host') == 'www.jeffscudder.com:None')
    self.assert_(response.getheader('Echo-Uri') == '/')
    self.assert_(response.getheader('Echo-Scheme') is None)
    self.assert_(response.getheader('Echo-Method') == 'POST')
    self.assert_(response.getheader('Content-Length') == str(len(
        'hello world!')))
    self.assert_(response.getheader('Content-Type') == 'text/plain')
    self.assert_(response.read() == 'hello world!')
    # Test a path of None should default to /
    request = atom.http_core.HttpRequest(method='POST',
        uri=atom.http_core.Uri(host='www.jeffscudder.com', path=None))
    response = client.request(request)
    self.assert_(response.getheader('Echo-Host') == 'www.jeffscudder.com:None')
    self.assert_(response.getheader('Echo-Method') == 'POST')
    self.assert_(response.getheader('Echo-Uri') == '/')
    # Send a multipart request.
    request = atom.http_core.HttpRequest(method='POST',
        uri=atom.http_core.Uri(scheme='https', host='www.jeffscudder.com',
            port=8080, path='/multipart',
            query={'test': 'true', 'happy': 'yes'}),
        headers={'Authorization':'Test xyzzy', 'Testing':'True'})
    request.add_body_part('start', 'text/plain')
    request.add_body_part(StringIO.StringIO('<html><body>hi</body></html>'),
        'text/html', len('<html><body>hi</body></html>'))
    request.add_body_part('alert("Greetings!")', 'text/javascript')
    response = client.request(request)
    self.assert_(response.getheader('Echo-Host') == 'www.jeffscudder.com:8080')
    self.assert_(
        response.getheader('Echo-Uri') == '/multipart?test=true&happy=yes')
    self.assert_(response.getheader('Echo-Scheme') == 'https')
    self.assert_(response.getheader('Echo-Method') == 'POST')
    self.assert_(response.getheader('Content-Type') == (
        'multipart/related; boundary="%s"' % (atom.http_core.MIME_BOUNDARY,)))
    # The echoed body must match the exact MIME layout built by
    # add_body_part: each part preceded by a boundary line and its
    # Content-Type header, with a closing boundary at the end.
    expected_body = ('Media multipart posting'
        '\r\n--%s\r\n'
        'Content-Type: text/plain\r\n\r\n'
        'start'
        '\r\n--%s\r\n'
        'Content-Type: text/html\r\n\r\n'
        '<html><body>hi</body></html>'
        '\r\n--%s\r\n'
        'Content-Type: text/javascript\r\n\r\n'
        'alert("Greetings!")'
        '\r\n--%s--') % (atom.http_core.MIME_BOUNDARY,
            atom.http_core.MIME_BOUNDARY, atom.http_core.MIME_BOUNDARY,
            atom.http_core.MIME_BOUNDARY,)
    self.assert_(response.read() == expected_body)
    self.assert_(response.getheader('Content-Length') == str(
        len(expected_body)))
class MockHttpClientTest(unittest.TestCase):
  """Tests recording, persistence, and request matching for
  atom.mock_http_core.MockHttpClient.

  NOTE(review): several tests write recording files to disk, and
  test_use_recordings may perform a live request to www.google.com when
  no saved recording exists.
  """

  def setUp(self):
    self.client = atom.mock_http_core.MockHttpClient()

  def test_respond_with_recording(self):
    """A manually added response is replayed for a matching request."""
    request = atom.http_core.HttpRequest(method='GET')
    atom.http_core.parse_uri('http://www.google.com/').modify_request(request)
    self.client.add_response(request, 200, 'OK', body='Testing')
    response = self.client.request(request)
    self.assert_(response.status == 200)
    self.assert_(response.reason == 'OK')
    self.assert_(response.read() == 'Testing')

  def test_save_and_load_recordings(self):
    """Recordings survive a save, wipe, and reload cycle."""
    request = atom.http_core.HttpRequest(method='GET')
    atom.http_core.parse_uri('http://www.google.com/').modify_request(request)
    self.client.add_response(request, 200, 'OK', body='Testing')
    response = self.client.request(request)
    self.client._save_recordings('test_save_and_load_recordings')
    self.client._recordings = []
    # With the in-memory recordings cleared, the request must fail.
    try:
      response = self.client.request(request)
      self.fail('There should be no recording for this request.')
    except atom.mock_http_core.NoRecordingFound:
      pass
    self.client._load_recordings('test_save_and_load_recordings')
    response = self.client.request(request)
    self.assert_(response.status == 200)
    self.assert_(response.reason == 'OK')
    self.assert_(response.read() == 'Testing')

  def test_use_recordings(self):
    """Replays a saved session, or records a live one on first run."""
    request = atom.http_core.HttpRequest(method='GET')
    atom.http_core.parse_uri('http://www.google.com/').modify_request(request)
    self.client._load_or_use_client('test_use_recordings',
        atom.http_core.HttpClient())
    response = self.client.request(request)
    # real_client is set only when no recording existed; persist the
    # freshly captured session for future runs.
    if self.client.real_client:
      self.client._save_recordings('test_use_recordings')
    self.assert_(response.status == 200)
    self.assert_(response.reason == 'OK')
    self.assert_(response.getheader('server') == 'gws')
    body = response.read()
    self.assert_(body.startswith('<!doctype html>'))

  def test_match_request(self):
    """_match_request compares method and URI; gsessionid is significant."""
    x = atom.http_core.HttpRequest('http://example.com/', 'GET')
    y = atom.http_core.HttpRequest('http://example.com/', 'GET')
    self.assert_(atom.mock_http_core._match_request(x, y))
    y = atom.http_core.HttpRequest('http://example.com/', 'POST')
    self.assert_(not atom.mock_http_core._match_request(x, y))
    y = atom.http_core.HttpRequest('http://example.com/1', 'GET')
    self.assert_(not atom.mock_http_core._match_request(x, y))
    y = atom.http_core.HttpRequest('http://example.com/?gsessionid=1', 'GET')
    self.assert_(not atom.mock_http_core._match_request(x, y))
    # A start_index query parameter does not prevent a match.
    y = atom.http_core.HttpRequest('http://example.com/?start_index=1', 'GET')
    self.assert_(atom.mock_http_core._match_request(x, y))
    x = atom.http_core.HttpRequest('http://example.com/?gsessionid=1', 'GET')
    y = atom.http_core.HttpRequest('http://example.com/?gsessionid=1', 'GET')
    self.assert_(atom.mock_http_core._match_request(x, y))
    y = atom.http_core.HttpRequest('http://example.com/?gsessionid=2', 'GET')
    self.assert_(not atom.mock_http_core._match_request(x, y))
    y = atom.http_core.HttpRequest('http://example.com/', 'GET')
    self.assert_(not atom.mock_http_core._match_request(x, y))

  def test_use_named_sessions(self):
    """A named cached session is recorded, saved, and replayed verbatim."""
    self.client._delete_recordings('mock_http_test.test_use_named_sessions')
    self.client.use_cached_session('mock_http_test.test_use_named_sessions',
        atom.mock_http_core.EchoHttpClient())
    request = atom.http_core.HttpRequest('http://example.com', 'GET')
    response = self.client.request(request)
    self.assertEqual(response.getheader('Echo-Method'), 'GET')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    # We will insert a Cache-Marker header to indicate that this is a
    # recorded response, but initially it should not be present.
    self.assertEqual(response.getheader('Cache-Marker'), None)
    # Modify the recorded response to allow us to identify a cached result
    # from an echoed result. We need to be able to check to see if this
    # came from a recording.
    self.assert_('Cache-Marker' not in self.client._recordings[0][1]._headers)
    self.client._recordings[0][1]._headers['Cache-Marker'] = '1'
    self.assert_('Cache-Marker' in self.client._recordings[0][1]._headers)
    # Save the recorded responses.
    self.client.close_session()
    # Create a new client, and have it use the recorded session.
    client = atom.mock_http_core.MockHttpClient()
    client.use_cached_session('mock_http_test.test_use_named_sessions',
        atom.mock_http_core.EchoHttpClient())
    # Make the same request, which should use the recorded result.
    response = client.request(request)
    self.assertEqual(response.getheader('Echo-Method'), 'GET')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    # We should now see the cache marker since the response is replayed.
    self.assertEqual(response.getheader('Cache-Marker'), '1')
def suite():
  """Bundle both test cases from this module into a single suite."""
  parts = [unittest.makeSuite(case, 'test')
           for case in (MockHttpClientTest, EchoClientTest)]
  return unittest.TestSuite(parts)
# Allow this test module to be run directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
# -*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder@gmail.com (Jeff Scudder)'
import sys
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom.data
import atom.core
import gdata.test_config as conf
# Google Base item entry used as a parsing fixture by the tests below.
XML_ENTRY_1 = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<id> http://www.google.com/test/id/url </id>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<link rel='license'
href='http://creativecommons.org/licenses/by-nc/2.5/rdf'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
class AuthorTest(unittest.TestCase):
  """Serialization and extension-element tests for atom.data.Author."""

  def setUp(self):
    self.author = atom.data.Author()

  def testEmptyAuthorShouldHaveEmptyExtensionLists(self):
    self.assert_(isinstance(self.author._other_elements, list))
    self.assertEqual(len(self.author._other_elements), 0)
    self.assert_(isinstance(self.author._other_attributes, dict))
    self.assertEqual(len(self.author._other_attributes), 0)

  def testNormalAuthorShouldHaveNoExtensionElements(self):
    self.author.name = atom.data.Name(text='Jeff Scudder')
    self.assertEqual(self.author.name.text, 'Jeff Scudder')
    self.assertEqual(len(self.author._other_elements), 0)
    new_author = atom.core.XmlElementFromString(self.author.ToString(),
        atom.data.Author)
    self.assertEqual(len(new_author._other_elements), 0)
    self.assertEqual(new_author.name.text, 'Jeff Scudder')
    self.author.extension_elements.append(atom.data.ExtensionElement(
        'foo', text='bar'))
    self.assertEqual(len(self.author.extension_elements), 1)
    self.assertEqual(self.author.name.text, 'Jeff Scudder')
    new_author = atom.core.parse(self.author.ToString(), atom.data.Author)
    # NOTE(review): this asserts on self.author again rather than the
    # reparsed new_author; new_author.extension_elements looks like the
    # intended subject — confirm before changing.
    self.assertEqual(len(self.author.extension_elements), 1)
    self.assertEqual(new_author.name.text, 'Jeff Scudder')

  def testEmptyAuthorToAndFromStringShouldMatch(self):
    string_from_author = self.author.ToString()
    new_author = atom.core.XmlElementFromString(string_from_author,
        atom.data.Author)
    string_from_new_author = new_author.ToString()
    self.assertEqual(string_from_author, string_from_new_author)

  def testAuthorWithNameToAndFromStringShouldMatch(self):
    self.author.name = atom.data.Name()
    self.author.name.text = 'Jeff Scudder'
    string_from_author = self.author.ToString()
    new_author = atom.core.XmlElementFromString(string_from_author,
        atom.data.Author)
    string_from_new_author = new_author.ToString()
    self.assertEqual(string_from_author, string_from_new_author)
    self.assertEqual(self.author.name.text, new_author.name.text)

  def testExtensionElements(self):
    """Unknown attributes survive a serialize/parse round trip."""
    self.author.extension_attributes['foo1'] = 'bar'
    self.author.extension_attributes['foo2'] = 'rab'
    self.assertEqual(self.author.extension_attributes['foo1'], 'bar')
    self.assertEqual(self.author.extension_attributes['foo2'], 'rab')
    new_author = atom.core.parse(str(self.author), atom.data.Author)
    self.assertEqual(new_author.extension_attributes['foo1'], 'bar')
    self.assertEqual(new_author.extension_attributes['foo2'], 'rab')

  def testConvertFullAuthorToAndFromString(self):
    TEST_AUTHOR = """<?xml version="1.0" encoding="utf-8"?>
<author xmlns="http://www.w3.org/2005/Atom">
<name xmlns="http://www.w3.org/2005/Atom">John Doe</name>
<email xmlns="http://www.w3.org/2005/Atom">john@example.com</email>
<uri>http://www.google.com</uri>
</author>"""
    author = atom.core.parse(TEST_AUTHOR, atom.data.Author)
    self.assertEqual(author.name.text, 'John Doe')
    self.assertEqual(author.email.text, 'john@example.com')
    self.assertEqual(author.uri.text, 'http://www.google.com')
class EmailTest(unittest.TestCase):
  """Round-trip serialization test for atom.data.Email."""

  def setUp(self):
    self.email = atom.data.Email()

  def testEmailToAndFromString(self):
    self.email.text = 'This is a test'
    serialized = self.email.to_string()
    round_tripped = atom.core.parse(serialized, atom.data.Email)
    # Both the text and the extension elements must survive the trip.
    self.assertEqual(self.email.text, round_tripped.text)
    self.assertEqual(self.email.extension_elements,
        round_tripped.extension_elements)
class NameTest(unittest.TestCase):
  """Serialization round-trip tests for atom.data.Name.

  Consistency fix: the original mixed the deprecated CamelCase aliases
  (ToString, atom.core.XmlElementFromString) with the v2-style calls
  (to_string, atom.core.parse) used elsewhere in this file; this block
  now uses the v2 names throughout.
  """

  def setUp(self):
    self.name = atom.data.Name()

  def testEmptyNameToAndFromStringShouldMatch(self):
    """An empty Name serializes, parses, and reserializes identically."""
    string_from_name = self.name.to_string()
    new_name = atom.core.parse(string_from_name, atom.data.Name)
    string_from_new_name = new_name.to_string()
    self.assertEqual(string_from_name, string_from_new_name)

  def testText(self):
    """The text content survives a serialize/parse round trip."""
    self.assert_(self.name.text is None)
    self.name.text = 'Jeff Scudder'
    self.assertEqual(self.name.text, 'Jeff Scudder')
    new_name = atom.core.parse(self.name.to_string(), atom.data.Name)
    self.assertEqual(new_name.text, self.name.text)

  def testExtensionElements(self):
    """Unknown attributes survive a serialize/parse round trip."""
    self.name.extension_attributes['foo'] = 'bar'
    self.assertEqual(self.name.extension_attributes['foo'], 'bar')
    new_name = atom.core.parse(self.name.to_string(), atom.data.Name)
    self.assertEqual(new_name.extension_attributes['foo'], 'bar')
class ExtensionElementTest(unittest.TestCase):
  """Tests generic tree parsing via atom.data.ExtensionElement."""

  def setUp(self):
    self.ee = atom.data.ExtensionElement('foo')
    self.EXTENSION_TREE = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<g:author xmlns:g="http://www.google.com">
<g:name>John Doe
<g:foo yes="no" up="down">Bar</g:foo>
</g:name>
</g:author>
</feed>"""

  def testEmptyEEShouldProduceEmptyString(self):
    # NOTE(review): this test is an empty stub — no assertion is made.
    pass

  def testEEParsesTreeCorrectly(self):
    """Nested elements, namespaces, text, and attributes are all parsed."""
    deep_tree = atom.core.xml_element_from_string(self.EXTENSION_TREE,
        atom.data.ExtensionElement)
    self.assertEqual(deep_tree.tag, 'feed')
    self.assertEqual(deep_tree.namespace, 'http://www.w3.org/2005/Atom')
    self.assert_(deep_tree.children[0].tag == 'author')
    self.assert_(deep_tree.children[0].namespace == 'http://www.google.com')
    self.assert_(deep_tree.children[0].children[0].tag == 'name')
    self.assert_(deep_tree.children[0].children[0].namespace ==
        'http://www.google.com')
    self.assert_(deep_tree.children[0].children[0].text.strip() == 'John Doe')
    self.assert_(deep_tree.children[0].children[0].children[0].text.strip() ==
        'Bar')
    foo = deep_tree.children[0].children[0].children[0]
    self.assert_(foo.tag == 'foo')
    self.assert_(foo.namespace == 'http://www.google.com')
    self.assert_(foo.attributes['up'] == 'down')
    self.assert_(foo.attributes['yes'] == 'no')
    self.assert_(foo.children == [])

  def testEEToAndFromStringShouldMatch(self):
    """Serialization is stable for both flat and deeply nested trees."""
    string_from_ee = self.ee.ToString()
    new_ee = atom.core.xml_element_from_string(string_from_ee,
        atom.data.ExtensionElement)
    string_from_new_ee = new_ee.ToString()
    self.assert_(string_from_ee == string_from_new_ee)
    deep_tree = atom.core.xml_element_from_string(self.EXTENSION_TREE,
        atom.data.ExtensionElement)
    string_from_deep_tree = deep_tree.ToString()
    new_deep_tree = atom.core.xml_element_from_string(string_from_deep_tree,
        atom.data.ExtensionElement)
    string_from_new_deep_tree = new_deep_tree.ToString()
    self.assert_(string_from_deep_tree == string_from_new_deep_tree)
class LinkTest(unittest.TestCase):
  """Tests atom.data.Link attribute handling and serialization."""

  def setUp(self):
    self.link = atom.data.Link()

  def testLinkToAndFromString(self):
    """href, hreflang, type and extension attrs survive a round trip."""
    self.link.href = 'test href'
    self.link.hreflang = 'english'
    self.link.type = 'text/html'
    self.link.extension_attributes['foo'] = 'bar'
    self.assertEqual(self.link.href, 'test href')
    self.assertEqual(self.link.hreflang, 'english')
    self.assertEqual(self.link.type, 'text/html')
    self.assertEqual(self.link.extension_attributes['foo'], 'bar')
    new_link = atom.core.parse(self.link.ToString(), atom.data.Link)
    self.assertEqual(self.link.href, new_link.href)
    self.assertEqual(self.link.type, new_link.type)
    self.assertEqual(self.link.hreflang, new_link.hreflang)
    self.assertEqual(self.link.extension_attributes['foo'],
                     new_link.extension_attributes['foo'])

  def testLinkType(self):
    """The constructor accepts type as a keyword argument."""
    test_link = atom.data.Link(type='text/html')
    self.assertEqual(test_link.type, 'text/html')
class GeneratorTest(unittest.TestCase):
  """Tests atom.data.Generator serialization."""

  def setUp(self):
    self.generator = atom.data.Generator()

  def testGeneratorToAndFromString(self):
    """uri, version and extension attrs survive a round trip."""
    self.generator.uri = 'www.google.com'
    self.generator.version = '1.0'
    self.generator.extension_attributes['foo'] = 'bar'
    self.assertEqual(self.generator.uri, 'www.google.com')
    self.assertEqual(self.generator.version, '1.0')
    self.assertEqual(self.generator.extension_attributes['foo'], 'bar')
    new_generator = atom.core.parse(self.generator.ToString(),
                                    atom.data.Generator)
    self.assertEqual(self.generator.uri, new_generator.uri)
    self.assertEqual(self.generator.version, new_generator.version)
    self.assertEqual(self.generator.extension_attributes['foo'],
                     new_generator.extension_attributes['foo'])
class TitleTest(unittest.TestCase):
  """Tests atom.data.Title serialization, including XML escaping of '<'."""

  def setUp(self):
    self.title = atom.data.Title()

  def testTitleToAndFromString(self):
    self.title.type = 'text'
    self.title.text = 'Less: <'
    self.assertEqual(self.title.type, 'text')
    self.assertEqual(self.title.text, 'Less: <')
    # str() of an XML element serializes it; '<' must escape and unescape.
    new_title = atom.core.parse(str(self.title), atom.data.Title)
    self.assertEqual(self.title.type, new_title.type)
    self.assertEqual(self.title.text, new_title.text)
class SubtitleTest(unittest.TestCase):
  """Tests atom.data.Subtitle serialization, including '&' escaping."""

  def setUp(self):
    self.subtitle = atom.data.Subtitle()

  def testTitleToAndFromString(self):
    self.subtitle.type = 'text'
    self.subtitle.text = 'sub & title'
    self.assertEqual(self.subtitle.type, 'text')
    self.assertEqual(self.subtitle.text, 'sub & title')
    new_subtitle = atom.core.parse(self.subtitle.ToString(),
                                   atom.data.Subtitle)
    self.assertEqual(self.subtitle.type, new_subtitle.type)
    self.assertEqual(self.subtitle.text, new_subtitle.text)
class SummaryTest(unittest.TestCase):
  """Tests atom.data.Summary serialization."""

  def setUp(self):
    self.summary = atom.data.Summary()

  def testTitleToAndFromString(self):
    self.summary.type = 'text'
    self.summary.text = 'Less: <'
    self.assertEqual(self.summary.type, 'text')
    self.assertEqual(self.summary.text, 'Less: <')
    new_summary = atom.core.parse(self.summary.ToString(), atom.data.Summary)
    self.assertEqual(self.summary.type, new_summary.type)
    self.assertEqual(self.summary.text, new_summary.text)
class CategoryTest(unittest.TestCase):
  """Tests atom.data.Category term/scheme/label serialization."""

  def setUp(self):
    self.category = atom.data.Category()

  def testCategoryToAndFromString(self):
    self.category.term = 'x'
    self.category.scheme = 'y'
    self.category.label = 'z'
    self.assertEqual(self.category.term, 'x')
    self.assertEqual(self.category.scheme, 'y')
    self.assertEqual(self.category.label, 'z')
    new_category = atom.core.parse(self.category.to_string(),
                                   atom.data.Category)
    self.assertEqual(self.category.term, new_category.term)
    self.assertEqual(self.category.scheme, new_category.scheme)
    self.assertEqual(self.category.label, new_category.label)
class ContributorTest(unittest.TestCase):
  """Tests atom.data.Contributor with nested name, email and uri."""

  def setUp(self):
    self.contributor = atom.data.Contributor()

  def testContributorToAndFromString(self):
    self.contributor.name = atom.data.Name(text='J Scud')
    self.contributor.email = atom.data.Email(text='nobody@nowhere')
    self.contributor.uri = atom.data.Uri(text='http://www.google.com')
    self.assertEqual(self.contributor.name.text, 'J Scud')
    self.assertEqual(self.contributor.email.text, 'nobody@nowhere')
    self.assertEqual(self.contributor.uri.text, 'http://www.google.com')
    new_contributor = atom.core.parse(self.contributor.ToString(),
                                      atom.data.Contributor)
    self.assertEqual(self.contributor.name.text, new_contributor.name.text)
    self.assertEqual(self.contributor.email.text, new_contributor.email.text)
    self.assertEqual(self.contributor.uri.text, new_contributor.uri.text)
class IdTest(unittest.TestCase):
  """Tests atom.data.Id text serialization."""

  def setUp(self):
    self.my_id = atom.data.Id()

  def testIdToAndFromString(self):
    self.my_id.text = 'my nifty id'
    self.assertEqual(self.my_id.text, 'my nifty id')
    new_id = atom.core.parse(self.my_id.ToString(), atom.data.Id)
    self.assertEqual(self.my_id.text, new_id.text)
class IconTest(unittest.TestCase):
  """Tests atom.data.Icon text serialization."""

  def setUp(self):
    self.icon = atom.data.Icon()

  def testIconToAndFromString(self):
    self.icon.text = 'my picture'
    self.assertEqual(self.icon.text, 'my picture')
    new_icon = atom.core.parse(str(self.icon), atom.data.Icon)
    self.assertEqual(self.icon.text, new_icon.text)
class LogoTest(unittest.TestCase):
  """Tests atom.data.Logo text serialization."""

  def setUp(self):
    self.logo = atom.data.Logo()

  def testLogoToAndFromString(self):
    self.logo.text = 'my logo'
    self.assertEqual(self.logo.text, 'my logo')
    new_logo = atom.core.parse(self.logo.ToString(), atom.data.Logo)
    self.assertEqual(self.logo.text, new_logo.text)
class RightsTest(unittest.TestCase):
  """Tests atom.data.Rights serialization."""

  def setUp(self):
    self.rights = atom.data.Rights()

  # Renamed from testContributorToAndFromString (copy-paste error); the
  # method is still discovered because the name starts with 'test'.
  def testRightsToAndFromString(self):
    self.rights.text = 'you have the right to remain silent'
    self.rights.type = 'text'
    self.assertEqual(self.rights.text, 'you have the right to remain silent')
    self.assertEqual(self.rights.type, 'text')
    new_rights = atom.core.parse(self.rights.ToString(), atom.data.Rights)
    self.assertEqual(self.rights.text, new_rights.text)
    self.assertEqual(self.rights.type, new_rights.type)
class UpdatedTest(unittest.TestCase):
  """Tests atom.data.Updated text serialization."""

  def setUp(self):
    self.updated = atom.data.Updated()

  def testUpdatedToAndFromString(self):
    self.updated.text = 'my time'
    self.assertEqual(self.updated.text, 'my time')
    new_updated = atom.core.parse(self.updated.ToString(), atom.data.Updated)
    self.assertEqual(self.updated.text, new_updated.text)
class PublishedTest(unittest.TestCase):
  """Tests atom.data.Published text serialization."""

  def setUp(self):
    self.published = atom.data.Published()

  def testPublishedToAndFromString(self):
    self.published.text = 'pub time'
    self.assertEqual(self.published.text, 'pub time')
    new_published = atom.core.parse(self.published.ToString(),
                                    atom.data.Published)
    self.assertEqual(self.published.text, new_published.text)
class FeedEntryParentTest(unittest.TestCase):
  """Exercises hidden conversion methods in atom.data.FeedEntryParent."""

  def testConvertToAndFromElementTree(self):
    # Use Entry because FeedEntryParent doesn't have a tag or namespace.
    original = atom.data.Entry()
    # Renamed from 'copy' to avoid shadowing the stdlib module name.
    copied = atom.data.FeedEntryParent()
    original.author.append(atom.data.Author(name=atom.data.Name(
        text='J Scud')))
    self.assertEqual(original.author[0].name.text, 'J Scud')
    self.assertEqual(copied.author, [])
    original.id = atom.data.Id(text='test id')
    self.assertEqual(original.id.text, 'test id')
    self.assertTrue(copied.id is None)
    copied._harvest_tree(original._to_tree())
    self.assertEqual(original.author[0].name.text, copied.author[0].name.text)
    self.assertEqual(original.id.text, copied.id.text)
class EntryTest(unittest.TestCase):
  """Tests atom.data.Entry parsing, serialization and link/attr lookup."""

  def testConvertToAndFromString(self):
    entry = atom.data.Entry()
    entry.author.append(atom.data.Author(name=atom.data.Name(text='js')))
    entry.title = atom.data.Title(text='my test entry')
    self.assertEqual(entry.author[0].name.text, 'js')
    self.assertEqual(entry.title.text, 'my test entry')
    new_entry = atom.core.parse(entry.ToString(), atom.data.Entry)
    self.assertEqual(new_entry.author[0].name.text, 'js')
    self.assertEqual(new_entry.title.text, 'my test entry')

  def testEntryCorrectlyConvertsActualData(self):
    entry = atom.core.parse(XML_ENTRY_1, atom.data.Entry)
    self.assertEqual(entry.category[0].scheme,
                     'http://base.google.com/categories/itemtypes')
    self.assertEqual(entry.category[0].term, 'products')
    # Leading/trailing whitespace inside the id element is preserved.
    self.assertEqual(entry.id.text, ' http://www.google.com/test/id/url ')
    self.assertEqual(entry.title.text, 'Testing 2000 series laptop')
    self.assertEqual(entry.title.type, 'text')
    self.assertEqual(entry.content.type, 'xhtml')
    # TODO: check all other values for the test entry.

  def testEntryWithFindElementAndFindAttribute(self):
    entry = atom.data.Entry()
    entry.link.append(atom.data.Link(rel='self', href='x'))
    entry.link.append(atom.data.Link(rel='foo', href='y'))
    entry.link.append(atom.data.Link(rel='edit', href='z'))
    self_link = None
    edit_link = None
    for link in entry.get_elements('link', 'http://www.w3.org/2005/Atom'):
      if link.get_attributes('rel')[0].value == 'self':
        self_link = link.get_attributes('href')[0].value
      elif link.get_attributes('rel')[0].value == 'edit':
        edit_link = link.get_attributes('href')[0].value
    self.assertEqual(self_link, 'x')
    self.assertEqual(edit_link, 'z')

  def testAppControl(self):
    TEST_BASE_ENTRY = """<?xml version='1.0'?>
        <entry xmlns='http://www.w3.org/2005/Atom'
            xmlns:g='http://base.google.com/ns/1.0'>
          <category scheme="http://base.google.com/categories/itemtypes"
                    term="products"/>
          <title type='text'>Testing 2000 series laptop</title>
          <content type='xhtml'>
            <div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
          </content>
          <app:control xmlns:app='http://purl.org/atom/app#'>
            <app:draft>yes</app:draft>
            <gm:disapproved
                xmlns:gm='http://base.google.com/ns-metadata/1.0'/>
          </app:control>
          <link rel='alternate' type='text/html'
              href='http://www.provider-host.com/123456789'/>
          <g:label>Computer</g:label>
          <g:label>Laptop</g:label>
          <g:label>testing laptop</g:label>
          <g:item_type>products</g:item_type>
        </entry>"""
    entry = atom.core.parse(TEST_BASE_ENTRY, atom.data.Entry)
    self.assertEqual(entry.control.draft.text, 'yes')
    # The non-app child of app:control is kept as an extension element.
    self.assertEqual(len(entry.control.extension_elements), 1)
    self.assertEqual(entry.control.extension_elements[0].tag, 'disapproved')
class ControlTest(unittest.TestCase):
  """Tests versioned parsing rules for the app:control element."""

  def testVersionRuleGeneration(self):
    self.assertEqual(atom.core._get_qname(atom.data.Control, 1),
                     '{http://purl.org/atom/app#}control')
    self.assertEqual(atom.data.Control._get_rules(1)[0],
                     '{http://purl.org/atom/app#}control')

  def testVersionedControlFromString(self):
    """v1 and v2 use different app namespaces; mismatches parse to None."""
    xml_v1 = """<control xmlns="http://purl.org/atom/app#">
        <draft>no</draft></control>"""
    xml_v2 = """<control xmlns="http://www.w3.org/2007/app">
        <draft>no</draft></control>"""
    control_v1 = atom.core.parse(xml_v1, atom.data.Control, 1)
    control_v2 = atom.core.parse(xml_v2, atom.data.Control, 2)
    self.assertTrue(control_v1 is not None)
    self.assertTrue(control_v2 is not None)
    # Parsing with mismatched version numbers should return None.
    self.assertTrue(atom.core.parse(xml_v1, atom.data.Control, 2) is None)
    self.assertTrue(atom.core.parse(xml_v2, atom.data.Control, 1) is None)

  def testConvertToAndFromString(self):
    control = atom.data.Control()
    control.text = 'some text'
    control.draft = atom.data.Draft(text='yes')
    self.assertEqual(control.draft.text, 'yes')
    self.assertEqual(control.text, 'some text')
    self.assertTrue(isinstance(control.draft, atom.data.Draft))
    new_control = atom.core.parse(str(control), atom.data.Control)
    self.assertEqual(control.draft.text, new_control.draft.text)
    self.assertEqual(control.text, new_control.text)
    self.assertTrue(isinstance(new_control.draft, atom.data.Draft))
class DraftTest(unittest.TestCase):
  """Tests atom.data.Draft serialization with extension attributes."""

  def testConvertToAndFromString(self):
    draft = atom.data.Draft()
    draft.text = 'maybe'
    draft.extension_attributes['foo'] = 'bar'
    self.assertEqual(draft.text, 'maybe')
    self.assertEqual(draft.extension_attributes['foo'], 'bar')
    new_draft = atom.core.parse(str(draft), atom.data.Draft)
    self.assertEqual(draft.text, new_draft.text)
    self.assertEqual(draft.extension_attributes['foo'],
                     new_draft.extension_attributes['foo'])
class SourceTest(unittest.TestCase):
  """Tests atom.data.Source serialization."""

  def testConvertToAndFromString(self):
    source = atom.data.Source()
    source.author.append(atom.data.Author(name=atom.data.Name(text='js')))
    source.title = atom.data.Title(text='my test source')
    source.generator = atom.data.Generator(text='gen')
    self.assertEqual(source.author[0].name.text, 'js')
    self.assertEqual(source.title.text, 'my test source')
    self.assertEqual(source.generator.text, 'gen')
    new_source = atom.core.parse(source.ToString(), atom.data.Source)
    self.assertEqual(new_source.author[0].name.text, 'js')
    self.assertEqual(new_source.title.text, 'my test source')
    self.assertEqual(new_source.generator.text, 'gen')
class FeedTest(unittest.TestCase):
  """Tests atom.data.Feed serialization and entry-order preservation."""

  def testConvertToAndFromString(self):
    feed = atom.data.Feed()
    feed.author.append(atom.data.Author(name=atom.data.Name(text='js')))
    feed.title = atom.data.Title(text='my test source')
    feed.generator = atom.data.Generator(text='gen')
    feed.entry.append(atom.data.Entry(author=[atom.data.Author(
        name=atom.data.Name(text='entry author'))]))
    self.assertEqual(feed.author[0].name.text, 'js')
    self.assertEqual(feed.title.text, 'my test source')
    self.assertEqual(feed.generator.text, 'gen')
    self.assertEqual(feed.entry[0].author[0].name.text, 'entry author')
    new_feed = atom.core.parse(feed.ToString(), atom.data.Feed)
    self.assertEqual(new_feed.author[0].name.text, 'js')
    self.assertEqual(new_feed.title.text, 'my test source')
    self.assertEqual(new_feed.generator.text, 'gen')
    self.assertEqual(new_feed.entry[0].author[0].name.text, 'entry author')

  def testPreserveEntryOrder(self):
    """Entry order is kept even with other elements interleaved."""
    test_xml = (
        '<feed xmlns="http://www.w3.org/2005/Atom">'
        '<entry><id>0</id></entry>'
        '<entry><id>1</id></entry>'
        '<title>Testing Order</title>'
        '<entry><id>2</id></entry>'
        '<entry><id>3</id></entry>'
        '<entry><id>4</id></entry>'
        '<entry><id>5</id></entry>'
        '<entry><id>6</id></entry>'
        '<entry><id>7</id></entry>'
        '<author/>'
        '<entry><id>8</id></entry>'
        '<id>feed_id</id>'
        '<entry><id>9</id></entry>'
        '</feed>')
    feed = atom.core.parse(test_xml, atom.data.Feed)
    for i in xrange(10):
      self.assertEqual(feed.entry[i].id.text, str(i))
    feed = atom.core.parse(feed.ToString(), atom.data.Feed)
    for i in xrange(10):
      self.assertEqual(feed.entry[i].id.text, str(i))
    # Swap entries 3 and 4 and confirm the new order survives a round trip.
    feed.entry[3], feed.entry[4] = feed.entry[4], feed.entry[3]
    self.assertEqual(feed.entry[2].id.text, '2')
    self.assertEqual(feed.entry[3].id.text, '4')
    self.assertEqual(feed.entry[4].id.text, '3')
    self.assertEqual(feed.entry[5].id.text, '5')
    feed = atom.core.parse(feed.to_string(), atom.data.Feed)
    self.assertEqual(feed.entry[2].id.text, '2')
    self.assertEqual(feed.entry[3].id.text, '4')
    self.assertEqual(feed.entry[4].id.text, '3')
    self.assertEqual(feed.entry[5].id.text, '5')
class ContentEntryParentTest(unittest.TestCase):
  """Tests atom.data.Content serialization and construction."""

  def setUp(self):
    self.content = atom.data.Content()

  def testConvertToAndFromElementTree(self):
    self.content.text = 'my content'
    self.content.type = 'text'
    self.content.src = 'my source'
    self.assertEqual(self.content.text, 'my content')
    self.assertEqual(self.content.type, 'text')
    self.assertEqual(self.content.src, 'my source')
    new_content = atom.core.parse(self.content.ToString(), atom.data.Content)
    self.assertEqual(self.content.text, new_content.text)
    self.assertEqual(self.content.type, new_content.type)
    self.assertEqual(self.content.src, new_content.src)

  def testContentConstructorSetsSrc(self):
    new_content = atom.data.Content(src='abcd')
    self.assertEqual(new_content.src, 'abcd')

  def testContentFromString(self):
    content_xml = '<content xmlns="http://www.w3.org/2005/Atom" type="test"/>'
    content = atom.core.parse(content_xml, atom.data.Content)
    self.assertTrue(isinstance(content, atom.data.Content))
    self.assertEqual(content.type, 'test')
class PreserveUnkownElementTest(unittest.TestCase):
  """Tests correct preservation of XML elements which are non Atom.

  NOTE: class name keeps the historical 'Unkown' spelling because the
  module suite() refers to it by this name.
  """

  def setUp(self):
    GBASE_ATTRIBUTE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
        <feed xmlns='http://www.w3.org/2005/Atom'
              xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
              xmlns:gm='http://base.google.com/ns-metadata/1.0'>
          <id>http://www.google.com/base/feeds/attributes</id>
          <updated>2006-11-01T20:35:59.578Z</updated>
          <category scheme='http://base.google.com/categories/itemtypes'
                    term='online jobs'></category>
          <category scheme='http://base.google.com/categories/itemtypes'
                    term='jobs'></category>
          <title type='text'>histogram for query: [item type:jobs]</title>
          <link rel='alternate' type='text/html'
                href='http://base.google.com'></link>
          <link rel='self' type='application/atom+xml'
                href='http://www.google.com/base/attributes/jobs'></link>
          <generator version='1.0'
                     uri='http://base.google.com'>GoogleBase</generator>
          <openSearch:totalResults>16</openSearch:totalResults>
          <openSearch:startIndex>1</openSearch:startIndex>
          <openSearch:itemsPerPage>16</openSearch:itemsPerPage>
          <entry>
            <id>http://www.google.com/base/feeds/attributes/job+industy</id>
            <updated>2006-11-01T20:36:00.100Z</updated>
            <title type='text'>job industry(text)</title>
            <content type='text'>Attribute"job industry" of type text.
            </content>
            <gm:attribute name='job industry' type='text' count='4416629'>
              <gm:value count='380772'>it internet</gm:value>
              <gm:value count='261565'>healthcare</gm:value>
              <gm:value count='142018'>information technology</gm:value>
              <gm:value count='124622'>accounting</gm:value>
              <gm:value count='111311'>clerical and administrative</gm:value>
              <gm:value count='82928'>other</gm:value>
              <gm:value count='77620'>sales and sales management</gm:value>
              <gm:value count='68764'>information systems</gm:value>
              <gm:value count='65859'>engineering and architecture</gm:value>
              <gm:value count='64757'>sales</gm:value>
            </gm:attribute>
          </entry>
        </feed>"""
    self.feed = atom.core.parse(GBASE_ATTRIBUTE_FEED,
                                atom.data.Feed)

  def testCaptureOpenSearchElements(self):
    self.assertEqual(self.feed.FindExtensions('totalResults')[0].tag,
                     'totalResults')
    self.assertEqual(self.feed.FindExtensions('totalResults')[0].namespace,
                     'http://a9.com/-/spec/opensearchrss/1.0/')
    open_search_extensions = self.feed.FindExtensions(
        namespace='http://a9.com/-/spec/opensearchrss/1.0/')
    self.assertEqual(len(open_search_extensions), 3)
    for element in open_search_extensions:
      self.assertEqual(element.namespace,
                       'http://a9.com/-/spec/opensearchrss/1.0/')

  def testCaptureMetaElements(self):
    meta_elements = self.feed.entry[0].FindExtensions(
        namespace='http://base.google.com/ns-metadata/1.0')
    self.assertEqual(len(meta_elements), 1)
    self.assertEqual(meta_elements[0].attributes['count'], '4416629')
    self.assertEqual(len(meta_elements[0].children), 10)

  def testCaptureMetaChildElements(self):
    meta_elements = self.feed.entry[0].FindExtensions(
        namespace='http://base.google.com/ns-metadata/1.0')
    meta_children = meta_elements[0].FindChildren(
        namespace='http://base.google.com/ns-metadata/1.0')
    self.assertEqual(len(meta_children), 10)
    for child in meta_children:
      self.assertEqual(child.tag, 'value')
class LinkFinderTest(unittest.TestCase):
  """Tests the link lookup helpers on a parsed entry."""

  def setUp(self):
    self.entry = atom.core.parse(XML_ENTRY_1, atom.data.Entry)

  def testLinkFinderGetsLicenseLink(self):
    self.assertTrue(isinstance(self.entry.GetLink('license'),
                               atom.data.Link))
    self.assertTrue(isinstance(self.entry.GetLicenseLink(), atom.data.Link))
    self.assertEqual(self.entry.GetLink('license').href,
                     'http://creativecommons.org/licenses/by-nc/2.5/rdf')
    self.assertEqual(self.entry.get_license_link().href,
                     'http://creativecommons.org/licenses/by-nc/2.5/rdf')
    self.assertEqual(self.entry.GetLink('license').rel, 'license')
    self.assertEqual(self.entry.FindLicenseLink(),
                     'http://creativecommons.org/licenses/by-nc/2.5/rdf')

  def testLinkFinderGetsAlternateLink(self):
    self.assertTrue(isinstance(self.entry.GetLink('alternate'),
                               atom.data.Link))
    self.assertEqual(self.entry.GetLink('alternate').href,
                     'http://www.provider-host.com/123456789')
    self.assertEqual(self.entry.FindAlternateLink(),
                     'http://www.provider-host.com/123456789')
    self.assertEqual(self.entry.GetLink('alternate').rel, 'alternate')
class AtomBaseTest(unittest.TestCase):
  """Tests conversion of nested extension elements to an ElementTree."""

  def testAtomBaseConvertsExtensions(self):
    # Using Id because it adds no additional members.
    atom_base = atom.data.Id()
    extension_child = atom.data.ExtensionElement('foo',
                                                 namespace='http://ns0.com')
    extension_grandchild = atom.data.ExtensionElement(
        'bar', namespace='http://ns0.com')
    extension_child.children.append(extension_grandchild)
    atom_base.extension_elements.append(extension_child)
    self.assertEqual(len(atom_base.extension_elements), 1)
    self.assertEqual(len(atom_base.extension_elements[0].children), 1)
    self.assertEqual(atom_base.extension_elements[0].tag, 'foo')
    self.assertEqual(atom_base.extension_elements[0].children[0].tag, 'bar')
    element_tree = atom_base._to_tree()
    self.assertTrue(element_tree.find('{http://ns0.com}foo') is not None)
    self.assertTrue(element_tree.find('{http://ns0.com}foo').find(
        '{http://ns0.com}bar') is not None)
class UtfParsingTest(unittest.TestCase):
  """Tests parsing and serializing XML containing non-ASCII (Greek) text."""

  def setUp(self):
    self.test_xml = u"""<?xml version="1.0" encoding="utf-8"?>
        <entry xmlns='http://www.w3.org/2005/Atom'>
          <id>http://www.google.com/test/id/url</id>
          <title type='αλφα'>αλφα</title>
        </entry>"""
    # Remember the module-level encoding so tearDown can restore it;
    # testMemberStringEncoding deliberately changes it.
    self._original_encoding = atom.core.STRING_ENCODING

  def tearDown(self):
    # Without this, the 'iso8859_7' setting below would leak into every
    # test that runs after this class.
    atom.core.STRING_ENCODING = self._original_encoding

  def testMemberStringEncoding(self):
    atom_entry = atom.core.parse(self.test_xml, atom.data.Entry)
    self.assertTrue(isinstance(atom_entry.title.type, unicode))
    self.assertEqual(atom_entry.title.type, u'\u03B1\u03BB\u03C6\u03B1')
    self.assertEqual(atom_entry.title.text, u'\u03B1\u03BB\u03C6\u03B1')
    # Setting object members to unicode strings is supported.
    atom_entry.title.type = u'\u03B1\u03BB\u03C6\u03B1'
    xml = atom_entry.ToString()
    # The unicode code points should be converted to XML escaped sequences.
    self.assertTrue('αλφα' in xml)
    # Make sure that we can use plain text when MEMBER_STRING_ENCODING is utf8
    atom_entry.title.type = "plain text"
    atom_entry.title.text = "more text"
    xml = atom_entry.ToString()
    self.assertTrue("plain text" in xml)
    self.assertTrue("more text" in xml)
    # Test something else than utf-8
    atom.core.STRING_ENCODING = 'iso8859_7'
    atom_entry = atom.core.parse(self.test_xml, atom.data.Entry)
    self.assertEqual(atom_entry.title.type, u'\u03B1\u03BB\u03C6\u03B1')
    self.assertEqual(atom_entry.title.text, u'\u03B1\u03BB\u03C6\u03B1')
    # Test using unicode strings directly for object members
    atom_entry = atom.core.parse(self.test_xml, atom.data.Entry)
    self.assertEqual(atom_entry.title.type, u'\u03B1\u03BB\u03C6\u03B1')
    self.assertEqual(atom_entry.title.text, u'\u03B1\u03BB\u03C6\u03B1')
    # Make sure that we can use plain text when MEMBER_STRING_ENCODING is
    # unicode
    atom_entry.title.type = "plain text"
    atom_entry.title.text = "more text"
    xml = atom_entry.ToString()
    self.assertTrue("plain text" in xml)
    self.assertTrue("more text" in xml)

  def testConvertExampleXML(self):
    """A real-world UTF-8 entry parses without UnicodeDecodeError."""
    GBASE_STRING_ENCODING_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
        <entry xmlns='http://www.w3.org/2005/Atom'
            xmlns:gm='http://base.google.com/ns-metadata/1.0'
            xmlns:g='http://base.google.com/ns/1.0'
            xmlns:batch='http://schemas.google.com/gdata/batch'>
          <id>http://www.google.com/base/feeds/snippets/1749</id>
          <published>2007-12-09T03:13:07.000Z</published>
          <updated>2008-01-07T03:26:46.000Z</updated>
          <category scheme='http://base.google.com/categories/itemtypes'
              term='Products'/>
          <title type='text'>Digital Camera Cord Fits DSC-R1 S40</title>
          <content type='html'>SONY \xC2\xB7 Cybershot Digital Camera Usb
            Cable DESCRIPTION This is a 2.5 USB 2.0 A to Mini B (5 Pin)
            high quality digital camera cable used for connecting your
            Sony Digital Cameras and Camcoders. Backward
            Compatible with USB 2.0, 1.0 and 1.1. Fully ...</content>
          <link rel='alternate' type='text/html'
              href='http://adfarm.mediaplex.com/ad/ck/711-5256-8196-2mm'/>
          <link rel='self' type='application/atom+xml'
              href='http://www.google.com/base/feeds/snippets/1749'/>
          <author>
            <name>eBay</name>
          </author>
          <g:item_type type='text'>Products</g:item_type>
          <g:item_language type='text'>EN</g:item_language>
          <g:target_country type='text'>US</g:target_country>
          <g:price type='floatUnit'>0.99 usd</g:price>
          <g:image_link
              type='url'>http://www.example.com/pict/27_1.jpg</g:image_link>
          <g:category type='text'>Cameras &amp; Photo&gt;Digital Camera
            Accessories&gt;Cables</g:category>
          <g:category type='text'>Cords &amp; USB Cables</g:category>
          <g:customer_id type='int'>11729</g:customer_id>
          <g:id type='text'>270195049057</g:id>
          <g:expiration_date
              type='dateTime'>2008-02-06T03:26:46Z</g:expiration_date>
        </entry>"""
    try:
      atom.core.parse(GBASE_STRING_ENCODING_ENTRY,
                      atom.data.Entry)
    except UnicodeDecodeError:
      self.fail('Error when converting XML')
class VersionedXmlTest(unittest.TestCase):
  """Tests that Entry applies different parsing rules per API version."""

  def test_monoversioned_parent_with_multiversioned_child(self):
    v2_rules = atom.data.Entry._get_rules(2)
    self.assertTrue('{http://www.w3.org/2007/app}control' in v2_rules[1])
    entry_xml = """<entry xmlns='http://www.w3.org/2005/Atom'>
        <app:control xmlns:app='http://www.w3.org/2007/app'>
          <app:draft>yes</app:draft>
        </app:control>
        </entry>"""
    entry = atom.core.parse(entry_xml, atom.data.Entry, version=2)
    self.assertTrue(entry is not None)
    self.assertTrue(entry.control is not None)
    self.assertTrue(entry.control.draft is not None)
    self.assertEqual(entry.control.draft.text, 'yes')
    # v1 rules should not parse v2 XML.
    entry = atom.core.parse(entry_xml, atom.data.Entry, version=1)
    self.assertTrue(entry is not None)
    self.assertTrue(entry.control is None)
    # The default version should be v1.
    entry = atom.core.parse(entry_xml, atom.data.Entry)
    self.assertTrue(entry is not None)
    self.assertTrue(entry.control is None)
class DataModelSanityTest(unittest.TestCase):
  """Runs generic data-class sanity checks from the test configuration."""

  def test_xml_elements(self):
    classes_to_check = [
        atom.data.Feed, atom.data.Source, atom.data.Logo,
        atom.data.Control, atom.data.Draft, atom.data.Generator]
    conf.check_data_classes(self, classes_to_check)
def suite():
  """Builds a suite containing every TestCase defined in this module.

  Previously CategoryTest, ContributorTest, ControlTest, DraftTest and
  SourceTest were defined but missing from this list, so they never ran
  when the module was executed via suite(); they are now included.
  """
  return conf.build_suite([AuthorTest, EmailTest, NameTest,
                           ExtensionElementTest, LinkTest, GeneratorTest,
                           TitleTest, SubtitleTest, SummaryTest,
                           CategoryTest, ContributorTest, IdTest,
                           IconTest, LogoTest, RightsTest, UpdatedTest,
                           PublishedTest, FeedEntryParentTest, EntryTest,
                           ControlTest, DraftTest, SourceTest,
                           ContentEntryParentTest, PreserveUnkownElementTest,
                           FeedTest, LinkFinderTest, AtomBaseTest,
                           UtfParsingTest, VersionedXmlTest,
                           DataModelSanityTest])
# Run all tests in this module when executed directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.auth
import atom.http_core
class BasicAuthTest(unittest.TestCase):
  """Tests HTTP Basic Auth header generation (RFC 2617 example values)."""

  def test_modify_request(self):
    http_request = atom.http_core.HttpRequest()
    credentials = atom.auth.BasicAuth('Aladdin', 'open sesame')
    # Base64 of 'Aladdin:open sesame' -- the canonical RFC 2617 example.
    self.assertEqual(credentials.basic_cookie, 'QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
    credentials.modify_request(http_request)
    self.assertEqual(http_request.headers['Authorization'],
                     'Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
def suite():
  """Returns a suite of all tests in this module."""
  # unittest.makeSuite is deprecated; use a TestLoader instead.
  return unittest.TestSuite((
      unittest.TestLoader().loadTestsFromTestCase(BasicAuthTest),))
# Run all tests in this module when executed directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.http_core
import StringIO
class UriTest(unittest.TestCase):
  """Tests atom.http_core.Uri parsing, serialization and request updates."""

  def test_parse_uri(self):
    uri = atom.http_core.parse_uri('http://www.google.com/test?q=foo&z=bar')
    self.assertEqual(uri.scheme, 'http')
    self.assertEqual(uri.host, 'www.google.com')
    self.assertTrue(uri.port is None)
    self.assertEqual(uri.path, '/test')
    self.assertEqual(uri.query, {'z': 'bar', 'q': 'foo'})

  def test_static_parse_uri(self):
    uri = atom.http_core.Uri.parse_uri('http://test.com/?token=foo&x=1')
    self.assertEqual(uri.scheme, 'http')
    self.assertEqual(uri.host, 'test.com')
    self.assertTrue(uri.port is None)
    self.assertEqual(uri.query, {'token': 'foo', 'x': '1'})

  def test_modify_request_no_request(self):
    uri = atom.http_core.parse_uri('http://www.google.com/test?q=foo&z=bar')
    request = uri.modify_request()
    self.assertEqual(request.uri.scheme, 'http')
    self.assertEqual(request.uri.host, 'www.google.com')
    # If no port was provided, the HttpClient is responsible for determining
    # the default.
    self.assertTrue(request.uri.port is None)
    self.assertTrue(request.uri.path.startswith('/test'))
    self.assertEqual(request.uri.query, {'z': 'bar', 'q': 'foo'})
    self.assertTrue(request.method is None)
    self.assertEqual(request.headers, {})
    self.assertEqual(request._body_parts, [])

  def test_modify_request_http_with_set_port(self):
    request = atom.http_core.HttpRequest(uri=atom.http_core.Uri(port=8080),
                                         method='POST')
    request.add_body_part('hello', 'text/plain')
    uri = atom.http_core.parse_uri('//example.com/greet')
    self.assertEqual(uri.query, {})
    self.assertEqual(uri._get_relative_path(), '/greet')
    self.assertEqual(uri.host, 'example.com')
    self.assertTrue(uri.port is None)
    # snake_case name used for consistency with the rest of this module.
    uri.modify_request(request)
    self.assertEqual(request.uri.host, 'example.com')
    # If no scheme was provided, the URI will not add one, but the HttpClient
    # should assume the request is HTTP.
    self.assertTrue(request.uri.scheme is None)
    self.assertEqual(request.uri.port, 8080)
    self.assertEqual(request.uri.path, '/greet')
    self.assertEqual(request.method, 'POST')
    self.assertEqual(request.headers['Content-Type'], 'text/plain')

  def test_modify_request_use_default_ssl_port(self):
    request = atom.http_core.HttpRequest(
        uri=atom.http_core.Uri(scheme='https'), method='PUT')
    request.add_body_part('hello', 'text/plain')
    uri = atom.http_core.parse_uri('/greet')
    uri.modify_request(request)
    self.assertTrue(request.uri.host is None)
    self.assertEqual(request.uri.scheme, 'https')
    # If no port was provided, leave the port as None, it is up to the
    # HttpClient to set the correct default port.
    self.assertTrue(request.uri.port is None)
    self.assertEqual(request.uri.path, '/greet')
    self.assertEqual(request.method, 'PUT')
    self.assertEqual(request.headers['Content-Type'], 'text/plain')
    self.assertEqual(len(request._body_parts), 1)
    self.assertEqual(request._body_parts[0], 'hello')

  def test_to_string(self):
    uri = atom.http_core.Uri(host='www.google.com', query={'q': 'sippycode'})
    self.assertEqual(uri._to_string(), 'http://www.google.com/?q=sippycode')
class HttpRequestTest(unittest.TestCase):
  """Tests body-part handling and copying of HttpRequest objects."""

  def test_request_with_one_body_part(self):
    request = atom.http_core.HttpRequest()
    self.assertEqual(len(request._body_parts), 0)
    self.assertTrue('Content-Type' not in request.headers)
    self.assertTrue('Content-Length' not in request.headers)
    request.add_body_part('this is a test', 'text/plain')
    self.assertEqual(len(request._body_parts), 1)
    self.assertEqual(request.headers['Content-Type'], 'text/plain')
    self.assertEqual(request._body_parts[0], 'this is a test')
    self.assertEqual(request.headers['Content-Length'],
                     str(len('this is a test')))

  def test_add_file_without_size(self):
    virtual_file = StringIO.StringIO('this is a test')
    request = atom.http_core.HttpRequest()
    # A file-like body with no declared size must raise UnknownSize.
    try:
      request.add_body_part(virtual_file, 'text/plain')
      self.fail('We should have gotten an UnknownSize error.')
    except atom.http_core.UnknownSize:
      pass
    request.add_body_part(virtual_file, 'text/plain', len('this is a test'))
    self.assertEqual(len(request._body_parts), 1)
    self.assertEqual(request.headers['Content-Type'], 'text/plain')
    self.assertEqual(request._body_parts[0].read(), 'this is a test')
    self.assertEqual(request.headers['Content-Length'],
                     str(len('this is a test')))

  def test_copy(self):
    request = atom.http_core.HttpRequest(
        uri=atom.http_core.Uri(scheme='https', host='www.google.com'),
        method='POST', headers={'test': '1', 'ok': 'yes'})
    request.add_body_part('body1', 'text/plain')
    request.add_body_part('<html>body2</html>', 'text/html')
    copied = request._copy()
    self.assertEqual(request.uri.scheme, copied.uri.scheme)
    self.assertEqual(request.uri.host, copied.uri.host)
    self.assertEqual(request.method, copied.method)
    self.assertEqual(request.uri.path, copied.uri.path)
    self.assertEqual(request.headers, copied.headers)
    self.assertEqual(request._body_parts, copied._body_parts)
    # Mutating the copy must not affect the original (deep-copy semantics).
    copied.headers['test'] = '2'
    copied._body_parts[1] = '<html>body3</html>'
    self.assertTrue(request.headers != copied.headers)
    self.assertTrue(request._body_parts != copied._body_parts)
def suite():
  """Build the suite of all Uri and HttpRequest tests in this module."""
  # TestLoader.loadTestsFromTestCase replaces unittest.makeSuite, which is
  # deprecated and removed in Python 3.13; the default 'test' prefix matches
  # the prefix previously passed to makeSuite.
  loader = unittest.TestLoader()
  return unittest.TestSuite((loader.loadTestsFromTestCase(UriTest),
                             loader.loadTestsFromTestCase(HttpRequestTest)))


if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.token_store
import atom.http_interface
import atom.service
import atom.url
class TokenStoreTest(unittest.TestCase):
  """Tests scope-based token lookup in atom.token_store.TokenStore.

  Modernized to use assertEqual/assertTrue instead of the deprecated
  assert_ alias (removed in Python 3.12).
  """

  def setUp(self):
    # One token valid for two different scopes.
    self.token = atom.service.BasicAuthToken('aaa1', scopes=[
        'http://example.com/', 'http://example.org'])
    self.tokens = atom.token_store.TokenStore()
    self.tokens.add_token(self.token)

  def testAddAndFindTokens(self):
    self.assertEqual(self.tokens.find_token('http://example.com/'),
                     self.token)
    self.assertEqual(self.tokens.find_token('http://example.org/'),
                     self.token)
    self.assertEqual(self.tokens.find_token('http://example.org/foo?ok=1'),
                     self.token)
    # URLs outside every registered scope fall back to a GenericToken.
    self.assertTrue(isinstance(self.tokens.find_token('http://example.net/'),
                               atom.http_interface.GenericToken))
    self.assertTrue(isinstance(self.tokens.find_token('example.com/'),
                               atom.http_interface.GenericToken))

  def testFindTokenUsingMultipleUrls(self):
    self.assertEqual(self.tokens.find_token('http://example.com/'),
                     self.token)
    self.assertEqual(self.tokens.find_token('http://example.org/bar'),
                     self.token)
    self.assertTrue(isinstance(self.tokens.find_token(''),
                               atom.http_interface.GenericToken))
    self.assertTrue(isinstance(self.tokens.find_token('http://example.net/'),
                               atom.http_interface.GenericToken))

  def testFindTokenWithPartialScopes(self):
    # Scopes may restrict by path; the default https port must also match.
    token = atom.service.BasicAuthToken('aaa1',
        scopes=[atom.url.Url(host='www.example.com', path='/foo'),
                atom.url.Url(host='www.example.net')])
    token_store = atom.token_store.TokenStore()
    token_store.add_token(token)
    self.assertEqual(token_store.find_token('http://www.example.com/foobar'),
                     token)
    self.assertEqual(
        token_store.find_token('https://www.example.com:443/foobar'), token)
    self.assertEqual(token_store.find_token('http://www.example.net/xyz'),
                     token)
    self.assertNotEqual(token_store.find_token('http://www.example.org/'),
                        token)
    self.assertTrue(isinstance(token_store.find_token('http://example.org/'),
                               atom.http_interface.GenericToken))
def suite():
  """Build the TokenStore test suite for this module."""
  # loadTestsFromTestCase replaces the deprecated unittest.makeSuite
  # (removed in Python 3.13).
  return unittest.TestSuite(
      (unittest.TestLoader().loadTestsFromTestCase(TokenStoreTest),))


if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# This test may make an actual HTTP request.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.http_core
import atom.auth
import atom.client
import atom.mock_http_core
class AtomPubClientEchoTest(unittest.TestCase):
  """Tests AtomPubClient request construction against an echoing mock client.

  EchoHttpClient reflects the outgoing request back in Echo-* response
  headers, so each assertion inspects what would have been sent on the wire.
  Modernized to use assertEqual/assertTrue instead of the deprecated
  assert_ alias (removed in Python 3.12).
  """

  def test_simple_request_with_no_client_defaults(self):
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
    self.assertTrue(client.host is None)
    self.assertTrue(client.auth_token is None)
    # Make several equivalent requests; all must echo identically.
    responses = [client.request('GET', 'http://example.org/'),
                 client.request(http_request=atom.http_core.HttpRequest(
                     uri=atom.http_core.Uri('http', 'example.org', path='/'),
                     method='GET')),
                 client.request('GET',
                     http_request=atom.http_core.HttpRequest(
                         uri=atom.http_core.Uri('http', 'example.org',
                                                path='/')))]
    for response in responses:
      self.assertEqual(response.getheader('Echo-Host'), 'example.org:None')
      self.assertEqual(response.getheader('Echo-Uri'), '/')
      self.assertEqual(response.getheader('Echo-Scheme'), 'http')
      self.assertEqual(response.getheader('Echo-Method'), 'GET')
      self.assertTrue(
          response.getheader('User-Agent').startswith('gdata-py/'))

  def test_auth_request_with_no_client_defaults(self):
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
    token = atom.auth.BasicAuth('Jeff', '123')
    response = client.request('POST', 'https://example.net:8080/',
                              auth_token=token)
    self.assertEqual(response.getheader('Echo-Host'), 'example.net:8080')
    self.assertEqual(response.getheader('Echo-Uri'), '/')
    self.assertEqual(response.getheader('Echo-Scheme'), 'https')
    # 'SmVmZjoxMjM=' is base64('Jeff:123').
    self.assertEqual(response.getheader('Authorization'),
                     'Basic SmVmZjoxMjM=')
    self.assertEqual(response.getheader('Echo-Method'), 'POST')

  def test_request_with_client_defaults(self):
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient(),
        'example.com', atom.auth.BasicAuth('Jeff', '123'))
    self.assertEqual(client.host, 'example.com')
    self.assertTrue(client.auth_token is not None)
    self.assertEqual(client.auth_token.basic_cookie, 'SmVmZjoxMjM=')
    # An absolute URI overrides the client's default host.
    response = client.request('GET', 'http://example.org/')
    self.assertEqual(response.getheader('Echo-Host'), 'example.org:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/')
    self.assertEqual(response.getheader('Echo-Scheme'), 'http')
    self.assertEqual(response.getheader('Echo-Method'), 'GET')
    self.assertEqual(response.getheader('Authorization'),
                     'Basic SmVmZjoxMjM=')
    # A relative URI falls back to the client's default host.
    response = client.request('GET', '/')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/')
    self.assertEqual(response.getheader('Echo-Scheme'), 'http')
    self.assertEqual(response.getheader('Authorization'),
                     'Basic SmVmZjoxMjM=')
    # An explicit port on the http_request is preserved.
    response = client.request('GET', '/',
        http_request=atom.http_core.HttpRequest(
            uri=atom.http_core.Uri(port=99)))
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:99')
    self.assertEqual(response.getheader('Echo-Uri'), '/')

  def test_get(self):
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
    response = client.get('http://example.com/simple')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/simple')
    self.assertEqual(response.getheader('Echo-Method'), 'GET')
    # Get is the CamelCase alias of get.
    response = client.Get(uri='http://example.com/simple2')
    self.assertEqual(response.getheader('Echo-Uri'), '/simple2')
    self.assertEqual(response.getheader('Echo-Method'), 'GET')

  def test_modify_request_using_args(self):
    """Extra kwargs with a modify_request method are applied to the request."""
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())

    class RequestModifier(object):
      def modify_request(self, http_request):
        http_request.headers['Special'] = 'Set'

    response = client.get('http://example.com/modified',
                          extra=RequestModifier())
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/modified')
    self.assertEqual(response.getheader('Echo-Method'), 'GET')
    self.assertEqual(response.getheader('Special'), 'Set')

  def test_post(self):
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())

    class TestData(object):
      def modify_request(self, http_request):
        http_request.add_body_part('test body', 'text/testdata')

    response = client.Post(uri='http://example.com/', data=TestData())
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/')
    self.assertEqual(response.getheader('Echo-Method'), 'POST')
    self.assertEqual(response.getheader('Content-Length'),
                     str(len('test body')))
    self.assertEqual(response.getheader('Content-Type'), 'text/testdata')
    self.assertEqual(response.read(2), 'te')
    self.assertEqual(response.read(), 'st body')
    response = client.post(data=TestData(), uri='http://example.com/')
    self.assertEqual(response.read(), 'test body')
    self.assertEqual(response.getheader('Content-Type'), 'text/testdata')
    # Don't pass in a body, but use an extra kwarg to add the body to the
    # http_request.
    response = client.post(x=TestData(), uri='http://example.com/')
    self.assertEqual(response.read(), 'test body')

  def test_put(self):
    body_text = '<put>test</put>'
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())

    class TestData(object):
      def modify_request(self, http_request):
        http_request.add_body_part(body_text, 'application/xml')

    response = client.put('http://example.org', TestData())
    self.assertEqual(response.getheader('Echo-Host'), 'example.org:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/')
    self.assertEqual(response.getheader('Echo-Method'), 'PUT')
    self.assertEqual(response.getheader('Content-Length'),
                     str(len(body_text)))
    self.assertEqual(response.getheader('Content-Type'), 'application/xml')
    response = client.put(uri='http://example.org', data=TestData())
    self.assertEqual(response.getheader('Content-Length'),
                     str(len(body_text)))
    self.assertEqual(response.getheader('Content-Type'), 'application/xml')

  def test_delete(self):
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient(),
                                       source='my new app')
    response = client.Delete('http://example.com/simple')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/simple')
    self.assertEqual(response.getheader('Echo-Method'), 'DELETE')
    response = client.delete(uri='http://example.com/d')
    self.assertEqual(response.getheader('Echo-Uri'), '/d')
    self.assertEqual(response.getheader('Echo-Method'), 'DELETE')
    # The source name is prepended to the default user agent string.
    self.assertTrue(
        response.getheader('User-Agent').startswith('my new app gdata-py/'))
def suite():
  """Build the AtomPubClient echo test suite."""
  # loadTestsFromTestCase replaces the deprecated unittest.makeSuite
  # (removed in Python 3.13).
  return unittest.TestSuite(
      (unittest.TestLoader().loadTestsFromTestCase(AtomPubClientEchoTest),))


if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import os
import unittest
import atom.service
import atom.mock_http_core
import gdata.test_config as conf
class AtomServiceUnitTest(unittest.TestCase):
  """Tests URI building, URL parsing and auth setup in atom.service.

  Modernized to use assertEqual/assertTrue/assertRaises instead of the
  deprecated assert_/assertEquals aliases (removed in Python 3.12); the
  https_proxy test now restores the environment when it finishes.
  """

  def testBuildUriWithNoParams(self):
    x = atom.service.BuildUri('/base/feeds/snippets')
    self.assertEqual(x, '/base/feeds/snippets')

  def testBuildUriWithParams(self):
    # Add parameters to a URI.
    x = atom.service.BuildUri('/base/feeds/snippets',
                              url_params={'foo': 'bar',
                                          'bq': 'digital camera'})
    self.assertEqual(x, '/base/feeds/snippets?foo=bar&bq=digital+camera')
    self.assertTrue(x.startswith('/base/feeds/snippets'))
    self.assertEqual(x.count('?'), 1)
    self.assertEqual(x.count('&'), 1)
    self.assertTrue(x.index('?') < x.index('&'))
    self.assertTrue('bq=digital+camera' in x)
    # Add parameters to a URI that already has parameters.
    x = atom.service.BuildUri('/base/feeds/snippets?bq=digital+camera',
                              url_params={'foo': 'bar', 'max-results': '250'})
    self.assertTrue(x.startswith('/base/feeds/snippets?bq=digital+camera'))
    self.assertEqual(x.count('?'), 1)
    self.assertEqual(x.count('&'), 2)
    self.assertTrue(x.index('?') < x.index('&'))
    self.assertTrue('max-results=250' in x)
    self.assertTrue('foo=bar' in x)

  def testBuildUriWithoutParameterEscaping(self):
    x = atom.service.BuildUri('/base/feeds/snippets',
        url_params={'foo': ' bar', 'bq': 'digital camera'},
        escape_params=False)
    self.assertTrue('foo= bar' in x)
    self.assertTrue('bq=digital camera' in x)

  def testParseHttpUrl(self):
    atom_service = atom.service.AtomService('code.google.com')
    self.assertEqual(atom_service.server, 'code.google.com')
    (host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
        'http://www.google.com/service/subservice?name=value')
    self.assertEqual(ssl, False)
    self.assertEqual(host, 'www.google.com')
    # http URLs default to port 80.
    self.assertEqual(port, 80)
    self.assertEqual(path, '/service/subservice?name=value')

  def testParseHttpUrlWithPort(self):
    atom_service = atom.service.AtomService('code.google.com')
    self.assertEqual(atom_service.server, 'code.google.com')
    (host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
        'http://www.google.com:12/service/subservice?name=value&newname=newvalue')
    self.assertEqual(ssl, False)
    self.assertEqual(host, 'www.google.com')
    self.assertEqual(port, 12)
    self.assertTrue(path.startswith('/service/subservice?'))
    self.assertTrue(path.find('name=value') >= len('/service/subservice?'))
    self.assertTrue(path.find('newname=newvalue') >= len('/service/subservice?'))

  def testParseHttpsUrl(self):
    atom_service = atom.service.AtomService('code.google.com')
    self.assertEqual(atom_service.server, 'code.google.com')
    (host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
        'https://www.google.com/service/subservice?name=value&newname=newvalue')
    self.assertEqual(ssl, True)
    self.assertEqual(host, 'www.google.com')
    # https URLs default to port 443.
    self.assertEqual(port, 443)
    self.assertTrue(path.startswith('/service/subservice?'))
    self.assertTrue(path.find('name=value') >= len('/service/subservice?'))
    self.assertTrue(path.find('newname=newvalue') >= len('/service/subservice?'))

  def testParseHttpsUrlWithPort(self):
    atom_service = atom.service.AtomService('code.google.com')
    self.assertEqual(atom_service.server, 'code.google.com')
    (host, port, ssl, path) = atom.service.ProcessUrl(atom_service,
        'https://www.google.com:13981/service/subservice?name=value&newname=newvalue')
    self.assertEqual(ssl, True)
    self.assertEqual(host, 'www.google.com')
    self.assertEqual(port, 13981)
    self.assertTrue(path.startswith('/service/subservice?'))
    self.assertTrue(path.find('name=value') >= len('/service/subservice?'))
    self.assertTrue(path.find('newname=newvalue') >= len('/service/subservice?'))

  def testSetBasicAuth(self):
    client = atom.service.AtomService()
    client.UseBasicAuth('foo', 'bar')
    token = client.token_store.find_token('http://')
    self.assertTrue(isinstance(token, atom.service.BasicAuthToken))
    # 'Zm9vOmJhcg==' is base64('foo:bar').
    self.assertEqual(token.auth_header, 'Basic Zm9vOmJhcg==')
    client.UseBasicAuth('','')
    token = client.token_store.find_token('http://')
    self.assertTrue(isinstance(token, atom.service.BasicAuthToken))
    # 'Og==' is base64(':') -- empty username and password.
    self.assertEqual(token.auth_header, 'Basic Og==')

  def testProcessUrlWithStringForService(self):
    (server, port, ssl, uri) = atom.service.ProcessUrl(
        service='www.google.com', url='/base/feeds/items')
    self.assertEqual(server, 'www.google.com')
    self.assertEqual(port, 80)
    self.assertEqual(ssl, False)
    self.assertTrue(uri.startswith('/base/feeds/items'))
    client = atom.service.AtomService()
    client.server = 'www.google.com'
    client.ssl = True
    (server, port, ssl, uri) = atom.service.ProcessUrl(
        service=client, url='/base/feeds/items')
    self.assertEqual(server, 'www.google.com')
    self.assertEqual(ssl, True)
    self.assertTrue(uri.startswith('/base/feeds/items'))
    (server, port, ssl, uri) = atom.service.ProcessUrl(service=None,
        url='https://www.google.com/base/feeds/items')
    self.assertEqual(server, 'www.google.com')
    self.assertEqual(port, 443)
    self.assertEqual(ssl, True)
    self.assertTrue(uri.startswith('/base/feeds/items'))

  def testHostHeaderContainsNonDefaultPort(self):
    client = atom.service.AtomService()
    # Swap in an echoing v2 client so no real connection is made.
    client.http_client.v2_http_client = atom.mock_http_core.EchoHttpClient()
    response = client.Get('http://example.com')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    response = client.Get('https://example.com')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    # Non-default ports must be echoed back as part of the host.
    response = client.Get('https://example.com:8080')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:8080')
    response = client.Get('http://example.com:1234')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:1234')

  def testBadHttpsProxyRaisesRealException(self):
    """Test that real exceptions are raised when there is an error connecting
    to a host with an https proxy.
    """
    client = atom.service.AtomService(server='example.com')
    client.server = 'example.com'
    # Save and restore any pre-existing proxy setting so this test does not
    # leak https_proxy into later tests (the original left it set).
    original_proxy = os.environ.get('https_proxy')
    os.environ['https_proxy'] = 'http://example.com'
    try:
      # NOTE(review): atom.http is only reachable here transitively through
      # atom.service's own imports -- confirm, or import it explicitly.
      self.assertRaises(atom.http.ProxyError,
                        atom.service.PrepareConnection, client,
                        'https://example.com')
    finally:
      if original_proxy is None:
        del os.environ['https_proxy']
      else:
        os.environ['https_proxy'] = original_proxy
def suite():
  # Assemble the suite via the shared gdata.test_config helper, which
  # handles loading all test methods from the listed TestCase classes.
  return conf.build_suite([AtomServiceUnitTest])

# Run all tests in this module when executed as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import unittest
import gdata.service
import atom.mock_service
# Route all gdata.service HTTP traffic through the mock recording handler so
# the tests below can record and replay requests instead of hitting the wire.
gdata.service.http_request_handler = atom.mock_service
class MockRequestTest(unittest.TestCase):
  """Tests request matching in atom.mock_service.MockRequest.

  Modernized to use assertEqual instead of the deprecated assertEquals
  alias (removed in Python 3.12); matching semantics are unchanged.
  """

  def setUp(self):
    # The recorded "thumbprint" request other requests are matched against.
    self.request_thumbprint = atom.mock_service.MockRequest('GET',
        'http://www.google.com',
        extra_headers={'Header1':'a', 'Header2':'b'})

  def testIsMatch(self):
    matching_request = atom.mock_service.MockRequest('GET',
        'http://www.google.com', extra_headers={'Header1':'a',
        'Header2':'b', 'Header3':'c'})
    bad_url = atom.mock_service.MockRequest('GET', 'http://example.com',
        extra_headers={'Header1':'a', 'Header2':'b', 'Header3':'c'})
    # Should match because we don't check headers at the moment.
    bad_header = atom.mock_service.MockRequest('GET',
        'http://www.google.com', extra_headers={'Header1':'a',
        'Header2':'1', 'Header3':'c'})
    bad_verb = atom.mock_service.MockRequest('POST', 'http://www.google.com',
        data='post data', extra_headers={'Header1':'a', 'Header2':'b'})
    self.assertEqual(self.request_thumbprint.IsMatch(matching_request), True)
    self.assertEqual(self.request_thumbprint.IsMatch(bad_url), False)
    self.assertEqual(self.request_thumbprint.IsMatch(bad_header), True)
    self.assertEqual(self.request_thumbprint.IsMatch(bad_verb), False)
class HttpRequestTest(unittest.TestCase):
  """Tests replaying canned responses through the mock request handler.

  Modernized to use assertEqual instead of the deprecated assertEquals
  alias (removed in Python 3.12).
  """

  def setUp(self):
    # Start each test with an empty recording list.
    atom.mock_service.recordings = []
    self.client = gdata.service.GDataService()

  def testSimpleRecordedGet(self):
    recorded_request = atom.mock_service.MockRequest('GET',
                                                     'http://example.com/')
    recorded_response = atom.mock_service.MockHttpResponse('Got it', 200,
                                                           'OK')
    # Add a tuple mapping the mock request to the mock response.
    atom.mock_service.recordings.append((recorded_request, recorded_response))
    # Try a couple of GET requests which should match the recorded request.
    response = self.client.Get('http://example.com/', converter=str)
    self.assertEqual(response, 'Got it')
    self.client.server = 'example.com'
    raw_response = self.client.handler.HttpRequest(self.client, 'GET', None,
                                                   '/')
    self.assertEqual(raw_response.read(), 'Got it')
    self.assertEqual(raw_response.status, 200)
    self.assertEqual(raw_response.reason, 'OK')
class RecordRealHttpRequestsTest(unittest.TestCase):
  """Records a live HTTP response, then replays it from the recording.

  NOTE(review): this test performs a real network request to
  www.google.com and will fail without network access.
  Modernized to use assertEqual instead of the deprecated assertEquals
  alias (removed in Python 3.12).
  """

  def testRecordAndReuseResponse(self):
    client = gdata.service.GDataService()
    client.server = 'www.google.com'
    atom.mock_service.recordings = []
    # NOTE(review): atom.service is not imported by this module directly;
    # it is presumably reached transitively via atom.mock_service -- verify.
    atom.mock_service.real_request_handler = atom.service
    # Record a response from the live server.
    real_response = atom.mock_service.HttpRequest(client, 'GET', None,
                                                  'http://www.google.com/')
    # Enter 'replay' mode.
    atom.mock_service.real_request_handler = None
    mock_response = atom.mock_service.HttpRequest(client, 'GET', None,
                                                  'http://www.google.com/')
    self.assertEqual(real_response.reason, mock_response.reason)
    self.assertEqual(real_response.status, mock_response.status)
    self.assertEqual(real_response.read(), mock_response.read())
# Run all tests in this module when executed as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.mock_http
import atom.http
class MockHttpClientUnitTest(unittest.TestCase):
  """Tests record/replay behavior of atom.mock_http.MockHttpClient.

  Modernized to use assertEqual/assertRaises instead of the deprecated
  assertEquals alias and the manual try/fail/except pattern.
  """

  def setUp(self):
    self.client = atom.mock_http.MockHttpClient()

  def testRepondToGet(self):
    # NOTE(review): atom.http_interface is not imported by this module
    # directly; it is presumably reached transitively via atom.mock_http --
    # consider importing it explicitly.
    mock_response = atom.http_interface.HttpResponse(body='Hooray!',
        status=200, reason='OK')
    self.client.add_response(mock_response, 'GET',
                             'http://example.com/hooray')
    response = self.client.request('GET', 'http://example.com/hooray')
    self.assertEqual(len(self.client.recordings), 1)
    self.assertEqual(response.status, 200)
    self.assertEqual(response.read(), 'Hooray!')

  def testRecordResponse(self):
    """Live pass-through responses are stored and later replayed verbatim.

    NOTE(review): performs a real network request to www.google.com.
    """
    # Turn on pass-through record mode.
    self.client.real_client = atom.http.ProxiedHttpClient()
    live_response = self.client.request('GET',
        'http://www.google.com/base/feeds/snippets?max-results=1')
    live_response_body = live_response.read()
    self.assertEqual(live_response.status, 200)
    self.assertEqual(live_response_body.startswith('<?xml'), True)
    # Requery for the now canned data.
    self.client.real_client = None
    canned_response = self.client.request('GET',
        'http://www.google.com/base/feeds/snippets?max-results=1')
    # The canned response should be the stored response.
    canned_response_body = canned_response.read()
    self.assertEqual(canned_response.status, 200)
    self.assertEqual(canned_response_body, live_response_body)

  def testUnrecordedRequest(self):
    # A request with no matching recording must raise NoRecordingFound.
    self.assertRaises(atom.mock_http.NoRecordingFound, self.client.request,
                      'POST', 'http://example.org')
def suite():
  """Build the MockHttpClient test suite."""
  # loadTestsFromTestCase replaces the deprecated unittest.makeSuite
  # (removed in Python 3.13).
  return unittest.TestSuite(
      (unittest.TestLoader().loadTestsFromTestCase(MockHttpClientUnitTest),))


if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/env python
#
# Perforce Defect Tracking Integration Project
# <http://www.ravenbrook.com/project/p4dti/>
#
# COVERAGE.PY -- COVERAGE TESTING
#
# Gareth Rees, Ravenbrook Limited, 2001-12-04
#
#
# 1. INTRODUCTION
#
# This module provides coverage testing for Python code.
#
# The intended readership is all Python developers.
#
# This document is not confidential.
#
# See [GDR 2001-12-04a] for the command-line interface, programmatic
# interface and limitations. See [GDR 2001-12-04b] for requirements and
# design.
"""Usage:
coverage.py -x MODULE.py [ARG1 ARG2 ...]
Execute module, passing the given command-line arguments, collecting
coverage data.
coverage.py -e
Erase collected coverage data.
coverage.py -r [-m] FILE1 FILE2 ...
Report on the statement coverage for the given files. With the -m
option, show line numbers of the statements that weren't executed.
coverage.py -a [-d dir] FILE1 FILE2 ...
Make annotated copies of the given files, marking statements that
are executed with > and statements that are missed with !. With
the -d option, make the copies in that directory. Without the -d
option, make each copy in the same directory as the original.
Coverage data is saved in the file .coverage by default. Set the
COVERAGE_FILE environment variable to save it somewhere else."""
import os
import re
import string
import sys
import types
# 2. IMPLEMENTATION
#
# This uses the "singleton" pattern.
#
# The word "morf" means a module object (from which the source file can
# be deduced by suitable manipulation of the __file__ attribute) or a
# filename.
#
# When we generate a coverage report we have to canonicalize every
# filename in the coverage dictionary just in case it refers to the
# module we are reporting on. It seems a shame to throw away this
# information so the data in the coverage dictionary is transferred to
# the 'cexecuted' dictionary under the canonical filenames.
#
# The coverage dictionary is called "c" and the trace function "t". The
# reason for these short names is that Python looks up variables by name
# at runtime and so execution time depends on the length of variables!
# In the bottleneck of this application it's appropriate to abbreviate
# names to increase speed.
# A dictionary with an entry for (Python source file name, line number
# in that file) if that line has been executed.
c = {}

# t(f, x, y).  This method is passed to sys.settrace as a trace
# function.  See [van Rossum 2001-07-20b, 9.2] for an explanation of
# sys.settrace and the arguments and return value of the trace function.
# See [van Rossum 2001-07-20a, 3.2] for a description of frame and code
# objects.
def t(f, x, y):
    # Record the (filename, line number) of the frame's current line.  The
    # deliberately short names keep per-line tracing overhead low (see the
    # implementation note earlier in this file).
    c[(f.f_code.co_filename, f.f_lineno)] = 1
    # Returning the trace function keeps line tracing active in this scope.
    return t

# Module-level singleton slot for the coverage instance (the class below
# enforces that only one is ever constructed).
the_coverage = None
class coverage:
error = "coverage error"
# Name of the cache file (unless environment variable is set).
cache_default = ".coverage"
# Environment variable naming the cache file.
cache_env = "COVERAGE_FILE"
# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed.
cexecuted = {}
# Cache of results of calling the analysis() method, so that you can
# specify both -r and -a without doing double work.
analysis_cache = {}
# Cache of results of calling the canonical_filename() method, to
# avoid duplicating work.
canonical_filename_cache = {}
    def __init__(self):
        # Enforce the singleton pattern: constructing a second coverage
        # object is an error.
        global the_coverage
        if the_coverage:
            raise self.error, "Only one coverage object allowed."
        # Cache file location: $COVERAGE_FILE if set, else ".coverage".
        self.cache = os.environ.get(self.cache_env, self.cache_default)
        # Load any previously saved coverage data from the cache file.
        self.restore()
        self.analysis_cache = {}
    def help(self, error=None):
        # Print an optional error message followed by the usage text (this
        # module's docstring), then exit with a failure status.
        if error:
            print error
            print
        print __doc__
        sys.exit(1)
    def command_line(self):
        """Parse sys.argv and perform the requested actions (-e/-x/-r/-a)."""
        import getopt
        settings = {}
        # Map each short option to its long-option name; a trailing ':'
        # (short) or '=' (long) marks options that take an argument.
        optmap = {
            '-a': 'annotate',
            '-d:': 'directory=',
            '-e': 'erase',
            '-h': 'help',
            '-i': 'ignore-errors',
            '-m': 'show-missing',
            '-r': 'report',
            '-x': 'execute',
            }
        # Derive the getopt specs from the option map.
        short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '')
        long_opts = optmap.values()
        options, args = getopt.getopt(sys.argv[1:], short_opts,
                                      long_opts)
        # Record each parsed option in settings, normalizing short options
        # to their long names.
        for o, a in options:
            if optmap.has_key(o):
                settings[optmap[o]] = 1
            elif optmap.has_key(o + ':'):
                settings[optmap[o + ':']] = a
            elif o[2:] in long_opts:
                settings[o[2:]] = 1
            elif o[2:] + '=' in long_opts:
                settings[o[2:]] = a
            else:
                self.help("Unknown option: '%s'." % o)
        if settings.get('help'):
            self.help()
        # -e/-x are incompatible with -a/-r: you either collect or report.
        for i in ['erase', 'execute']:
            for j in ['annotate', 'report']:
                if settings.get(i) and settings.get(j):
                    self.help("You can't specify the '%s' and '%s' "
                              "options at the same time." % (i, j))
        args_needed = (settings.get('execute')
                       or settings.get('annotate')
                       or settings.get('report'))
        action = settings.get('erase') or args_needed
        if not action:
            self.help("You must specify at least one of -e, -x, -r, "
                      "or -a.")
        if not args_needed and args:
            self.help("Unexpected arguments %s." % args)
        if settings.get('erase'):
            self.erase()
        if settings.get('execute'):
            if not args:
                self.help("Nothing to do.")
            # Run the target script under tracing, with argv rewritten so
            # the script sees only its own arguments.
            sys.argv = args
            self.start()
            import __main__
            # When Python starts a script, sys.path[0] is the directory
            # in which the Python script was found.  So when we run a
            # script, change sys.path so that it matches what the script
            # would have found if it had been run normally.
            sys.path[0] = os.path.dirname(sys.argv[0])
            execfile(sys.argv[0], __main__.__dict__)
        # With no file arguments, report/annotate every file that has
        # recorded coverage data.
        if not args:
            args = self.cexecuted.keys()
        ignore_errors = settings.get('ignore-errors')
        show_missing = settings.get('show-missing')
        directory = settings.get('directory=')
        if settings.get('report'):
            self.report(args, show_missing, ignore_errors)
        if settings.get('annotate'):
            self.annotate(args, directory, ignore_errors)
    def start(self):
        # Install the trace function; from now on every executed line is
        # recorded in the module-level dictionary 'c'.
        sys.settrace(t)
    def stop(self):
        # Remove the trace function, ending coverage collection.
        sys.settrace(None)
    def erase(self):
        # Discard all collected data: the raw trace dictionary, both
        # in-memory caches, and the on-disk cache file (if present).
        global c
        c = {}
        self.analysis_cache = {}
        self.cexecuted = {}
        if os.path.exists(self.cache):
            os.remove(self.cache)
    # save().  Save coverage data to the coverage cache.
    def save(self):
        # Fold the raw trace data into self.cexecuted first, then marshal
        # the whole dictionary to the cache file.
        self.canonicalize_filenames()
        cache = open(self.cache, 'wb')
        import marshal
        marshal.dump(self.cexecuted, cache)
        cache.close()
    # restore().  Restore coverage data from the coverage cache (if it
    # exists).
    def restore(self):
        global c
        c = {}
        self.cexecuted = {}
        if not os.path.exists(self.cache):
            return
        try:
            cache = open(self.cache, 'rb')
            import marshal
            cexecuted = marshal.load(cache)
            cache.close()
            # Only accept the unmarshalled value if it is a dictionary;
            # anything else means the cache file is corrupt or stale.
            if isinstance(cexecuted, types.DictType):
                self.cexecuted = cexecuted
        except:
            # A corrupt or unreadable cache is deliberately ignored;
            # collection simply starts from scratch.
            pass
    # canonical_filename(filename).  Return a canonical filename for the
    # file (that is, an absolute path with no redundant components and
    # normalized case).  See [GDR 2001-12-04b, 3.3].
    def canonical_filename(self, filename):
        if not self.canonical_filename_cache.has_key(filename):
            f = filename
            # An absolute path that no longer exists is searched for by its
            # bare name instead (e.g. code compiled on another machine).
            if os.path.isabs(f) and not os.path.exists(f):
                f = os.path.basename(f)
            if not os.path.isabs(f):
                # Look for the file in the current directory and sys.path.
                for path in [os.curdir] + sys.path:
                    g = os.path.join(path, f)
                    if os.path.exists(g):
                        f = g
                        break
            cf = os.path.normcase(os.path.abspath(f))
            self.canonical_filename_cache[filename] = cf
        return self.canonical_filename_cache[filename]
    # canonicalize_filenames().  Copy results from "executed" to
    # "cexecuted", canonicalizing filenames on the way.  Clear the
    # "executed" map.
    def canonicalize_filenames(self):
        global c
        for filename, lineno in c.keys():
            f = self.canonical_filename(filename)
            if not self.cexecuted.has_key(f):
                self.cexecuted[f] = {}
            self.cexecuted[f][lineno] = 1
        # Reset the raw trace dictionary now that its data has been folded
        # into self.cexecuted.
        c = {}
    # morf_filename(morf).  Return the filename for a module or file.
    def morf_filename(self, morf):
        if isinstance(morf, types.ModuleType):
            # Built-in and C-extension modules have no __file__ attribute.
            if not hasattr(morf, '__file__'):
                raise self.error, "Module has no __file__ attribute."
            file = morf.__file__
        else:
            file = morf
        return self.canonical_filename(file)
    # analyze_morf(morf).  Analyze the module or filename passed as
    # the argument.  If the source code can't be found, raise an error.
    # Otherwise, return a pair of (1) the canonical filename of the
    # source code for the module, and (2) a list of lines of statements
    # in the source code.
    def analyze_morf(self, morf):
        if self.analysis_cache.has_key(morf):
            return self.analysis_cache[morf]
        filename = self.morf_filename(morf)
        ext = os.path.splitext(filename)[1]
        if ext == '.pyc':
            # Map a compiled file back to its .py source, which must exist.
            if not os.path.exists(filename[0:-1]):
                raise self.error, ("No source for compiled code '%s'."
                                   % filename)
            filename = filename[0:-1]
        elif ext != '.py':
            raise self.error, "File '%s' not Python source." % filename
        source = open(filename, 'r')
        import parser
        # Parse the source into a concrete syntax tree (totuple(1) includes
        # line numbers) and collect the starting line of every statement.
        tree = parser.suite(source.read()).totuple(1)
        source.close()
        statements = {}
        self.find_statements(tree, statements)
        lines = statements.keys()
        lines.sort()
        result = filename, lines
        self.analysis_cache[morf] = result
        return result
# find_statements(tree, dict). Find each statement in the parse
# tree and record the line on which the statement starts in the
# dictionary (by assigning it to 1).
#
# It works by walking the whole tree depth-first. Every time it
# comes across a statement (symbol.stmt -- this includes compound
# statements like 'if' and 'while') it calls find_statement, which
# descends the tree below the statement to find the first terminal
# token in that statement and record the lines on which that token
# was found.
#
# This algorithm may find some lines several times (because of the
# grammar production statement -> compound statement -> statement),
# but that doesn't matter because we record lines as the keys of the
# dictionary.
#
# See also [GDR 2001-12-04b, 3.2].
def find_statements(self, tree, dict):
    """Walk the parse tree depth-first, recording in *dict* (as keys set
    to 1) the line on which each statement starts.

    See the comment block above and [GDR 2001-12-04b, 3.2].
    """
    import symbol, token
    if token.ISNONTERMINAL(tree[0]):
        for t in tree[1:]:
            self.find_statements(t, dict)
        if tree[0] == symbol.stmt:
            self.find_statement(tree[1], dict)
    elif (tree[0] == token.NAME
          and tree[1] in ['elif', 'except', 'finally']):
        # These keywords introduce executable lines without being grammar
        # statements themselves, so record their lines explicitly.
        dict[tree[2]] = 1
def find_statement(self, tree, dict):
    """Record (as dict[line] = 1) the line of the first terminal token
    found by descending the leftmost edge of *tree*."""
    import token
    node = tree
    # Nonterminal nodes wrap their children; keep taking the first child
    # until we reach a terminal token, whose third element is its line.
    while token.ISNONTERMINAL(node[0]):
        node = node[1]
    dict[node[2]] = 1
# format_lines(statements, lines). Format a list of line numbers
# for printing by coalescing groups of lines as long as the lines
# represent consecutive statements. This will coalesce even if
# there are gaps between statements, so if statements =
# [1,2,3,4,5,10,11,12,13,14] and lines = [1,2,5,10,11,13,14] then
# format_lines will return "1-2, 5-11, 13-14".
def format_lines(self, statements, lines):
    """Format a list of line numbers for printing by coalescing groups of
    lines as long as the lines represent consecutive statements.

    Coalescing happens even across gaps between statements, so if
    statements = [1,2,3,4,5,10,11,12,13,14] and
    lines = [1,2,5,10,11,13,14] the result is "1-2, 5-11, 13-14".

    Both inputs must be sorted ascending; returns a string.
    """
    pairs = []
    i = 0
    j = 0
    start = None
    while i < len(statements) and j < len(lines):
        if statements[i] == lines[j]:
            # Statement is in `lines`: extend (or open) the current run.
            if start is None:          # fixed: was `== None`
                start = lines[j]
            end = lines[j]
            j = j + 1
        elif start:
            # Run broken by a statement not in `lines`: flush it.
            pairs.append((start, end))
            start = None
        i = i + 1
    if start:
        pairs.append((start, end))
    def stringify(pair):
        # Render a run as "N" or "N-M".
        start, end = pair
        if start == end:
            return "%d" % start
        return "%d-%d" % (start, end)
    # ", ".join replaces the removed-in-Py3 string.join; identical output.
    return ", ".join(map(stringify, pairs))
def analysis(self, morf):
    """Analyze coverage of one module or filename.

    Returns (canonical filename, statement lines, missing lines,
    human-readable string of missing lines).
    """
    filename, statements = self.analyze_morf(morf)
    # Fold any freshly traced data into cexecuted before comparing.
    self.canonicalize_filenames()
    if not self.cexecuted.has_key(filename):
        self.cexecuted[filename] = {}
    missing = []
    for line in statements:
        if not self.cexecuted[filename].has_key(line):
            missing.append(line)
    return (filename, statements, missing,
            self.format_lines(statements, missing))
def morf_name(self, morf):
    """Return a short display name for *morf*: a module's __name__, or a
    filename's basename with its extension stripped."""
    if isinstance(morf, types.ModuleType):
        return morf.__name__
    base = os.path.basename(morf)
    return os.path.splitext(base)[0]
def report(self, morfs, show_missing=1, ignore_errors=0):
    """Print a coverage summary table for the given modules/filenames.

    Args:
        morfs: a module/filename or a list of them.
        show_missing: when true, append a column of un-executed lines.
        ignore_errors: when true, suppress per-morf analysis failures.
    """
    if not isinstance(morfs, types.ListType):
        morfs = [morfs]
    # Name column width: at least 5 (room for "TOTAL"), sized to fit names.
    max_name = max([5,] + map(len, map(self.morf_name, morfs)))
    fmt_name = "%%- %ds " % max_name
    fmt_err = fmt_name + "%s: %s"
    header = fmt_name % "Name" + " Stmts Exec Cover"
    fmt_coverage = fmt_name + "% 6d % 6d % 5d%%"
    if show_missing:
        header = header + " Missing"
        fmt_coverage = fmt_coverage + " %s"
    print header
    print "-" * len(header)
    total_statements = 0
    total_executed = 0
    for morf in morfs:
        name = self.morf_name(morf)
        try:
            _, statements, missing, readable = self.analysis(morf)
            n = len(statements)
            m = n - len(missing)
            if n > 0:
                pc = 100.0 * m / n
            else:
                # No statements at all counts as fully covered.
                pc = 100.0
            args = (name, n, m, pc)
            if show_missing:
                args = args + (readable,)
            print fmt_coverage % args
            total_statements = total_statements + n
            total_executed = total_executed + m
        except KeyboardInterrupt:
            # Never swallow a user interrupt.
            raise
        except:
            if not ignore_errors:
                type, msg = sys.exc_info()[0:2]
                print fmt_err % (name, type, msg)
    if len(morfs) > 1:
        # Emit a TOTAL row only when more than one morf was reported.
        print "-" * len(header)
        if total_statements > 0:
            pc = 100.0 * total_executed / total_statements
        else:
            pc = 100.0
        args = ("TOTAL", total_statements, total_executed, pc)
        if show_missing:
            args = args + ("",)
        print fmt_coverage % args
# annotate(morfs, ignore_errors).
# Matches blank lines and pure comment lines.
blank_re = re.compile("\\s*(#|$)")
# Matches lines containing only 'else:' (plus optional trailing comment).
else_re = re.compile("\\s*else\\s*:\\s*(#|$)")
def annotate(self, morfs, directory=None, ignore_errors=0):
    """Write annotated copies of each morf's source as '<name>,cover'.

    Line prefixes: '> ' executed, '! ' missed, '  ' blank/comment.
    Files go next to the source, or into *directory* when given.
    """
    for morf in morfs:
        try:
            filename, statements, missing, _ = self.analysis(morf)
            source = open(filename, 'r')
            if directory:
                dest_file = os.path.join(directory,
                                         os.path.basename(filename)
                                         + ',cover')
            else:
                dest_file = filename + ',cover'
            dest = open(dest_file, 'w')
            lineno = 0
            i = 0          # index into statements
            j = 0          # index into missing
            covered = 1
            while 1:
                line = source.readline()
                if line == '':
                    break
                lineno = lineno + 1
                # Advance both cursors to the current source line.
                while i < len(statements) and statements[i] < lineno:
                    i = i + 1
                while j < len(missing) and missing[j] < lineno:
                    j = j + 1
                if i < len(statements) and statements[i] == lineno:
                    covered = j >= len(missing) or missing[j] > lineno
                if self.blank_re.match(line):
                    dest.write('  ')
                elif self.else_re.match(line):
                    # Special logic for lines containing only
                    # 'else:'. See [GDR 2001-12-04b, 3.2].
                    if i >= len(statements) and j >= len(missing):
                        dest.write('! ')
                    elif i >= len(statements) or j >= len(missing):
                        dest.write('> ')
                    elif statements[i] == missing[j]:
                        dest.write('! ')
                    else:
                        dest.write('> ')
                elif covered:
                    dest.write('> ')
                else:
                    dest.write('! ')
                dest.write(line)
            source.close()
            dest.close()
        except KeyboardInterrupt:
            raise
        except:
            if not ignore_errors:
                raise
# Singleton object.
the_coverage = coverage()
# Module functions call methods in the singleton object.  apply() is used
# (rather than f(*args, **kw) call syntax) to stay portable to pre-2.0
# Pythons; see the history notes below.
def start(*args, **kw): return apply(the_coverage.start, args, kw)
def stop(*args, **kw): return apply(the_coverage.stop, args, kw)
def erase(*args, **kw): return apply(the_coverage.erase, args, kw)
def analysis(*args, **kw): return apply(the_coverage.analysis, args, kw)
def report(*args, **kw): return apply(the_coverage.report, args, kw)
# Save coverage data when Python exits. (The atexit module wasn't
# introduced until Python 2.0, so use sys.exitfunc when it's not
# available.)
try:
    import atexit
    atexit.register(the_coverage.save)
except ImportError:
    sys.exitfunc = the_coverage.save
# Command-line interface.
if __name__ == '__main__':
    the_coverage.command_line()
# A. REFERENCES
#
# [GDR 2001-12-04a] "Statement coverage for Python"; Gareth Rees;
# Ravenbrook Limited; 2001-12-04;
# <http://garethrees.org/2001/12/04/python-coverage/>.
#
# [GDR 2001-12-04b] "Statement coverage for Python: design and
# analysis"; Gareth Rees; Ravenbrook Limited; 2001-12-04;
# <http://garethrees.org/2001/12/04/python-coverage/design.html>.
#
# [van Rossum 2001-07-20a] "Python Reference Manual (releae 2.1.1)";
# Guide van Rossum; 2001-07-20;
# <http://www.python.org/doc/2.1.1/ref/ref.html>.
#
# [van Rossum 2001-07-20b] "Python Library Reference"; Guido van Rossum;
# 2001-07-20; <http://www.python.org/doc/2.1.1/lib/lib.html>.
#
#
# B. DOCUMENT HISTORY
#
# 2001-12-04 GDR Created.
#
# 2001-12-06 GDR Added command-line interface and source code
# annotation.
#
# 2001-12-09 GDR Moved design and interface to separate documents.
#
# 2001-12-10 GDR Open cache file as binary on Windows. Allow
# simultaneous -e and -x, or -a and -r.
#
# 2001-12-12 GDR Added command-line help. Cache analysis so that it
# only needs to be done once when you specify -a and -r.
#
# 2001-12-13 GDR Improved speed while recording. Portable between
# Python 1.5.2 and 2.1.1.
#
# 2002-01-03 GDR Module-level functions work correctly.
#
# 2002-01-07 GDR Update sys.path when running a file with the -x option,
# so that it matches the value the program would get if it were run on
# its own.
#
#
# C. COPYRIGHT AND LICENCE
#
# Copyright 2001 Gareth Rees. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
#
#
# $Id: //info.ravenbrook.com/user/gdr/www.garethrees.org/2001/12/04/python-coverage/coverage.py#9 $
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata
import atom
from gdata import test_data
import gdata.test_config as conf
class StartIndexTest(unittest.TestCase):
    """Round-trips gdata.StartIndex through its string serialization."""
    def setUp(self):
        self.start_index = gdata.StartIndex()
    def testToAndFromString(self):
        self.start_index.text = '1'
        self.assert_(self.start_index.text == '1')
        new_start_index = gdata.StartIndexFromString(self.start_index.ToString())
        self.assert_(self.start_index.text == new_start_index.text)
class ItemsPerPageTest(unittest.TestCase):
    """Round-trips gdata.ItemsPerPage through its string serialization."""
    def setUp(self):
        self.items_per_page = gdata.ItemsPerPage()
    def testToAndFromString(self):
        self.items_per_page.text = '10'
        self.assert_(self.items_per_page.text == '10')
        new_items_per_page = gdata.ItemsPerPageFromString(
            self.items_per_page.ToString())
        self.assert_(self.items_per_page.text == new_items_per_page.text)
class GDataEntryTest(unittest.TestCase):
    """Checks GData-specific cleanup applied when parsing Atom XML."""
    def testIdShouldBeCleaned(self):
        # Presumably the raw XML id text differs (e.g. whitespace) and the
        # parser normalizes it -- the raw vs parsed values must not match.
        entry = gdata.GDataEntryFromString(test_data.XML_ENTRY_1)
        element_tree = ElementTree.fromstring(test_data.XML_ENTRY_1)
        self.assert_(element_tree.findall(
            '{http://www.w3.org/2005/Atom}id')[0].text != entry.id.text)
        self.assert_(entry.id.text == 'http://www.google.com/test/id/url')
    def testGeneratorShouldBeCleaned(self):
        # Same normalization check for the feed-level generator element.
        feed = gdata.GDataFeedFromString(test_data.GBASE_FEED)
        element_tree = ElementTree.fromstring(test_data.GBASE_FEED)
        self.assert_(element_tree.findall('{http://www.w3.org/2005/Atom}generator'
            )[0].text != feed.generator.text)
        self.assert_(feed.generator.text == 'GoogleBase')
    def testAllowsEmptyId(self):
        entry = gdata.GDataEntry()
        try:
            entry.id = atom.Id()
        except AttributeError:
            self.fail('Empty id should not raise an attribute error.')
class LinkFinderTest(unittest.TestCase):
    """Exercises the link lookup helpers on a parsed entry."""
    def setUp(self):
        self.entry = gdata.GDataEntryFromString(test_data.XML_ENTRY_1)
    def testLinkFinderGetsLicenseLink(self):
        self.assertEquals(isinstance(self.entry.GetLicenseLink(), atom.Link),
                          True)
        self.assertEquals(self.entry.GetLicenseLink().href,
                          'http://creativecommons.org/licenses/by-nc/2.5/rdf')
        self.assertEquals(self.entry.GetLicenseLink().rel, 'license')
    def testLinkFinderGetsAlternateLink(self):
        self.assertEquals(isinstance(self.entry.GetAlternateLink(), atom.Link),
                          True)
        self.assertEquals(self.entry.GetAlternateLink().href,
                          'http://www.provider-host.com/123456789')
        self.assertEquals(self.entry.GetAlternateLink().rel, 'alternate')
class GDataFeedTest(unittest.TestCase):
    """Feed-level parsing and serialization behavior."""
    def testCorrectConversionToElementTree(self):
        test_feed = gdata.GDataFeedFromString(test_data.GBASE_FEED)
        self.assert_(test_feed.total_results is not None)
        element_tree = test_feed._ToElementTree()
        # NOTE(review): `feed` is assigned but never used below.
        feed = element_tree.find('{http://www.w3.org/2005/Atom}feed')
        self.assert_(element_tree.find(
            '{http://a9.com/-/spec/opensearchrss/1.0/}totalResults') is not None)
    def testAllowsEmptyId(self):
        feed = gdata.GDataFeed()
        try:
            feed.id = atom.Id()
        except AttributeError:
            self.fail('Empty id should not raise an attribute error.')
class BatchEntryTest(unittest.TestCase):
    """Parses a batch entry and round-trips it through serialization."""
    def testCorrectConversionFromAndToString(self):
        batch_entry = gdata.BatchEntryFromString(test_data.BATCH_ENTRY)
        self.assertEquals(batch_entry.batch_id.text, 'itemB')
        self.assertEquals(batch_entry.id.text,
                          'http://www.google.com/base/feeds/items/'
                          '2173859253842813008')
        self.assertEquals(batch_entry.batch_operation.type, 'insert')
        self.assertEquals(batch_entry.batch_status.code, '201')
        self.assertEquals(batch_entry.batch_status.reason, 'Created')
        # Serialize and re-parse; every batch field must survive.
        new_entry = gdata.BatchEntryFromString(str(batch_entry))
        self.assertEquals(batch_entry.batch_id.text, new_entry.batch_id.text)
        self.assertEquals(batch_entry.id.text, new_entry.id.text)
        self.assertEquals(batch_entry.batch_operation.type,
                          new_entry.batch_operation.type)
        self.assertEquals(batch_entry.batch_status.code,
                          new_entry.batch_status.code)
        self.assertEquals(batch_entry.batch_status.reason,
                          new_entry.batch_status.reason)
class BatchFeedTest(unittest.TestCase):
    """Tests gdata.BatchFeed: parsing batch request/result feeds and the
    Add* helpers that append entries with the right batch operation."""
    def setUp(self):
        self.batch_feed = gdata.BatchFeed()
        self.example_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/1'), text='This is a test')
    def testConvertRequestFeed(self):
        batch_feed = gdata.BatchFeedFromString(test_data.BATCH_FEED_REQUEST)
        self.assertEquals(len(batch_feed.entry), 4)
        for entry in batch_feed.entry:
            self.assert_(isinstance(entry, gdata.BatchEntry))
        self.assertEquals(batch_feed.title.text, 'My Batch Feed')
        # Round-trip: serializing and re-parsing must preserve everything.
        new_feed = gdata.BatchFeedFromString(str(batch_feed))
        self.assertEquals(len(new_feed.entry), 4)
        for entry in new_feed.entry:
            self.assert_(isinstance(entry, gdata.BatchEntry))
        self.assertEquals(new_feed.title.text, 'My Batch Feed')
    def testConvertResultFeed(self):
        batch_feed = gdata.BatchFeedFromString(test_data.BATCH_FEED_RESULT)
        self.assertEquals(len(batch_feed.entry), 4)
        for entry in batch_feed.entry:
            self.assert_(isinstance(entry, gdata.BatchEntry))
            # Only the known result entry carries status/operation to check.
            if entry.id.text == ('http://www.google.com/base/feeds/items/'
                                 '2173859253842813008'):
                self.assertEquals(entry.batch_operation.type, 'insert')
                self.assertEquals(entry.batch_id.text, 'itemB')
                self.assertEquals(entry.batch_status.code, '201')
                self.assertEquals(entry.batch_status.reason, 'Created')
        self.assertEquals(batch_feed.title.text, 'My Batch')
        new_feed = gdata.BatchFeedFromString(str(batch_feed))
        self.assertEquals(len(new_feed.entry), 4)
        for entry in new_feed.entry:
            self.assert_(isinstance(entry, gdata.BatchEntry))
            if entry.id.text == ('http://www.google.com/base/feeds/items/'
                                 '2173859253842813008'):
                self.assertEquals(entry.batch_operation.type, 'insert')
                self.assertEquals(entry.batch_id.text, 'itemB')
                self.assertEquals(entry.batch_status.code, '201')
                self.assertEquals(entry.batch_status.reason, 'Created')
        self.assertEquals(new_feed.title.text, 'My Batch')
    def testAddBatchEntry(self):
        # With neither an entry nor a URL, AddBatchEntry must raise.
        try:
            self.batch_feed.AddBatchEntry(batch_id_string='a')
            self.fail('AddBatchEntry with neither entry or URL should raise Error')
        except gdata.MissingRequiredParameters:
            pass
        # URL only: batch id is auto-assigned from the entry count.
        new_entry = self.batch_feed.AddBatchEntry(
            id_url_string='http://example.com/1')
        self.assertEquals(len(self.batch_feed.entry), 1)
        self.assertEquals(self.batch_feed.entry[0].id.text,
                          'http://example.com/1')
        self.assertEquals(self.batch_feed.entry[0].batch_id.text, '0')
        self.assertEquals(new_entry.id.text, 'http://example.com/1')
        self.assertEquals(new_entry.batch_id.text, '0')
        # Explicit batch_id_string overrides; existing atom id is kept.
        to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'))
        new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
                                                  batch_id_string='foo')
        self.assertEquals(new_entry.batch_id.text, 'foo')
        self.assertEquals(new_entry.id.text, 'originalId')
        # batch_id_string wins over an existing batch_id on the entry.
        to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'),
                                  batch_id=gdata.BatchId(text='bar'))
        new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
                                                  id_url_string='newId',
                                                  batch_id_string='foo')
        self.assertEquals(new_entry.batch_id.text, 'foo')
        self.assertEquals(new_entry.id.text, 'originalId')
        # Without batch_id_string the entry's own batch_id is preserved.
        to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'),
                                  batch_id=gdata.BatchId(text='bar'))
        new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
                                                  id_url_string='newId')
        self.assertEquals(new_entry.batch_id.text, 'bar')
        self.assertEquals(new_entry.id.text, 'originalId')
        # operation_string overrides an operation already on the entry.
        to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'),
                                  batch_id=gdata.BatchId(text='bar'),
                                  batch_operation=gdata.BatchOperation(
                                      op_type=gdata.BATCH_INSERT))
        self.assertEquals(to_add.batch_operation.type, gdata.BATCH_INSERT)
        new_entry = self.batch_feed.AddBatchEntry(entry=to_add,
            id_url_string='newId', batch_id_string='foo',
            operation_string=gdata.BATCH_UPDATE)
        self.assertEquals(new_entry.batch_operation.type, gdata.BATCH_UPDATE)
    def testAddInsert(self):
        first_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/1'), text='This is a test1')
        self.batch_feed.AddInsert(first_entry)
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_INSERT)
        self.assertEquals(self.batch_feed.entry[0].batch_id.text, '0')
        second_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/2'), text='This is a test2')
        self.batch_feed.AddInsert(second_entry, batch_id_string='foo')
        self.assertEquals(self.batch_feed.entry[1].batch_operation.type,
                          gdata.BATCH_INSERT)
        self.assertEquals(self.batch_feed.entry[1].batch_id.text, 'foo')
        third_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/3'), text='This is a test3')
        third_entry.batch_operation = gdata.BatchOperation(
            op_type=gdata.BATCH_DELETE)
        # Add an entry with a delete operation already assigned.
        self.batch_feed.AddInsert(third_entry)
        # The batch entry should not have the original operation, it should
        # have been changed to an insert.
        self.assertEquals(self.batch_feed.entry[2].batch_operation.type,
                          gdata.BATCH_INSERT)
        self.assertEquals(self.batch_feed.entry[2].batch_id.text, '2')
    def testAddDelete(self):
        # Try deleting an entry
        delete_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/1'), text='This is a test')
        self.batch_feed.AddDelete(entry=delete_entry)
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_DELETE)
        self.assertEquals(self.batch_feed.entry[0].id.text,
                          'http://example.com/1')
        self.assertEquals(self.batch_feed.entry[0].text, 'This is a test')
        # Try deleting a URL
        self.batch_feed.AddDelete(url_string='http://example.com/2')
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_DELETE)
        self.assertEquals(self.batch_feed.entry[1].id.text,
                          'http://example.com/2')
        self.assert_(self.batch_feed.entry[1].text is None)
    def testAddQuery(self):
        # Try querying with an existing batch entry
        delete_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/1'))
        self.batch_feed.AddQuery(entry=delete_entry)
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_QUERY)
        self.assertEquals(self.batch_feed.entry[0].id.text,
                          'http://example.com/1')
        # Try querying a URL
        self.batch_feed.AddQuery(url_string='http://example.com/2')
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_QUERY)
        self.assertEquals(self.batch_feed.entry[1].id.text,
                          'http://example.com/2')
    def testAddUpdate(self):
        # Try updating an entry
        delete_entry = gdata.BatchEntry(
            atom_id=atom.Id(text='http://example.com/1'), text='This is a test')
        self.batch_feed.AddUpdate(entry=delete_entry)
        self.assertEquals(self.batch_feed.entry[0].batch_operation.type,
                          gdata.BATCH_UPDATE)
        self.assertEquals(self.batch_feed.entry[0].id.text,
                          'http://example.com/1')
        self.assertEquals(self.batch_feed.entry[0].text, 'This is a test')
class ExtendedPropertyTest(unittest.TestCase):
    """Tests the XML-blob accessors on gdata.ExtendedProperty."""
    def testXmlBlobRoundTrip(self):
        ep = gdata.ExtendedProperty(name='blobby')
        ep.SetXmlBlob('<some_xml attr="test"/>')
        extension = ep.GetXmlBlobExtensionElement()
        self.assertEquals(extension.tag, 'some_xml')
        self.assert_(extension.namespace is None)
        self.assertEquals(extension.attributes['attr'], 'test')
        # Serialize and re-parse; the embedded blob must survive intact.
        ep2 = gdata.ExtendedPropertyFromString(ep.ToString())
        extension = ep2.GetXmlBlobExtensionElement()
        self.assertEquals(extension.tag, 'some_xml')
        self.assert_(extension.namespace is None)
        self.assertEquals(extension.attributes['attr'], 'test')
    def testGettersShouldReturnNoneWithNoBlob(self):
        ep = gdata.ExtendedProperty(name='no blob')
        self.assert_(ep.GetXmlBlobExtensionElement() is None)
        self.assert_(ep.GetXmlBlobString() is None)
    def testGettersReturnCorrectTypes(self):
        ep = gdata.ExtendedProperty(name='has blob')
        ep.SetXmlBlob('<some_xml attr="test"/>')
        self.assert_(isinstance(ep.GetXmlBlobExtensionElement(),
                                atom.ExtensionElement))
        self.assert_(isinstance(ep.GetXmlBlobString(), str))
class FeedLinkTest(unittest.TestCase):
    """Parses a gd:feedLink element and checks its attributes."""
    def testCorrectFromStringType(self):
        link = gdata.FeedLinkFromString(
            '<feedLink xmlns="http://schemas.google.com/g/2005" countHint="5"/>')
        self.assert_(isinstance(link, gdata.FeedLink))
        self.assertEqual(link.count_hint, '5')
def suite():
    """Build the suite of all test cases defined in this module.

    Fixed: StartIndexTest was listed twice and ItemsPerPageTest was
    missing, so the ItemsPerPage tests never ran via this suite.
    """
    return conf.build_suite([StartIndexTest, ItemsPerPageTest, GDataEntryTest,
                             LinkFinderTest, GDataFeedTest, BatchEntryTest,
                             BatchFeedTest, ExtendedPropertyTest, FeedLinkTest])
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import all_tests
import gdata.test_config as conf
# Live-test configuration: hit real services and cache the responses.
conf.options.set_value('runlive', 'true')
conf.options.set_value('savecache', 'true')
conf.options.set_value('clearcache', 'false')
def suite():
    """Return the combined suite of every test module.

    Fixed: previously returned a suite referencing atom_tests.core_test,
    which is not imported in this module and raised NameError when called.
    Delegates to all_tests, matching what __main__ actually runs.
    """
    return all_tests.suite()
if __name__ == '__main__':
    unittest.TextTestRunner().run(all_tests.suite())
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder@gmail.com (Jeff Scudder)'
import unittest
class ModuleTestRunner(object):
    """Runs the unittest cases from a list of modules, first pushing shared
    settings (e.g. username/password) into each module's globals."""
    def __init__(self, module_list=None, module_settings=None):
        """Constructor for a runner to run tests in the modules listed.

        Args:
            module_list: list (optional) The modules whose test cases will be run.
            module_settings: dict (optional) A dictionary of module level variables
                which should be set in the modules if they are present. An
                example is the username and password which is a module variable
                in most service_test modules.
        """
        self.modules = module_list or []
        self.settings = module_settings or {}
    def RunAllTests(self):
        """Executes all tests in this object's modules list.

        It also sets any module variables which match the settings keys to the
        corresponding values in the settings member.
        """
        runner = unittest.TextTestRunner()
        for module in self.modules:
            # Set any module variables according to the contents in the settings
            for setting, value in self.settings.iteritems():
                try:
                    setattr(module, setting, value)
                except AttributeError:
                    # This module did not have a variable for the current setting,
                    # so we skip it and try the next setting.
                    pass
            # We have set all of the applicable settings for the module, now
            # run the tests.
            print '\nRunning all tests in module', module.__name__
            runner.run(unittest.defaultTestLoader.loadTestsFromModule(module))
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import all_tests
import gdata.test_config as conf
# Offline configuration: run against cached responses only.
conf.options.set_value('runlive', 'false')
def suite():
    """Return the combined suite of every test module.

    Fixed: previously returned a suite referencing atom_tests.core_test,
    which is not imported in this module and raised NameError when called.
    Delegates to all_tests, matching what __main__ actually runs.
    """
    return all_tests.suite()
if __name__ == '__main__':
    unittest.TextTestRunner().run(all_tests.suite())
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import all_tests
from gdata.test_config import settings
# Live-test configuration using the settings object: run against real
# services, cache the responses, and clear any previous cache first.
settings.RUN_LIVE_TESTS = True
settings.CACHE_RESPONSES = True
settings.CLEAR_CACHE = True
def suite():
    """Return the combined suite of every test module.

    Fixed: previously returned a suite referencing atom_tests.core_test,
    which is not imported in this module and raised NameError when called.
    Delegates to all_tests, matching what __main__ actually runs.
    """
    return all_tests.suite()
if __name__ == '__main__':
    unittest.TextTestRunner().run(all_tests.suite())
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
# Tests for v2 features.
import atom_tests.core_test
import atom_tests.data_test
import atom_tests.http_core_test
import atom_tests.auth_test
import atom_tests.mock_http_core_test
import atom_tests.client_test
import gdata_tests.client_test
import gdata_tests.core_test
import gdata_tests.data_test
import gdata_tests.data_smoke_test
import gdata_tests.client_smoke_test
import gdata_tests.live_client_test
import gdata_tests.gauth_test
import gdata_tests.blogger.data_test
import gdata_tests.blogger.live_client_test
import gdata_tests.spreadsheets.data_test
import gdata_tests.spreadsheets.live_client_test
import gdata_tests.projecthosting.data_test
import gdata_tests.projecthosting.live_client_test
import gdata_tests.sites.data_test
import gdata_tests.sites.live_client_test
import gdata_tests.analytics.data_test
import gdata_tests.analytics.live_client_test
import gdata_tests.contacts.live_client_test
import gdata_tests.contacts.profiles.live_client_test
import gdata_tests.calendar_resource.live_client_test
import gdata_tests.calendar_resource.data_test
import gdata_tests.apps.emailsettings.data_test
import gdata_tests.apps.emailsettings.live_client_test
import gdata_tests.youtube.live_client_test
def suite():
    """Aggregate every v2 test module's suite into a single TestSuite.

    NOTE(review): gdata_tests.contacts.profiles.live_client_test is listed
    first, out of the otherwise import-order listing -- confirm intentional.
    """
    return unittest.TestSuite((
        gdata_tests.contacts.profiles.live_client_test.suite(),
        atom_tests.core_test.suite(),
        atom_tests.data_test.suite(),
        atom_tests.http_core_test.suite(),
        atom_tests.auth_test.suite(),
        atom_tests.mock_http_core_test.suite(),
        atom_tests.client_test.suite(),
        gdata_tests.client_test.suite(),
        gdata_tests.core_test.suite(),
        gdata_tests.data_test.suite(),
        gdata_tests.data_smoke_test.suite(),
        gdata_tests.client_smoke_test.suite(),
        gdata_tests.live_client_test.suite(),
        gdata_tests.gauth_test.suite(),
        gdata_tests.blogger.data_test.suite(),
        gdata_tests.blogger.live_client_test.suite(),
        gdata_tests.spreadsheets.data_test.suite(),
        gdata_tests.spreadsheets.live_client_test.suite(),
        gdata_tests.projecthosting.data_test.suite(),
        gdata_tests.projecthosting.live_client_test.suite(),
        gdata_tests.sites.data_test.suite(),
        gdata_tests.sites.live_client_test.suite(),
        gdata_tests.analytics.data_test.suite(),
        gdata_tests.analytics.live_client_test.suite(),
        gdata_tests.contacts.live_client_test.suite(),
        gdata_tests.calendar_resource.live_client_test.suite(),
        gdata_tests.calendar_resource.data_test.suite(),
        gdata_tests.apps.emailsettings.live_client_test.suite(),
        gdata_tests.apps.emailsettings.data_test.suite(),
        gdata_tests.youtube.live_client_test.suite(),
        ))
if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
| Python |
import os.path, sqlite3
# Admin credentials collected by prompt(); module-level so prompt() can
# assign them via `global`.
u = ''
p = ''
def prompt(x):
    """Print *x*, then interactively collect admin credentials into the
    module globals u and p, re-prompting (recursively) until the password
    and its confirmation match.

    NOTE(review): uses Python 2 raw_input, and the password is echoed to
    the terminal -- getpass.getpass would hide it.
    """
    print(x)
    global u
    global p
    u = raw_input("Username: ")
    p = raw_input("Password: ")
    p2 = raw_input("Repeat password: ")
    if(p!=p2):
        prompt("Error while inputing")
# Abort if the database already exists so a live install is never clobbered.
# Fixed: the old check used os.getcwd()+"\\scout.db", which only forms a
# valid path on Windows; os.path.join is correct on every platform (and
# produces the identical path on Windows).
if(os.path.isfile(os.path.join(os.getcwd(), "scout.db"))):
    exit()
else:
    print("--zScouting Server V0.1--")
    print("Created By: Zach Geis")
    print("--------------------------\n")
    print("Setting up zScouting Server")
    print("Creating database 'scout.db'")
    conn = sqlite3.connect('scout.db')
    c = conn.cursor()
    print("Database creation successful!")
    # Collects the admin username/password into globals u and p.
    prompt("Enter admin account information:")
    print("Structuring database")
    #create database after this line
    c.execute('''create table users
        (username text, password text, groupid text)''')
    c.execute('''create table teams
        (Number text, Name text, Location text, Description text, MiniBot_YN text)''')
    c.execute('''create table matches
        (Number text, Location text, Randomness text)''')
    c.execute('''create table comps
        (Number text, Location text, Randomness text)''')
    print("Basic structure created!")
    # Fixed: the INSERT was built by string concatenation of the entered
    # username/password, which is an SQL injection hole; use placeholders.
    # NOTE(review): the password is still stored in plaintext -- consider
    # a salted hash before deployment.
    c.execute("insert into users values (?, ?, 'admin')", (u, p))
    print("Commiting changes to disk")
    conn.commit()
    print("Setup successful!")
    c.close()
    exit()
| Python |
import os
import sqlite3
import socket
import hashlib
from bottle import route, run, template, static_file, get, post, request, redirect, response
#m = hashlib.md5()
#m.update("test")
#md5 = m.hexdigest()
#print(md5)
#%for i in range(1,10):
# {{"templated code"}}
#%end
#request.forms is a dictionary!
# Module-wide database connection and cursor shared by all request handlers.
# NOTE(review): sqlite3 connections default to single-thread enforcement --
# confirm the bottle server is run single-threaded.
conn = sqlite3.connect('scout.db')
c = conn.cursor()
@get('/login')
def login():
    # Render the login form template (login.tpl).
    return template('login')
@post('/auth', method='POST')
def check_creds():
    """Validate the posted credentials; on success set the auth cookies and
    redirect to '/', otherwise bounce back to /login."""
    username = request.forms.get('uname')
    password = request.forms.get('pword')
    try:
        # Fixed: the query was built by string concatenation of the form
        # fields, an SQL injection hole that also allowed login bypass
        # (e.g. " or ""="); placeholders make the values inert.
        c.execute('select * from users where username=? and password=?',
                  (username, password))
    except sqlite3.Error:
        # Narrowed from a bare except: only database failures land here.
        redirect("/login")
    if c.fetchone() is None:
        redirect("/login")
    else:
        # Auth cookie is md5(username+password); must match auth_require().
        # NOTE(review): an md5 of the credentials is a weak session token
        # and passwords are stored/compared in plaintext -- consider a
        # random session id plus salted password hashing.
        m = hashlib.md5()
        m.update(username+password)
        cookiemd5val = m.hexdigest()
        response.set_cookie("z_auth", cookiemd5val, max_age=6000)
        response.set_cookie("z_name", username, max_age=6000)
        redirect("/")
    return "Redirecting..."
#add auth require here to certian pages
def auth_require():
    """Gate a page behind the login cookies.

    Looks up the user named by the z_name cookie, recomputes
    md5(username+password), and compares it with the z_auth cookie.
    Returns True when they match; otherwise redirects to /login
    (bottle's redirect raises, aborting the calling handler).
    """
    try:
        # Parameterized lookup -- the original concatenated the cookie
        # value straight into the SQL string (injection-prone).
        c.execute('select * from users where username=?',
                  (request.get_cookie("z_name"),))
        row = c.fetchone()
        m = hashlib.md5()
        m.update(row[0] + row[1])
        if request.get_cookie("z_auth") == m.hexdigest():
            return True
        redirect("/login")
        return False
    except (sqlite3.Error, TypeError):
        # TypeError: no matching user (row is None) or missing cookie.
        # The original bare `except` also swallowed the redirect it had
        # just raised; catching only these two keeps the same outcome.
        redirect("/login")
@route('/static/:filename')
def server_static(filename):
    """Send *filename* out of the ./staticfiles directory."""
    static_root = os.getcwd() + '/staticfiles/'
    return static_file(filename, root=static_root)
@get('/search')
def search():
    """Echo the 'search' query parameter back to the client.

    The original decorator was @post('/search', method='GET'); the
    explicit method kwarg overrides post()'s default, so it actually
    registered a GET route -- @get says the same thing without the
    confusion. Registered route is unchanged.
    """
    return request.GET.get('search')
def index(a, b):
    """Render a page inside the shared 'home' layout.

    a -- index of the nav entry to highlight
    b -- HTML fragment for the content area

    auth_require() redirects to /login first when the auth cookies are
    invalid (its redirect raises, so we never reach the template).
    """
    auth_require()
    return template('home', cont=b, place=a)
#add main pages after this line
@route('/home')
@route('/index')
@route('/')
def root():
    """Front page: a short blurb inside the shared layout."""
    blurb = '''
    zScouting System V.01 brought to you by Team 537 Charger Robotics! </br>
    Developed By: Zach Geis ©
    '''
    return index(1, blurb)
@route('/teams')
def teams():
    """Placeholder teams listing page."""
    return index(2, "Teams Page")
@route('/addmatchdata')
def amd():
    """Placeholder page for entering match data."""
    return index(3, "Add Match Data")
@route('/printscoutingreport')
def psr():
    """Placeholder page for printing a scouting report."""
    return index(4, "Print Scouting Report")
#Below this line place all service page related functions
def serviceButtons():
    """Return the HTML nav block linking to the admin service pages."""
    buttons = '''
    <div class="buttons">
    <a href="/adduser" class="service">Add User</a></br></br></br>
    <a href="/addteam" class="service">Add Team</a></br></br></br>
    <a href="/addmatch" class="service">Add Match</a></br></br></br>
    <a href="/addcomp" class="service">Add Competition</a></br></br></br>
    </div>
    '''
    return buttons
@route('/services')
def services():
    """Admin services landing page: just the service nav buttons."""
    return index(5, serviceButtons())
@get('/adduser')
def usercreds():
    """Show the manual add-user form plus the service nav buttons."""
    # NOTE(review): both password inputs are named "pword" and the POST
    # handler never checks that they match -- confirm intended.
    form = '''
    <form class="serviceform" method="POST" action="/adduser">
    <table class="form">
    <tr><td class="title" colspan="2">Add User</td></tr>
    <tr>
    <td class="section">Username: </td>
    <td class="section"><input type="text" name="uname" /></td>
    </tr>
    <tr>
    <td class="section">Password: </td>
    <td class="section"><input type="password" name="pword" /></td>
    </tr>
    <tr>
    <td class="section">Retype Password: </td>
    <td class="section"><input type="password" name="pword" /></td>
    </tr>
    <tr><td class="section"></td><td class="section"> <div class="buttons"> <button class="positive" type="submit">Create</button></div></td></tr>
    </table>
    </form>
    '''
    return index(5, form + serviceButtons())
@post('/adduser', method='POST')
def adduser():
    """Insert a new user (group 'admin') from the posted form fields.

    Uses a parameterized insert; the original concatenated the form
    values into the SQL string (injection-prone).  NOTE(review): the
    password is stored in plain text and the form's retype field is
    never validated.
    """
    c.execute("insert into users values (?, ?, 'admin')",
              (request.forms.get('uname'), request.forms.get('pword')))
    conn.commit()
    return index(5, "Successful add!")
@route('/addteam')
def teamcreds():
    """Render an add-team form whose fields mirror the teams table.

    Column names come from PRAGMA table_info (name is field 1 of each
    row).  The leftover debug `print` of the raw PRAGMA rows has been
    removed.
    """
    c.execute("PRAGMA table_info(teams)")
    formtables = [str(col[1]) for col in c.fetchall()]
    content = zform("serviceform", "Add Team", formtables, "Create",
                    "POST", "/addteam", "Team ").getform()
    content += serviceButtons()
    return index(5, content)
@route('/addteam', method='POST')
def addteam():
    """Insert a posted team row; one form field per teams-table column.

    Uses a parameterized insert -- the original concatenated the raw
    form values into the SQL string (injection-prone), printed debug
    output, and echoed the raw SQL back to the client.
    """
    c.execute("PRAGMA table_info(teams)")
    columns = [str(col[1]) for col in c.fetchall()]
    values = [str(request.forms.get(name)) for name in columns]
    placeholders = ",".join("?" * len(values))
    c.execute("insert into teams values (%s)" % placeholders, values)
    conn.commit()
    return "Successful add!"
@route('/addmatch')
def matchcreds():
    """Render an add-match form whose fields mirror the matches table.

    Column names come from PRAGMA table_info (name is field 1 of each
    row).  The leftover debug `print` has been removed.
    """
    c.execute("PRAGMA table_info(matches)")
    formtables = [str(col[1]) for col in c.fetchall()]
    content = zform("serviceform", "Add Match", formtables, "Create",
                    "POST", "/addmatch", "").getform()
    content += serviceButtons()
    return index(5, content)
@route('/addmatch', method='POST')
def addmatch():
    """Insert a posted match row; one form field per matches-table column.

    Uses a parameterized insert -- the original concatenated the raw
    form values into the SQL string (injection-prone), printed debug
    output, and echoed the raw SQL back to the client.
    """
    c.execute("PRAGMA table_info(matches)")
    columns = [str(col[1]) for col in c.fetchall()]
    values = [str(request.forms.get(name)) for name in columns]
    placeholders = ",".join("?" * len(values))
    c.execute("insert into matches values (%s)" % placeholders, values)
    conn.commit()
    return "Successful add!"
@route('/addcomp')
def compcreds():
    """Render an add-competition form mirroring the comps table.

    Column names come from PRAGMA table_info (name is field 1 of each
    row).  The leftover debug `print` has been removed.
    """
    c.execute("PRAGMA table_info(comps)")
    formtables = [str(col[1]) for col in c.fetchall()]
    content = zform("serviceform", "Add Competition", formtables, "Create",
                    "POST", "/addcomp", "").getform()
    content += serviceButtons()
    return index(5, content)
@route('/addcomp', method='POST')
def addcomp():
    """Insert a posted competition row; one field per comps-table column.

    Uses a parameterized insert -- the original concatenated the raw
    form values into the SQL string (injection-prone), printed debug
    output, and echoed the raw SQL back to the client.
    """
    c.execute("PRAGMA table_info(comps)")
    columns = [str(col[1]) for col in c.fetchall()]
    values = [str(request.forms.get(name)) for name in columns]
    placeholders = ",".join("?" * len(values))
    c.execute("insert into comps values (%s)" % placeholders, values)
    conn.commit()
    return "Successful add!"
class zform:
    """Builds a simple two-column HTML form.

    txtclass  -- CSS class for the <form>
    txttitle  -- heading shown in the title row
    arfield   -- list of field names; one text input per entry
    txtsub    -- label for the submit button
    txtmethod -- HTTP method for the form
    txtaction -- form action URL
    prefix    -- text prepended to each field's label (input names stay
                 the raw field names so POST handlers can look them up)

    Fixes over the original: each field row is closed with </tr>, and
    the submit-button row is emitted once after the loop instead of
    once per field.
    """
    def __init__(self, txtclass, txttitle, arfield, txtsub, txtmethod, txtaction, prefix):
        parts = []
        parts.append('<form class="' + txtclass + '" method="' + txtmethod + '" action="' + txtaction + '">')
        parts.append('<table class="form">')
        parts.append('<tr><td class="title" colspan="2">' + txttitle + '</td></tr>')
        for name in arfield:
            parts.append('<tr>')
            parts.append('<td class="section">' + prefix + name + ':</td>')
            parts.append('<td class="section"><input type="text" name="' + name + '" /></td>')
            parts.append('</tr>')
        parts.append('<tr><td class="section"></td><td class="section"> <div class="buttons"> <button class="positive" type="submit">' + txtsub + '</button></div></td></tr>')
        parts.append('</table>')
        parts.append('</form>')
        self.form = ''.join(parts)
    def getform(self):
        """Return the assembled HTML."""
        return self.form
#super early work in progess what ever is before alpha :/
class zTable:
    """Work-in-progress HTML table builder (mirrors zform's layout).

    ardata   -- list of field names; one text input per entry
    prefix   -- text prepended to each label
    txttitle -- optional title-row text; the original referenced an
                undefined `txttitle`, raising NameError on every use,
                so it is now a parameter with a backward-compatible
                default.

    Also closes each field row with </tr>, which the original omitted.
    """
    def __init__(self, ardata, prefix, txttitle=''):
        parts = ['<table class="form">']
        parts.append('<tr><td class="title" colspan="2">' + txttitle + '</td></tr>')
        for name in ardata:
            parts.append('<tr>')
            parts.append('<td class="section">' + prefix + name + ':</td>')
            parts.append('<td class="section"><input type="text" name="' + name + '" /></td>')
            parts.append('</tr>')
        parts.append('</table>')
        self.form = ''.join(parts)
    def gettable(self):
        """Return the assembled HTML."""
        return self.form
@route('/logout')
def logout():
    """Invalidate the auth cookies and bounce back to the login form."""
    response.set_cookie("z_auth", "0")
    response.set_cookie("z_name", "0")
    # redirect() raises, so the return below is never reached.
    redirect("/login")
    return 0
#ip local str(socket.gethostbyname(socket.gethostname()))
# Start bottle's built-in development server, bound to localhost only
# (not reachable from the LAN unless the host above is changed).
run(host='localhost', port=8080)
| Python |
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottle.paws.de/
Licence (MIT)
-------------
Copyright (c) 2009, Marcel Hellkamp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Example
-------
This is an example::
from bottle import route, run, request, response, static_file, abort
@route('/')
def hello_world():
return 'Hello World!'
@route('/hello/:name')
def hello_name(name):
return 'Hello %s!' % name
@route('/hello', method='POST')
def hello_post():
name = request.POST['name']
return 'Hello %s!' % name
@route('/static/:filename#.*#')
def static(filename):
return static_file(filename, root='/path/to/static/files/')
run(host='localhost', port=8080)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.8.5'
__license__ = 'MIT'
import base64
import cgi
import email.utils
import functools
import hmac
import inspect
import itertools
import mimetypes
import os
import re
import subprocess
import sys
import thread
import threading
import time
import tokenize
import tempfile
from Cookie import SimpleCookie
from tempfile import TemporaryFile
from traceback import format_exc
from urllib import quote as urlquote
from urlparse import urlunsplit, urljoin
try:
from collections import MutableMapping as DictMixin
except ImportError: # pragma: no cover
from UserDict import DictMixin
try:
from urlparse import parse_qs
except ImportError: # pragma: no cover
from cgi import parse_qs
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
try:
try:
from json import dumps as json_dumps
except ImportError: # pragma: no cover
from simplejson import dumps as json_dumps
except ImportError: # pragma: no cover
json_dumps = None
# Py2/Py3 compatibility shims: a byte-buffer type, the native byte-string
# type, and unicode/byte conversion helpers used throughout the module.
if sys.version_info >= (3,0,0): # pragma: no cover
    # See Request.POST
    from io import BytesIO
    from io import TextIOWrapper
    class NCTextIOWrapper(TextIOWrapper):
        ''' Garbage collecting an io.TextIOWrapper(buffer) instance closes the
            wrapped buffer. This subclass keeps it open. '''
        def close(self): pass
    StringType = bytes
    def touni(x, enc='utf8'): # Convert anything to unicode (py3)
        return str(x, encoding=enc) if isinstance(x, bytes) else str(x)
else:
    from StringIO import StringIO as BytesIO
    from types import StringType
    NCTextIOWrapper = None
    def touni(x, enc='utf8'): # Convert anything to unicode (py2)
        return x if isinstance(x, unicode) else unicode(str(x), encoding=enc)
# NOTE(review): tob() references `unicode`, which only exists on py2 --
# confirm a py3 alias is defined elsewhere in the file.
def tob(data, enc='utf8'): # Convert strings to bytes (py2 and py3)
    return data.encode(enc) if isinstance(data, unicode) else data
# Background compatibility
import warnings
def depr(message, critical=False):
    """Emit *message* as a DeprecationWarning.

    When *critical* is true the warning is raised instead of emitted,
    aborting the caller.
    """
    if critical:
        raise DeprecationWarning(message)
    warnings.warn(message, DeprecationWarning, stacklevel=3)
# Exceptions and Events
class BottleException(Exception):
    """Common base class for every exception raised by bottle."""
    pass
class HTTPResponse(BottleException):
    """ Used to break execution and immediately finish the response.

        Raised (not returned) by handlers; Bottle.handle() catches it and
        Bottle._cast() applies it to the outgoing response.

        output -- response body
        status -- numeric HTTP status code
        header -- optional mapping/list of extra response headers
    """
    def __init__(self, output='', status=200, header=None):
        # NOTE(review): super() is given BottleException (not HTTPResponse),
        # skipping a level in the MRO -- confirm intended.
        super(BottleException, self).__init__("HTTP Response %d" % status)
        self.status = int(status)
        self.output = output
        self.headers = HeaderDict(header) if header else None
    def apply(self, response):
        # Copy this object's status and headers onto a live Response.
        if self.headers:
            for key, value in self.headers.iterallitems():
                response.headers[key] = value
        response.status = self.status
class HTTPError(HTTPResponse):
    """ Used to generate an error page.

        code      -- numeric HTTP error status (default 500)
        output    -- error description shown on the page
        exception -- the original exception, if any
        traceback -- formatted traceback string, if any
        header    -- optional extra response headers
    """
    def __init__(self, code=500, output='Unknown Error', exception=None, traceback=None, header=None):
        super(HTTPError, self).__init__(output, code, header)
        self.exception = exception
        self.traceback = traceback
    def __repr__(self):
        # Renders the module-level ERROR_PAGE_TEMPLATE (defined elsewhere
        # in this file) with this error bound as `e`.
        return ''.join(ERROR_PAGE_TEMPLATE.render(e=self))
# Routing
class RouteError(BottleException):
    """ This is a base class for all routing related exceptions """
class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router """
class RouteBuildError(RouteError):
    """ The route could not be built """
class Route(object):
    ''' Represents a single route and can parse the dynamic route syntax '''
    # Token grammar: optional static prefix, then `:name`, `:name#regexp#`
    # or `:#regexp#`; a backslash before the ':' escapes it.
    syntax = re.compile(r'(.*?)(?<!\\):([a-zA-Z_]+)?(?:#(.*?)#)?')
    # Pattern used for `:name` tokens that carry no explicit regexp.
    default = '[^/]+'
    def __init__(self, route, target=None, name=None, static=False):
        """ Create a Route. The route string may contain `:key`,
            `:key#regexp#` or `:#regexp#` tokens for each dynamic part of the
            route. These can be escaped with a backslash in front of the `:`
            and are completely ignored if static is true. A name may be used
            to refer to this route later (depends on Router)
        """
        self.route = route
        self.target = target
        self.name = name
        if static:
            # Escaping every ':' turns the whole route into literal text.
            self.route = self.route.replace(':','\\:')
        self._tokens = None
    def tokens(self):
        """ Return a list of (type, value) tokens (lazily computed). """
        if not self._tokens:
            self._tokens = list(self.tokenise(self.route))
        return self._tokens
    @classmethod
    def tokenise(cls, route):
        ''' Split a string into an iterator of (type, value) tokens.
            Types: TXT (literal text), VAR ((regexp, name) pair) and
            ANON (nameless regexp). '''
        match = None
        for match in cls.syntax.finditer(route):
            pre, name, rex = match.groups()
            if pre: yield ('TXT', pre.replace('\\:',':'))
            if rex and name: yield ('VAR', (rex, name))
            elif name: yield ('VAR', (cls.default, name))
            elif rex: yield ('ANON', rex)
        if not match:
            # No token matched at all: the whole route is literal text.
            yield ('TXT', route.replace('\\:',':'))
        elif match.end() < len(route):
            # Trailing literal text after the last token.
            yield ('TXT', route[match.end():].replace('\\:',':'))
    def group_re(self):
        ''' Return a regexp pattern with named groups '''
        out = ''
        for token, data in self.tokens():
            if token == 'TXT': out += re.escape(data)
            elif token == 'VAR': out += '(?P<%s>%s)' % (data[1], data[0])
            elif token == 'ANON': out += '(?:%s)' % data
        return out
    def flat_re(self):
        ''' Return a regexp pattern with non-grouping parentheses '''
        # Rewrite '(?P<name>' and plain '(' into '(?:', unless the paren
        # is escaped by an odd number of backslashes.
        rf = lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
        return re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', rf, self.group_re())
    def format_str(self):
        ''' Return a format string with named fields. '''
        out, i = '', 0
        for token, value in self.tokens():
            if token == 'TXT': out += value.replace('%','%%')
            elif token == 'ANON': out += '%%(anon%d)s' % i; i+=1
            elif token == 'VAR': out += '%%(%s)s' % value[1]
        return out
    @property
    def static(self):
        # A route is static iff it contains no dynamic tokens.
        return not self.is_dynamic()
    def is_dynamic(self):
        ''' Return true if the route contains dynamic parts '''
        for token, value in self.tokens():
            if token != 'TXT':
                return True
        return False
    def __repr__(self):
        return "<Route(%s) />" % repr(self.route)
    def __eq__(self, other):
        # Routes compare by route string only; target/name are ignored.
        return self.route == other.route
class Router(object):
    ''' A route associates a string (e.g. URL) with an object (e.g. function)
        Some dynamic routes may extract parts of the string and provide them as
        a dictionary. This router matches a string against multiple routes and
        returns the associated object along with the extracted data.
    '''
    def __init__(self):
        self.routes = []  # List of all installed routes
        self.named = {}   # Cache for named routes and their format strings
        self.static = {}  # Cache for static routes
        self.dynamic = [] # Search structure for dynamic routes
    def add(self, route, target=None, **ka):
        """ Add a route->target pair or a :class:`Route` object to the Router.
            Return the Route object. See :class:`Route` for details.
        """
        if not isinstance(route, Route):
            route = Route(route, target, **ka)
        if self.get_route(route):
            # NOTE(review): the duplicate-route error is *returned*, not
            # raised, so a caller that ignores the result silently drops
            # the route -- confirm intended.
            return RouteError('Route %s is not uniqe.' % route)
        self.routes.append(route)
        return route
    def get_route(self, route, target=None, **ka):
        ''' Get a route from the router by specifying either the same
            parameters as in :meth:`add` or comparing to an instance of
            :class:`Route`. Note that not all parameters are considered by the
            compare function. '''
        if not isinstance(route, Route):
            route = Route(route, **ka)
        for known in self.routes:
            if route == known:
                return known
        return None
    def match(self, uri):
        ''' Match an URI and return a (target, urlargs) tuple '''
        if uri in self.static:
            return self.static[uri], {}
        # Each entry pairs one big alternation regexp with the subroutes
        # it combines; lastindex tells us which alternative matched.
        for combined, subroutes in self.dynamic:
            match = combined.match(uri)
            if not match: continue
            target, args_re = subroutes[match.lastindex - 1]
            args = args_re.match(uri).groupdict() if args_re else {}
            return target, args
        return None, {}
    def build(self, _name, **args):
        ''' Build an URI out of a named route and values for the wildcards. '''
        try:
            return self.named[_name] % args
        except KeyError:
            raise RouteBuildError("No route found with name '%s'." % _name)
    def compile(self):
        ''' Build the search structures. Call this before actually using the
            router.'''
        self.named = {}
        self.static = {}
        self.dynamic = []
        for route in self.routes:
            if route.name:
                self.named[route.name] = route.format_str()
            if route.static:
                self.static[route.route] = route.target
                continue
            gpatt = route.group_re()
            fpatt = route.flat_re()
            try:
                # Append this route's flat pattern to the newest combined
                # regexp; IndexError/AssertionError start a fresh bucket
                # (the latter when re hits its 100-group limit).
                gregexp = re.compile('^(%s)$' % gpatt) if '(?P' in gpatt else None
                combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, fpatt)
                self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
                self.dynamic[-1][1].append((route.target, gregexp))
            except (AssertionError, IndexError), e: # AssertionError: Too many groups
                self.dynamic.append((re.compile('(^%s$)'%fpatt),[(route.target, gregexp)]))
            except re.error, e:
                raise RouteSyntaxError("Could not add Route: %s (%s)" % (route, e))
    def __eq__(self, other):
        return self.routes == other.routes
# WSGI abstraction: Application, Request and Response objects
class Bottle(object):
    """ WSGI application: routing, handler dispatch and output casting. """
    def __init__(self, catchall=True, autojson=True, config=None):
        """ Create a new bottle instance.
            You usually don't do that. Use `bottle.app.push()` instead.

            catchall -- when true, unhandled exceptions become HTTP 500
                        pages instead of propagating to the WSGI server
            autojson -- when true (and a json library is available),
                        dict results are serialized to JSON
            config   -- optional configuration mapping
        """
        self.routes = Router()
        self.mounts = {}
        self.error_handler = {}
        self.catchall = catchall
        self.config = config or {}
        self.serve = True
        # List of (type, filter-callable) pairs applied to handler output.
        self.castfilter = []
        if autojson and json_dumps:
            self.add_filter(dict, dict2json)
    def optimize(self, *a, **ka):
        # Kept only for backwards compatibility; warns and does nothing.
        depr("Bottle.optimize() is obsolete.")
    def mount(self, app, script_path):
        ''' Mount a Bottle application to a specific URL prefix '''
        if not isinstance(app, Bottle):
            raise TypeError('Only Bottle instances are supported for now.')
        script_path = '/'.join(filter(None, script_path.split('/')))
        path_depth = script_path.count('/') + 1
        if not script_path:
            raise TypeError('Empty script_path. Perhaps you want a merge()?')
        for other in self.mounts:
            if other.startswith(script_path):
                raise TypeError('Conflict with existing mount: %s' % other)
        # One catch-all route delegates everything under the prefix to
        # the mounted app, after shifting the prefix into SCRIPT_NAME.
        @self.route('/%s/:#.*#' % script_path, method="ANY")
        def mountpoint():
            request.path_shift(path_depth)
            return app.handle(request.path, request.method)
        self.mounts[script_path] = app
    def add_filter(self, ftype, func):
        ''' Register a new output filter. Whenever bottle hits a handler output
            matching `ftype`, `func` is applied to it. '''
        if not isinstance(ftype, type):
            raise TypeError("Expected type object, got %s" % type(ftype))
        # Replace any existing filter for the same type, then keep the
        # list sorted for deterministic lookup order.
        self.castfilter = [(t, f) for (t, f) in self.castfilter if t != ftype]
        self.castfilter.append((ftype, func))
        self.castfilter.sort()
    def match_url(self, path, method='GET'):
        """ Find a callback bound to a path and a specific HTTP method.
            Return (callback, param) tuple or raise HTTPError.
            method: HEAD falls back to GET. All methods fall back to ANY.
        """
        path, method = path.strip().lstrip('/'), method.upper()
        callbacks, args = self.routes.match(path)
        if not callbacks:
            raise HTTPError(404, "Not found: " + path)
        if method in callbacks:
            return callbacks[method], args
        if method == 'HEAD' and 'GET' in callbacks:
            return callbacks['GET'], args
        if 'ANY' in callbacks:
            return callbacks['ANY'], args
        # 405 with an Allow header listing the methods that would work.
        allow = [m for m in callbacks if m != 'ANY']
        if 'GET' in allow and 'HEAD' not in allow:
            allow.append('HEAD')
        raise HTTPError(405, "Method not allowed.",
                        header=[('Allow',",".join(allow))])
    def get_url(self, routename, **kargs):
        """ Return a string that matches a named route """
        scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
        location = self.routes.build(routename, **kargs).lstrip('/')
        return urljoin(urljoin('/', scriptname), location)
    def route(self, path=None, method='GET', **kargs):
        """ Decorator: bind a function to a GET request path.
            If the path parameter is None, the signature of the decorated
            function is used to generate the paths. See yieldroutes()
            for details.
            The method parameter (default: GET) specifies the HTTP request
            method to listen to. You can specify a list of methods too.
        """
        def wrapper(callback):
            routes = [path] if path else yieldroutes(callback)
            methods = method.split(';') if isinstance(method, str) else method
            for r in routes:
                for m in methods:
                    r, m = r.strip().lstrip('/'), m.strip().upper()
                    # Each route maps to a {method: callback} dict; reuse
                    # the existing dict when the path is already known.
                    old = self.routes.get_route(r, **kargs)
                    if old:
                        old.target[m] = callback
                    else:
                        self.routes.add(r, {m: callback}, **kargs)
                        self.routes.compile()
            return callback
        return wrapper
    def get(self, path=None, method='GET', **kargs):
        """ Decorator: Bind a function to a GET request path.
            See :meth:'route' for details. """
        return self.route(path, method, **kargs)
    def post(self, path=None, method='POST', **kargs):
        """ Decorator: Bind a function to a POST request path.
            See :meth:'route' for details. """
        return self.route(path, method, **kargs)
    def put(self, path=None, method='PUT', **kargs):
        """ Decorator: Bind a function to a PUT request path.
            See :meth:'route' for details. """
        return self.route(path, method, **kargs)
    def delete(self, path=None, method='DELETE', **kargs):
        """ Decorator: Bind a function to a DELETE request path.
            See :meth:'route' for details. """
        return self.route(path, method, **kargs)
    def error(self, code=500):
        """ Decorator: Register an output handler for a HTTP error code"""
        def wrapper(handler):
            self.error_handler[int(code)] = handler
            return handler
        return wrapper
    def handle(self, url, method):
        """ Execute the handler bound to the specified url and method and return
            its output. If catchall is true, exceptions are caught and returned as
            HTTPError(500) objects. """
        if not self.serve:
            return HTTPError(503, "Server stopped")
        try:
            handler, args = self.match_url(url, method)
            return handler(**args)
        except HTTPResponse, e:
            return e
        except Exception, e:
            if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
            or not self.catchall:
                raise
            return HTTPError(500, 'Unhandled exception', e, format_exc(10))
    def _cast(self, out, request, response, peek=None):
        """ Try to convert the parameter into something WSGI compatible and set
            correct HTTP headers when possible.
            Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
            iterable of strings and iterable of unicodes
        """
        # Filtered types (recursive, because they may return anything)
        for testtype, filterfunc in self.castfilter:
            if isinstance(out, testtype):
                return self._cast(filterfunc(out), request, response)
        # Empty output is done here
        if not out:
            response.headers['Content-Length'] = 0
            return []
        # Join lists of byte or unicode strings. Mixed lists are NOT supported
        if isinstance(out, (tuple, list))\
        and isinstance(out[0], (StringType, unicode)):
            out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
        # Encode unicode strings
        if isinstance(out, unicode):
            out = out.encode(response.charset)
        # Byte Strings are just returned
        if isinstance(out, StringType):
            response.headers['Content-Length'] = str(len(out))
            return [out]
        # HTTPError or HTTPException (recursive, because they may wrap anything)
        if isinstance(out, HTTPError):
            out.apply(response)
            return self._cast(self.error_handler.get(out.status, repr)(out), request, response)
        if isinstance(out, HTTPResponse):
            out.apply(response)
            return self._cast(out.output, request, response)
        # File-like objects.
        if hasattr(out, 'read'):
            if 'wsgi.file_wrapper' in request.environ:
                return request.environ['wsgi.file_wrapper'](out)
            elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
                return WSGIFileWrapper(out)
        # Handle Iterables. We peek into them to detect their inner type.
        try:
            out = iter(out)
            first = out.next()
            # Skip leading falsy chunks so the type test below sees real data.
            while not first:
                first = out.next()
        except StopIteration:
            return self._cast('', request, response)
        except HTTPResponse, e:
            first = e
        except Exception, e:
            first = HTTPError(500, 'Unhandled exception', e, format_exc(10))
            if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\
            or not self.catchall:
                raise
        # These are the inner types allowed in iterator or generator objects.
        if isinstance(first, HTTPResponse):
            return self._cast(first, request, response)
        if isinstance(first, StringType):
            return itertools.chain([first], out)
        if isinstance(first, unicode):
            return itertools.imap(lambda x: x.encode(response.charset),
                                  itertools.chain([first], out))
        return self._cast(HTTPError(500, 'Unsupported response type: %s'\
            % type(first)), request, response)
    def __call__(self, environ, start_response):
        """ The bottle WSGI-interface. """
        try:
            environ['bottle.app'] = self
            request.bind(environ)
            response.bind(self)
            out = self.handle(request.path, request.method)
            out = self._cast(out, request, response)
            # rfc2616 section 4.3: these responses carry no body.
            if response.status in (100, 101, 204, 304) or request.method == 'HEAD':
                out = []
            status = '%d %s' % (response.status, HTTP_CODES[response.status])
            start_response(status, response.headerlist)
            return out
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception, e:
            if not self.catchall:
                raise
            # Last-ditch error page: _cast itself (or bind) failed.
            err = '<h1>Critical error while processing request: %s</h1>' \
                % environ.get('PATH_INFO', '/')
            if DEBUG:
                err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e)
                err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10)
            environ['wsgi.errors'].write(err) #TODO: wsgi.error should not get html
            start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')])
            return [tob(err)]
class Request(threading.local, DictMixin):
""" Represents a single HTTP request using thread-local attributes.
The Request object wraps a WSGI environment and can be used as such.
"""
def __init__(self, environ=None, config=None):
""" Create a new Request instance.
You usually don't do this but use the global `bottle.request`
instance instead.
"""
self.bind(environ or {}, config)
def bind(self, environ, config=None):
""" Bind a new WSGI enviroment.
This is done automatically for the global `bottle.request`
instance on every request.
"""
self.environ = environ
self.config = config or {}
# These attributes are used anyway, so it is ok to compute them here
self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/')
self.method = environ.get('REQUEST_METHOD', 'GET').upper()
@property
def _environ(self):
depr("Request._environ renamed to Request.environ")
return self.environ
def copy(self):
''' Returns a copy of self '''
return Request(self.environ.copy(), self.config)
def path_shift(self, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
script_name = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self.path = path_shift(script_name, self.path, shift)
self['PATH_INFO'] = self.path
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Shortcut for Request.environ.__setitem__ """
self.environ[key] = value
todelete = []
if key in ('PATH_INFO','REQUEST_METHOD'):
self.bind(self.environ, self.config)
elif key == 'wsgi.input': todelete = ('body','forms','files','params')
elif key == 'QUERY_STRING': todelete = ('get','params')
elif key.startswith('HTTP_'): todelete = ('headers', 'cookies')
for key in todelete:
if 'bottle.' + key in self.environ:
del self.environ['bottle.' + key]
@property
def query_string(self):
""" The content of the QUERY_STRING environment variable. """
return self.environ.get('QUERY_STRING', '')
@property
def fullpath(self):
""" Request path including SCRIPT_NAME (if present) """
return self.environ.get('SCRIPT_NAME', '').rstrip('/') + self.path
@property
def url(self):
""" Full URL as requested by the client (computed).
This value is constructed out of different environment variables
and includes scheme, host, port, scriptname, path and query string.
"""
scheme = self.environ.get('wsgi.url_scheme', 'http')
host = self.environ.get('HTTP_X_FORWARDED_HOST', self.environ.get('HTTP_HOST', None))
if not host:
host = self.environ.get('SERVER_NAME')
port = self.environ.get('SERVER_PORT', '80')
if scheme + port not in ('https443', 'http80'):
host += ':' + port
parts = (scheme, host, urlquote(self.fullpath), self.query_string, '')
return urlunsplit(parts)
@property
def content_length(self):
""" Content-Length header as an integer, -1 if not specified """
return int(self.environ.get('CONTENT_LENGTH','') or -1)
@property
def header(self):
''' :class:`HeaderDict` filled with request headers.
HeaderDict keys are case insensitive str.title()d
'''
if 'bottle.headers' not in self.environ:
header = self.environ['bottle.headers'] = HeaderDict()
for key, value in self.environ.iteritems():
if key.startswith('HTTP_'):
key = key[5:].replace('_','-').title()
header[key] = value
return self.environ['bottle.headers']
@property
def GET(self):
""" The QUERY_STRING parsed into a MultiDict.
Keys and values are strings. Multiple values per key are possible.
See MultiDict for details.
"""
if 'bottle.get' not in self.environ:
data = parse_qs(self.query_string, keep_blank_values=True)
get = self.environ['bottle.get'] = MultiDict()
for key, values in data.iteritems():
for value in values:
get[key] = value
return self.environ['bottle.get']
@property
def POST(self):
""" Property: The HTTP POST body parsed into a MultiDict.
This supports urlencoded and multipart POST requests. Multipart
is commonly used for file uploads and may result in some of the
values being cgi.FieldStorage objects instead of strings.
Multiple values per key are possible. See MultiDict for details.
"""
if 'bottle.post' not in self.environ:
self.environ['bottle.post'] = MultiDict()
self.environ['bottle.forms'] = MultiDict()
self.environ['bottle.files'] = MultiDict()
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
if NCTextIOWrapper:
fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
# TODO: Content-Length may be wrong now. Does cgi.FieldStorage
# use it at all? I think not, because all tests pass.
else:
fb = self.body
data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
for item in data.list or []:
if item.filename:
self.environ['bottle.post'][item.name] = item
self.environ['bottle.files'][item.name] = item
else:
self.environ['bottle.post'][item.name] = item.value
self.environ['bottle.forms'][item.name] = item.value
return self.environ['bottle.post']
@property
def forms(self):
""" Property: HTTP POST form data parsed into a MultiDict. """
if 'bottle.forms' not in self.environ: self.POST
return self.environ['bottle.forms']
@property
def files(self):
""" Property: HTTP POST file uploads parsed into a MultiDict. """
if 'bottle.files' not in self.environ: self.POST
return self.environ['bottle.files']
@property
def params(self):
""" A combined MultiDict with POST and GET parameters. """
if 'bottle.params' not in self.environ:
self.environ['bottle.params'] = MultiDict(self.GET)
self.environ['bottle.params'].update(dict(self.forms))
return self.environ['bottle.params']
@property
def body(self):
""" The HTTP request body as a seekable buffer object.
This property returns a copy of the `wsgi.input` stream and should
be used instead of `environ['wsgi.input']`.
"""
if 'bottle.body' not in self.environ:
maxread = max(0, self.content_length)
stream = self.environ['wsgi.input']
body = BytesIO() if maxread < MEMFILE_MAX else TemporaryFile(mode='w+b')
while maxread > 0:
part = stream.read(min(maxread, MEMFILE_MAX))
if not part: #TODO: Wrong content_length. Error? Do nothing?
break
body.write(part)
maxread -= len(part)
self.environ['wsgi.input'] = body
self.environ['bottle.body'] = body
self.environ['bottle.body'].seek(0)
return self.environ['bottle.body']
    @property
    def auth(self): #TODO: Tests and docs. Add support for digest. namedtuple?
        """ HTTP authorisation data as a (user, passwd) tuple. (experimental)

        This implementation currently only supports basic auth and returns
        None on errors.
        """
        return parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
    @property
    def COOKIES(self):
        """ Cookie information parsed into a dictionary.

        Secure cookies are NOT decoded automatically. See
        Request.get_cookie() for details.
        """
        if 'bottle.cookies' not in self.environ:
            raw_dict = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
            self.environ['bottle.cookies'] = {}
            # Flatten the SimpleCookie morsels into a plain name->value dict.
            for cookie in raw_dict.itervalues():
                self.environ['bottle.cookies'][cookie.key] = cookie.value
        return self.environ['bottle.cookies']
    def get_cookie(self, name, secret=None):
        """ Return the (decoded) value of a cookie.

        :param name: name of the cookie
        :param secret: if set, try to verify and decode a signed cookie
            first; falls back to the raw value when verification fails.
        """
        value = self.COOKIES.get(name)
        dec = cookie_decode(value, secret) if secret else None
        return dec or value
    @property
    def is_ajax(self):
        ''' True if the request was generated using XMLHttpRequest '''
        #TODO: write tests
        # NOTE(review): relies on the non-standard X-Requested-With header
        # added by common JS libraries; absent it, AJAX is undetectable.
        return self.header.get('X-Requested-With') == 'XMLHttpRequest'
class Response(threading.local):
    """ Represents a single HTTP response using thread-local attributes.
    """
    def __init__(self, config=None):
        self.bind(config)
    def bind(self, config=None):
        """ Resets the Response object to its factory defaults. """
        self._COOKIES = None  # lazily created SimpleCookie (see COOKIES)
        self.status = 200
        self.headers = HeaderDict()
        self.content_type = 'text/html; charset=UTF-8'
        self.config = config or {}
    @property
    def header(self):
        # Deprecated alias, kept for backwards compatibility.
        depr("Response.header renamed to Response.headers")
        return self.headers
    def copy(self):
        ''' Returns a copy of self '''
        copy = Response(self.config)
        copy.status = self.status
        copy.headers = self.headers.copy()
        copy.content_type = self.content_type
        return copy
    def wsgiheader(self):
        ''' Returns a wsgi conform list of header/value pairs. '''
        # Emit cookies that are not already present as Set-Cookie headers.
        for c in self.COOKIES.values():
            if c.OutputString() not in self.headers.getall('Set-Cookie'):
                self.headers.append('Set-Cookie', c.OutputString())
        # rfc2616 section 10.2.3, 10.3.5: body-less statuses must not carry
        # entity headers.
        if self.status in (204, 304) and 'content-type' in self.headers:
            del self.headers['content-type']
        if self.status == 304:
            for h in ('allow', 'content-encoding', 'content-language',
                      'content-length', 'content-md5', 'content-range',
                      'content-type', 'last-modified'): # + c-location, expires?
                if h in self.headers:
                    del self.headers[h]
        return list(self.headers.iterallitems())
    headerlist = property(wsgiheader)
    @property
    def charset(self):
        """ Return the charset specified in the content-type header.

        This defaults to `UTF-8`.
        """
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return 'UTF-8'
    @property
    def COOKIES(self):
        """ A dict-like SimpleCookie instance. Use Response.set_cookie() instead. """
        if not self._COOKIES:
            self._COOKIES = SimpleCookie()
        return self._COOKIES
    def set_cookie(self, key, value, secret=None, **kargs):
        """ Add a new cookie with various options.

        If the cookie value is not a string, a secure cookie is created.

        Possible options are:
        expires, path, comment, domain, max_age, secure, version, httponly
        See http://de.wikipedia.org/wiki/HTTP-Cookie#Aufbau for details
        """
        if not isinstance(value, basestring):
            if not secret:
                raise TypeError('Cookies must be strings when secret is not set')
            value = cookie_encode(value, secret).decode('ascii') #2to3 hack
        self.COOKIES[key] = value
        for k, v in kargs.iteritems():
            # Cookie attribute names use dashes (max_age -> max-age).
            self.COOKIES[key][k.replace('_', '-')] = v
    def get_content_type(self):
        """ Current 'Content-Type' header. """
        return self.headers['Content-Type']
    def set_content_type(self, value):
        self.headers['Content-Type'] = value
    content_type = property(get_content_type, set_content_type, None,
                            get_content_type.__doc__)
# Data Structures
class MultiDict(DictMixin):
    """ A dict that remembers old values for each key.

    Internally each key maps to a list of values; the plain mapping
    interface exposes only the most recent (last) value.
    """
    # collections.MutableMapping would be better for Python >= 2.6
    def __init__(self, *a, **k):
        self.dict = dict()
        for k, v in dict(*a, **k).iteritems():
            self[k] = v
    def __len__(self): return len(self.dict)
    def __iter__(self): return iter(self.dict)
    def __contains__(self, key): return key in self.dict
    def __delitem__(self, key): del self.dict[key]
    def keys(self): return self.dict.keys()
    def __getitem__(self, key): return self.get(key, KeyError, -1)
    def __setitem__(self, key, value): self.append(key, value)
    def append(self, key, value): self.dict.setdefault(key, []).append(value)
    def replace(self, key, value): self.dict[key] = [value]
    def getall(self, key): return self.dict.get(key) or []
    def get(self, key, default=None, index=-1):
        # Passing KeyError as the default acts as a sentinel: a missing key
        # then raises instead of returning a fallback value.
        if key not in self.dict and default != KeyError:
            return [default][index]
        return self.dict[key][index]
    def iterallitems(self):
        # Yield every stored (key, value) pair, including older values.
        for key, values in self.dict.iteritems():
            for value in values:
                yield key, value
class HeaderDict(MultiDict):
    """ Same as :class:`MultiDict`, but title()s the keys and overwrites by default. """
    def __contains__(self, key): return MultiDict.__contains__(self, self.httpkey(key))
    def __getitem__(self, key): return MultiDict.__getitem__(self, self.httpkey(key))
    def __delitem__(self, key): return MultiDict.__delitem__(self, self.httpkey(key))
    def __setitem__(self, key, value): self.replace(key, value)
    def get(self, key, default=None, index=-1): return MultiDict.get(self, self.httpkey(key), default, index)
    def append(self, key, value): return MultiDict.append(self, self.httpkey(key), str(value))
    def replace(self, key, value): return MultiDict.replace(self, self.httpkey(key), str(value))
    def getall(self, key): return MultiDict.getall(self, self.httpkey(key))
    def httpkey(self, key):
        # Normalize header names: 'content_type' / 'content-type' -> 'Content-Type'.
        return str(key).replace('_','-').title()
class AppStack(list):
    """ A stack implementation. """
    def __call__(self):
        """ Return the current default app (top of the stack). """
        return self[-1]
    def push(self, value=None):
        """ Add a new Bottle instance to the stack and return it.

        :param value: an existing Bottle app; anything else (including the
            default None) is replaced by a freshly created app.
        """
        if not isinstance(value, Bottle):
            value = Bottle()
        self.append(value)
        return value
class WSGIFileWrapper(object):
    """ Minimal file wrapper for WSGI responses: iterates a file-like
    object in fixed-size chunks and forwards common file methods. """
    def __init__(self, fp, buffer_size=1024*64):
        self.fp = fp
        self.buffer_size = buffer_size
        # Expose the wrapped object's file methods directly, when present.
        for attr in ('fileno', 'close', 'read', 'readlines'):
            if hasattr(fp, attr):
                setattr(self, attr, getattr(fp, attr))
    def __iter__(self):
        chunk = self.fp.read(self.buffer_size)
        while chunk:
            yield chunk
            chunk = self.fp.read(self.buffer_size)
# Module level functions
# Output filter
def dict2json(d):
    """ Serialize *d* as JSON and set the response content type accordingly. """
    response.content_type = 'application/json'
    return json_dumps(d)
def abort(code=500, text='Unknown Error: Application stopped.'):
    """ Abort request handling by raising an HTTPError.

    :param code: HTTP status code (default: 500)
    :param text: error text shown on the error page

    Fixes the misspelled default message ('Appliction') which was
    user-visible on generated error pages.
    """
    raise HTTPError(code, text)
def redirect(url, code=303):
    """ Aborts execution and causes a 303 redirect """
    scriptname = request.environ.get('SCRIPT_NAME', '').rstrip('/') + '/'
    # Resolve *url* relative to the application root first, then relative to
    # the full request URL, so relative targets work behind mounted prefixes.
    location = urljoin(request.url, urljoin(scriptname, url))
    raise HTTPResponse("", status=code, header=dict(Location=location))
def send_file(*a, **k): #BC 0.6.4
    """ Raises the output of static_file(). (deprecated) """
    raise static_file(*a, **k)
def static_file(filename, root, guessmime=True, mimetype=None, download=False):
    """ Opens a file in a safe way and returns an HTTPResponse or HTTPError
    object with status code 200, 304, 403 or 404. Sets Content-Type,
    Content-Length and Last-Modified header. Obeys If-Modified-Since
    header and HEAD requests.
    """
    root = os.path.abspath(root) + os.sep
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    header = dict()
    # Directory-traversal guard: the resolved path must stay below root.
    if not filename.startswith(root):
        return HTTPError(403, "Access denied.")
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(404, "File does not exist.")
    if not os.access(filename, os.R_OK):
        return HTTPError(403, "You do not have permission to access this file.")
    if not mimetype and guessmime:
        header['Content-Type'] = mimetypes.guess_type(filename)[0]
    else:
        header['Content-Type'] = mimetype if mimetype else 'text/plain'
    # download=True means "use the basename"; a string is used verbatim.
    if download == True:
        download = os.path.basename(filename)
    if download:
        header['Content-Disposition'] = 'attachment; filename="%s"' % download
    stats = os.stat(filename)
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    header['Last-Modified'] = lm
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = ims.split(";")[0].strip() # IE sends "<date>; length=146"
        ims = parse_date(ims)
        # Not modified since the client's cached copy: reply 304, no body.
        if ims is not None and ims >= int(stats.st_mtime):
            header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
            return HTTPResponse(status=304, header=header)
    header['Content-Length'] = stats.st_size
    if request.method == 'HEAD':
        return HTTPResponse('', header=header)
    else:
        return HTTPResponse(open(filename, 'rb'), header=header)
# Utilities
def debug(mode=True):
    """ Switch the global DEBUG flag on or off.
    There is only one debug level supported at the moment."""
    global DEBUG
    DEBUG = True if mode else False
def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch.
    Returns None for unparseable input. """
    try:
        parts = email.utils.parsedate_tz(ims)
        offset = parts[9] or 0
        # mktime interprets the tuple as local time; subtracting the
        # header's offset and the local timezone yields a UTC epoch.
        return time.mktime(parts[:8] + (0,)) - offset - time.timezone
    except (TypeError, ValueError, IndexError):
        return None
def parse_auth(header):
    """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
    try:
        scheme, payload = header.split(None, 1)
        if scheme.lower() != 'basic':
            return None  # only basic auth is supported
        user, pwd = base64.b64decode(payload).split(':', 1)
        return user, pwd
    except (KeyError, ValueError, TypeError):
        # Malformed header, bad base64 or missing ':' separator.
        return None
def _lscmp(a, b):
''' Compares two strings in a cryptographically save way:
Runtime is not affected by a common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
    ''' Encode and sign a pickle-able object. Return a string '''
    # Payload is pickled and base64-encoded; an HMAC signature keyed with
    # *key* is prepended so tampering can be detected by cookie_decode().
    msg = base64.b64encode(pickle.dumps(data, -1))
    sig = base64.b64encode(hmac.new(key, msg).digest())
    return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
    ''' Verify and decode an encoded string. Return an object or None'''
    data = tob(data)
    if cookie_is_encoded(data):
        sig, msg = data.split(tob('?'), 1)
        # Recompute the signature and compare in constant time (_lscmp).
        if _lscmp(sig[1:], base64.b64encode(hmac.new(key, msg).digest())):
            # NOTE(review): pickle.loads on cookie data is only safe because
            # the HMAC check above rejects values not signed with *key*.
            return pickle.loads(base64.b64decode(msg))
    return None
def cookie_is_encoded(data):
    ''' Return True if the argument looks like a encoded cookie.'''
    # Encoded cookies have the shape '!<signature>?<payload>'.
    return bool(data.startswith(tob('!')) and tob('?') in data)
def tonativefunc(enc='utf-8'):
    ''' Returns a function that turns everything into 'native' strings
    (unicode on Python 3, byte strings on Python 2) using *enc*. '''
    if sys.version_info >= (3, 0, 0):
        def to_native(x):
            return x.decode(enc) if isinstance(x, bytes) else str(x)
    else:
        def to_native(x):
            return x.encode(enc) if isinstance(x, unicode) else str(x)
    return to_native
def yieldroutes(func):
    """ Return a generator for routes that match the signature (name, args)
    of the func parameter. This may yield more than one route if the function
    takes optional keyword arguments. The output is best described by example:
        a() -> '/a'
        b(x, y) -> '/b/:x/:y'
        c(x, y=5) -> '/c/:x' and '/c/:x/:y'
        d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
    """
    # Double underscores in the function name become path separators.
    path = func.__name__.replace('__','/').lstrip('/')
    # NOTE(review): inspect.getargspec is deprecated/removed in modern
    # Python 3 -- fine for this Python 2 codebase only.
    spec = inspect.getargspec(func)
    # Number of required (no-default) positional arguments.
    argc = len(spec[0]) - len(spec[3] or [])
    path += ('/:%s' * argc) % tuple(spec[0][:argc])
    yield path
    # Each optional argument yields one more, longer route variant.
    for arg in spec[0][argc:]:
        path += '/:%s' % arg
        yield path
def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

    :param script_name: the SCRIPT_NAME path
    :param path_info: the PATH_INFO path
    :param shift: the number of path fragments to shift. May be negative
        to change the shift direction. (default: 1)
    :return: the modified (script_name, path_info) tuple
    :raises AssertionError: if there are not enough fragments to shift
    '''
    if shift == 0:
        return script_name, path_info
    frags = path_info.strip('/').split('/')
    script = script_name.strip('/').split('/')
    # splitting an empty string yields [''] -- normalize to an empty list
    if frags == ['']:
        frags = []
    if script == ['']:
        script = []
    if 0 < shift <= len(frags):
        script = script + frags[:shift]
        frags = frags[shift:]
    elif -len(script) <= shift < 0:
        frags = script[shift:] + frags
        script = script[:shift]
    else:
        side = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % side)
    new_script_name = '/' + '/'.join(script)
    new_path_info = '/' + '/'.join(frags)
    # Preserve a trailing slash on the remaining PATH_INFO.
    if path_info.endswith('/') and frags:
        new_path_info += '/'
    return new_script_name, new_path_info
# Decorators
#TODO: Replace default_app() with app()
def validate(**vkargs):
    """
    Validates and manipulates keyword arguments by user defined callables.
    Handles ValueError and missing arguments by raising HTTPError(403).
    """
    def decorator(func):
        def wrapper(**kargs):
            for key, value in vkargs.iteritems():
                if key not in kargs:
                    abort(403, 'Missing parameter: %s' % key)
                try:
                    # Each validator converts (and thereby checks) its value.
                    kargs[key] = value(kargs[key])
                except ValueError:
                    abort(403, 'Wrong parameter format for: %s' % key)
            return func(**kargs)
        return wrapper
    return decorator
# Module-level shortcuts that forward to the corresponding methods of the
# current default application (app() returns the top of the app stack).
route = functools.wraps(Bottle.route)(lambda *a, **ka: app().route(*a, **ka))
get = functools.wraps(Bottle.get)(lambda *a, **ka: app().get(*a, **ka))
post = functools.wraps(Bottle.post)(lambda *a, **ka: app().post(*a, **ka))
put = functools.wraps(Bottle.put)(lambda *a, **ka: app().put(*a, **ka))
delete = functools.wraps(Bottle.delete)(lambda *a, **ka: app().delete(*a, **ka))
error = functools.wraps(Bottle.error)(lambda *a, **ka: app().error(*a, **ka))
url = functools.wraps(Bottle.get_url)(lambda *a, **ka: app().get_url(*a, **ka))
mount = functools.wraps(Bottle.mount)(lambda *a, **ka: app().mount(*a, **ka))
def default():
    """ Deprecated alias for the @error(404) decorator. """
    depr("The default() decorator is deprecated. Use @error(404) instead.")
    return error(404)
# Server adapter
class ServerAdapter(object):
    """ Base class for pluggable server backends. Subclasses override
    run() to start an actual WSGI server. """
    quiet = False  # suppresses the startup banner when True
    def __init__(self, host='127.0.0.1', port=8080, **kargs):
        self.options = kargs
        self.host = host
        self.port = int(port)  # accept string ports (e.g. from sys.argv)
    def run(self, handler): # pragma: no cover
        """ Start the server and block. Overridden by subclasses. """
        pass
    def __repr__(self):
        args = ', '.join('%s=%s' % (k, repr(v))
                         for k, v in self.options.items())
        return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
    """ Run the app once per request through wsgiref's CGI gateway. """
    quiet = True  # a startup banner would corrupt the CGI output
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        CGIHandler().run(handler) # Just ignore host and port here
class FlupFCGIServer(ServerAdapter):
    """ FastCGI backend using the third-party flup package. """
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        flup.server.fcgi.WSGIServer(handler, bindAddress=(self.host, self.port)).run()
class WSGIRefServer(ServerAdapter):
    """ Single-threaded default server from the standard library (wsgiref). """
    def run(self, handler): # pragma: no cover
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        if self.quiet:
            # Suppress per-request log lines written to stderr.
            class QuietHandler(WSGIRequestHandler):
                def log_request(*args, **kw): pass
            self.options['handler_class'] = QuietHandler
        srv = make_server(self.host, self.port, handler, **self.options)
        srv.serve_forever()
class CherryPyServer(ServerAdapter):
    """ Multi-threaded server based on CherryPy's wsgiserver. """
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
        server.start()
class PasteServer(ServerAdapter):
    """ Server from the Paste package, with request logging (TransLogger). """
    def run(self, handler): # pragma: no cover
        from paste import httpserver
        from paste.translogger import TransLogger
        app = TransLogger(handler)
        httpserver.serve(app, host=self.host, port=str(self.port), **self.options)
class FapwsServer(ServerAdapter):
    """
    Extremly fast webserver using libev.
    See http://william-os4y.livejournal.com/
    """
    def run(self, handler): # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base
        evwsgi.start(self.host, self.port)
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            # fapws is single-process; advertise that to the application.
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('',app))
        evwsgi.run()
class TornadoServer(ServerAdapter):
    """ Untested. As described here:
        http://github.com/facebook/tornado/blob/master/tornado/wsgi.py#L187 """
    def run(self, handler): # pragma: no cover
        import tornado.wsgi
        import tornado.httpserver
        import tornado.ioloop
        container = tornado.wsgi.WSGIContainer(handler)
        server = tornado.httpserver.HTTPServer(container)
        # NOTE(review): self.host is ignored here; tornado binds all
        # interfaces by default -- confirm whether that is intended.
        server.listen(port=self.port)
        tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
    """ Untested. Adapter for Google App Engine's CGI-style runtime. """
    quiet = True
    def run(self, handler):
        from google.appengine.ext.webapp import util
        util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
    """ Untested. Runs the app through twisted.web's WSGI resource. """
    def run(self, handler):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        thread_pool = ThreadPool()
        thread_pool.start()
        # Make sure worker threads are stopped when the reactor shuts down.
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
        reactor.listenTCP(self.port, factory, interface=self.host)
        reactor.run()
class DieselServer(ServerAdapter):
    """ Untested. Adapter for the diesel async framework. """
    def run(self, handler):
        from diesel.protocols.wsgi import WSGIApplication
        # NOTE(review): self.host is not passed through -- confirm whether
        # diesel supports binding to a specific interface.
        app = WSGIApplication(handler, port=self.port)
        app.run()
class GunicornServer(ServerAdapter):
    """ Untested. Pre-forking server with a hard-coded 4 workers. """
    def run(self, handler):
        import gunicorn.arbiter
        gunicorn.arbiter.Arbiter((self.host, self.port), 4, handler).run()
class EventletServer(ServerAdapter):
    """ Untested. Green-thread server from the eventlet package. """
    def run(self, handler):
        from eventlet import wsgi, listen
        wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
    """ Untested. As requested in issue 63
        http://github.com/defnull/bottle/issues/#issue/63 """
    def run(self, handler):
        from rocket import Rocket
        server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
        server.start()
class AutoServer(ServerAdapter):
    """ Untested. Tries each adapter in order and uses the first one whose
    backend package is importable. """
    adapters = [CherryPyServer, PasteServer, TwistedServer, WSGIRefServer]
    def run(self, handler):
        for sa in self.adapters:
            try:
                return sa(self.host, self.port, **self.options).run(handler)
            except ImportError:
                # Backend not installed -- fall through to the next one.
                pass
def run(app=None, server=WSGIRefServer, host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, **kargs):
    """ Runs bottle as a web server.

    :param app: WSGI application (default: the current default app)
    :param server: ServerAdapter subclass or instance
    :param host: interface to bind to
    :param port: TCP port to listen on
    :param interval: auto-reloader poll interval in seconds
    :param reloader: restart the server when module source files change
    :param quiet: suppress the startup banner
    Extra keyword arguments are passed to the server adapter.
    """
    app = app if app else default_app()
    # Instantiate server, if it is a class instead of an instance
    if isinstance(server, type):
        server = server(host=host, port=port, **kargs)
    if not isinstance(server, ServerAdapter):
        raise RuntimeError("Server must be a subclass of WSGIAdapter")
    server.quiet = server.quiet or quiet
    if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
        print "Bottle server starting up (using %s)..." % repr(server)
        print "Listening on http://%s:%d/" % (server.host, server.port)
        print "Use Ctrl-C to quit."
        print
    try:
        if reloader:
            interval = min(interval, 1)
            # BOTTLE_CHILD marks the worker spawned by the observer process.
            if os.environ.get('BOTTLE_CHILD'):
                _reloader_child(server, app, interval)
            else:
                _reloader_observer(server, app, interval)
        else:
            server.run(app)
    except KeyboardInterrupt: pass
    if not server.quiet and not os.environ.get('BOTTLE_CHILD'):
        print "Shutting down..."
class FileCheckerThread(threading.Thread):
    ''' Thread that periodically checks for changed module files. '''
    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.lockfile, self.interval = lockfile, interval
        # Exit status codes:
        #1: lockfile too old; 2: lockfile missing
        #3: module file changed; 5: external exit (set by the main thread)
        self.status = 0
    def run(self):
        exists = os.path.exists
        mtime = lambda path: os.stat(path).st_mtime
        files = dict()
        # Snapshot the mtime of every loaded module's source file.
        for module in sys.modules.values():
            try:
                path = inspect.getsourcefile(module)
                if path and exists(path): files[path] = mtime(path)
            except TypeError: pass
        while not self.status:
            for path, lmtime in files.iteritems():
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 3
            if not exists(self.lockfile):
                self.status = 2
            elif mtime(self.lockfile) < time.time() - self.interval - 5:
                # Observer stopped touching the lockfile -- assume it died.
                self.status = 1
            if not self.status:
                time.sleep(self.interval)
        if self.status != 5:
            # Raise KeyboardInterrupt in the main thread to stop the server.
            thread.interrupt_main()
def _reloader_child(server, app, interval):
    ''' Start the server and check for modified files in a background thread.
        As soon as an update is detected, KeyboardInterrupt is thrown in
        the main thread to exit the server loop. The process exits with status
        code 3 to request a reload by the observer process. If the lockfile
        is not modified in 2*interval second or missing, we assume that the
        observer process died and exit with status code 1 or 2.
    '''
    lockfile = os.environ.get('BOTTLE_LOCKFILE')
    bgcheck = FileCheckerThread(lockfile, interval)
    try:
        bgcheck.start()
        server.run(app)
    except KeyboardInterrupt, e: pass
    # Tell the checker thread to exit silently, but keep its last status.
    bgcheck.status, status = 5, bgcheck.status
    bgcheck.join() # bgcheck.status == 5 --> silent exit
    if status: sys.exit(status)
def _reloader_observer(server, app, interval):
    ''' Start a child process with identical commandline arguments and restart
        it as long as it exits with status code 3. Also create a lockfile and
        touch it (update mtime) every interval seconds.
    '''
    fd, lockfile = tempfile.mkstemp(prefix='bottle-reloader.', suffix='.lock')
    os.close(fd) # We only need this file to exist. We never write to it
    try:
        while os.path.exists(lockfile):
            args = [sys.executable] + sys.argv
            environ = os.environ.copy()
            environ['BOTTLE_CHILD'] = 'true'
            environ['BOTTLE_LOCKFILE'] = lockfile
            p = subprocess.Popen(args, env=environ)
            while p.poll() is None: # Busy wait...
                os.utime(lockfile, None) # I am alive!
                time.sleep(interval)
            # Exit code 3 means "source changed, restart me" (see
            # FileCheckerThread); anything else ends the observer too.
            if p.poll() != 3:
                if os.path.exists(lockfile): os.unlink(lockfile)
                sys.exit(p.poll())
            elif not server.quiet:
                print "Reloading server..."
    except KeyboardInterrupt: pass
    if os.path.exists(lockfile): os.unlink(lockfile)
# Templates
class TemplateError(HTTPError):
    """ Raised for template lookup/compilation problems; rendered as HTTP 500. """
    def __init__(self, message):
        HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    # NOTE(review): 'extentions' (sic) is a public attribute name used by
    # subclasses and cannot be renamed without breaking them.
    extentions = ['tpl','html','thtml','stpl']
    settings = {} #used in prepare()
    defaults = {} #used in render()
    def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
        """ Create a new template.
        If the source parameter (str or buffer) is missing, the name argument
        is used to guess a template filename. Subclasses can assume that
        self.source and/or self.filename are set. Both are strings.
        The lookup, encoding and settings parameters are stored as instance
        variables.
        The lookup parameter stores a list containing directory paths.
        The encoding parameter should be used to decode byte strings or files.
        The settings parameter contains a dict for engine-specific settings.
        """
        self.name = name
        self.source = source.read() if hasattr(source, 'read') else source
        self.filename = source.filename if hasattr(source, 'filename') else None
        self.lookup = map(os.path.abspath, lookup)
        self.encoding = encoding
        self.settings = self.settings.copy() # Copy from class variable
        self.settings.update(settings) # Apply
        if not self.source and self.name:
            self.filename = self.search(self.name, self.lookup)
            if not self.filename:
                raise TemplateError('Template %s not found.' % repr(name))
        if not self.source and not self.filename:
            raise TemplateError('No template specified.')
        self.prepare(**self.settings)
    @classmethod
    def search(cls, name, lookup=[]):
        """ Search name in all directories specified in lookup.
        First without, then with common extensions. Return first hit. """
        if os.path.isfile(name): return name
        for spath in lookup:
            fname = os.path.join(spath, name)
            if os.path.isfile(fname):
                return fname
            for ext in cls.extentions:
                if os.path.isfile('%s.%s' % (fname, ext)):
                    return '%s.%s' % (fname, ext)
    @classmethod
    def global_config(cls, key, *args):
        ''' This reads or sets the global settings stored in class.settings. '''
        if args:
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]
    def prepare(self, **options):
        """ Run preparations (parsing, caching, ...).
        It should be possible to call this again to refresh a template or to
        update settings.
        """
        raise NotImplementedError
    def render(self, **args):
        """ Render the template with the specified local variables and return
        a single byte or unicode string. If it is a byte string, the encoding
        must match self.encoding. This method must be thread-safe!
        """
        raise NotImplementedError
class MakoTemplate(BaseTemplate):
    """ Adapter for the Mako template engine. """
    def prepare(self, **options):
        from mako.template import Template
        from mako.lookup import TemplateLookup
        options.update({'input_encoding':self.encoding})
        #TODO: This is a hack... http://github.com/defnull/bottle/issues#issue/8
        mylookup = TemplateLookup(directories=['.']+self.lookup, **options)
        if self.source:
            self.tpl = Template(self.source, lookup=mylookup)
        else: #mako cannot guess extentions. We can, but only at top level...
            name = self.name
            if not os.path.splitext(name)[1]:
                name += os.path.splitext(self.filename)[1]
            self.tpl = mylookup.get_template(name)
    def render(self, **args):
        # Class-level defaults are applied first, call args override them.
        _defaults = self.defaults.copy()
        _defaults.update(args)
        return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
    """ Adapter for the Cheetah template engine. """
    def prepare(self, **options):
        from Cheetah.Template import Template
        # Template variables are passed through a thread-local searchList
        # so concurrent renders do not interfere with each other.
        self.context = threading.local()
        self.context.vars = {}
        options['searchList'] = [self.context.vars]
        if self.source:
            self.tpl = Template(source=self.source, **options)
        else:
            self.tpl = Template(file=self.filename, **options)
    def render(self, **args):
        self.context.vars.update(self.defaults)
        self.context.vars.update(args)
        out = str(self.tpl)
        self.context.vars.clear()
        return [out]
class Jinja2Template(BaseTemplate):
    """ Adapter for the Jinja2 template engine. """
    def prepare(self, filters=None, tests=None, **kwargs):
        from jinja2 import Environment, FunctionLoader
        if 'prefix' in kwargs: # TODO: to be removed after a while
            raise RuntimeError('The keyword argument `prefix` has been removed. '
                'Use the full jinja2 environment name line_statement_prefix instead.')
        self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
        if filters: self.env.filters.update(filters)
        if tests: self.env.tests.update(tests)
        if self.source:
            self.tpl = self.env.from_string(self.source)
        else:
            self.tpl = self.env.get_template(self.filename)
    def render(self, **args):
        _defaults = self.defaults.copy()
        _defaults.update(args)
        return self.tpl.render(**_defaults).encode("utf-8")
    def loader(self, name):
        """ Template source loader callback used by FunctionLoader. """
        fname = self.search(name, self.lookup)
        if fname:
            with open(fname, "rb") as f:
                return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
    """ Bottle's built-in template engine: translates template markup
    ({{expr}} substitutions and %-prefixed python statements) into python
    code, compiles it once, and executes it per render. """
    # Python statements that open an indented block in the generated code.
    blocks = ('if','elif','else','try','except','finally','for','while','with','def','class')
    # Statements that close the current block before opening their own
    # (e.g. 'else:' after 'if:').
    dedent_blocks = ('elif', 'else', 'except', 'finally')
    def prepare(self, escape_func=cgi.escape, noescape=False):
        """ Translate the template source and compile it to a code object.

        :param escape_func: applied to {{...}} substitutions (HTML escape)
        :param noescape: if True, swap the behaviour of {{x}} and {{!x}}
        """
        self.cache = {}  # subtemplate cache used by %include / %rebase
        if self.source:
            self.code = self.translate(self.source)
            self.co = compile(self.code, '<string>', 'exec')
        else:
            self.code = self.translate(open(self.filename).read())
            self.co = compile(self.code, self.filename, 'exec')
        enc = self.encoding
        self._str = lambda x: touni(x, enc)
        self._escape = lambda x: escape_func(touni(x, enc))
        if noescape:
            self._str, self._escape = self._escape, self._str
    def translate(self, template):
        """ Translate template markup into executable python source (str). """
        stack = [] # Current Code indentation
        lineno = 0 # Current line of code
        ptrbuffer = [] # Buffer for printable strings and token tuple instances
        codebuffer = [] # Buffer for generated python code
        touni = functools.partial(unicode, encoding=self.encoding)
        multiline = dedent = False
        def yield_tokens(line):
            # Split a text line into TXT/RAW/CMD tokens at {{...}} markers;
            # odd-numbered split parts are the expressions inside {{ }}.
            for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
                if i % 2:
                    if part.startswith('!'): yield 'RAW', part[1:]
                    else: yield 'CMD', part
                else: yield 'TXT', part
        def split_comment(codeline):
            """ Removes comments from a line of code. """
            line = codeline.splitlines()[0]
            try:
                tokens = list(tokenize.generate_tokens(iter(line).next))
            except tokenize.TokenError:
                # Incomplete code (e.g. multiline statement): fall back to a
                # naive '#' split.
                return line.rsplit('#',1) if '#' in line else (line, '')
            for token in tokens:
                if token[0] == tokenize.COMMENT:
                    start, end = token[2][1], token[3][1]
                    return codeline[:start] + codeline[end:], codeline[start:end]
            return line, ''
        def flush(): # Flush the ptrbuffer
            if not ptrbuffer: return
            cline = ''
            for line in ptrbuffer:
                for token, value in line:
                    if token == 'TXT': cline += repr(value)
                    elif token == 'RAW': cline += '_str(%s)' % value
                    elif token == 'CMD': cline += '_escape(%s)' % value
                    cline += ', '
                cline = cline[:-2] + '\\\n'
            cline = cline[:-2]
            if cline[:-1].endswith('\\\\\\\\\\n'):
                cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
            cline = '_printlist([' + cline + '])'
            del ptrbuffer[:] # Do this before calling code() again
            code(cline)
        def code(stmt):
            # Emit python source at the current block indentation level.
            for line in stmt.splitlines():
                codebuffer.append(' ' * len(stack) + line.strip())
        for line in template.splitlines(True):
            lineno += 1
            line = line if isinstance(line, unicode)\
                        else unicode(line, encoding=self.encoding)
            if lineno <= 2:
                # Honor a PEP263-style coding marker in the first two lines.
                m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
                if m: self.encoding = m.group(1)
                if m: line = line.replace('coding','coding (removed)')
            if line.strip()[:2].count('%') == 1:
                line = line.split('%',1)[1].lstrip() # Full line following the %
                cline = split_comment(line)[0].strip()
                cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
                flush() ##encodig (TODO: why?)
                if cmd in self.blocks or multiline:
                    cmd = multiline or cmd
                    dedent = cmd in self.dedent_blocks # "else:"
                    # NOTE(review): 'oneline' is only bound further below; a
                    # template whose first statement is a dedent keyword would
                    # hit an unbound local here -- confirm upstream behaviour.
                    if dedent and not oneline and not multiline:
                        cmd = stack.pop()
                    code(line)
                    oneline = not cline.endswith(':') # "if 1: pass"
                    multiline = cmd if cline.endswith('\\') else False
                    if not oneline and not multiline:
                        stack.append(cmd)
                elif cmd == 'end' and stack:
                    code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
                elif cmd == 'include':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
                    elif p:
                        code("_=_include(%s, _stdout)" % repr(p[0]))
                    else: # Empty %include -> reverse of %rebase
                        code("_printlist(_base)")
                elif cmd == 'rebase':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
                    elif p:
                        code("globals()['_rebase']=(%s, {})" % repr(p[0]))
                else:
                    code(line)
            else: # Line starting with text (not '%') or '%%' (escaped)
                if line.strip().startswith('%%'):
                    line = line.replace('%%', '%', 1)
                ptrbuffer.append(yield_tokens(line))
        flush()
        return '\n'.join(codebuffer) + '\n'
    def subtemplate(self, _name, _stdout, **args):
        # Compile and cache included templates on first use.
        if _name not in self.cache:
            self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
        return self.cache[_name].execute(_stdout, **args)
    def execute(self, _stdout, **args):
        """ Execute the compiled template; output is appended to _stdout. """
        env = self.defaults.copy()
        env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
                    '_include': self.subtemplate, '_str': self._str,
                    '_escape': self._escape})
        env.update(args)
        eval(self.co, env)
        if '_rebase' in env:
            # %rebase: render the accumulated output inside a base template.
            subtpl, rargs = env['_rebase']
            subtpl = self.__class__(name=subtpl, lookup=self.lookup)
            rargs['_base'] = _stdout[:] #copy stdout
            del _stdout[:] # clear stdout
            return subtpl.execute(_stdout, **rargs)
        return env
    def render(self, **args):
        """ Render the template using keyword arguments as local variables. """
        stdout = []
        self.execute(stdout, **args)
        return ''.join(stdout)
def template(tpl, template_adapter=SimpleTemplate, **kwargs):
    '''
    Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    '''
    # Prepared templates are cached in TEMPLATES; with DEBUG on they are
    # re-prepared on every call so edits show up immediately.
    if tpl not in TEMPLATES or DEBUG:
        settings = kwargs.get('template_settings',{})
        lookup = kwargs.get('template_lookup', TEMPLATE_PATH)
        if isinstance(tpl, template_adapter):
            TEMPLATES[tpl] = tpl
            if settings: TEMPLATES[tpl].prepare(**settings)
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            # Heuristic: markup characters mean *tpl* is template source,
            # otherwise it is treated as a template name/filename.
            TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
        else:
            TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tpl]:
        abort(500, 'Template (%s) not found' % tpl)
    return TEMPLATES[tpl].render(**kwargs)
# Engine-specific shortcuts for template() with a preselected adapter.
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
    ''' Decorator: renders a template for a handler.
        The handler can control its behavior like that:

        - return a dict of template vars to fill out the template
        - return something other than a dict and the view decorator will not
          process the template, but return the handler result as is.
          This includes returning a HTTPResponse(dict) to get,
          for instance, JSON with autojson or other castfilters
    '''
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            if isinstance(result, (dict, DictMixin)):
                # Decorator defaults first, handler result overrides them.
                tplvars = defaults.copy()
                tplvars.update(result)
                return template(tpl_name, **tplvars)
            return result
        return wrapper
    return decorator
# Convenience wrappers: view() pre-bound to the alternative engines.
# NOTE(review): view() has no template_adapter parameter -- the keyword is
# collected into **defaults and only reaches the engine because template()
# accepts a template_adapter keyword. Confirm this indirection is intended.
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
# Module initialization and configuration
TEMPLATE_PATH = ['./', './views/']  # directories searched for template files
TEMPLATES = {}  # cache of prepared templates, keyed by name or source string
DEBUG = False  # when True, templates are re-compiled on every request
MEMFILE_MAX = 1024*100  # size limit in bytes -- usage not visible in this chunk; TODO confirm
# Numeric HTTP status code -> upper-case reason phrase.
HTTP_CODES = {
    100: 'CONTINUE',
    101: 'SWITCHING PROTOCOLS',
    200: 'OK',
    201: 'CREATED',
    202: 'ACCEPTED',
    203: 'NON-AUTHORITATIVE INFORMATION',
    204: 'NO CONTENT',
    205: 'RESET CONTENT',
    206: 'PARTIAL CONTENT',
    300: 'MULTIPLE CHOICES',
    301: 'MOVED PERMANENTLY',
    302: 'FOUND',
    303: 'SEE OTHER',
    304: 'NOT MODIFIED',
    305: 'USE PROXY',
    306: 'RESERVED',
    307: 'TEMPORARY REDIRECT',
    400: 'BAD REQUEST',
    401: 'UNAUTHORIZED',
    402: 'PAYMENT REQUIRED',
    403: 'FORBIDDEN',
    404: 'NOT FOUND',
    405: 'METHOD NOT ALLOWED',
    406: 'NOT ACCEPTABLE',
    407: 'PROXY AUTHENTICATION REQUIRED',
    408: 'REQUEST TIMEOUT',
    409: 'CONFLICT',
    410: 'GONE',
    411: 'LENGTH REQUIRED',
    412: 'PRECONDITION FAILED',
    413: 'REQUEST ENTITY TOO LARGE',
    414: 'REQUEST-URI TOO LONG',
    415: 'UNSUPPORTED MEDIA TYPE',
    416: 'REQUESTED RANGE NOT SATISFIABLE',
    417: 'EXPECTATION FAILED',
    500: 'INTERNAL SERVER ERROR',
    501: 'NOT IMPLEMENTED',
    502: 'BAD GATEWAY',
    503: 'SERVICE UNAVAILABLE',
    504: 'GATEWAY TIMEOUT',
    505: 'HTTP VERSION NOT SUPPORTED',
}
""" A dict of known HTTP error and status codes """
ERROR_PAGE_TEMPLATE = SimpleTemplate("""
%try:
%from bottle import DEBUG, HTTP_CODES, request
%status_name = HTTP_CODES.get(e.status, 'Unknown').title()
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error {{e.status}}: {{status_name}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd; padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error {{e.status}}: {{status_name}}</h1>
<p>Sorry, the requested URL <tt>{{request.url}}</tt> caused an error:</p>
<pre>{{str(e.output)}}</pre>
%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%end
%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%end
</body>
</html>
%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to sys.path
%end
""")
""" The HTML template used for error messages """
# Module-level singletons shared by the whole framework.
request = Request()
""" Whenever a page is requested, the :class:`Bottle` WSGI handler stores
metadata about the current request into this instance of :class:`Request`.
It is thread-safe and can be accessed from within handler functions. """
response = Response()
""" The :class:`Bottle` WSGI handler uses metadata assigned to this instance
of :class:`Response` to generate the WSGI response. """
local = threading.local()
""" Thread-local namespace. Not used by Bottle, but could get handy """
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
| Python |
# Import application settings (star-import: names come from settings.py).
from settings import *
# Log a message each time this module gets loaded.
logging.info('Loading %s', __name__)
# Import datastore models (star-import: names come from model.py).
from model import *
class Main(webapp.RequestHandler):
    """Serves the splash (home) page."""

    def get(self):
        logging.debug('Viewing Splash Page')
        # The splash page needs no template variables.
        rendered = template.render(tpl_path('home.html'), {})
        self.response.out.write(rendered)
# #######################
# LOAD QUIZZES
# #######################
class InfluenceQuiz(webapp.RequestHandler):
    """Serves the Taught Or Not (influence) quiz page."""

    def get(self):
        logging.debug('Viewing Influence Quiz')
        global LOGINSTATUS
        # Randomly ask about either direction of the influence relation.
        direction = random.choice(['influenced', 'influenced_by'])
        values = {
            'url': login_url(self.request.uri),
            'login_status': LOGINSTATUS,
            'url_linktext': login_text(),
            'influence': direction,
        }
        self.response.out.write(
            template.render(tpl_path('influence_quiz.html'), values))
class DeathQuiz(webapp.RequestHandler):
    """Serves the Shot Or Not (cause of death) quiz page."""

    def get(self):
        logging.debug('Viewing Death Quiz')
        global LOGINSTATUS
        values = {
            'url': login_url(self.request.uri),
            'login_status': LOGINSTATUS,
            'url_linktext': login_text(),
            # Randomize whether the correct answer is shown on top.
            'correct_top': random.choice(['true', 'false']),
        }
        self.response.out.write(
            template.render(tpl_path('death_quiz.html'), values))
# #######################
# VIEW SCORES
# #######################
class InfluenceViewScore(webapp.RequestHandler):
    """Shows the current user's score for the Taught Or Not quiz."""

    def get(self):
        """Fetch this user's grades, compute totals and percent correct,
        and render the score template."""
        logging.debug('Loading Influence Score')
        try:
            # All answers by this user (newest first) and the six most
            # recent ones for display.
            grades = InfluenceGrade.gql("WHERE author = :author ORDER BY date DESC",
                                        author=users.get_current_user())
            latest_grades = InfluenceGrade.gql("WHERE author = :author ORDER BY date DESC LIMIT 6",
                                               author=users.get_current_user())
            logging.info('Loading all InfluenceGrade items user %s'
                         % users.get_current_user().nickname())
        # Fixed: was a bare 'except:', which also swallows SystemExit
        # and KeyboardInterrupt.
        except Exception:
            raise_error('Error Retrieving Data From InfluenceGrade Model: %s'
                        % users.get_current_user())
        try:
            # Only the correctly answered items, for the numerator.
            correct_item = InfluenceGrade.gql("WHERE score = :1 AND author = :2",
                                              "correct", users.get_current_user() )
            logging.info('Loading correct InfluenceGrade items user %s'
                         % users.get_current_user().nickname())
        except Exception:
            raise_error('Error Retrieving Data From InfluenceGrade Model: %s'
                        % users.get_current_user())
        totalscore = correct_item.count()
        totalitems = grades.count()
        percentage = 0
        if totalitems > 0:
            # Integer percentage of correct answers (0 when no answers yet).
            percentage = int(float(totalscore) / float(totalitems) * 100)
        template_values = {
            'grades': latest_grades,
            'totalscore': totalscore,
            'totalitems': totalitems,
            'percentage': percentage,
            'url': login_url(self.request.uri),
            'url_linktext': login_text(),
        }
        path = tpl_path('influence_score.html')
        self.response.out.write(template.render(path, template_values))
class DeathViewScore(webapp.RequestHandler):
    """Shows the current user's score for the Shot Or Not quiz."""

    def get(self):
        """Fetch this user's grades, compute totals and percent correct,
        and render the score template."""
        logging.debug('Loading Death Score')
        try:
            # All answers by this user (newest first) and the six most
            # recent ones for display.
            grades = DeathGrade.gql("WHERE author = :author ORDER BY date DESC",
                                    author=users.get_current_user())
            latest_grades = DeathGrade.gql("WHERE author = :author ORDER BY date DESC LIMIT 6",
                                           author=users.get_current_user())
            logging.info('Loading all DeathGrade items user %s'
                         % users.get_current_user().nickname())
        # Fixed: was a bare 'except:', which also swallows SystemExit
        # and KeyboardInterrupt.
        except Exception:
            raise_error('Error Retrieving Data From DeathGrade Model: %s'
                        % users.get_current_user())
        try:
            # Only the correctly answered items, for the numerator.
            correct_item = DeathGrade.gql("WHERE score = :1 AND author = :2",
                                          "correct", users.get_current_user() )
            logging.info('Loading correct DeathGrade items user %s'
                         % users.get_current_user().nickname())
        except Exception:
            raise_error('Error Retrieving Data From DeathGrade Model: %s'
                        % users.get_current_user())
        totalscore = correct_item.count()
        totalitems = grades.count()
        percentage = 0
        if totalitems > 0:
            # Integer percentage of correct answers (0 when no answers yet).
            percentage = int(float(totalscore) / float(totalitems) * 100)
        template_values = {
            'grades': latest_grades,
            'totalscore': totalscore,
            'totalitems': totalitems,
            'percentage': percentage,
            'url': login_url(self.request.uri),
            'url_linktext': login_text(),
        }
        path = tpl_path('death_score.html')
        self.response.out.write(template.render(path, template_values))
# #######################
# STORE QUIZ SCORES
# #######################
class InfluenceTurnIn(webapp.RequestHandler):
    """Grades and stores one Taught Or Not answer, then redirects to /taught."""

    def get(self):
        logging.debug('Posting Influence Grade')
        global LOGINSTATUS
        if users.get_current_user():
            grade = InfluenceGrade()
            grade.subject_name = self.request.get('subject_name')
            grade.subject_id = self.request.get('subject_id')
            grade.object_id = self.request.get('object_id')
            grade.object_name = self.request.get('object_name')
            grade.influence_type = self.request.get('influence_type')
            grade.author = users.get_current_user()
            LOGINSTATUS = users.get_current_user()
            # An 'answer' value of "_app" marks the correct choice
            # (presumably set by the quiz template -- TODO confirm).
            if self.request.get('answer') == "_app":
                grade.score = "correct"
            else:
                grade.score = "incorrect"
            try:
                grade.put()
                logging.info('InfluenceGrade entered by user %s'
                             % users.get_current_user().nickname())
            # Fixed: was a bare 'except:'.
            except Exception:
                # Bug fix: the format arguments must be a tuple. The old code
                # applied '%' only to grade.author and passed the remaining
                # values as extra positional arguments to raise_error(),
                # which takes a single argument.
                raise_error('Error saving grade for user %s, with score %s: %s %s'
                            % (grade.author, grade.score, grade.subject_id,
                               grade.object_id))
        else:
            LOGINSTATUS = "not_logged_in"
            logging.info('User Not Logged In')
        self.redirect('/taught')
class DeathTurnIn(webapp.RequestHandler):
    """Grades and stores one Shot Or Not answer, then redirects to /shot."""

    def get(self):
        logging.debug('Posting Death Grade')
        global LOGINSTATUS
        if users.get_current_user():
            grade = DeathGrade()
            grade.subject_name = self.request.get('subject_name')
            grade.subject_id = self.request.get('subject_id')
            # '_cause' carries the true cause of death; 'cause' the decoy.
            grade.cause = self.request.get('_cause')
            grade.fake_cause = self.request.get('cause')
            grade.author = users.get_current_user()
            LOGINSTATUS = users.get_current_user()
            # The answer is correct when the chosen death_type matches the
            # true cause.
            if self.request.get('death_type') == self.request.get('_cause'):
                grade.score = "correct"
            else:
                grade.score = "incorrect"
            try:
                grade.put()
                logging.info('DeathGrade entered by user %s'
                             % users.get_current_user().nickname())
            # Fixed: was a bare 'except:'.
            except Exception:
                # Bug fixes: the format arguments must be a tuple (the old
                # code applied '%' only to grade.author and passed the rest
                # as extra arguments to raise_error()), and DeathGrade has no
                # object_id property -- log the cause instead.
                raise_error('Error saving grade for user %s, with score %s: %s %s'
                            % (grade.author, grade.score, grade.subject_id,
                               grade.cause))
        else:
            LOGINSTATUS = "not_logged_in"
            logging.info('User Not Logged In')
        self.redirect('/shot')
| Python |
# #######################
# IMPORTS AND BASIC SETUP
# #######################
import unittest
import sys
import os.path
# Change the following line to reflect wherever your
# app engine installation and the mocker library are
APPENGINE_PATH = '../google/appengine'
MOCKER_PATH = '../mocker'
# Add app-engine related libraries to your path; fail fast if either
# directory is missing.
paths = [
    APPENGINE_PATH,
    MOCKER_PATH,
]
for path in paths:
    if not os.path.exists(path):
        # Bug fix: 'raise <string>' is a string exception, which is a
        # TypeError on Python >= 2.6. Raise a real exception instead.
        raise RuntimeError('Path does not exist: %s' % path)
sys.path = paths + sys.path
# ###############
# TEST DEFINITION
# ###############
import mocker
import handler
import model
class UnitTests(mocker.MockerTestCase):
    """Unit tests for the quiz score-view and turn-in handlers.

    NOTE(review): the test bodies reference a ``testy`` module that is never
    imported in this file (only ``handler`` and ``model`` are) -- confirm the
    intended module name. The recorded datastore expectations
    (``all().order(...).fetch(10)``) also do not match the ``gql``-based
    handlers; these tests appear to target an older handler version.
    """

    def setUp(self):
        """Create mock request/response objects, wire them into one handler
        instance of each kind, and replace the model/user/template modules
        with mocker proxies."""
        self.request = self.mocker.mock()
        self.response = self.mocker.mock()
        self.handler1 = testy.InfluenceViewScore()
        self.handler2 = testy.DeathViewScore()
        self.handler3 = testy.InfluenceTurnIn()
        self.handler4 = testy.DeathTurnIn()
        self.handler1.request = self.request
        self.handler1.response = self.response
        self.handler2.request = self.request
        self.handler2.response = self.response
        self.handler3.request = self.request
        self.handler3.response = self.response
        self.handler4.request = self.request
        self.handler4.response = self.response
        self.InfluenceGrade = self.mocker.replace('testy.InfluenceGrade')
        self.DeathGrade = self.mocker.replace('testy.DeathGrade')
        self.users = self.mocker.replace('google.appengine.api.users')
        self.template = self.mocker.replace('google.appengine.ext.webapp.template')

    def testInfluenceGradeGetWhenLoggedIn(self):
        # What should happen in a regular request?
        # First, we create a query on all grades
        all_query = self.mocker.mock()
        self.InfluenceGrade.all()
        self.mocker.result(all_query)
        ordered_query = self.mocker.mock()
        all_query.order('-date')
        self.mocker.result(ordered_query)
        ordered_query.fetch(10)
        self.mocker.result('Query result for template')
        # We are currently logged into a fake user,
        # thus a logout-url is created
        fake_user = self.mocker.mock()
        self.users.get_current_user()
        self.mocker.result(fake_user)
        self.request.uri
        self.mocker.result('fake uri')
        self.users.create_logout_url('fake uri')
        self.mocker.result('fake logout uri')
        # With that data, rendering will be invoked
        out = self.mocker.mock()
        self.response.out
        self.mocker.result(out)
        self.template.render(mocker.ANY, mocker.ANY)
        def checkArgs(path, params):
            # NOTE(review): latest_grades/totalscore/totalitems/percentage
            # are free variables that are not defined in this test; calling
            # checkArgs raises NameError. The expected values need to be
            # pinned down explicitly.
            template_values = {
                'grades': latest_grades,
                'totalscore': totalscore,
                'totalitems': totalitems,
                'percentage': percentage,
                'url': login_url(self.request.uri),
                'url_linktext': login_text(), }
            self.assert_(path.endswith('influence_score.html'))
            self.assertEqual(template_values, params)
            return '<html/>'
        self.mocker.call(checkArgs)
        out.write('<html/>')
        self.mocker.replay()
        self.handler1.get()

    def testInfluenceGradeGetWhenLoggedOut(self):
        # What should happen in a regular request?
        # First, we create a query on all grades
        all_query = self.mocker.mock()
        self.InfluenceGrade.all()
        self.mocker.result(all_query)
        ordered_query = self.mocker.mock()
        all_query.order('-date')
        self.mocker.result(ordered_query)
        ordered_query.fetch(10)
        self.mocker.result('Query result for template')
        # We are currently logged out (get_current_user() returns None),
        # thus a login-url is created
        fake_user = self.mocker.mock()
        self.users.get_current_user()
        self.mocker.result(None)
        self.request.uri
        self.mocker.result('fake uri')
        self.users.create_login_url('fake uri')
        self.mocker.result('fake login uri')
        # With that data, rendering will be invoked
        out = self.mocker.mock()
        self.response.out
        self.mocker.result(out)
        self.template.render(mocker.ANY, mocker.ANY)
        def checkArgs(path, params):
            # NOTE(review): free variables -- see note in the LoggedIn test.
            template_values = {
                'grades': latest_grades,
                'totalscore': totalscore,
                'totalitems': totalitems,
                'percentage': percentage,
                'url': login_url(self.request.uri),
                'url_linktext': login_text(), }
            self.assert_(path.endswith('influence_score.html'))
            self.assertEqual(template_values, params)
            return '<html/>'
        self.mocker.call(checkArgs)
        out.write('<html/>')
        self.mocker.replay()
        self.handler1.get()

    def testDeathGradeGetWhenLoggedIn(self):
        # What should happen in a regular request?
        # First, we create a query on all grades
        all_query = self.mocker.mock()
        self.DeathGrade.all()
        self.mocker.result(all_query)
        ordered_query = self.mocker.mock()
        all_query.order('-date')
        self.mocker.result(ordered_query)
        ordered_query.fetch(10)
        self.mocker.result('Query result for template')
        # We are currently logged into a fake user,
        # thus a logout-url is created
        fake_user = self.mocker.mock()
        self.users.get_current_user()
        self.mocker.result(fake_user)
        self.request.uri
        self.mocker.result('fake uri')
        self.users.create_logout_url('fake uri')
        self.mocker.result('fake logout uri')
        # With that data, rendering will be invoked
        out = self.mocker.mock()
        self.response.out
        self.mocker.result(out)
        self.template.render(mocker.ANY, mocker.ANY)
        def checkArgs(path, params):
            # NOTE(review): free variables -- see note in the first test.
            template_values = {
                'grades': latest_grades,
                'totalscore': totalscore,
                'totalitems': totalitems,
                'percentage': percentage,
                'url': login_url(self.request.uri),
                'url_linktext': login_text(), }
            self.assert_(path.endswith('death_score.html'))
            self.assertEqual(template_values, params)
            return '<html/>'
        self.mocker.call(checkArgs)
        out.write('<html/>')
        self.mocker.replay()
        self.handler2.get()

    def testDeathGradeGetWhenLoggedOut(self):
        # What should happen in a regular request?
        # First, we create a query on all grades
        all_query = self.mocker.mock()
        self.DeathGrade.all()
        self.mocker.result(all_query)
        ordered_query = self.mocker.mock()
        all_query.order('-date')
        self.mocker.result(ordered_query)
        ordered_query.fetch(10)
        self.mocker.result('Query result for template')
        # We are currently logged out (get_current_user() returns None),
        # thus a login-url is created
        fake_user = self.mocker.mock()
        self.users.get_current_user()
        self.mocker.result(None)
        self.request.uri
        self.mocker.result('fake uri')
        self.users.create_login_url('fake uri')
        self.mocker.result('fake login uri')
        # With that data, rendering will be invoked
        out = self.mocker.mock()
        self.response.out
        self.mocker.result(out)
        self.template.render(mocker.ANY, mocker.ANY)
        def checkArgs(path, params):
            # NOTE(review): free variables -- see note in the first test.
            template_values = {
                'grades': latest_grades,
                'totalscore': totalscore,
                'totalitems': totalitems,
                'percentage': percentage,
                'url': login_url(self.request.uri),
                'url_linktext': login_text(), }
            self.assert_(path.endswith('death_score.html'))
            self.assertEqual(template_values, params)
            return '<html/>'
        self.mocker.call(checkArgs)
        out.write('<html/>')
        self.mocker.replay()
        self.handler2.get()

    def testInfluenceTurnInWhenLoggedIn(self):
        # First, a new grade is created
        grade = self.mocker.mock()
        self.InfluenceGrade()
        self.mocker.result(grade)
        # The user is fetched and assigned to the grade
        fake_user = self.mocker.mock()
        self.users.get_current_user()
        self.mocker.count(2)
        self.mocker.result(fake_user)
        grade.author = fake_user
        # Next, the content is fetched from a request parameter
        self.request.get('content')
        self.mocker.result('mock content')
        grade.content = 'mock content'
        # Last but not least, store the post and redirect
        grade.put()
        self.handler3.redirect = lambda x: self.assertEquals('/taught', x)
        # Everything is recorded, so let's go into replay mode :-)
        self.mocker.replay()
        self.handler3.post()

    def testInfluenceTurnInWhenLoggedOut(self):
        handler3 = self.mocker.mock(testy.InfluenceTurnIn)
        handler3.request
        self.mocker.count(0,10)
        self.mocker.result(self.request)
        handler3.response
        self.mocker.count(0,10)
        self.mocker.result(self.response)
        # First, a new grade is created
        grade = self.mocker.mock()
        self.InfluenceGrade()
        self.mocker.result(grade)
        # The user is None and thus not assigned to the grade
        self.users.get_current_user()
        self.mocker.result(None)
        # Next, the content is fetched from a request parameter
        self.request.get('content')
        self.mocker.result('mock content')
        grade.content = 'mock content'
        # Last but not least, store the post and redirect
        # TODO: (check blog post for more information)
        grade.put()
        mock_redirect = self.mocker.mock()
        handler3.redirect('/taught')
        self.mocker.replay()
        testy.InfluenceTurnIn.post(handler3)

    def testDeathTurnInWhenLoggedIn(self):
        # First, a new grade is created
        grade = self.mocker.mock()
        self.DeathGrade()
        self.mocker.result(grade)
        # The user is fetched and assigned to the grade
        fake_user = self.mocker.mock()
        self.users.get_current_user()
        self.mocker.count(2)
        self.mocker.result(fake_user)
        grade.author = fake_user
        # Next, the content is fetched from a request parameter
        self.request.get('content')
        self.mocker.result('mock content')
        grade.content = 'mock content'
        # Last but not least, store the post and redirect
        grade.put()
        self.handler4.redirect = lambda x: self.assertEquals('/shot', x)
        # Everything is recorded, so let's go into replay mode :-)
        self.mocker.replay()
        self.handler4.post()

    def testDeathTurnInWhenLoggedOut(self):
        handler4 = self.mocker.mock(testy.DeathTurnIn)
        handler4.request
        self.mocker.count(0,10)
        self.mocker.result(self.request)
        # Bug fix: was 'handler3.response' -- handler3 is not defined in
        # this test (NameError); the expectation belongs to handler4.
        handler4.response
        self.mocker.count(0,10)
        self.mocker.result(self.response)
        # First, a new grade is created
        grade = self.mocker.mock()
        self.DeathGrade()
        self.mocker.result(grade)
        # The user is None and thus not assigned to the grade
        self.users.get_current_user()
        self.mocker.result(None)
        # Next, the content is fetched from a request parameter
        self.request.get('content')
        self.mocker.result('mock content')
        grade.content = 'mock content'
        grade.put()
        mock_redirect = self.mocker.mock()
        handler4.redirect('/shot')
        self.mocker.replay()
        # Bug fix: was testy.InfluenceTurnIn.post (copy-paste from the
        # influence test) -- this test exercises DeathTurnIn.
        testy.DeathTurnIn.post(handler4)
# #####################
# LAUNCH UNIT TESTS
# #####################
# Run all test cases in this module when executed directly.
if __name__ == "__main__":
    unittest.main()
| Python |
from google.appengine.ext import db
from google.appengine.api import users
import logging
# Log a message each time this module gets loaded.
logging.info('Loading %s', __name__)
class InfluenceGrade(db.Model):
    # One graded answer for the Taught Or Not (influence) quiz.
    author = db.UserProperty()       # user who answered
    score = db.StringProperty()      # "correct" or "incorrect"
    date = db.DateTimeProperty(auto_now_add=True)  # set when first stored
    subject_name = db.StringProperty()
    subject_id = db.StringProperty()
    object_id = db.StringProperty()
    object_name = db.StringProperty()
    influence_type = db.StringProperty()  # e.g. influenced / influenced_by -- TODO confirm values
class DeathGrade(db.Model):
    # One graded answer for the Shot Or Not (cause of death) quiz.
    author = db.UserProperty()       # user who answered
    score = db.StringProperty()      # "correct" or "incorrect"
    date = db.DateTimeProperty(auto_now_add=True)  # set when first stored
    subject_name = db.StringProperty()
    subject_id = db.StringProperty()
    cause = db.StringProperty()       # true cause of death ('_cause' request param)
    fake_cause = db.StringProperty()  # decoy cause shown in the quiz ('cause' param)
| Python |
import cgi
import wsgiref.handlers
import random
import os
import logging
from google.appengine.ext.webapp import template
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext import webapp
# NOTE(review): 'global' at module scope is a no-op; the assignment below
# already creates the module-level variable.
global LOGINSTATUS
LOGINSTATUS = "unknown"  # login state shared with the request handlers
def tpl_path(template_file_name):
    """Return the filesystem path of a template bundled under
    <module dir>/templates/."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, 'templates', template_file_name)
def login_url(uri):
    """Return the logout URL when a user is signed in, else the login URL.

    Args:
        uri: URL to send the user back to after login/logout.
    """
    # Cleanup: the old url_linktext assignments were dead stores -- the link
    # text is produced by login_text(), not here.
    if users.get_current_user():
        return users.create_logout_url(uri)
    return users.create_login_url(uri)
def login_text():
    """Return the login/logout link text and update the login status.

    Returns 'Logout' when a user is signed in, 'Login' otherwise.
    """
    # Bug fix: without this declaration the assignment below created a dead
    # local and the module-level LOGINSTATUS (read by the quiz handlers)
    # was never updated.
    global LOGINSTATUS
    if users.get_current_user():
        LOGINSTATUS = "logged in"
        url_linktext = 'Logout'
    else:
        url_linktext = 'Login'
    return url_linktext
def raise_error(error_string):
    """Log *error_string* and raise it as an exception.

    Bug fix: 'raise error_string' raised a plain string, which string
    exceptions being removed makes a TypeError on Python >= 2.6 -- the
    original message was lost. Wrap it in a real Exception instead.
    """
    logging.error(error_string)
    raise Exception(error_string)
| Python |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
""" Testy: a simple Freebase application running on Google's App Engine.
Taught or Not tests knowledge of the genealogy of influence.
Shot or Not tests knowledge of historical causes of death.
"""
# Import WSGI handlers.
from handler import *
from unittests import *
# Log a message each time this module gets loaded.
logging.info('Loading %s', __name__)
def main():
    """Build the WSGI application with all quiz routes and run it as CGI."""
    logging.getLogger().setLevel(logging.DEBUG)
    # URL -> handler map. Trailing '/?' makes the slash optional.
    routes = [
        ('/', Main),
        ('/taught/?', InfluenceQuiz),
        ('/taught/quiz/?', InfluenceQuiz),
        ('/taught/grade/?', InfluenceTurnIn),
        ('/taught/score/?', InfluenceViewScore),
        ('/shot/?', DeathQuiz),
        ('/shot/quiz/?', DeathQuiz),
        ('/shot/grade/?', DeathTurnIn),
        ('/shot/score/?', DeathViewScore),
        # NOTE(review): UnitTests is a mocker.MockerTestCase, not a
        # webapp.RequestHandler -- confirm this route is intentional.
        ('/test/?', UnitTests),
    ]
    application = webapp.WSGIApplication(routes, debug=True)
    wsgiref.handlers.CGIHandler().run(application)
# CGI entry point.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
  """Upload a file to a Google Code project's file server.

  Args:
    file: The local path to the file.
    project_name: The name of your project on Google Code.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
      Note that this is NOT your global Google Account password!
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.

  Returns: a tuple:
    http_status: 201 if the upload succeeded, something else if an
      error occured.
    http_reason: The human-readable string associated with http_status
    file_url: If the upload succeeded, the URL of the file on Google
      Code, None otherwise.
  """
  # The server wants the bare login, so strip a full user@gmail.com.
  gmail_suffix = '@gmail.com'
  if user_name.endswith(gmail_suffix):
    user_name = user_name[:user_name.index(gmail_suffix)]
  form_fields = [('summary', summary)]
  if labels is not None:
    form_fields.extend([('label', lab.strip()) for lab in labels])
  content_type, body = encode_upload_request(form_fields, file)
  upload_host = '%s.googlecode.com' % project_name
  upload_uri = '/files'
  # HTTP Basic auth with the googlecode.com credentials.
  auth_token = base64.b64encode('%s:%s' % (user_name, password))
  headers = {
      'Authorization': 'Basic %s' % auth_token,
      'User-Agent': 'Googlecode.com uploader v0.9.4',
      'Content-Type': content_type,
      }
  server = httplib.HTTPSConnection(upload_host)
  server.request('POST', upload_uri, body, headers)
  resp = server.getresponse()
  server.close()
  # A 201 Created response carries the new file's URL in 'Location'.
  if resp.status == 201:
    location = resp.getheader('Location', None)
  else:
    location = None
  return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
  """Encode the given fields and file into a multipart form body.

  fields is a sequence of (name, value) pairs. file is the path of
  the file to upload. The file will be uploaded to Google Code with
  the same file name.

  Returns: (content_type, body) ready for httplib.HTTP instance

  NOTE(review): the file is read in binary mode and its raw content is
  joined with str parts below -- this only works on Python 2, where str
  and bytes are the same type.
  """
  BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
  CRLF = '\r\n'
  body = []
  # Add the metadata about the upload first
  for key, value in fields:
    body.extend(
      ['--' + BOUNDARY,
       'Content-Disposition: form-data; name="%s"' % key,
       '',
       value,
       ])
  # Now add the file itself
  file_name = os.path.basename(file_path)
  f = open(file_path, 'rb')
  file_content = f.read()
  f.close()
  body.extend(
    ['--' + BOUNDARY,
     'Content-Disposition: form-data; name="filename"; filename="%s"'
     % file_name,
     # The upload server determines the mime-type, no need to set it.
     'Content-Type: application/octet-stream',
     '',
     file_content,
     ])
  # Finalize the form body
  body.extend(['--' + BOUNDARY + '--', ''])
  return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
  """Find credentials and upload a file to a Google Code project's file server.

  file_path, project_name, summary, and labels are passed as-is to upload.

  Args:
    file_path: The local path to the file.
    project_name: The name of your project on Google Code.
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.
    user_name: Your Google account name, or None to prompt for it.
    password: The googlecode.com password, or None to prompt for it.
    tries: How many attempts to make.

  Returns: the (status, reason, url) tuple from the last upload() attempt.
  """
  # Prompt for any missing credential, attempt the upload, and on an auth
  # failure clear both credentials and retry (up to `tries` times).
  while tries > 0:
    if user_name is None:
      # Read username if not specified or loaded from svn config, or on
      # subsequent tries.
      sys.stdout.write('Please enter your googlecode.com username: ')
      sys.stdout.flush()
      user_name = sys.stdin.readline().rstrip()
    if password is None:
      # Read password if not loaded from svn config, or on subsequent tries.
      print 'Please enter your googlecode.com password.'
      print '** Note that this is NOT your Gmail account password! **'
      print 'It is the password you use to access Subversion repositories,'
      print 'and can be found here: http://code.google.com/hosting/settings'
      password = getpass.getpass()
    status, reason, url = upload(file_path, project_name, user_name, password,
                                 summary, labels)
    # Returns 403 Forbidden instead of 401 Unauthorized for bad
    # credentials as of 2007-07-17.
    if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Rest for another try.
      user_name = password = None
      tries = tries - 1
    else:
      # We're done.
      break
  return status, reason, url
def main():
  """Parse command-line options, validate them, and upload the file.

  Returns 0 on success, 1 on failure (for use as a process exit code).
  """
  parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                 '-p PROJECT [options] FILE')
  parser.add_option('-s', '--summary', dest='summary',
                    help='Short description of the file')
  parser.add_option('-p', '--project', dest='project',
                    help='Google Code project name')
  parser.add_option('-u', '--user', dest='user',
                    help='Your Google Code username')
  parser.add_option('-w', '--password', dest='password',
                    help='Your Google Code password')
  parser.add_option('-l', '--labels', dest='labels',
                    help='An optional list of comma-separated labels to attach '
                    'to the file')
  options, args = parser.parse_args()
  # Summary and project are mandatory; exactly one file must be given.
  if not options.summary:
    parser.error('File summary is missing.')
  elif not options.project:
    parser.error('Project name is missing.')
  elif len(args) < 1:
    parser.error('File to upload not provided.')
  elif len(args) > 1:
    parser.error('Only one file may be specified.')
  file_path = args[0]
  if options.labels:
    labels = options.labels.split(',')
  else:
    labels = None
  status, reason, url = upload_find_auth(file_path, options.project,
                                         options.summary, labels,
                                         options.user, options.password)
  if url:
    print 'The file was uploaded successfully.'
    print 'URL: %s' % url
    return 0
  else:
    print 'An error occurred. Your file was not uploaded.'
    print 'Google Code upload server said: %s (%s)' % (reason, status)
    return 1
# Script entry point: exit code 0 on success, 1 on failure.
if __name__ == '__main__':
  sys.exit(main())
| Python |
#!/usr/bin/env python
# encoding: utf-8
from RegexCaller import RegexCaller
from BeautifulSoup import BeautifulSoup
from Debug import *
class HorizontalParser:
    # Extracts data from HTML tables laid out horizontally: one record per
    # <tr>, with column headers in a header row.
    def __init__(self,Content):
        """Pre-clean the raw HTML with a set of regexes, then parse the
        surviving <table> markup with BeautifulSoup."""
        self.Regex = RegexCaller()
        self.Regex.Add("Tags","<.*?>.*</.*?>")
        self.Regex.Add("Conditionals","<!\[.*?>")
        # NOTE(review): this stops at the first '>', so a comment containing
        # '>' is only partially removed -- confirm intent.
        self.Regex.Add("Comment","<!--.*?>")
        self.Regex.Add("Tag","<.*?>")
        self.Regex.Add("Spaces"," ")
        self.Regex.Add("Headers","<th")
        self.Regex.Add("Escapes","&....;")
        self.Regex.Add("Tabs","[\t]")
        self.Regex.Add("Newlines","[\n]")
        self.Regex.Add("\R","\r")
        self.Regex.Add("Quotes","\"")
        self.Regex.Add("Clean","<table.*?>.*?</table.*>")
        # Keep only the <table>...</table> fragments of the page.
        self.Content = ''.join(self.Regex.FindAll(Content,"Clean"))
        DebugPrint(Content)
        # Normalise: drop conditionals/comments, treat <th> as <td>, strip
        # HTML entities, tabs, carriage returns and double quotes.
        self.Content = self.Regex.Replace(self.Content,"Conditionals","")
        self.Content = self.Regex.Replace(self.Content,"Comment","")
        self.Content = self.Regex.Replace(self.Content,"Headers","<td")
        self.Content = self.Regex.Replace(self.Content,"Escapes","")
        self.Content = self.Regex.Replace(self.Content,"Tabs"," ")
        self.Content = self.Regex.Replace(self.Content,"\R","")
        self.Content = self.Regex.Replace(self.Content,"Quotes","'")
        self.Soup = BeautifulSoup(self.Content)
        self.Tables = self.Soup.findAll("table")
    def GetTableName(self,TableIndex):
        # Table name is taken from the first cell of the first row,
        # with markup stripped.
        return self.Regex.Replace(str(self.Tables[TableIndex].tr.td),"Tag","").strip()
    def GetTableHeaders(self,TableIndex,HasTitle = False):
        """Return the cleaned cell texts of the header row (row 0, or row 1
        when HasTitle is True); spaces become underscores."""
        Table = self.Tables[TableIndex]
        i = 0
        if HasTitle:
            i = 1
        Row = Table.findAll("tr")[i]
        Headers = []
        for Col in Row.findAll("td"):
            Headers.append(self.Regex.Replace(self.Regex.Replace(str(Col),"Tag",""),"Spaces","_").strip())
        return Headers
    def GetTableData(self,TableIndex,HasTitle=False,HasHeaders=False,NewHeaders=None,IndexBy=None,SkipRows=0,ProcessRows=None):
        """Return {index: {header: cell_text}} for the data rows.

        Rows are indexed by their 1-based position, or by the value of the
        IndexBy column when given. Title/header rows and SkipRows are
        skipped; ProcessRows limits how many rows are read.
        """
        Table = self.Tables[TableIndex]
        Data = {}
        i = SkipRows
        if HasTitle:
            i = i + 1
        if HasHeaders:
            i = i + 1
        if NewHeaders == None:
            NewHeaders = self.GetTableHeaders(TableIndex,HasTitle)
        if ProcessRows != None:
            ProcessRows = ProcessRows+i
        DataRows = Table.findAll("tr")[i:ProcessRows]
        i = 0
        for Row in DataRows:
            i = i + 1
            RowData = {}
            # Walk the row's cells in step with the header list.
            RowCol = Row.td
            for Col in NewHeaders:
                RowData[str(Col)] = self.Regex.Replace(str(RowCol),"Tag","").strip()
                RowCol = RowCol.findNextSibling()
            if IndexBy == None:
                Index = i
            else:
                Index = RowData[IndexBy]
            Data[Index] = RowData
        return Data
class VerticalParser:
    """Parses HTML tables whose headers run down the first column: each
    row holds one header cell followed by that header's data cells, and
    each *column* of data cells forms one record."""

    def __init__(self, Content):
        self.Regex = RegexCaller()
        self.Regex.Add("Tags", "<.*?>.*</.*?>")
        self.Regex.Add("Conditionals", "<!\[.*?>")
        self.Regex.Add("Comment", "<!--.*?>")
        self.Regex.Add("Tag", "<.*?>")
        self.Regex.Add("Spaces", " ")
        self.Regex.Add("Headers", "<th")
        self.Regex.Add("Escapes", "&....;")
        self.Regex.Add("Tabs", "[\t]")
        self.Regex.Add("Newlines", "[\n,\r]")
        self.Regex.Add("Quotes", "\"")
        self.Regex.Add("Clean", "<t.*?>.*</t.*>")
        # Keep only the table markup, then normalize it before handing it
        # to BeautifulSoup.
        self.Content = ''.join(self.Regex.FindAll(Content, "Clean"))
        self.Content = self.Regex.Replace(self.Content, "Tabs", "")
        self.Content = self.Regex.Replace(self.Content, "Conditionals", "")
        self.Content = self.Regex.Replace(self.Content, "Comment", "")
        # Treat <th> cells exactly like <td> cells.
        self.Content = self.Regex.Replace(self.Content, "Headers", "<td")
        self.Content = self.Regex.Replace(self.Content, "Escapes", "")
        # Second "Tabs" pass is a no-op after the first (kept for parity
        # with the original normalization sequence).
        self.Content = self.Regex.Replace(self.Content, "Tabs", " ")
        self.Content = self.Regex.Replace(self.Content, "Quotes", "'")
        self.Soup = BeautifulSoup(self.Content)
        self.Tables = self.Soup.findAll("table")

    def GetTableName(self, TableIndex):
        """Return the table's title: its first cell, tags stripped."""
        return self.Regex.Replace(str(self.Tables[TableIndex].tr.td), "Tag", "").strip()

    def GetTableHeaders(self, TableIndex, HasTitle=False):
        """Return the header text in column 0 of each row (tags stripped,
        spaces replaced with underscores)."""
        Table = self.Tables[TableIndex]
        i = 0
        if HasTitle:
            i = 1
        Headers = []
        Rows = Table.findAll('tr')[i:]
        for el in Rows:
            Headers.append(self.Regex.Replace(self.Regex.Replace(str(el.td), "Tag", ""), "Spaces", "_").strip())
        return Headers

    def GetTableData(self, TableIndex, HasTitle=False, HasHeaders=False, NewHeaders=None, IndexBy=None, SkipRows=0, ProcessRows=None):
        """Return {index: {header: cell_text}} with one record per data
        *column*.  Headers come from NewHeaders when supplied, otherwise
        from the table's first column.  Records are keyed by the IndexBy
        header's value when given, else by a 0-based column counter."""
        Table = self.Tables[TableIndex]
        DataDict = {}
        Data = []
        i = SkipRows
        if HasTitle:
            i = i + 1
        # BUG FIX: this previously read `if HasHeaders and NewHeaders != None:`,
        # which clobbered caller-supplied headers and left NewHeaders as None
        # (crashing in the transpose loop below) whenever headers needed to be
        # derived.  Mirror HorizontalParser: derive only when none were given.
        if NewHeaders == None:
            NewHeaders = self.GetTableHeaders(TableIndex, HasTitle)
        if ProcessRows != None:
            ProcessRows = ProcessRows + i
        Rows = Table.findAll("tr")[i:ProcessRows]
        for Row in Rows:
            RowData = []
            # Column 0 is the header cell; data starts at column 1.
            Cols = Row.findAll("td")[1:]
            for Col in Cols:
                RowData.append(str(Col).strip())
            Data.append(RowData)
        if len(Data) == 0:
            return {}
        # Transpose: the k-th cell of every row forms record k.
        for i in range(0, len(Data[0])):
            RowDict = {}
            for j in range(0, len(Data)):
                RowDict[NewHeaders[j]] = self.Regex.Replace(str(Data[j][i]), "Tag", "").strip()
            if IndexBy != None:
                DataDict[RowDict[IndexBy]] = RowDict
            else:
                DataDict[i] = RowDict
        return DataDict
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import wsgiref.handlers
from MainHandler import MainHandler as Handler
from google.appengine.ext import webapp
class RequestHandler(webapp.RequestHandler):
    """Routes every GET request to the path-driven MainHandler."""

    def get(self):
        # Note: CreateReponse (sic) is MainHandler's actual method name.
        handler = Handler(self.request.path)
        self.response.out.write(handler.CreateReponse())
def main():
    """Build the catch-all WSGI application and serve it via CGI."""
    app = webapp.WSGIApplication([('.*', RequestHandler)], debug=True)
    wsgiref.handlers.CGIHandler().run(app)


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# encoding: utf-8
import re
class RegexCaller:
    """A small registry of named, precompiled regular expressions."""

    def __init__(self, Expressions=None):
        # BUG FIX: the default was the mutable literal `{}`, which Python
        # evaluates once, so every RegexCaller() shared (and clobbered) a
        # single pattern dict -- e.g. two parsers registering different
        # "Newlines" patterns would overwrite each other.  Each instance
        # now gets its own dict.
        self.Expressions = {} if Expressions is None else Expressions

    def Add(self, ExpressionName, Expression, Options=0):
        """Compile Expression (with optional re flags) under the given name."""
        self.Expressions[ExpressionName] = re.compile(Expression, Options)

    def Replace(self, String, ExpressionName, With):
        """Return String with every match of the named pattern replaced by With."""
        return re.sub(self.Expressions[ExpressionName], With, String)

    def FindAll(self, String, ExpressionName):
        """Return all non-overlapping matches of the named pattern in String."""
        return re.findall(self.Expressions[ExpressionName], String)
| Python |
from datetime import date
from google.appengine.api import urlfetch
import re
from Parser import HorizontalParser,VerticalParser
from Formats import *
from Debug import *
class FRCEvent:
    """Lazily scrapes match results, schedules, rankings, awards and the
    team list for one FIRST Robotics Competition event.

    Each Get* method fetches and parses its table on first call and
    memoizes the result on the instance.
    """

    def __init__(self, EventCode, Year=None):
        if Year == None:
            Year = date.today().year  # default to the current season
        self.Year = int(Year)
        self.EventCode = EventCode
        # Per-table caches; None means "not fetched yet".
        self.QualificationResults = None
        self.EliminationResults = None
        self.Awards = None
        self.Rankings = None
        self.QualificationSchedule = None
        self.EliminationSchedule = None
        self.TeamList = None

    def LoadURL(self, URL):
        """Fetch URL, follow the JavaScript redirect embedded in the page,
        and return the final page body ('' if the second fetch fails)."""
        Content = urlfetch.fetch(URL).content
        #Admittedly, this is a hack. Pat uses window.location= to redirect instead of a proper HTTP response so this is the easiest way
        URL = re.search("window.location.*?=.*?\"(.*)\"",Content).group(1)
        try:
            DebugPrint("URL="+URL)
            return urlfetch.fetch(URL).content
        except Exception:
            # Deliberate best-effort: callers get an empty page on failure.
            # (Was a bare `except:`, which also trapped SystemExit and
            # KeyboardInterrupt.)
            return ""

    def GetCacheTime(self):
        """Seconds this event's data may be cached: 60 once live match
        results have been loaded, otherwise 600."""
        CacheTime = 600 #10 minutes
        if self.QualificationResults != None or self.EliminationResults != None:
            CacheTime = 60
        return CacheTime

    def GetQualificationResults(self):
        """Qualification match results (table 2 of the match page)."""
        if self.QualificationResults == None:
            URL = "http://frclinks.com/e/m/%s/%i"%(self.EventCode,self.Year)
            Content = self.LoadURL(URL)
            QualificationResultsHorizontalParser = HorizontalParser(Content)
            self.QualificationResults = QualificationResultsHorizontalParser.GetTableData(TableIndex=2,HasHeaders=True,HasTitle=True)
        return self.QualificationResults

    def GetEliminationResults(self):
        """Elimination match results (table 3 of the match page)."""
        if self.EliminationResults == None:
            URL = "http://frclinks.com/e/m/%s/%i"%(self.EventCode,self.Year)
            Content = self.LoadURL(URL)
            EliminationResultsHorizontalParser = HorizontalParser(Content)
            self.EliminationResults = EliminationResultsHorizontalParser.GetTableData(TableIndex=3,HasHeaders=True,HasTitle=True)
        return self.EliminationResults

    def GetAwards(self):
        """Awards handed out at this event."""
        if self.Awards == None:
            URL = "http://frclinks.com/e/a/%s/%i"%(self.EventCode,self.Year)
            Content = self.LoadURL(URL)
            AwardsHorizontalParser = HorizontalParser(Content)
            self.Awards = AwardsHorizontalParser.GetTableData(TableIndex=2,HasTitle=False,HasHeaders=True)
        return self.Awards

    def GetRankings(self):
        """Current team rankings at this event."""
        if self.Rankings == None:
            URL = "http://frclinks.com/e/r/%s/%i"%(self.EventCode,self.Year)
            Content = self.LoadURL(URL)
            RankingsParser = HorizontalParser(Content)
            self.Rankings = RankingsParser.GetTableData(TableIndex=2,HasTitle=False,HasHeaders=True)
        return self.Rankings

    def GetQualificationSchedule(self):
        """Qualification match schedule."""
        if self.QualificationSchedule == None:
            URL = "http://frclinks.com/e/s/%s/%i"%(self.EventCode,self.Year)
            Content = self.LoadURL(URL)
            QualificationScheduleParser = HorizontalParser(Content)
            self.QualificationSchedule = QualificationScheduleParser.GetTableData(TableIndex=2,HasTitle=True,HasHeaders=True)
        return self.QualificationSchedule

    def GetEliminationSchedule(self):
        """Elimination match schedule, fetched directly from usfirst.org
        (no frclinks redirect involved for this page)."""
        if self.EliminationSchedule == None:
            URL = "http://www2.usfirst.org/%icomp/events/%s/scheduleelim.html"%(self.Year,self.EventCode)
            Content = urlfetch.fetch(URL).content
            EliminationScheduleParser = HorizontalParser(Content)
            self.EliminationSchedule = EliminationScheduleParser.GetTableData(TableIndex=2,HasTitle=True,HasHeaders=True)
        return self.EliminationSchedule

    def GetTeamList(self):
        """Teams registered for this event."""
        if self.TeamList == None:
            DebugPrint(self.EventCode)
            URL = "http://frclinks.com/e/%s/%i"%(self.EventCode,self.Year)
            Content = self.LoadURL(URL).decode('utf-8')
            TeamListParser = HorizontalParser(Content)
            self.TeamList = TeamListParser.GetTableData(TableIndex = 0,HasTitle = False, HasHeaders = False,SkipRows=4, NewHeaders=["Location","Team_Name","Team_Number"])
        return self.TeamList
def GetTeamInfo(TeamNo, HistoryFormat=None):
    """Scrape a team's info page and return a dict of team fields plus a
    'History' entry.

    History is rendered as JSON when HistoryFormat is None, otherwise via
    HistoryFormat.Generate(Body=...).
    """
    URL = "http://frclinks.com/t/%s"%(TeamNo)
    Content = urlfetch.fetch(URL).content
    #Admittedly, this is a hack. Pat uses window.location= to redirect instead of a proper HTTP response so this is the easiest way
    URL = re.search("window.location.*?=.*?\"(.*)\"",Content).group(1)
    Content = urlfetch.fetch(URL).content
    TeamParser = VerticalParser(Content)
    # Both branches previously duplicated these two calls verbatim; only
    # the History rendering differs.
    Data = TeamParser.GetTableData(TableIndex=1,HasTitle=True,HasHeaders=False,SkipRows = 2,ProcessRows=7,NewHeaders = ["Team_Number","Team_Name","Team_Location","Rookie_Season","Team_Nickname","Team_Motto","Team_Website"])[0]
    History = HorizontalParser(Content).GetTableData(TableIndex=4,HasTitle=False,HasHeaders=True,NewHeaders=["Year","Event","Awards"])
    if HistoryFormat == None:
        Data["History"] = GenerateJSON(History)
    else:
        Data["History"] = HistoryFormat.Generate(Body=History)
    return Data
def GetTeamHistory(TeamNo):
    """Return the team's per-year event/award history as parsed rows."""
    first_url = "http://frclinks.com/t/%s"%(TeamNo)
    page = urlfetch.fetch(first_url).content
    # frclinks redirects via window.location= instead of a proper HTTP
    # redirect, so pull the real URL out of the page body.
    real_url = re.search("window.location.*?=.*?\"(.*)\"",page).group(1)
    page = urlfetch.fetch(real_url).content
    history_parser = HorizontalParser(page)
    return history_parser.GetTableData(TableIndex=4,HasTitle=False,HasHeaders=True,NewHeaders=["Year","Event","Awards"])
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import wsgiref.handlers
from MainHandler import MainHandler as Handler
from google.appengine.ext import webapp
class RequestHandler(webapp.RequestHandler):
    """Dispatches every GET to MainHandler, which builds the response
    from the request path."""

    def get(self):
        # Note: CreateReponse (sic) matches MainHandler's actual method name.
        self.response.out.write(Handler(self.request.path).CreateReponse())
def main():
    """Entry point: serve the catch-all WSGI application through CGI."""
    wsgi_app = webapp.WSGIApplication([('.*', RequestHandler)], debug=True)
    wsgiref.handlers.CGIHandler().run(wsgi_app)


if __name__ == '__main__':
    main()
| Python |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2009, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.1.0.1"
__copyright__ = "Copyright (c) 2004-2009 Leonard Richardson"
__license__ = "New-style BSD"
import codecs
import markupbase
import types
import re
from HTMLParser import HTMLParser, HTMLParseError
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces:
# the stdlib markupbase declaration-name pattern is monkey-patched to
# also accept ':' so namespace-qualified names parse.
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match

# Encoding used whenever output is requested as a byte string.
DEFAULT_OUTPUT_ENCODING = "utf-8"

# First, the classes that represent markup elements.
def sob(unicode, encoding):
    """Returns either the given Unicode string or its encoding."""
    # No target encoding means the caller wants the Unicode string as-is.
    return unicode if encoding is None else unicode.encode(encoding)
class PageElement:
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)

    Elements are threaded together two ways: a document-order chain
    (previous/next) and a sibling chain within one parent
    (previousSibling/nextSibling).  The methods below maintain both.
    """

    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            # Link ourselves to the current last child of the parent.
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self

    def replaceWith(self, replaceWith):
        """Replace this element in the tree with the given element."""
        oldParent = self.parent
        myIndex = self.parent.contents.index(self)
        if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
            # We're replacing this element with one of its siblings.
            index = self.parent.contents.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                self.parent.contents.remove(self)
            except ValueError:
                pass

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next

        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None

        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        # Returns self so callers can keep using the detached subtree.
        return self

    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild

    def insert(self, position, newChild):
        """Insert newChild at the given position in this element's
        contents, rewiring the document-order and sibling chains."""
        # Plain strings become NavigableStrings so they participate in
        # tree navigation.
        if (isinstance(newChild, basestring)
            or isinstance(newChild, unicode)) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)

        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent != None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent == self:
                # NOTE(review): self.find(...) returns an element, not a
                # list index; the `<` comparison below is legal under
                # Python 2 but looks suspect -- confirm intended.
                index = self.find(newChild)
                if index and index < position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()

        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild

        newChildsLastElement = newChild._lastRecursiveChild()

        if position >= len(self.contents):
            newChild.nextSibling = None

            # Appending at the end: the next element in document order is
            # the next sibling of the closest ancestor that has one.
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild

        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)

    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)

    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x

    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)

    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r

    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x

    #These methods do the real heavy lifting.

    def _findOne(self, method, name, attrs, text, **kwargs):
        """Run a findAll-style method with limit 1 and unwrap the result."""
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        else:
            # Build a SoupStrainer
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                # Python 2 iterator protocol (generator.next()).
                i = g.next()
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    def nextGenerator(self):
        i = self
        while i:
            i = i.next
            yield i

    def nextSiblingGenerator(self):
        i = self
        while i:
            i = i.nextSibling
            yield i

    def previousGenerator(self):
        i = self
        while i:
            i = i.previous
            yield i

    def previousSiblingGenerator(self):
        i = self
        while i:
            i = i.previousSibling
            yield i

    def parentGenerator(self):
        i = self
        while i:
            i = i.parent
            yield i

    # Utility methods

    def substituteEncoding(self, str, encoding=None):
        """Replace the %SOUP-ENCODING% placeholder with the encoding."""
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)

    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode
        if encoding is None."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            # Not a string at all: stringify first, then recurse.
            if encoding:
                s  = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    """A Unicode string that also carries PageElement navigation pointers,
    so text nodes can be traversed like tags."""

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Supports pickling: reconstruct from the plain unicode value.
        return (unicode(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)

    def encode(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Render this string as bytes in the given encoding."""
        return self.decode().encode(encoding)

    def decodeGivenEventualEncoding(self, eventualEncoding):
        # Plain text needs no markup wrapper; subclasses add one.
        return self
class CData(NavigableString):
    """A CDATA section: its text rendered inside the CDATA markers."""

    def decodeGivenEventualEncoding(self, eventualEncoding):
        return u''.join([u'<![CDATA[', self, u']]>'])
class ProcessingInstruction(NavigableString):
    """A processing instruction, rendered as <?...?>."""

    def decodeGivenEventualEncoding(self, eventualEncoding):
        text = self
        # A literal %SOUP-ENCODING% placeholder is replaced with the
        # encoding the document will eventually be output in.
        if u'%SOUP-ENCODING%' in text:
            text = self.substituteEncoding(text, eventualEncoding)
        return u''.join([u'<?', text, u'?>'])
class Comment(NavigableString):
    """A markup comment, rendered as <!--...-->."""

    def decodeGivenEventualEncoding(self, eventualEncoding):
        return u''.join([u'<!--', self, u'-->'])
class Declaration(NavigableString):
    """A markup declaration, rendered as <!...>."""

    def decodeGivenEventualEncoding(self, eventualEncoding):
        return u''.join([u'<!', self, u'>'])
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs == None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
def convert(kval):
"Converts HTML, XML and numeric entities in the attribute value."
k, val = kval
if val is None:
return kval
return (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities, val))
self.attrs = map(convert, self.attrs)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.decode(eventualEncoding=encoding)
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
return self.decode(prettyPrint, indentLevel, encoding).encode(encoding)
def decode(self, prettyPrint=False, indentLevel=0,
eventualEncoding=DEFAULT_OUTPUT_ENCODING):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding."""
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isString(val):
if (self.containsSubstitutions
and eventualEncoding is not None
and '%SOUP-ENCODING%' in val):
val = self.substituteEncoding(val, eventualEncoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
# TODO: replace with apos when
# appropriate.
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
if val is None:
# Handle boolean attributes.
decoded = key
else:
decoded = fmt % (key, val)
attrs.append(decoded)
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % self.name
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.decodeContents(prettyPrint, indentContents,
eventualEncoding)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (self.name, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def decompose(self):
"""Recursively destroys the contents of this tree."""
contents = [i for i in self.contents]
for i in contents:
if isinstance(i, Tag):
i.decompose()
else:
i.extract()
self.extract()
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.encode(encoding, True)
def encodeContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
return self.decodeContents(prettyPrint, indentLevel).encode(encoding)
def decodeContents(self, prettyPrint=False, indentLevel=0,
                   eventualEncoding=DEFAULT_OUTPUT_ENCODING):
    """Renders the contents of this tag as a string in the given
    encoding. If encoding is None, returns a Unicode string."""
    pieces = []
    for child in self:
        text = None
        if isinstance(child, NavigableString):
            text = child.decodeGivenEventualEncoding(eventualEncoding)
        elif isinstance(child, Tag):
            # Child tags render themselves recursively.
            pieces.append(child.decode(prettyPrint, indentLevel,
                                       eventualEncoding))
        if prettyPrint and text:
            text = text.strip()
        if text:
            if prettyPrint:
                # Indent the text node and end it with a newline.
                pieces.append(" " * (indentLevel-1))
                pieces.append(text)
                pieces.append("\n")
            else:
                pieces.append(text)
    return ''.join(pieces)
#Soup methods

def find(self, name=None, attrs={}, recursive=True, text=None,
         **kwargs):
    """Return only the first child of this Tag matching the given
    criteria, or None if nothing matches."""
    # Delegate to findAll with limit=1 and unwrap the result.
    matches = self.findAll(name, attrs, recursive, text, 1, **kwargs)
    if matches:
        return matches[0]
    return None
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
            limit=None, **kwargs):
    """Extracts a list of Tag objects that match the given
    criteria. You can specify the name of the Tag and any
    attributes you want the Tag to have.

    The value of a key-value pair in the 'attrs' map can be a
    string, a list of strings, a regular expression object, or a
    callable that takes a string and returns whether or not the
    string matches for some custom definition of 'matches'. The
    same is true of the tag name."""
    # Choose which children to walk: the whole subtree, or only the
    # direct children.
    if recursive:
        generator = self.recursiveChildGenerator
    else:
        generator = self.childGenerator
    return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll

# Pre-3.x compatibility methods. Will go away in 4.0.
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
    """Pre-3.x compatibility alias: collect matching text nodes."""
    return self.findAll(recursive=recursive, text=text, limit=limit)
def firstText(self, text=None, recursive=True):
    """Pre-3.x compatibility alias: first matching text node."""
    return self.find(recursive=recursive, text=text)
# 3.x compatibility methods. Will go away in 4.0.
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                   prettyPrint=False, indentLevel=0):
    """3.x compatibility wrapper: renders contents as Unicode when
    encoding is None, as an encoded byte string otherwise."""
    if encoding is None:
        return self.decodeContents(prettyPrint, indentLevel, encoding)
    return self.encodeContents(encoding, prettyPrint, indentLevel)
#Private methods
def _getAttrMap(self):
    """Initializes a map representation of this tag's attributes,
    if not already initialized.

    Returns:
        dict mapping attribute name -> attribute value.
    """
    # Pass an explicit default to getattr(): without it, a missing
    # 'attrMap' attribute falls through to Tag.__getattr__, which
    # performs a tree search for a *child tag* named 'attrMap' --
    # both slow and potentially wrong if such a tag exists.
    if not getattr(self, 'attrMap', None):
        self.attrMap = {}
        for (key, value) in self.attrs:
            self.attrMap[key] = value
    return self.attrMap
#Generator methods
def recursiveChildGenerator(self):
    """Yields every descendant of this tag, in document order."""
    # A bare 'return' ends a generator; an explicit
    # 'raise StopIteration' inside a generator body becomes a
    # RuntimeError under PEP 479 (Python 3.7+) and is unnecessary
    # even on Python 2.
    if not len(self.contents):
        return
    # The node following the last recursive child marks the end of
    # this tag's subtree in the document-order linked list.
    stopNode = self._lastRecursiveChild().next
    current = self.contents[0]
    while current is not stopNode:
        yield current
        current = current.next
def childGenerator(self):
    """Yields this tag's direct children, in order."""
    # Plain returns instead of 'raise StopIteration': explicit raises
    # inside a generator are RuntimeErrors under PEP 479 (Py3.7+);
    # the original also had an unreachable-by-intent trailing raise
    # after the loop, which a bare fall-off-the-end replaces exactly.
    if not len(self.contents):
        return
    current = self.contents[0]
    while current:
        yield current
        current = current.nextSibling
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text).

    Each criterion (name, attribute value, text) may be a string, a
    list, a regular expression object, a callable, or True; see
    _matches() for the exact semantics of each."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        # A bare string for 'attrs' is shorthand for matching the
        # 'class' attribute (the common case in HTML).
        if isString(attrs):
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                # Copy before merging so the caller's dict isn't mutated.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        # Human-readable summary of the search criteria.
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        """Tests a tag -- given either as a Tag object in markupName, or
        as a name string plus an attribute list -- against this
        strainer's name and attribute criteria. Returns the matched
        Tag/name, or None."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            # A Tag object carries its own attributes.
            markup = markupName
            markupAttrs = markup
        # A callable 'name' criterion is invoked with (name, attrs)
        # only when we were given raw tag data, not a Tag object.
        callFunctionWithTagData = callable(self.name) \
                                and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                # Lazily build a dict view of the attributes so each
                # criterion lookup below is a cheap .get().
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        """Matches this strainer against a single piece of markup --
        a list, a Tag, or a string. Returns the matched element or
        None; raises Exception for unrecognized markup types."""
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found

    def _matches(self, markup, matchAgainst):
        """Tests one value against one criterion. The criterion may be
        the literal True (matches anything non-None), a callable, a
        regexp object, a list, a dict, or a string."""
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
            # Only the literal boolean True, not merely truthy values.
            result = markup != None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup is not None and not isString(markup):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif (isList(matchAgainst)
                  and (markup is not None or not isString(matchAgainst))):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): this asks whether the dict criterion is
                # a key *of the markup string* -- the operands look
                # inverted; confirm intended semantics before changing.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                # Normalize the criterion to the markup's string type
                # before the final equality check below.
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # Initialize *this* instance. The original called
        # list.__init__([]), which initialized a throwaway empty list
        # instead of self (harmless only because self starts empty).
        list.__init__(self)
        # The SoupStrainer that produced these results.
        self.source = source
# Now, some helper functions.

def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike."""
    # Anything iterable that isn't a string counts as listlike;
    # otherwise fall back to the classic list/tuple type check.
    if hasattr(l, '__iter__') and not isString(l):
        return True
    return type(l) in (types.ListType, types.TupleType)
def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike."""
    try:
        # Python 2: text can be unicode or any basestring subclass.
        stringTypes = (unicode, basestring)
    except NameError:
        # Python 3: unicode/basestring are gone; str covers all text.
        return isinstance(s, str)
    return isinstance(s, stringTypes)
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # A map: merge its entries as-is.
            built.update(portion)
        elif isList(portion) and not isString(portion):
            # A list: every item maps to the default.
            for item in portion:
                built[item] = default
        else:
            # A scalar: map it to the default.
            built[portion] = default
    return built
# Now, the parser classes.

class HTMLParserBuilder(HTMLParser):
    """Event-driven bridge between Python's HTMLParser and a soup
    object: every parse event (tags, text, comments, entities,
    declarations) is forwarded to the owning soup's tree-building
    methods."""

    def __init__(self, soup):
        HTMLParser.__init__(self)
        # The soup object whose tree we are building.
        self.soup = soup

    # We inherit feed() and reset().

    def handle_starttag(self, name, attrs):
        # <meta> gets special treatment because it may declare the
        # document's character encoding.
        if name == 'meta':
            self.soup.extractCharsetFromMeta(attrs)
        else:
            self.soup.unknown_starttag(name, attrs)

    def handle_endtag(self, name):
        self.soup.unknown_endtag(name)

    def handle_data(self, content):
        self.soup.handle_data(content)

    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        # Flush any pending text first so *text* becomes its own node.
        self.soup.endData()
        self.handle_data(text)
        self.soup.endData(subclass)

    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            # Leave a placeholder so the declared encoding can be
            # substituted once the output encoding is known.
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)

    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)

    def handle_charref(self, ref):
        "Handle character references as data."
        if self.soup.convertEntities:
            # Convert numeric references like &#38; to characters.
            data = unichr(int(ref))
        else:
            # Pass the reference through verbatim.
            data = '&#%s;' % ref
        self.handle_data(data)

    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.soup.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                # Not a known HTML entity; fall through to the
                # XML/unrecognized handling below.
                pass

        if not data and self.soup.convertXMLEntities:
            data = self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

        if not data and self.soup.convertHTMLEntities and \
            not self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
            # TODO: We've got a problem here. We're told this is
            # an entity reference, but it's not an XML entity
            # reference or an HTML entity reference. Nonetheless,
            # the logical thing to do is to pass it through as an
            # unrecognized entity reference.
            #
            # Except: when the input is "&carol;" this function
            # will be called with input "carol". When the input is
            # "AT&T", this function will be called with input
            # "T". We have no way of knowing whether a semicolon
            # was present originally, so we don't know whether
            # this is an unknown entity or just a misplaced
            # ampersand.
            #
            # The more common case is a misplaced ampersand, so I
            # escape the ampersand and omit the trailing semicolon.
            data = "&%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)

    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)

    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # Unterminated CDATA: consume to end of input.
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = HTMLParser.parse_declaration(self, i)
            except HTMLParseError:
                # Malformed declaration: emit it verbatim as text and
                # skip past it.
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulStoneSoup(Tag):
    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:

      You can't close a tag without closing all the tags it encloses.
      That is, "<foo><bar></foo>" actually means
      "<foo><bar></bar></foo>".

    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]

    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""

    # Parsing-behavior tables; all empty here, overridden by subclasses
    # (see BeautifulSoup below).
    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}
    PRESERVE_WHITESPACE_TAGS = []

    # Regex fixups applied to the raw markup before parsing when
    # markupMassage is true: insert a space before "/>", and strip
    # stray whitespace after "<!".
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]

    # Name of the synthetic root tag that holds the whole document.
    ROOT_TAG_NAME = u'[document]'

    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES

    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }

    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False,
                 builder=HTMLParserBuilder):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        HTMLParser will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        HTMLParser, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.

        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.

        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke HTMLParser:

         <br/> (No space between name of closing tag and tag close)
         <! --Comment--> (Extraneous whitespace in declaration)

        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False

        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        # The builder translates parser events into tree operations.
        self.builder = builder(self)
        self.reset()

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            self._feed(isHTML=isHTML)
        except StopParsing:
            # Raised by extractCharsetFromMeta after a re-parse with
            # the correct encoding; the document is already built.
            pass
        self.markup = None                 # The markup can now be GCed.
        self.builder = None                # So can the builder.

    def _feed(self, inDocumentEncoding=None, isHTML=False):
        """Converts the stored markup to Unicode, optionally massages
        it, and runs it through the builder to construct the tree."""
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                if not isList(self.markupMassage):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.builder.reset()

        self.builder.feed(markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def isSelfClosingTag(self, name):
        """Returns true iff the given string is the name of a
        self-closing tag according to this parser."""
        return self.SELF_CLOSING_TAGS.has_key(name) \
               or self.instanceSelfClosingTags.has_key(name)

    def reset(self):
        """Re-initializes the soup as an empty root tag plus fresh
        parser state (tag stack, quote stack, pending text)."""
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        self.builder.reset()
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        self.pushTag(self)

    def popTag(self):
        """Closes the most recently opened tag and returns the new
        current tag."""
        tag = self.tagStack.pop()
        # Tags with just one string-owning child get the child as a
        # 'string' property, so that soup.tag.string is shorthand for
        # soup.tag.contents[0]
        if len(self.currentTag.contents) == 1 and \
           isinstance(self.currentTag.contents[0], NavigableString):
            self.currentTag.string = self.currentTag.contents[0]

        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        """Opens a new tag: attaches it to the current tag and makes it
        the current tag."""
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]

    def endData(self, containerClass=NavigableString):
        """Flushes accumulated character data into the tree as a node
        of *containerClass* (NavigableString, Comment, CData, ...)."""
        if self.currentData:
            currentData = u''.join(self.currentData)
            # Collapse all-ASCII-whitespace text to a single space or
            # newline, unless we're inside a whitespace-preserving tag.
            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
                not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            # When parsing selectively, drop top-level text the
            # strainer doesn't want.
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)

    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # The root never pops.
            return

        numPops = 0
        mostRecentTag = None
        # Search from the top of the stack down, skipping index 0
        # (the root tag).
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1

        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag

    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.

        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        # Walk the open-tag stack from the top down (skipping the root)
        # looking for the tag to pop to.
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurrence.
                popTo = name
                break
            if (nestingResetTriggers != None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers == None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):
                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)

    def unknown_starttag(self, name, attrs, selfClosing=0):
        """Handles an opening tag: may pop enclosing tags per nesting
        rules, then pushes the new Tag onto the tree."""
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)

        # When parsing selectively, skip top-level tags the strainer
        # doesn't want.
        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            # Inside a quote tag (e.g. <script>), markup is literal
            # text until the matching end tag.
            self.quoteStack.append(name)
            self.literal = 1
        return tag

    def unknown_endtag(self, name):
        """Handles a closing tag: pops the stack to the matching tag,
        honoring quote-tag literal mode."""
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)

    def handle_data(self, data):
        # Accumulate character data; endData() flushes it into a node.
        self.currentData.append(data)

    def extractCharsetFromMeta(self, attrs):
        """XML documents give <meta> no special meaning; treat it as an
        ordinary tag. Overridden in BeautifulSoup for HTML."""
        self.unknown_starttag('meta', attrs)
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurrence of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurrence
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
        but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        # HTML gets smart quotes converted to HTML entities by default.
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ['br' , 'hr', 'input', 'img', 'meta',
                                    'spacer', 'link', 'frame', 'base'])

    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    # Contents of these tags are treated as literal text, not markup.
    QUOTE_TAGS = {'script' : None, 'textarea' : None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center']

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def extractCharsetFromMeta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        # Find the http-equiv and content attributes (case-insensitive
        # on the attribute name).
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        # StopParsing aborts this (now obsolete) pass;
                        # the caller's __init__ catches it.
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            # Mark the tag so %SOUP-ENCODING% is substituted at render
            # time.
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised internally to abort a parsing pass (e.g. after a <meta>
    charset declaration triggers a re-parse of the document); caught
    by BeautifulStoneSoup.__init__."""
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-so-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # NOTE: 'strong' and 'big' appear twice in this list; harmless,
    # since buildTagMap maps each to the same default.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big']

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']

    # Extend BeautifulSoup's nesting rules with the tags above.
    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # NOTE(review): buildTagMap('noscript') passes 'noscript' as the
    # *default* with no tag arguments, so this evaluates to an empty
    # map -- possibly buildTagMap(None, 'noscript') was intended;
    # confirm before changing.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        # Before the tag is closed, mirror a single-string child onto
        # the parent as an attribute (unless that attribute already
        # exists).
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in HTMLParserBuilder.handle_pi
# (XML) and BeautifulSoup.extractCharsetFromMeta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = { "macintosh" : "mac-roman",
"x-sjis" : "shift-jis" }
def __init__(self, markup, overrideEncodings=None,
             smartQuotesTo='xml', isHTML=False):
    """Detects the encoding of *markup* and decodes it to Unicode.

    Args:
        markup: the raw document (byte string, or already Unicode).
        overrideEncodings: optional sequence of encodings to try
            before any sniffed/declared ones. Defaults to None
            (treated as empty) rather than a mutable [] default --
            the list was never mutated, so behavior is unchanged,
            but the shared-mutable-default anti-pattern is gone.
        smartQuotesTo: what to turn MS smart quotes into ('xml',
            'html', or None to leave them alone).
        isHTML: passed to _detectEncoding for HTML-specific sniffing.
    """
    if overrideEncodings is None:
        overrideEncodings = []
    self.declaredHTMLEncoding = None
    self.markup, documentEncoding, sniffedEncoding = \
                 self._detectEncoding(markup, isHTML)
    self.smartQuotesTo = smartQuotesTo
    self.triedEncodings = []
    # Already-Unicode (or empty) input needs no conversion.
    if markup == '' or isinstance(markup, unicode):
        self.originalEncoding = None
        self.unicode = unicode(markup)
        return

    u = None
    # 1. Caller-supplied encodings take precedence.
    for proposedEncoding in overrideEncodings:
        u = self._convertFrom(proposedEncoding)
        if u: break
    # 2. Then whatever the document declared or the BOM sniff found.
    if not u:
        for proposedEncoding in (documentEncoding, sniffedEncoding):
            u = self._convertFrom(proposedEncoding)
            if u: break

    # 3. If no luck and we have auto-detection library, try that:
    if not u and chardet and not isinstance(self.markup, unicode):
        u = self._convertFrom(chardet.detect(self.markup)['encoding'])

    # 4. As a last resort, try utf-8 and windows-1252:
    if not u:
        for proposed_encoding in ("utf-8", "windows-1252"):
            u = self._convertFrom(proposed_encoding)
            if u: break

    self.unicode = u
    if not u: self.originalEncoding = None
def _subMSChar(self, match):
"""Changes a MS smart quote character to an XML or HTML
entity."""
orig = match.group(1)
sub = self.MS_CHARS.get(orig)
if type(sub) == types.TupleType:
if self.smartQuotesTo == 'xml':
sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
else:
sub = '&'.encode() + sub[0].encode() + ';'.encode()
else:
sub = sub.encode()
return sub
def _convertFrom(self, proposed):
proposed = self.find_codec(proposed)
if not proposed or proposed in self.triedEncodings:
return None
self.triedEncodings.append(proposed)
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
if self.smartQuotesTo and proposed.lower() in("windows-1252",
"iso-8859-1",
"iso-8859-2"):
smart_quotes_re = "([\x80-\x9f])"
smart_quotes_compiled = re.compile(smart_quotes_re)
markup = smart_quotes_compiled.sub(self._subMSChar, markup)
try:
# print "Trying to convert document to %s" % proposed
u = self._toUnicode(markup, proposed)
self.markup = u
self.originalEncoding = proposed
except Exception, e:
# print "That didn't work!"
# print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata
def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_re = '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode()
xml_encoding_match = re.compile(xml_encoding_re).match(xml_data)
if not xml_encoding_match and isHTML:
meta_re = '<\s*meta[^>]+charset=([^>]*?)[;\'">]'.encode()
regexp = re.compile(meta_re, re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].decode(
'ascii').lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
def _codec(self, charset):
if not charset: return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except (LookupError, ValueError):
pass
return codec
EBCDIC_TO_ASCII_MAP = None
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
201,202,106,107,108,109,110,111,112,113,114,203,204,205,
206,207,208,209,126,115,116,117,118,119,120,121,122,210,
211,212,213,214,215,216,217,218,219,220,221,222,223,224,
225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
250,251,252,253,254,255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
'\x81' : ' ',
'\x82' : ('sbquo', '201A'),
'\x83' : ('fnof', '192'),
'\x84' : ('bdquo', '201E'),
'\x85' : ('hellip', '2026'),
'\x86' : ('dagger', '2020'),
'\x87' : ('Dagger', '2021'),
'\x88' : ('circ', '2C6'),
'\x89' : ('permil', '2030'),
'\x8A' : ('Scaron', '160'),
'\x8B' : ('lsaquo', '2039'),
'\x8C' : ('OElig', '152'),
'\x8D' : '?',
'\x8E' : ('#x17D', '17D'),
'\x8F' : '?',
'\x90' : '?',
'\x91' : ('lsquo', '2018'),
'\x92' : ('rsquo', '2019'),
'\x93' : ('ldquo', '201C'),
'\x94' : ('rdquo', '201D'),
'\x95' : ('bull', '2022'),
'\x96' : ('ndash', '2013'),
'\x97' : ('mdash', '2014'),
'\x98' : ('tilde', '2DC'),
'\x99' : ('trade', '2122'),
'\x9a' : ('scaron', '161'),
'\x9b' : ('rsaquo', '203A'),
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
'\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
# Reads an HTML document from standard input and prints the re-indented
# parse tree.  (Python 2 `print` statement; kept as-is.)
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| Python |
from datetime import date
# Mapping from FIRST Robotics event URL codes to the value URLParser
# stores as the event identifier.
#
# FIX(review): the original literal listed every regional/district code
# twice -- first mapped to its display name, then mapped to itself.  In
# a Python dict literal the LAST duplicate wins, so each duplicated code
# effectively mapped to itself and the display names were silently
# discarded.  The duplicates are removed here; the effective (last-wins)
# values are kept so runtime behaviour is unchanged, and the shadowed
# display names are preserved as comments.
ValidEvents = {
    # Regional and district events: code -> code.
    "az": "az",      # Arizona
    "or": "or",      # Autodesk Oregon
    "nh": "nh",      # BAE Granite State
    "la": "la",      # Bayou
    "in": "in",      # Boilermaker
    "ma": "ma",      # Boston
    "oh": "oh",      # Buckeye
    "md": "md",      # Chesapeake
    "co": "co",      # Colorado
    "ct": "ct",      # Connecticut
    "da": "da",      # Dallas
    "roc": "roc",    # Finger Lakes
    "fl": "fl",      # Florida
    "kc": "kc",      # Greater Kansas City
    "on": "on",      # Greater Toronto
    "hi": "hi",      # Hawaii
    "is": "is",      # Israel
    "nv": "nv",      # Las Vegas
    "tx": "tx",      # Lone Star
    "ca": "ca",      # Los Angeles
    "wa": "wa",      # Microsoft Seattle
    "il": "il",      # Midwest
    "mn": "mn",      # Minnesota 10000 Lakes
    "mn2": "mn2",    # Minnesota North Star
    "nj": "nj",      # New Jersey
    "ny": "ny",      # New York City
    "nc": "nc",      # North Carolina
    "ok": "ok",      # Oklahoma
    "sc": "sc",      # Palmetto
    "ga": "ga",      # Peachtree
    "pa": "pa",      # Philadelphia
    "pit": "pit",    # Pittsburgh
    "sac": "sac",    # Sacramento
    "sdc": "sdc",    # San Diego
    "li": "li",      # SBPLI Long Island
    "sj": "sj",      # Silicon Valley
    "mo": "mo",      # St. Louis
    "ut": "ut",      # Utah
    "va": "va",      # Virginia
    "dc": "dc",      # Washington DC
    "wat": "wat",    # Waterloo
    "wi": "wi",      # Wisconsin
    "wor": "wor",    # WPI
    "wc": "wc",      # Ann Arbor MI District
    "dt1": "dt1",    # Cass Tech MI District
    "dt": "dt",      # Detroit MI District
    "gg": "gg",      # Kettering University MI District
    "gt": "gt",      # Traverse City MI District
    "oc": "oc",      # Troy MI District
    "mi": "mi",      # West Michigan MI District
    # Codes that were listed only once keep their display names.
    "gl": "Michigan State Championship",
    "cmp": "FIRST Championship",
    "arc": "FIRST Championship - Archimedes Division",
    "cur": "FIRST Championship - Curie Division",
    "gal": "FIRST Championship - Galileo Division",
    "new": "FIRST Championship - Newton Division",
    "ein": "FIRST Championship - Einstein Field",
}
# Request types the parser accepts.  The boolean records whether the
# request additionally needs a qualification/elimination qualifier
# (checked in URLParser.Parse and used by GetFormatPath).
ValidWhat = {
    "rankings": False,
    "awards": False,
    "schedule": True,
    "results": True,
    "team_list": False,
    "team_info": False,
    "team_history": False,
}
# Output format name -> template path pattern ("%s" is the report name).
# NOTE(review): the "json" and "human" entries point at .xml files --
# possibly intentional file naming, but verify against the Formats tree.
Format = {
    "xml": "Formats/XML/%s.xml",
    "json": "Formats/JSON/%s.xml",
    "human": "Formats/Human/%s.xml",
}
# Season years a URL may select, and the two match-session qualifiers.
Years = ["2010", "2009", "2008", "2007"]
When = ["qualification", "elimination"]
class URLParser:
    """Parses a request path like ``/2009/tx/results/qualification/xml``
    into its components (year, event code, report type, session, format,
    row range, team number).

    Call Parse() after construction; any problems are appended to
    self.Error as human-readable strings.
    """
    def __init__(self,url):
        # Path segments, lower-cased; each recognised segment is blanked
        # out during Parse() so later checks don't re-match it.
        self.URLParts = url.lower().strip().split("/")
        self.What = None            # report type key from ValidWhat
        self.When = None            # "qualification" / "elimination"
        self.Year = date.today().year   # default: current season
        self.Top = None             # 0-based start row from an "M-N" segment
        self.Bottom = None          # end row (exclusive slice bound)
        self.Format = "xml"         # output format key from Format
        self.FormatPath = None      # cached by GetFormatPath()
        self.Event = None           # event code from ValidEvents
        self.Error = []             # accumulated parse errors
        self.TeamNo = None          # team number, when present
    def Parse(self):
        """Scan every path segment and populate the fields above."""
        RequireTeamNo = None
        RequireWhen = None
        for Part in self.URLParts:
            if Part in Years:
                self.Year = int(Part)
            if Part in Format:
                self.Format = Part
                Part = ""
            if Part in ValidWhat:
                # A truthy ValidWhat entry means this report also needs
                # a session qualifier (unless one was already seen).
                RequireWhen = (ValidWhat[Part] and self.When == None)
                self.What = Part
                if Part == "team_history":
                    RequireTeamNo = self.TeamNo == None
                Part = ""
            if Part in ValidEvents:
                self.Event = Part
                Part = ""
            if Part in When:
                self.When = Part
                RequireWhen = False
                Part = ""
            if len(Part.split("-")) == 2:
                # "M-N" selects a row range; Top is converted to 0-based.
                splits = Part.split("-")
                if splits[0].isdigit():
                    self.Top = int(splits[0])-1
                if splits[1].isdigit():
                    self.Bottom = int(splits[1])
                Part = ""
            if Part in ValidEvents:
                # NOTE(review): dead code -- any Part in ValidEvents was
                # already consumed (and blanked) by the identical check
                # above, so this branch can never fire.  Likely a
                # leftover from an earlier version; confirm and remove.
                self.Event = ValidEvents[Part]
                Part = ""
            if Part.isdigit():
                self.TeamNo = int(Part)
                RequireTeamNo = False
        if self.What == None:
            self.Error.append("Please specify what you want.\n")
        if RequireTeamNo:
            self.Error.append("Please specify a team number.\n")
        if RequireWhen:
            self.Error.append("Please specify either Elimination or Qualification.\n")
    def GetFormatPath(self):
        """Return (and cache) the template path for this request.
        Assumes Parse() succeeded and self.What is set."""
        if not ValidWhat[self.What]:
            self.FormatPath = Format[self.Format]%(self.What)
        else:
            # Session-qualified reports use e.g. "qualification_results".
            self.FormatPath = Format[self.Format]%("%s_%s"%(self.When,self.What))
        return self.FormatPath
    def GetFormatTemplate(self):
        """Return the raw "%s" path pattern for the selected format."""
        return Format[self.Format]
    def Hash(self):
        """Hash of the request identity, suitable as an in-process cache
        key.  NOTE: built on hash(str), so not stable across runs."""
        return hash("%s-%s-%s-%s-%s"%(self.Year,self.Event,self.What,self.When,self.TeamNo))
| Python |
#!/usr/bin/env python
# encoding: utf-8
import re
class RegexCaller:
    """A small registry of named, pre-compiled regular expressions.

    Patterns are compiled once in Add() and then referenced by name in
    Replace()/FindAll(), so callers don't recompile patterns in loops.
    """
    def __init__(self, Expressions=None):
        # BUG FIX: the original signature was ``Expressions = {}`` -- a
        # mutable default argument shared by every instance constructed
        # without an explicit mapping, so patterns Add()ed to one such
        # instance leaked into all others.  Use a None sentinel and
        # create a fresh dict per instance instead.
        self.Expressions = {} if Expressions is None else Expressions
    def Add(self, ExpressionName, Expression, Options=0):
        """Compile *Expression* (with ``re`` flags *Options*) and store
        it under *ExpressionName*."""
        self.Expressions[ExpressionName] = re.compile(Expression, Options)
    def Replace(self, String, ExpressionName, With):
        """Return *String* with every match of the named pattern
        replaced by *With* (re.sub semantics)."""
        return re.sub(self.Expressions[ExpressionName], With, String)
    def FindAll(self, String, ExpressionName):
        """Return all matches of the named pattern in *String*
        (re.findall semantics)."""
        return re.findall(self.Expressions[ExpressionName], String)
| Python |
from string import Template
from BeautifulSoup import BeautifulStoneSoup
import re
from google.appengine.ext import db
class OutputTemplate():
    """A three-section text template loaded from a definition file.

    The definition contains <Header>...</Header>, <Body>...</Body> and
    <Footer>...</Footer> sections; each becomes a string.Template whose
    $placeholders are filled in by Generate().
    """
    def __init__(self,TemplateDefFile):
        #Comment out the next 4 lines to run using the template files instead of the Datastore ones
        #Templates = db.GqlQuery("SELECT * FROM DataStoreTemplate WHERE name=:1",TemplateDefFile.lower())
        #if Templates.count()==0:
        # Templates = db.GqlQuery("SELECT * FROM DataStoreTemplate WHERE name=:1","default")
        #self. Definition = Templates[0].text
        #Uncomment the next line if you are running a local copy and have a Formats folder
        # FIX: the original used the Python 2 ``file(...)`` builtin and
        # never closed the handle; ``with open(...)`` closes it
        # deterministically and also works on Python 3.
        with open(TemplateDefFile) as template_file:
            self.Definition = template_file.read()
        # re.S lets each section span multiple lines.
        self.Header = Template(re.search("<Header>(.*)</Header>.*<Body>",self.Definition,re.S).group(1))
        self.Body = Template(re.search("<Body>(.*)</Body>",self.Definition,re.S).group(1))
        self.Footer = Template(re.search("<Footer>(.*)</Footer>",self.Definition,re.S).group(1))
    def Generate(self,Header=None,Body=None,Footer=None):
        """Render the template and return (and cache, in LastGenerated)
        the result.

        *Header*/*Footer* are substitution mappings for their sections;
        *Body* maps a row key to that row's substitution mapping, and the
        body section is repeated once per row (in Body.keys() order).
        NOTE(review): passing None for a section that contains
        placeholders will raise inside safe_substitute -- assumed the
        caller always supplies mappings in that case; verify.
        """
        HeaderContents = self.Header.safe_substitute(Header).lstrip()
        FooterContents = self.Footer.safe_substitute(Footer).rstrip()
        Contents = []
        if Body != None:
            for Key in Body.keys():
                Contents.append(self.Body.safe_substitute(Body[Key]))
        self.LastGenerated = '%s%s%s'%(HeaderContents,''.join(Contents),FooterContents)
        return self.LastGenerated
class DataStoreTemplate(db.Model):
    """App Engine datastore entity that stores a template definition.

    ``name`` is the lookup key (queried lower-cased by the commented-out
    GqlQuery in OutputTemplate.__init__); ``text`` is the full template
    source.
    """
    name = db.StringProperty(required = True)
    text = db.TextProperty(required = True)
from google.appengine.ext import db
from google.appengine.ext import webapp
import wsgiref.handlers
class DataStoreTemplate(db.Model):
    """Datastore entity for an editable template: ``name`` is the
    lower-cased lookup key, ``text`` the template body."""
    name = db.StringProperty(required = True)
    text = db.TextProperty(required = True)
class Creator(webapp.RequestHandler):
    """Minimal admin handler for creating/updating DataStoreTemplate rows.

    GET renders a bare HTML form; POST upserts the template named in the
    form (names are stored lower-cased) and re-renders the form.
    """
    def get(self):
        # Runtime HTML reproduced from the original.
        self.response.out.write("""
<html>
<body>
<form method="post">
Name:<input type="text" name="name">
<div><textarea name="content" rows="40" cols="60"></textarea></div>
<div><input type="submit" value="Add New Template"></div>
</form>
</body>
</html>""")
    def post(self):
        """Create or overwrite the template named in the form."""
        Entries = db.GqlQuery("SELECT * FROM DataStoreTemplate WHERE name=:1",self.request.get('name').lower())
        if Entries.count()!=0:
            # BUG FIX: the original did ``Entries[0].text = ...`` then
            # ``Entries[0].put()``.  Each subscript on a GqlQuery runs a
            # fresh fetch, so the mutated instance was discarded and an
            # unmodified one was saved -- the edit was silently lost.
            # Bind the entity to a local so the modified instance is the
            # one that gets put().
            entry = Entries[0]
            entry.text = db.Text(self.request.get('content'))
            entry.put()
            self.get()
            return
        else:
            store = DataStoreTemplate(name=self.request.get('name').lower(),text=db.Text(self.request.get('content')))
            store.put()
            self.get()
def main():
    """CGI entry point: route every request path to Creator."""
    application = webapp.WSGIApplication([('.*', Creator)],
                                         debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
    main()
from URLParser import URLParser
from Formats import OutputTemplate
from FRCFeed import FRCEvent,GetTeamHistory,GetTeamInfo
from Debug import *
class MainHandler:
    """Turns a request URL into a rendered FRC data report.

    The URL is parsed once in the constructor; CreateReponse() (sic --
    the misspelled name is part of the public interface) fetches the
    requested data and renders it through an OutputTemplate.  Returns
    None when the URL had parse errors or an unknown report/session.
    """
    def __init__(self, url):
        self.URL = URLParser(url)
        self.URL.Parse()
    def CreateReponse(self):
        # The event wrapper is created even before the error check, as
        # in the original, so self.Event is always populated.
        self.Event = FRCEvent(self.URL.Event, self.URL.Year)
        if self.URL.Error:
            # Parse reported problems; the caller is expected to show them.
            return
        template = OutputTemplate(self.URL.GetFormatPath())
        what = self.URL.What
        when = self.URL.When
        if what == 'rankings':
            self.Event.GetRankings()
            return template.Generate(Body=self.Event.Rankings)
        elif what == 'awards':
            self.Event.GetAwards()
            return template.Generate(Body=self.Event.Awards)
        elif what == 'schedule':
            if when == 'qualification':
                self.Event.GetQualificationSchedule()
                return template.Generate(Body=self.Event.QualificationSchedule)
            if when == 'elimination':
                self.Event.GetEliminationSchedule()
                return template.Generate(Body=self.Event.EliminationSchedule)
        elif what == 'team_info':
            # Team info embeds the team-history sub-template in its header.
            history_template = OutputTemplate(self.URL.GetFormatTemplate() % ("Team_History"))
            return template.Generate(Header=GetTeamInfo(self.URL.TeamNo, history_template))
        elif what == 'team_list':
            DebugPrint("Event Code="+self.URL.Event)
            self.Event.GetTeamList()
            return template.Generate(Body=self.Event.TeamList)
        elif what == 'team_history':
            return template.Generate(Body=GetTeamHistory(self.URL.TeamNo))
        elif what == 'results':
            if when == 'qualification':
                self.Event.GetQualificationResults()
                return template.Generate(Body=self.Event.QualificationResults)
            if when == 'elimination':
                self.Event.GetEliminationResults()
                return template.Generate(Body=self.Event.EliminationResults)
| Python |
#This file contains various methods for debugging issues. First and foremost it contains DebugPrint and the DEBUG boolean, all functions in this file are to do NOTHING if DEBUG is False
#Set to True to enable Debug* Functions
DEBUG = False
if DEBUG:
    def DebugPrint(Content):
        """Print *Content* to stdout (compiled only when DEBUG is True)."""
        # FIX: call form ``print(Content)`` behaves identically to the
        # old Python 2 ``print Content`` statement for a single argument
        # and also works on Python 3.
        print(Content)
else:
    def DebugPrint(Content):
        """No-op stand-in bound when DEBUG is False."""
        return
| Python |
#!/usr/bin/env python
# encoding: utf-8
from RegexCaller import RegexCaller
from BeautifulSoup import BeautifulSoup
from Debug import *
class HorizontalParser:
    """Scrapes HTML tables laid out row-wise: headers in a header row,
    one record per <tr>.

    The constructor pre-cleans the raw HTML with a set of named regexes
    (via RegexCaller) before handing it to BeautifulSoup, then caches
    every <table> in self.Tables.
    """
    def __init__(self,Content):
        self.Regex = RegexCaller()
        # Named cleanup patterns.  NOTE(review): "Tags" and "Newlines"
        # are registered but never applied in this class -- presumably
        # kept for symmetry with VerticalParser; confirm before removing.
        self.Regex.Add("Tags","<.*?>.*</.*?>")
        self.Regex.Add("Conditionals","<!\[.*?>")
        self.Regex.Add("Comment","<!--.*?>")
        self.Regex.Add("Tag","<.*?>")
        self.Regex.Add("Spaces"," ")
        self.Regex.Add("Headers","<th")
        self.Regex.Add("Escapes","&....;")
        self.Regex.Add("Tabs","[\t]")
        self.Regex.Add("Newlines","[\n]")
        self.Regex.Add("\R","\r")
        self.Regex.Add("Quotes","\"")
        self.Regex.Add("Clean","<table.*?>.*?</table.*>")
        # Keep only the <table>...</table> regions of the page.
        self.Content = ''.join(self.Regex.FindAll(Content,"Clean"))
        DebugPrint(Content)
        self.Content = self.Regex.Replace(self.Content,"Conditionals","")
        self.Content = self.Regex.Replace(self.Content,"Comment","")
        # Normalise <th> cells to <td> so one lookup handles both.
        self.Content = self.Regex.Replace(self.Content,"Headers","<td")
        # NOTE(review): "Escapes" (&....;) only strips entities whose
        # body is exactly four characters -- verify that is intended.
        self.Content = self.Regex.Replace(self.Content,"Escapes","")
        self.Content = self.Regex.Replace(self.Content,"Tabs"," ")
        self.Content = self.Regex.Replace(self.Content,"\R","")
        self.Content = self.Regex.Replace(self.Content,"Quotes","'")
        self.Soup = BeautifulSoup(self.Content)
        self.Tables = self.Soup.findAll("table")
    def GetTableName(self,TableIndex):
        """Return the text of the table's first cell (its title)."""
        return self.Regex.Replace(str(self.Tables[TableIndex].tr.td),"Tag","").strip()
    def GetTableHeaders(self,TableIndex,HasTitle = False):
        """Return the header texts of table *TableIndex*; spaces inside
        a header become underscores.  Skips the title row if HasTitle."""
        Table = self.Tables[TableIndex]
        i = 0
        if HasTitle:
            i = 1
        Row = Table.findAll("tr")[i]
        Headers = []
        for Col in Row.findAll("td"):
            Headers.append(self.Regex.Replace(self.Regex.Replace(str(Col),"Tag",""),"Spaces","_").strip())
        return Headers
    def GetTableData(self,TableIndex,HasTitle=False,HasHeaders=False,NewHeaders=None,IndexBy=None,SkipRows=0,ProcessRows=None):
        """Return {row index or IndexBy value: {header: cell text}} for
        the data rows of table *TableIndex*.

        HasTitle/HasHeaders skip the corresponding leading rows;
        NewHeaders overrides the scraped column names; SkipRows skips
        extra leading rows; ProcessRows caps how many data rows are read.
        """
        Table = self.Tables[TableIndex]
        Data = {}
        i = SkipRows
        if HasTitle:
            i = i + 1
        if HasHeaders:
            i = i + 1
        if NewHeaders == None:
            NewHeaders = self.GetTableHeaders(TableIndex,HasTitle)
        if ProcessRows != None:
            ProcessRows = ProcessRows+i
        DataRows = Table.findAll("tr")[i:ProcessRows]
        i = 0
        for Row in DataRows:
            i = i + 1
            RowData = {}
            RowCol = Row.td
            # Walk the row's cells in parallel with the header list.
            for Col in NewHeaders:
                RowData[str(Col)] = self.Regex.Replace(str(RowCol),"Tag","").strip()
                RowCol = RowCol.findNextSibling()
            if IndexBy == None:
                Index = i
            else:
                Index = RowData[IndexBy]
            Data[Index] = RowData
        return Data
class VerticalParser:
    """Scrapes HTML tables laid out column-wise: the first <td> of each
    <tr> is a field name and each subsequent column is one record.

    Mirrors HorizontalParser's regex cleanup pipeline before handing the
    markup to BeautifulSoup; every <table> is cached in self.Tables.
    """
    def __init__(self,Content):
        self.Regex = RegexCaller()
        # Named cleanup patterns (see HorizontalParser for notes; the
        # "Escapes" pattern only strips 4-character entity bodies).
        self.Regex.Add("Tags","<.*?>.*</.*?>")
        self.Regex.Add("Conditionals","<!\[.*?>")
        self.Regex.Add("Comment","<!--.*?>")
        self.Regex.Add("Tag","<.*?>")
        self.Regex.Add("Spaces"," ")
        self.Regex.Add("Headers","<th")
        self.Regex.Add("Escapes","&....;")
        self.Regex.Add("Tabs","[\t]")
        self.Regex.Add("Newlines","[\n,\r]")
        self.Regex.Add("Quotes","\"")
        self.Regex.Add("Clean","<t.*?>.*</t.*>")
        # Keep only the table-ish regions, then normalise the markup.
        self.Content = ''.join(self.Regex.FindAll(Content,"Clean"))
        self.Content = self.Regex.Replace(self.Content,"Tabs","")
        self.Content = self.Regex.Replace(self.Content,"Conditionals","")
        self.Content = self.Regex.Replace(self.Content,"Comment","")
        self.Content = self.Regex.Replace(self.Content,"Headers","<td")
        self.Content = self.Regex.Replace(self.Content,"Escapes","")
        self.Content = self.Regex.Replace(self.Content,"Tabs"," ")
        self.Content = self.Regex.Replace(self.Content,"Quotes","'")
        self.Soup = BeautifulSoup(self.Content)
        self.Tables = self.Soup.findAll("table")
    def GetTableName(self,TableIndex):
        """Return the text of the table's first cell (its title)."""
        return self.Regex.Replace(str(self.Tables[TableIndex].tr.td),"Tag","").strip()
    def GetTableHeaders(self,TableIndex,HasTitle = False):
        """Return the field names, i.e. the first <td> of every row
        (skipping the title row when HasTitle); spaces become
        underscores."""
        Table = self.Tables[TableIndex]
        i = 0
        if HasTitle:
            i = 1
        Headers = []
        Rows = Table.findAll('tr')[i:]
        for el in Rows:
            Headers.append(self.Regex.Replace(self.Regex.Replace(str(el.td),"Tag",""),"Spaces","_").strip())
        return Headers
    def GetTableData(self,TableIndex,HasTitle=False,HasHeaders=False,NewHeaders=None,IndexBy=None,SkipRows=0,ProcessRows=None):
        """Return {record index or IndexBy value: {field: cell text}}
        where each record is one column of the table.

        NewHeaders overrides the scraped field names; SkipRows skips
        leading rows; ProcessRows caps how many rows (fields) are read.
        """
        Table = self.Tables[TableIndex]
        DataDict = {}
        Data = []
        i = SkipRows
        if HasTitle:
            i = i + 1
        # BUG FIX: the original condition was
        #     if HasHeaders and NewHeaders != None:
        # which *discarded* caller-supplied headers and, when no headers
        # were supplied, left NewHeaders as None -- a TypeError at the
        # NewHeaders[j] lookup below.  Scrape the headers only when the
        # caller did not provide any, mirroring HorizontalParser.
        if NewHeaders == None:
            NewHeaders = self.GetTableHeaders(TableIndex,HasTitle)
        if ProcessRows != None:
            ProcessRows = ProcessRows+i
        Rows = Table.findAll("tr")[i:ProcessRows]
        i = 0
        # First pass: collect the raw cells, one row (field) at a time.
        # The leading cell of each row is the field name, hence [1:].
        for Row in Rows:
            i = i + 1
            RowData = []
            Cols = Row.findAll("td")[1:]
            for Col in Cols:
                RowData.append(str(Col).strip())
            Data.append(RowData)
        if len(Data) == 0:
            return {}
        # Second pass: transpose rows of fields into per-record dicts.
        for i in range(0,len(Data[0])):
            RowDict = {}
            for j in range(0,len(Data)):
                RowDict[NewHeaders[j]]=self.Regex.Replace(str(Data[j][i]),"Tag","").strip()
            if IndexBy != None:
                DataDict[RowDict[IndexBy]] = RowDict
            else:
                DataDict[i] = RowDict
        return DataDict
| Python |
#!/usr/bin/env python
#
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.api import memcache
from Parser import *
from FRCHelper import *
# Sort-direction flags (module constants; not referenced in this handler).
DESC = False
ASC = True
# FRC event codes this service knows how to scrape from frclinks.com.
events = ("az","or","nh","la","in","ma","oh","md","co","ct","da","roc","fl","kc","on","hi","is","nv","tx","ca","wa","il","mn","mn2","nj","ny","nc","ok","sc","ga","pa","pit","sac","sdc","li","sj","mo","ut","va","dc","wat","wi","wor","wc","dt1","dt","gg","gt","oc","mi")
class RequestHandler(webapp.RequestHandler):
    """Renders filtered 2009 qualification-match data for one event
    (``?event=<code>``) as an HTML table, caching scraped matches in
    memcache for 3000 seconds."""
    def write(self,x):
        """Shorthand for self.response.out.write."""
        self.response.out.write(x)
    def get(self):
        # URLS = map(lambda code: "http://www.frclinks.com/e/m/%s/2009"%(code),events)
        # URL = "https://my.usfirst.org/myarea/index.lasso?page=teamlist&event_type=FRC&sort_teams=number&year=2009&event=gg"
        event = self.request.get('event')
        if event == '':
            return
        cache_key = "%s:%s"%(event,'qm')
        matches = memcache.get(cache_key)
        if matches is None:
            # Cache miss: scrape the event page and remember the result.
            DT1 = HorizontalParser(FRCLinks_URL_Loader("http://www.frclinks.com/e/m/%s/2009"%(event)))
            matches = QualificationMatches(DT1)
            memcache.add(cache_key,matches, 3000)
        # self.write(matches)
        filters = parseFilters(self.request.get_all('filters'))
        # FIX: materialise the filter result so len() and indexing work
        # the same on Python 2 and 3.
        filtered = list(filter(filterByAny(filters),matches))
        if len(filtered) == 0:
            return
        self.write("<table width='100%'>")
        self.write("<tr>")
        # FIX: the original took the header names from filtered[1] (the
        # *second* row), which raised IndexError when exactly one match
        # passed the filters; use the first row.  Also replaced the
        # side-effect map() (which would never run under Python 3's lazy
        # map) with a plain loop.
        for col in filtered[0].keys():
            self.write("<td>%s</td>"%(col))
        self.write("</tr>")
        for row in filtered:
            self.write("<tr>")
            for cell in row.values():
                self.write("<td>%s</td>"%(cell))
            self.write("</tr>")
        # FIX: close the table element (the original emitted unbalanced HTML).
        self.write("</table>")
        # Attendance = map(lambda Event: (Event,TeamList(HorizontalParser(FRCLinks_URL_Loader("http://www.frclinks.com/e/%s"%(Event))))), events[0])
        # self.write(json.dumps(Attendance))
        # self.write(self.request.get_all('filter'))
def main():
    """CGI entry point: route every request path to RequestHandler."""
    application = webapp.WSGIApplication([('.*', RequestHandler)],
                                         debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
#
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.api import memcache
from Parser import *
from FRCHelper import *
# Sort-direction flags (module constants; not referenced in this handler).
DESC = False
ASC = True
# FRC event codes this service knows how to scrape from frclinks.com.
events = ("az","or","nh","la","in","ma","oh","md","co","ct","da","roc","fl","kc","on","hi","is","nv","tx","ca","wa","il","mn","mn2","nj","ny","nc","ok","sc","ga","pa","pit","sac","sdc","li","sj","mo","ut","va","dc","wat","wi","wor","wc","dt1","dt","gg","gt","oc","mi")
class RequestHandler(webapp.RequestHandler):
    """Renders filtered 2009 qualification-match data for one event
    (``?event=<code>``) as an HTML table, caching scraped matches in
    memcache for 3000 seconds."""
    def write(self,x):
        """Shorthand for self.response.out.write."""
        self.response.out.write(x)
    def get(self):
        # URLS = map(lambda code: "http://www.frclinks.com/e/m/%s/2009"%(code),events)
        # URL = "https://my.usfirst.org/myarea/index.lasso?page=teamlist&event_type=FRC&sort_teams=number&year=2009&event=gg"
        event = self.request.get('event')
        if event == '':
            return
        cache_key = "%s:%s"%(event,'qm')
        matches = memcache.get(cache_key)
        if matches is None:
            # Cache miss: scrape the event page and remember the result.
            DT1 = HorizontalParser(FRCLinks_URL_Loader("http://www.frclinks.com/e/m/%s/2009"%(event)))
            matches = QualificationMatches(DT1)
            memcache.add(cache_key,matches, 3000)
        # self.write(matches)
        filters = parseFilters(self.request.get_all('filters'))
        # FIX: materialise the filter result so len() and indexing work
        # the same on Python 2 and 3.
        filtered = list(filter(filterByAny(filters),matches))
        if len(filtered) == 0:
            return
        self.write("<table width='100%'>")
        self.write("<tr>")
        # FIX: the original took the header names from filtered[1] (the
        # *second* row), which raised IndexError when exactly one match
        # passed the filters; use the first row.  Also replaced the
        # side-effect map() (which would never run under Python 3's lazy
        # map) with a plain loop.
        for col in filtered[0].keys():
            self.write("<td>%s</td>"%(col))
        self.write("</tr>")
        for row in filtered:
            self.write("<tr>")
            for cell in row.values():
                self.write("<td>%s</td>"%(cell))
            self.write("</tr>")
        # FIX: close the table element (the original emitted unbalanced HTML).
        self.write("</table>")
        # Attendance = map(lambda Event: (Event,TeamList(HorizontalParser(FRCLinks_URL_Loader("http://www.frclinks.com/e/%s"%(Event))))), events[0])
        # self.write(json.dumps(Attendance))
        # self.write(self.request.get_all('filter'))
def main():
    """CGI entry point: route every request path to RequestHandler."""
    application = webapp.WSGIApplication([('.*', RequestHandler)],
                                         debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
    main()
| Python |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2009, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.1.0.1"
__copyright__ = "Copyright (c) 2004-2009 Leonard Richardson"
__license__ = "New-style BSD"
import codecs
import markupbase
import types
import re
from HTMLParser import HTMLParser, HTMLParseError
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
# Widen markupbase's declaration-name matcher to accept ':' (and '-',
# '.') so namespaced XML declarations parse.
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
# Encoding used when rendering trees back out, unless overridden.
DEFAULT_OUTPUT_ENCODING = "utf-8"
# First, the classes that represent markup elements.
def sob(unicode, encoding):
    """Returns either the given Unicode string or its encoding.

    With *encoding* of None the string is returned untouched; otherwise
    it is encoded to that codec.
    """
    return unicode if encoding is None else unicode.encode(encoding)
class PageElement:
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        # Forward links are filled in as later elements are parsed.
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            # Hook this element up as the new last child of its parent.
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self
    def replaceWith(self, replaceWith):
        """Replaces this element in the tree with *replaceWith* (which
        may already be one of its siblings)."""
        oldParent = self.parent
        myIndex = self.parent.contents.index(self)
        if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
            # We're replacing this element with one of its siblings.
            index = self.parent.contents.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)
    def extract(self):
        """Destructively rips this element out of the tree.

        Repairs the parse-order (previous/next) chain and the sibling
        links around the removed subtree, then returns self."""
        if self.parent:
            try:
                self.parent.contents.remove(self)
            except ValueError:
                # Already detached from the parent's contents; proceed
                # to unlink the navigation pointers anyway.
                pass
        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next
        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None
        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        return self
    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        # Descend into the last child repeatedly until a leaf is reached.
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild
    def insert(self, position, newChild):
        """Inserts *newChild* into this element's contents at *position*,
        wrapping plain strings in NavigableString and re-wiring all the
        parse-order (previous/next) and sibling links."""
        if (isinstance(newChild, basestring)
            or isinstance(newChild, unicode)) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)
        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent != None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent == self:
                index = self.find(newChild)
                if index and index < position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            # Detach it from its old location before re-inserting.
            newChild.extract()
        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild
        newChildsLastElement = newChild._lastRecursiveChild()
        if position >= len(self.contents):
            # Appending at the end: the next element in parse order is
            # found by walking up to the first ancestor with a sibling.
            newChild.nextSibling = None
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            # Inserting before an existing child: splice in between.
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild
        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)
def append(self, tag):
    """Add the given element as the last child of this tag."""
    last = len(self.contents)
    self.insert(last, tag)
def findNext(self, name=None, attrs={}, text=None, **kwargs):
    """First element matching the criteria that occurs after this one
    in document order."""
    return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                **kwargs):
    """All elements matching the criteria that occur after this one in
    document order."""
    return self._findAll(name, attrs, text, limit, self.nextGenerator,
                         **kwargs)

def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
    """Nearest later sibling of this element that matches the criteria."""
    return self._findOne(self.findNextSiblings, name, attrs, text,
                         **kwargs)

def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                     **kwargs):
    """All later siblings of this element that match the criteria."""
    return self._findAll(name, attrs, text, limit,
                         self.nextSiblingGenerator, **kwargs)

fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
    """First element matching the criteria that occurs before this one
    in document order."""
    return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
    """All elements matching the criteria that occur before this one in
    document order."""
    return self._findAll(name, attrs, text, limit, self.previousGenerator,
                         **kwargs)

fetchPrevious = findAllPrevious # Compatibility with pre-3.x

def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
    """Nearest earlier sibling of this element that matches the criteria."""
    return self._findOne(self.findPreviousSiblings, name, attrs, text,
                         **kwargs)

def findPreviousSiblings(self, name=None, attrs={}, text=None,
                         limit=None, **kwargs):
    """All earlier siblings of this element that match the criteria."""
    return self._findAll(name, attrs, text, limit,
                         self.previousSiblingGenerator, **kwargs)

fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

def findParent(self, name=None, attrs={}, **kwargs):
    """Nearest enclosing element that matches the criteria.

    findParents takes a different argument list, so _findOne cannot be
    reused here.
    """
    matches = self.findParents(name, attrs, 1)
    if matches:
        return matches[0]
    return None

def findParents(self, name=None, attrs={}, limit=None, **kwargs):
    """All enclosing elements that match the criteria, innermost first."""
    return self._findAll(name, attrs, None, limit, self.parentGenerator,
                         **kwargs)

fetchParents = findParents # Compatibility with pre-3.x
#These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
    """Walk the elements produced by `generator`, collecting every one
    the strainer accepts, stopping once `limit` results are gathered."""
    if isinstance(name, SoupStrainer):
        strainer = name
    else:
        # Wrap the raw criteria in a SoupStrainer.
        strainer = SoupStrainer(name, attrs, text, **kwargs)
    results = ResultSet(strainer)
    for candidate in generator():
        # Generators may yield a trailing None; skip empty candidates.
        if not candidate:
            continue
        found = strainer.search(candidate)
        if found:
            results.append(found)
            if limit and len(results) >= limit:
                break
    return results
#These Generators can be used to navigate starting from both
#NavigableStrings and Tags.
def nextGenerator(self):
    """Yield each element after this one in document order.

    Like the original, yields a trailing None when the end of the
    chain is reached; callers filter falsy values.
    """
    node = self
    while node:
        node = node.next
        yield node

def nextSiblingGenerator(self):
    """Yield each later sibling (trailing None included)."""
    node = self
    while node:
        node = node.nextSibling
        yield node

def previousGenerator(self):
    """Yield each element before this one in document order
    (trailing None included)."""
    node = self
    while node:
        node = node.previous
        yield node

def previousSiblingGenerator(self):
    """Yield each earlier sibling (trailing None included)."""
    node = self
    while node:
        node = node.previousSibling
        yield node

def parentGenerator(self):
    """Yield each enclosing element, innermost first
    (trailing None included)."""
    node = self
    while node:
        node = node.parent
        yield node
# Utility methods
def substituteEncoding(self, str, encoding=None):
    """Replace the %SOUP-ENCODING% placeholder in `str` with the given
    encoding name (utf-8 when none is supplied)."""
    return str.replace("%SOUP-ENCODING%", encoding or "utf-8")
def toEncoding(self, s, encoding=None):
    """Encode `s` to a byte string in `encoding`, or convert it to
    Unicode when no encoding is given."""
    # Python 2 semantics: unicode objects are byte-encoded only when an
    # encoding is requested; byte strings are re-encoded (implicitly
    # decoding as ASCII first) or promoted to unicode; anything else is
    # str()'d and converted recursively.
    if isinstance(s, unicode):
        if encoding:
            s = s.encode(encoding)
    elif isinstance(s, str):
        if encoding:
            s = s.encode(encoding)
        else:
            s = unicode(s)
    else:
        if encoding:
            s = self.toEncoding(str(s), encoding)
        else:
            s = unicode(s)
    return s
class NavigableString(unicode, PageElement):
    """A text node: a Unicode string that also takes part in the parse
    tree via PageElement navigation."""

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickle support: reconstruct via __new__ from the plain
        # unicode value.
        return (unicode(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)

    def encode(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # Byte-encode the rendered text.
        # NOTE(review): relies on Python 2's unicode.decode() being
        # available on self — confirm if porting to Python 3.
        return self.decode().encode(encoding)

    def decodeGivenEventualEncoding(self, eventualEncoding):
        # Plain text needs no markup wrapper; subclasses override this
        # to add CDATA/comment/declaration delimiters.
        return self
class CData(NavigableString):
    """A CDATA section, rendered wrapped in <![CDATA[ ... ]]>."""
    def decodeGivenEventualEncoding(self, eventualEncoding):
        """Wrap the text in CDATA delimiters."""
        return u''.join([u'<![CDATA[', self, u']]>'])

class ProcessingInstruction(NavigableString):
    """A processing instruction, rendered wrapped in <? ... ?>."""
    def decodeGivenEventualEncoding(self, eventualEncoding):
        """Render as <?...?>, substituting the eventual encoding for
        the %SOUP-ENCODING% placeholder when present."""
        output = self
        if u'%SOUP-ENCODING%' in output:
            output = self.substituteEncoding(output, eventualEncoding)
        return u''.join([u'<?', output, u'?>'])

class Comment(NavigableString):
    """A comment, rendered wrapped in <!-- ... -->."""
    def decodeGivenEventualEncoding(self, eventualEncoding):
        """Wrap the text in comment delimiters."""
        return u''.join([u'<!--', self, u'-->'])

class Declaration(NavigableString):
    """An SGML declaration, rendered wrapped in <! ... >."""
    def decodeGivenEventualEncoding(self, eventualEncoding):
        """Wrap the text in declaration delimiters."""
        return u''.join([u'<!', self, u'>'])
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""

    def _invert(h):
        "Cheap function to invert a hash."
        # Runs at class-definition time to build
        # XML_SPECIAL_CHARS_TO_ENTITIES below; it is not used as a
        # method afterwards.
        i = {}
        for k,v in h.items():
            i[v] = k
        return i

    # The five predefined XML entities and the characters they stand
    # for, plus the inverse map used when escaping output.
    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
                                      "quot" : '"',
                                      "amp" : "&",
                                      "lt" : "<",
                                      "gt" : ">" }

    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
             previous=None):
    "Basic constructor."

    # We don't actually store the parser object: that lets extracted
    # chunks be garbage-collected
    self.parserClass = parser.__class__
    self.isSelfClosing = parser.isSelfClosingTag(name)
    self.name = name
    if attrs == None:
        attrs = []
    # attrs is an ordered list of (key, value) pairs.
    self.attrs = attrs
    self.contents = []
    self.setup(parent, previous)
    self.hidden = False
    self.containsSubstitutions = False
    # Entity-handling policy is copied off the parser so it survives
    # after the parser is discarded.
    self.convertHTMLEntities = parser.convertHTMLEntities
    self.convertXMLEntities = parser.convertXMLEntities
    self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities

    def convert(kval):
        "Converts HTML, XML and numeric entities in the attribute value."
        k, val = kval
        if val is None:
            # Boolean attribute (no value): leave untouched.
            return kval
        return (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
                          self._convertEntities, val))
    # NOTE: Python 2 map() returns a list here, so attrs stays a list.
    self.attrs = map(convert, self.attrs)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
    """Renders this tag as a string."""
    # Non-standard signature: repr() accepts an encoding and delegates
    # to decode().
    return self.decode(eventualEncoding=encoding)

# Matches a bare '<' or '>', or an '&' that does not begin a numeric
# or named entity reference; used to escape text on output.
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                       + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                       + ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
def __unicode__(self):
    # Python 2 unicode() protocol: render without byte-encoding.
    return self.decode()

def __str__(self):
    # Python 2 str() protocol: render byte-encoded to the default
    # output encoding.
    return self.encode()

def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
           prettyPrint=False, indentLevel=0):
    # Render to Unicode first, then byte-encode the result.
    return self.decode(prettyPrint, indentLevel, encoding).encode(encoding)
def decode(self, prettyPrint=False, indentLevel=0,
           eventualEncoding=DEFAULT_OUTPUT_ENCODING):
    """Returns a string or Unicode representation of this tag and
    its contents. To get Unicode, pass None for encoding."""
    # Render each attribute as key="value" (quoting and escaping as
    # needed), collecting the rendered pieces in attrs.
    attrs = []
    if self.attrs:
        for key, val in self.attrs:
            fmt = '%s="%s"'
            if isString(val):
                if (self.containsSubstitutions
                    and eventualEncoding is not None
                    and '%SOUP-ENCODING%' in val):
                    val = self.substituteEncoding(val, eventualEncoding)

                # The attribute value either:
                #
                # * Contains no embedded double quotes or single quotes.
                #   No problem: we enclose it in double quotes.
                # * Contains embedded single quotes. No problem:
                #   double quotes work here too.
                # * Contains embedded double quotes. No problem:
                #   we enclose it in single quotes.
                # * Embeds both single _and_ double quotes. This
                #   can't happen naturally, but it can happen if
                #   you modify an attribute value after parsing
                #   the document. Now we have a bit of a
                #   problem. We solve it by enclosing the
                #   attribute in single quotes, and escaping any
                #   embedded single quotes to XML entities.
                if '"' in val:
                    fmt = "%s='%s'"
                    if "'" in val:
                        # TODO: replace with apos when
                        # appropriate.
                        val = val.replace("'", "&squot;")

                # Now we're okay w/r/t quotes. But the attribute
                # value might also contain angle brackets, or
                # ampersands that aren't part of entities. We need
                # to escape those to XML entities too.
                val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
            if val is None:
                # Handle boolean attributes.
                decoded = key
            else:
                decoded = fmt % (key, val)
            attrs.append(decoded)
    # close is the self-closing marker (' /'); closeTag the explicit
    # closing tag. Exactly one of them is non-empty.
    close = ''
    closeTag = ''
    if self.isSelfClosing:
        close = ' /'
    else:
        closeTag = '</%s>' % self.name

    indentTag, indentContents = 0, 0
    if prettyPrint:
        indentTag = indentLevel
        # One space less than the nominal indent level.
        space = (' ' * (indentTag-1))
        indentContents = indentTag + 1
    contents = self.decodeContents(prettyPrint, indentContents,
                                   eventualEncoding)
    if self.hidden:
        # Hidden tags (such as the root [document] tag) render only
        # their contents, with no enclosing markup.
        s = contents
    else:
        s = []
        attributeString = ''
        if attrs:
            attributeString = ' ' + ' '.join(attrs)
        if prettyPrint:
            s.append(space)
        s.append('<%s%s%s>' % (self.name, attributeString, close))
        if prettyPrint:
            s.append("\n")
        s.append(contents)
        # Keep pretty-printed output newline-terminated before the
        # closing tag.
        if prettyPrint and contents and contents[-1] != "\n":
            s.append("\n")
        if prettyPrint and closeTag:
            s.append(space)
        s.append(closeTag)
        if prettyPrint and closeTag and self.nextSibling:
            s.append("\n")
        s = ''.join(s)
    return s
def decompose(self):
    """Recursively destroys the contents of this tree."""
    # Snapshot the child list first: extract() mutates self.contents.
    for child in list(self.contents):
        if isinstance(child, Tag):
            child.decompose()
        else:
            child.extract()
    self.extract()
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
    """Render this tag as an indented, human-readable byte string."""
    return self.encode(encoding, True)

def encodeContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                   prettyPrint=False, indentLevel=0):
    """Render only this tag's children, byte-encoded."""
    return self.decodeContents(prettyPrint, indentLevel).encode(encoding)
def decodeContents(self, prettyPrint=False, indentLevel=0,
                   eventualEncoding=DEFAULT_OUTPUT_ENCODING):
    """Renders the contents of this tag as a string in the given
    encoding. If encoding is None, returns a Unicode string.."""
    s=[]
    for c in self:
        text = None
        if isinstance(c, NavigableString):
            text = c.decodeGivenEventualEncoding(eventualEncoding)
        elif isinstance(c, Tag):
            # Child tags render themselves recursively.
            s.append(c.decode(prettyPrint, indentLevel, eventualEncoding))
        if text and prettyPrint:
            # Strip surrounding whitespace when pretty-printing; the
            # indent below re-establishes layout.
            text = text.strip()
        if text:
            if prettyPrint:
                s.append(" " * (indentLevel-1))
            s.append(text)
            if prettyPrint:
                s.append("\n")
    return ''.join(s)
#Soup methods

def find(self, name=None, attrs={}, recursive=True, text=None,
         **kwargs):
    """Return only the first child of this Tag matching the given
    criteria, or None when nothing matches."""
    matches = self.findAll(name, attrs, recursive, text, 1, **kwargs)
    if matches:
        return matches[0]
    return None

findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
            limit=None, **kwargs):
    """Extracts a list of Tag objects that match the given
    criteria. You can specify the name of the Tag and any
    attributes you want the Tag to have.

    The value of a key-value pair in the 'attrs' map can be a
    string, a list of strings, a regular expression object, or a
    callable that takes a string and returns whether or not the
    string matches for some custom definition of 'matches'. The
    same is true of the tag name."""
    if recursive:
        generator = self.recursiveChildGenerator
    else:
        generator = self.childGenerator
    return self._findAll(name, attrs, text, limit, generator, **kwargs)

findChildren = findAll
# Pre-3.x compatibility methods. Will go away in 4.0.
first = find
fetch = findAll

def fetchText(self, text=None, recursive=True, limit=None):
    # Pre-3.x spelling of findAll(text=...).
    return self.findAll(text=text, recursive=recursive, limit=limit)

def firstText(self, text=None, recursive=True):
    # Pre-3.x spelling of find(text=...).
    return self.find(text=text, recursive=recursive)
# 3.x compatibility methods. Will go away in 4.0.
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                   prettyPrint=False, indentLevel=0):
    """Render this tag's children: as Unicode when encoding is None,
    otherwise byte-encoded."""
    if encoding is None:
        return self.decodeContents(prettyPrint, indentLevel, encoding)
    return self.encodeContents(encoding, prettyPrint, indentLevel)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods

def recursiveChildGenerator(self):
    """Yield every descendant of this tag in document order.

    Fix: 'raise StopIteration' inside a generator body is an error
    under PEP 479 (RuntimeError on Python 3.7+); a bare return ends
    the generator identically in Python 2 as well.
    """
    if not len(self.contents):
        return
    # The element *after* our last descendant marks the end of our
    # subtree in document order.
    stopNode = self._lastRecursiveChild().next
    current = self.contents[0]
    while current is not stopNode:
        yield current
        current = current.next

def childGenerator(self):
    """Yield only the direct children of this tag, in order.

    Same PEP 479 fix as recursiveChildGenerator: bare return instead
    of 'raise StopIteration'.
    """
    if not len(self.contents):
        return
    current = self.contents[0]
    while current:
        yield current
        current = current.nextSibling
# Next, a couple classes to represent queries and their results.

class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        # name, each attrs value, and text may be a string, a regexp,
        # a list, a callable, or True; see _matches for the semantics.
        self.name = name
        if isString(attrs):
            # A bare string for attrs is shorthand for matching the
            # 'class' attribute.
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                # Copy before merging so neither the caller's dict nor
                # the shared {} default is mutated.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        # Match a tag by name and attributes. markupName may be a Tag
        # (in which case markupAttrs is taken from the tag itself) or
        # a plain name string.
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        # A callable self.name is handed the raw (name, attrs) pair
        # instead of a built Tag.
        callFunctionWithTagData = callable(self.name) \
            and not isinstance(markupName, Tag)

        if (not self.name) \
            or callFunctionWithTagData \
            or (markup and self._matches(markup, self.name)) \
            or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            # Build a dict view over the (key, value)
                            # attribute list.
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                    and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
             isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                % markup.__class__
        return found

    def _matches(self, markup, matchAgainst):
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
            # matchAgainst is literally True: accept anything non-None.
            result = markup != None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup is not None and not isString(markup):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif (isList(matchAgainst)
                  and (markup is not None or not isString(matchAgainst))):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): at this point markup is a string or
                # None, neither of which has has_key — this branch
                # looks like it would raise if ever reached; confirm
                # before relying on dict-valued criteria here.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                # Coerce matchAgainst to markup's string type before
                # the equality check below.
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        """Create an empty result list tied to `source` (a SoupStrainer).

        Fix: the original called list.__init__([]) — initializing a
        throwaway empty list rather than this instance. Passing self
        initializes the instance itself (a no-op for a fresh empty
        list, but now correct).
        """
        list.__init__(self)
        self.source = source
# Now, some helper functions.

def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike."""
    if hasattr(l, '__iter__') and not isString(l):
        return True
    return type(l) in (types.ListType, types.TupleType)
def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike.

    Fix: the isinstance(s, unicode) test was redundant — in Python 2,
    unicode is a subclass of basestring, so the basestring check alone
    covers both str and unicode. The NameError fallback keeps the
    helper working on interpreters without basestring.
    """
    try:
        return isinstance(s, basestring)
    except NameError:
        return isinstance(s, str)
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # Mapping: merge its entries verbatim.
            for key, value in portion.items():
                built[key] = value
            continue
        if isList(portion) and not isString(portion):
            # Sequence: every item maps to the default value.
            for key in portion:
                built[key] = default
        else:
            # Scalar: map it directly to the default value.
            built[portion] = default
    return built
# Now, the parser classes.

class HTMLParserBuilder(HTMLParser):
    # Adapter between Python's HTMLParser event callbacks and the soup
    # object's tree-building methods.

    def __init__(self, soup):
        HTMLParser.__init__(self)
        # All parse events are forwarded to this soup object.
        self.soup = soup

    # We inherit feed() and reset().

    def handle_starttag(self, name, attrs):
        if name == 'meta':
            # NOTE(review): extractCharsetFromMeta is defined on the
            # soup object (not shown here); presumably it inspects a
            # charset declaration before normal tag handling — confirm.
            self.soup.extractCharsetFromMeta(attrs)
        else:
            self.soup.unknown_starttag(name, attrs)

    def handle_endtag(self, name):
        self.soup.unknown_endtag(name)

    def handle_data(self, content):
        self.soup.handle_data(content)
def _toStringSubclass(self, text, subclass):
    """Adds a certain piece of text to the tree as a NavigableString
    subclass."""
    # Flush any pending text, feed this text in, then close it out
    # wrapped in the requested container class.
    self.soup.endData()
    self.handle_data(text)
    self.soup.endData(subclass)

def handle_pi(self, text):
    """Handle a processing instruction as a ProcessingInstruction
    object, possibly one with a %SOUP-ENCODING% slot into which an
    encoding will be plugged later."""
    if text[:3] == "xml":
        # Rewrite XML declarations so the declared encoding matches
        # whatever the document is eventually rendered as.
        text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
    self._toStringSubclass(text, ProcessingInstruction)

def handle_comment(self, text):
    "Handle comments as Comment objects."
    self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
    """Handle numeric character references as data.

    Fix: HTMLParser reports hexadecimal references (&#x2014;) with the
    leading 'x' kept in `ref`; int(ref) raised ValueError for those.
    They are now decoded with base 16.
    """
    if self.soup.convertEntities:
        if ref.lower().startswith('x'):
            data = unichr(int(ref[1:], 16))
        else:
            data = unichr(int(ref))
    else:
        # Pass the reference through verbatim.
        data = '&#%s;' % ref
    self.handle_data(data)
def handle_entityref(self, ref):
    """Handle entity references as data, possibly converting known
    HTML and/or XML entity references to the corresponding Unicode
    characters."""
    data = None
    if self.soup.convertHTMLEntities:
        try:
            data = unichr(name2codepoint[ref])
        except KeyError:
            # Not a named HTML entity; fall through to the XML map.
            pass

    if not data and self.soup.convertXMLEntities:
        data = self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

    if not data and self.soup.convertHTMLEntities and \
        not self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
        # TODO: We've got a problem here. We're told this is
        # an entity reference, but it's not an XML entity
        # reference or an HTML entity reference. Nonetheless,
        # the logical thing to do is to pass it through as an
        # unrecognized entity reference.
        #
        # Except: when the input is "&carol;" this function
        # will be called with input "carol". When the input is
        # "AT&T", this function will be called with input
        # "T". We have no way of knowing whether a semicolon
        # was present originally, so we don't know whether
        # this is an unknown entity or just a misplaced
        # ampersand.
        #
        # The more common case is a misplaced ampersand, so I
        # escape the ampersand and omit the trailing semicolon.
        data = "&%s" % ref
    if not data:
        # This case is different from the one above, because we
        # haven't already gone through a supposedly comprehensive
        # mapping of entities to Unicode characters. We might not
        # have gone through any mapping at all. So the chances are
        # very high that this is a real entity, and not a
        # misplaced ampersand.
        data = "&%s;" % ref
    self.handle_data(data)
def handle_decl(self, data):
    "Handle DOCTYPEs and the like as Declaration objects."
    self._toStringSubclass(data, Declaration)

def parse_declaration(self, i):
    """Treat a bogus SGML declaration as raw data. Treat a CDATA
    declaration as a CData object."""
    # Returns the index just past the declaration, per the HTMLParser
    # parse_declaration contract.
    j = None
    if self.rawdata[i:i+9] == '<![CDATA[':
        # Hand-parse the CDATA section; HTMLParser has no callback
        # for it.
        k = self.rawdata.find(']]>', i)
        if k == -1:
            # Unterminated CDATA section: consume to end of input.
            k = len(self.rawdata)
        data = self.rawdata[i+9:k]
        j = k+3
        self._toStringSubclass(data, CData)
    else:
        try:
            j = HTMLParser.parse_declaration(self, i)
        except HTMLParseError:
            # Malformed declaration: emit the rest verbatim as text.
            toHandle = self.rawdata[i:]
            self.handle_data(toHandle)
            j = i + len(toHandle)
    return j
class BeautifulStoneSoup(Tag):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
PRESERVE_WHITESPACE_TAGS = []
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None, isHTML=False,
builder=HTMLParserBuilder):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
HTMLParser will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
HTMLParser, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke HTMLParser:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
self.convertEntities = convertEntities
# Set the rules for how we'll deal with the entities we
# encounter
if self.convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
if convertEntities == self.HTML_ENTITIES:
self.convertXMLEntities = False
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = True
elif convertEntities == self.XHTML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = False
elif convertEntities == self.XML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
else:
self.convertXMLEntities = False
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
self.builder = builder(self)
self.reset()
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed(isHTML=isHTML)
except StopParsing:
pass
self.markup = None # The markup can now be GCed.
self.builder = None # So can the builder.
def _feed(self, inDocumentEncoding=None, isHTML=False):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit\
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
if markup:
if self.markupMassage:
if not isList(self.markupMassage):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
# TODO: We get rid of markupMassage so that the
# soup object can be deepcopied later on. Some
# Python installations can't copy regexes. If anyone
# was relying on the existence of markupMassage, this
# might cause problems.
del(self.markupMassage)
self.builder.reset()
self.builder.feed(markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
# Tags with just one string-owning child get the child as a
# 'string' property, so that soup.tag.string is shorthand for
# soup.tag.contents[0]
if len(self.currentTag.contents) == 1 and \
isinstance(self.currentTag.contents[0], NavigableString):
self.currentTag.string = self.currentTag.contents[0]
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = u''.join(self.currentData)
if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
not set([tag.name for tag in self.tagStack]).intersection(
self.PRESERVE_WHITESPACE_TAGS)):
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instqance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.
        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        # NESTABLE_TAGS maps a tag name to the list of tags that reset
        # its nesting; membership alone marks the tag as nestable.
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        # Walk the open-tag stack from innermost outward looking for the
        # nearest place this tag should implicitly close.
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurance.
                popTo = name
                break
            if (nestingResetTriggers != None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers == None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):
                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)
    def unknown_starttag(self, name, attrs, selfClosing=0):
        """Handle an opening tag: create a Tag, wire it into the tree,
        and manage self-closing tags, quoted (literal-text) sections and
        implicit closing of earlier tags.  Returns the new Tag, or None
        when a SoupStrainer rejects it."""
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()
        if not self.isSelfClosingTag(name) and not selfClosing:
            # May implicitly close previously opened tags (see _smartPop).
            self._smartPop(name)
        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return
        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            # Inside e.g. <script>, nested markup is treated as text.
            self.quoteStack.append(name)
            self.literal = 1
        return tag
    def unknown_endtag(self, name):
        """Handle a closing tag: flush pending text, pop the stack to the
        matching open tag, and leave quoted (literal) mode if this closes
        the quoting tag."""
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)
    def handle_data(self, data):
        """Buffer character data; it is flushed into the tree by endData()."""
        self.currentData.append(data)
    def extractCharsetFromMeta(self, attrs):
        """Base-class hook for <meta> tags: treat them as ordinary start
        tags.  BeautifulSoup overrides this to sniff the charset."""
        self.unknown_starttag('meta', attrs)
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurance of
      a <p> tag should implicitly close the previous <p> tag.
       <p>Para1<p>Para2
      should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurance
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.
       Alice said: <blockquote>Bob said: <blockquote>Blah
      should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.
       <table><tr>Blah<tr>Blah
      should be transformed into:
       <table><tr>Blah</tr><tr>Blah
      but,
       <tr>Blah<table><tr>Blah
      should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""
    def __init__(self, *args, **kwargs):
        # Default to converting MS smart quotes to HTML entities, and flag
        # the input as HTML so <meta> charset sniffing kicks in.
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)
    # Tags that never have a closing tag.
    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ['br' , 'hr', 'input', 'img', 'meta',
                                    'spacer', 'link', 'frame', 'base'])
    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
    # Content of these tags is treated as literal text, not markup.
    QUOTE_TAGS = {'script' : None, 'textarea' : None}
    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center']
    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']
    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }
    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }
    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']
    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)
    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
    def extractCharsetFromMeta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i
        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        # Abort this pass; _feed re-parses from scratch.
                        raise StopParsing
            pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised to abort the current parse pass, e.g. when a <meta> tag
    reveals the real document encoding and the markup must be re-fed."""
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-so-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""
    # Treat these inline/block tags as legitimately nestable, in
    # addition to everything BeautifulSoup already considers nestable.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big']
    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']
    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""
    # Drop all of BeautifulSoup's nesting heuristics.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""
    def popTag(self):
        # Before the normal pop, mirror a single-string child tag onto
        # its parent as an attribute (unless that attribute exists).
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Alias for BeautifulSOAP."""
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""
    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }
    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        # NOTE(review): overrideEncodings has a mutable default, but it is
        # only iterated here, never mutated, so the shared default is safe.
        self.declaredHTMLEncoding = None
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        # Already-unicode (or empty) input needs no conversion.
        if markup == '' or isinstance(markup, unicode):
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return
        u = None
        # Try encodings in priority order: caller overrides first, then
        # the declared/sniffed document encodings.
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break
        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])
        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break
        self.unicode = u
        if not u: self.originalEncoding = None
    def _subMSChar(self, match):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        orig = match.group(1)
        sub = self.MS_CHARS.get(orig)
        # Tuple entries are (html_entity_name, hex_codepoint); pick the
        # representation requested by smartQuotesTo.
        if type(sub) == types.TupleType:
            if self.smartQuotesTo == 'xml':
                sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
            else:
                sub = '&'.encode() + sub[0].encode() + ';'.encode()
        else:
            sub = sub.encode()
        return sub
    def _convertFrom(self, proposed):
        """Try to decode self.markup as *proposed*; on success store and
        return the unicode markup, on failure return None."""
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup
        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            smart_quotes_re = "([\x80-\x9f])"
            smart_quotes_compiled = re.compile(smart_quotes_re)
            markup = smart_quotes_compiled.sub(self._subMSChar, markup)
        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup
    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''
        # strip Byte Order Mark (if present); the BOM overrides the
        # caller-supplied encoding.
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata
    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding.
        Returns (possibly re-encoded data, declared encoding, sniffed
        encoding)."""
        xml_encoding = sniffed_xml_encoding = None
        try:
            # Sniff the byte order mark / first bytes of the XML prolog.
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            xml_encoding_match = None
        # Look for an explicit declaration: XML prolog, or (for HTML) a
        # <meta ... charset=...> tag.
        xml_encoding_re = '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode()
        xml_encoding_match = re.compile(xml_encoding_re).match(xml_data)
        if not xml_encoding_match and isHTML:
            meta_re = '<\s*meta[^>]+charset=([^>]*?)[;\'">]'.encode()
            regexp = re.compile(meta_re, re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].decode(
                'ascii').lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
        # A BOM beats a generic multi-byte declaration like 'utf-16'.
        if sniffed_xml_encoding and \
           (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                             'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                             'utf-16', 'utf-32', 'utf_16', 'utf_32',
                             'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding
    def find_codec(self, charset):
        """Map *charset* to a codec name Python recognizes, trying the
        alias table and common dash/underscore variants."""
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset
    def _codec(self, charset):
        """Return *charset* if Python has a codec for it, else None."""
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec
    # Lazily-built translation table for EBCDIC input; see below.
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        """Translate an EBCDIC-encoded byte string to ASCII."""
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
                ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)
    # windows-1252 "smart" characters mapped to (entity name, hex code)
    # pairs, or to a plain replacement string when no entity exists.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
# Reads HTML from stdin and writes an indented rendering to stdout.
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| Python |
#This file defines various convenience methods for filtering as well as some other handy functions
import re
CONVERSIONS = {"int":int,"float":float,"string":str,"number":int}
def QualificationMatches(page):
    """Return the parsed 'Qualification Matches' table from *page*."""
    table_name = "Qualification Matches"
    return page.parseByName(table_name)
def EliminationMatches(page):
    """Return the parsed 'Elimination Matches' table from *page*."""
    table_name = "Elimination Matches"
    return page.parseByName(table_name)
def QualificationSchedule(page):
    """Return the parsed 'Qualification Schedule' table from *page*."""
    table_name = "Qualification Schedule"
    return page.parseByName(table_name)
def EliminationSchedule(page):
    """Return the parsed 'Elimination Schedule' table from *page*."""
    table_name = "Elimination Schedule"
    return page.parseByName(table_name)
def TeamList(page):
    """Return the team-list table (third table on the page), supplying the
    column headers explicitly and skipping three leading rows."""
    headers = ['Location', 'Name', 'Number']
    return page.parseByIndex(2, headers, 3)
def Awards(page):
    """Return the awards table (third table on the page)."""
    table_index = 2
    return page.parseByIndex(table_index)
def Standings(page):
    """Return the standings table (third table on the page)."""
    table_index = 2
    return page.parseByIndex(table_index)
def safeConvert(val, toType):
    """Convert *val* with the callable *toType*, returning None on failure.

    Fix: the bare ``except`` also swallowed SystemExit/KeyboardInterrupt;
    catching Exception keeps the best-effort contract without hiding those.
    """
    try:
        return toType(val)
    except Exception:
        return None
#Generic helper methods, they take a list of functions.
def filterByAll(filters):
    """Build a predicate that is true only when every filter accepts the
    item.  With no filters (empty or None), everything passes."""
    if not filters:
        return lambda item: True
    def _conjunction(item):
        return all(check(item) for check in filters)
    return _conjunction
def filterByAny(filters):
    """Build a predicate that is true when at least one filter accepts the
    item.  With no filters (empty or None), everything passes."""
    if not filters:
        return lambda item: True
    def _disjunction(item):
        return any(check(item) for check in filters)
    return _disjunction
def greaterThan(cell, convFunc, number):
    """Predicate factory: converted value of column *cell* (underscores in
    the name become spaces) is strictly greater than *number*."""
    column = re.sub("_", " ", cell)
    return lambda item: convFunc(item[column]) > number
def lessThan(cell, convFunc, number):
    """Predicate factory: converted value of column *cell* (underscores in
    the name become spaces) is strictly less than *number*."""
    column = re.sub("_", " ", cell)
    return lambda item: convFunc(item[column]) < number
#Don't use this function with floats, equality for floats isn't assured
def equalTo(cell, convFunc, number):
    """Predicate factory: converted value of column *cell* (underscores in
    the name become spaces) equals *number*."""
    column = re.sub("_", " ", cell)
    return lambda item: convFunc(item[column]) == number
def getByKey(el, key):
    """Look up *key* in mapping *el*, translating underscores in the key
    to spaces first."""
    return el[key.replace("_", " ")]
def sortByKeys(L, keys, asType=int, ascending=True):
    """Sort the list of row-dicts *L* in place by the given underscore-style
    keys (each value converted with *asType* via safeConvert) and return L.

    Fix: the sort key was built with ``map``, which yields a lazy map
    object under Python 3; map objects are not comparable, so sorting
    raised TypeError.  A list comprehension is equivalent and portable.
    """
    if keys:
        L.sort(key=lambda x: [safeConvert(getByKey(x, key), asType)
                              for key in keys],
               reverse=not ascending)
    return L
#NOTE(review): redefines the CONVERSIONS mapping declared earlier in this
#module, silently dropping the "number" alias it provided.
CONVERSIONS = {"int":int,"float":float,"string":str}
#Combinators for composing filter sets; the empty string defaults to AND.
TYPES = {"OR":filterByAny,"AND":filterByAll,"":filterByAll}
def safeConvertArgs(val, toType):
    """Convert *val* using the converter named *toType* in CONVERSIONS,
    returning None on an unknown type name or a failed conversion.

    Fix: the bare ``except`` also swallowed SystemExit/KeyboardInterrupt;
    Exception still covers both the KeyError (unknown type name) and any
    conversion error.
    """
    try:
        return CONVERSIONS[toType](val)
    except Exception:
        return None
#Standings related stuff
def hasMoreWins(wins):
    """Filter factory: standings row has strictly more than *wins* wins."""
    return lambda match: int(wins) < int(match["Wins"])
def hasLessWins(wins):
    """Filter factory: standings row has strictly fewer than *wins* wins."""
    return lambda match: int(wins) > int(match["Wins"])
def hasLosses(losses):
    """Filter factory: standings row has exactly *losses* losses.

    Fixes two defects: the parameter was named ``Losses`` while the body
    referenced the undefined name ``losses`` (NameError on every call),
    and the threshold was never int-converted even though generateFilter
    passes string arguments (cf. hasMoreWins, which converts).
    """
    return lambda match: int(match["Losses"]) == int(losses)
def hasMoreLosses(losses):
    """Filter factory: standings row has strictly more than *losses* losses.

    Fixes the ``Losses`` parameter / undefined ``losses`` NameError and
    int-converts the threshold (generateFilter passes strings).
    """
    return lambda match: int(match["Losses"]) > int(losses)
def hasLessLosses(losses):
    """Filter factory: standings row has strictly fewer than *losses* losses.

    Fixes the ``Losses`` parameter / undefined ``losses`` NameError and
    int-converts the threshold (generateFilter passes strings).
    """
    return lambda match: int(match["Losses"]) < int(losses)
def hasLosses(losses):
    """Filter factory: standings row has exactly *losses* losses.

    NOTE(review): duplicate of the hasLosses defined a few lines earlier;
    this later definition is the one the module actually exposes.  Same
    fixes applied: the parameter was named ``Losses`` while the body used
    the undefined ``losses`` (NameError), and the threshold is now
    int-converted because generateFilter passes strings.
    """
    return lambda match: int(match["Losses"]) == int(losses)
def aboveRank(rank):
    """Filter factory: standings row's rank number is greater than *rank*.

    Fix: the threshold is now int-converted; generateFilter passes string
    arguments, and ``int > str`` is a TypeError (Py3) / nonsense (Py2).
    Consistent with hasMoreWins, which already converts.
    """
    return lambda match: int(match["Rank"]) > int(rank)
def belowRank(rank):
    """Filter factory: standings row's rank number is less than *rank*.

    Fix: int-convert the threshold (generateFilter passes strings).
    """
    return lambda match: int(match["Rank"]) < int(rank)
def isRank(rank):
    """Filter factory: standings row's rank number equals *rank*.

    Fix: int-convert the threshold; generateFilter passes strings, and
    ``int == str`` is silently False.
    """
    return lambda match: int(match["Rank"]) == int(rank)
def aboveQS(qs):
    """Filter factory: truncated QS of a standings row is greater than *qs*.

    Fix: convert the threshold with float (generateFilter passes strings).
    """
    return lambda match: int(float(match["QS"])) > float(qs)
def belowQS(qs):
    """Filter factory: truncated QS of a standings row is less than *qs*.

    Fix: convert the threshold with float (generateFilter passes strings).
    """
    return lambda match: int(float(match["QS"])) < float(qs)
def isQS(qs):
    """Filter factory: truncated QS of a standings row equals *qs*.

    Fix: convert the threshold with float (generateFilter passes strings;
    ``int == str`` is silently False).
    """
    return lambda match: int(float(match["QS"])) == float(qs)
def aboveRS(rs):
    """Filter factory: RS of a standings row is at least *rs* (inclusive,
    unlike the strict comparisons used by the QS filters).

    Fix: convert the threshold with float (generateFilter passes strings).
    """
    return lambda match: float(match["RS"]) >= float(rs)
def belowRS(rs):
    """Filter factory: RS of a standings row is at most *rs* (inclusive).

    Fix: convert the threshold with float (generateFilter passes strings).
    """
    return lambda match: float(match["RS"]) <= float(rs)
def aboveMP(mp):
    """Filter factory: MP of a standings row is greater than *mp*.

    Fix: the body read the "RS" column instead of "MP" — an apparent
    copy/paste from aboveRS — and never converted the threshold, which
    generateFilter passes as a string.
    """
    return lambda match: float(match["MP"]) > float(mp)
def belowMP(mp):
    """Filter factory: MP of a standings row is less than *mp*.

    Fix: the body read the "RS" column instead of "MP" — an apparent
    copy/paste from belowRS — and never converted the threshold, which
    generateFilter passes as a string.
    """
    return lambda match: float(match["MP"]) < float(mp)
def playedAtLeast(matches):
    """Filter factory: row's Played count is at least *matches*.

    Fixes: the comparison was strict ``>`` although the name promises an
    inclusive "at least", and the threshold was never int-converted even
    though generateFilter passes strings.
    """
    return lambda match: int(match["Played"]) >= int(matches)
def playedAtMost(matches):
    """Filter factory: row's Played count is at most *matches*.

    Fixes: the comparison was strict ``<`` although the name promises an
    inclusive "at most", and the threshold was never int-converted even
    though generateFilter passes strings.
    """
    return lambda match: int(match["Played"]) <= int(matches)
def playedMatches(matches):
    """Filter factory: row's Played count equals *matches*.

    Fix: int-convert the threshold; generateFilter passes strings and
    ``int == str`` is silently False.
    """
    return lambda match: int(match["Played"]) == int(matches)
# Team List related filters
def isTeam(number):
    """Filter factory: team-list row's Number equals *number*.

    Fix: int-convert the argument; generateFilter passes strings and
    ``int == str`` is silently False.
    """
    return lambda team: int(team["Number"]) == int(number)
#Match Related Stuff, works for schedule and result pages.
def redWins(match):
    """True when the red alliance outscored the blue alliance."""
    red = int(match['Red Score'])
    blue = int(match['Blue Score'])
    return red > blue
def blueWins(match):
    """True when the blue alliance outscored the red alliance."""
    red = int(match['Red Score'])
    blue = int(match['Blue Score'])
    return blue > red
def tie(match):
    """True when both alliances scored the same number of points."""
    red = int(match['Red Score'])
    blue = int(match['Blue Score'])
    return red == blue
def redAlliance(match):
    """The three red team numbers of a match, as a tuple of ints."""
    return tuple(int(match[slot]) for slot in ('Red 1', 'Red 2', 'Red 3'))
def blueAlliance(match):
    """The three blue team numbers of a match, as a tuple of ints."""
    return tuple(int(match[slot]) for slot in ('Blue 1', 'Blue 2', 'Blue 3'))
def teams(match):
    """All six team numbers of a match: red alliance first, then blue."""
    reds = redAlliance(match)
    blues = blueAlliance(match)
    return reds + blues
def played(team):
    """Filter factory: *team* appears on either alliance of a match."""
    return lambda match: int(team) in teams(match)
def isRed(team):
    """Filter factory: *team* appears on the red alliance of a match."""
    return lambda match: int(team) in redAlliance(match)
def isBlue(team):
    """Filter factory: *team* appears on the blue alliance of a match."""
    return lambda match: int(team) in blueAlliance(match)
def completed(match):
    """True once both score fields of the match have been filled in."""
    red_done = match['Red Score'] != ''
    blue_done = match['Blue Score'] != ''
    return red_done and blue_done
def winningAlliance(match):
    """Team numbers of the winning alliance, or () for a tie."""
    if redWins(match):
        return redAlliance(match)
    if blueWins(match):
        return blueAlliance(match)
    return ()
def losingAlliance(match):
    """Team numbers of the losing alliance, or () for a tie."""
    if redWins(match):
        return blueAlliance(match)
    if blueWins(match):
        return redAlliance(match)
    return ()
def won(team):
    """Filter factory: *team* is on the winning alliance of a match."""
    return lambda match: int(team) in winningAlliance(match)
def lost(team):
    """Filter factory: *team* is on the losing alliance of a match."""
    return lambda match: int(team) in losingAlliance(match)
def isElim(match):
    """True for elimination-round rows, which carry a 'Description' column."""
    return "Description" in match
def isFinal(match):
    """True when the row's Description contains the substring 'Final'."""
    return "Final" in match['Description']
def isSemi(match):
    """True when the row's Description contains the substring 'Semi'."""
    return "Semi" in match['Description']
def isQtr(match):
    """True when the row's Description contains the substring 'Qtr'."""
    return "Qtr" in match['Description']
#Registry mapping filter-spec names (as used by generateFilter) to the
#filter factories and match predicates defined above.
#Fix: the literal listed "hasLosses" twice; the duplicate key was
#silently discarded, so it is removed here.
FILTERS = {
        "hasMoreWins":hasMoreWins,
        "hasLessWins":hasLessWins,
        "hasLosses":hasLosses,
        "hasMoreLosses":hasMoreLosses,
        "hasLessLosses":hasLessLosses,
        "aboveRank":aboveRank,
        "belowRank":belowRank,
        "isRank":isRank,
        "aboveQS":aboveQS,
        "belowQS":belowQS,
        "isQS":isQS,
        "aboveRS":aboveRS,
        "belowRS":belowRS,
        "aboveMP":aboveMP,
        "belowMP":belowMP,
        "playedAtLeast":playedAtLeast,
        "playedAtMost":playedAtMost,
        "playedMatches":playedMatches,
        "isTeam":isTeam,
        "redWins":redWins,
        "blueWins":blueWins,
        "tie":tie,
        "redAlliance":redAlliance,
        "blueAlliance":blueAlliance,
        "teams":teams,
        "played":played,
        "isRed":isRed,
        "isBlue":isBlue,
        "completed":completed,
        "winningAlliance":winningAlliance,
        "losingAlliance":losingAlliance,
        "won":won,
        "lost":lost,
        "isElim":isElim,
        "isFinal":isFinal,
        "isSemi":isSemi,
        "isQtr":isQtr
        }
def generateFilter(arg):
    """Instantiate a filter from a 'name-arg1-arg2-...' spec string."""
    pieces = arg.split("-")
    factory = FILTERS[pieces[0]]
    return factory(*pieces[1:])
def parseFilters(filters):
    """Turn a list of filter-set spec strings like '(f1-a,f2-b)' into a
    list of AND-combined predicates, one per spec string.

    Fix: removed an unreachable trailing ``pass`` left after the return.
    """
    fList = []
    for fset in filters:
        # Strip the surrounding parentheses, then build one predicate
        # per comma-separated filter spec and AND them together.
        fset = re.sub("[\(\)]", "", fset)
        fList.append(filterByAll([generateFilter(f) for f in fset.split(",")]))
    return fList
def generateSort(arg):
    """Parse a sort spec like '(Rank,QS)' into a list of key names.

    Fix: removed a dead trailing ``pass`` statement.
    """
    return re.sub("[\(\)]", "", arg).split(",")
from BeautifulSoup import BeautifulSoup
from google.appengine.api import urlfetch
import re
import copy
VALID_TAGS = ['html','table','tr','th','td']
def FRCLinks_URL_Loader(URL):
    """Resolve an frclinks-style JavaScript redirect page and fetch the
    real target, returning its content or '' if the second fetch fails.

    Fix: narrowed the bare ``except`` (which also swallowed SystemExit /
    KeyboardInterrupt) to Exception, preserving the best-effort behavior.
    """
    Content = urlfetch.fetch(URL).content
    #Admittedly, this is a hack. Pat uses window.location= to redirect instead of a proper HTTP response so this is the easiest way
    URL = re.search("window.location.*?=.*?\"(.*)\"", Content).group(1)
    try:
        return urlfetch.fetch(URL).content
    except Exception:
        return ""
class HorizontalParser:
    """Extracts the <table> elements from a scraped FRC results page and
    parses them into lists of row dictionaries keyed by column header."""
    def __init__(self,Content):
        myNewMassage = copy.copy(BeautifulSoup.MARKUP_MASSAGE)
        # Drop the doctype and unwrap comments so commented-out markup
        # is parsed as content.
        Content = re.sub("<\!D.*?>","",Content)
        Content = re.sub("<!--(.*?)-->",lambda match: ""+match.group(1)+"",Content)
        dataMassage = [(re.compile('<!'), lambda match: '<'),(re.compile('<br.*?>'),lambda match: '\n')]
        myNewMassage.extend(dataMassage)
        #Because 4FX doesn't know how to make clean HTML code we have to beat its brains in with a bat to make it work.
        #A dash of pepper, some potatoes, some carrots and soon we will have some great soup. (Call BeautifulSoup on the Content and work its magic)
        soup = BeautifulSoup(Content,markupMassage = myNewMassage)
        #Remove all non-valid tags (ie, p, span, div,image, a, etc...)
        # NOTE(review): this compares Tag objects against the strings in
        # VALID_TAGS; it looks like it was meant to test
        # ``tag.name not in VALID_TAGS`` — confirm before relying on it.
        map(lambda tag: setattr(tag,"hidden",tag not in VALID_TAGS), soup.findAll(True))
        #Grab the tables
        self.tables = soup.findAll('table')
    #This will return the first tables with the given name.
    def tableByName(self,name):
        # The table "name" is the text of the first cell of its first row.
        tables = filter( lambda table: str(table.tr.td).strip() == name, self.tables)
        # Appending None makes a failed lookup return None instead of
        # raising IndexError.
        tables.append(None)
        return tables[0]
    #This will return the index+1th table on the page (ie index of 0 will return the 1st table)
    def tableByIndex(self,index):
        return self.tables[index]
    #This function will parse the named table, if the table has a name use this one as it will adjust for the title row
    def parseByName(self,name,headers = None,skip = 0):
        #First, we need to get the table
        table = self.tableByName(name)
        #If headers weren't supplied by the call we assume the first non-title row contains them
        index = 1 + skip #Because this table has a title we start at row 1 instead of row 0
        #If the headers aren't named (ie they are in the table) we get those
        if not headers:
            headers = self.getHeaders(table,index)
            #don't want to process this 2x right?
            index += 1
        return self.parse(table,index,headers)
    # This function will parse the indexed table, if the table lacks a name use this one as it will adjust for the lack of title row
    def parseByIndex(self,index,headers = None, skip = 0):
        #First, we need to get the table
        table = self.tableByIndex(index)
        #If headers weren't supplied by the call we assume the first non-title row contains them
        index = 0 + skip #Because this table lacks a title we start at row 0
        if not headers:
            headers = self.getHeaders(table,index)
            index += 1
        return self.parse(table,index,headers)
    def getHeaders(self,table,index):
        """Return the cell texts of row *index* for use as column names."""
        rows = table.findAll('tr')
        return map( lambda cell: str(cell).strip() , rows[index].findAll('td'))
    def parse(self,table,index,headers):
        """Zip each data row's cell texts (tags stripped) with *headers*,
        producing one dict per row starting at row *index*."""
        rows = table.findAll('tr')
        return map(lambda row: dict(zip(headers,map(lambda cell: re.sub("<.*?>","",str(cell).strip()), row.findAll('td')))), rows[index:])
    def parseAll(self):
        """Parse every table on the page by index."""
        return map(self.parseByIndex,range(0,len(self.tables)))
#!/usr/bin/env python
# encoding: utf-8
"""
FRCParser.py
Created by Andrew Schreiber on 2009-10-17.
"""
from BeautifulSoup import BeautifulSoup
import re
import string
#We only want certain tags, all others will be removed. Basically this makes our life easier
#(see sanitize_html below, which hides every tag not listed here).
VALID_TAGS = ['table', 'tr', 'td']
#content is the contents, as a string.
#all other options merely pass them through to the parse_table function.
def parse(content, indexCol='Match', skipTitle=False, asInt=False):
    """Parse every <table> in *content* into a dict of {title: rows}.

    Tables that do not fit the expected shape are skipped (best effort).
    Fix: narrowed the bare ``except`` — which also swallowed SystemExit /
    KeyboardInterrupt — to Exception, keeping the skip-on-failure intent.
    """
    content = re.sub("!", "", content)
    tables = sanitize_html(content).findAll('table')
    data = {}
    for i in range(0, len(tables)):
        try:
            # i doubles as the fallback title for untitled tables.
            table_data = parse_table(tables[i], indexCol, skipTitle, i, asInt)
            data[table_data[0]] = table_data[1]
        except Exception:
            # Deliberate: malformed tables are simply skipped.
            pass
    return data
#Return an associated array of each element in the table
#tData is the table data, it is actually a BeautifulSoup Node
#indexCol specifies the Name of the column that will be used, if none is specified ints will be used.
#skipTitle tells the parser that there is no title on the table. For example Awards has no title but Qualification Schedule does.
#customTitle lets you tell the parser what the title of this table is.
#asInt tells us to cast the indexCol as an Int.
def parse_table(tData, indexCol=None, skipTitle=False, customtitle='Unknown', asInt=False):
    """Turn one BeautifulSoup <table> node into (title, {index: row dict}).

    tData       -- BeautifulSoup node for the table.
    indexCol    -- name of the column whose value keys each row; when None,
                   rows are keyed by their 0-based position instead.
    skipTitle   -- True means the table has no title row: the first <tr>
                   already holds the column headers and *customtitle* is
                   used as the table's title.
    customtitle -- title to report when skipTitle is set (callers pass the
                   table's position here).
    asInt       -- cast the indexCol value to int before using it as a key.

    Raises KeyError/ValueError on tables that don't match this layout;
    the caller treats that as "skip this table".
    """
    column_names = {}
    tableData = {}
    # By default the first row's first cell is the table title, and the
    # row after it holds the column headers.
    title = array_to_string(tData.tr.td.contents)
    headercolumns = tData.tr.findNextSibling()
    if (skipTitle):
        # No title row at all: headers are in the very first row.
        headercolumns = tData.findAll('tr')[0]
        title = customtitle
    # Record the header text for each column position.
    i = 0
    for col in headercolumns.findAll('td'):
        column_names[i] = array_to_string(col.contents)
        i = i + 1
    # Walk the remaining sibling rows until we run off the end of the table.
    datacol = headercolumns.findNextSibling()
    index = 0
    while True:
        if (datacol == None):
            break
        column = {}
        i = 0
        for col in datacol.findAll('td'):
            column[column_names[i]] = array_to_string(col.contents)
            i = i + 1
        if (indexCol != None):
            if asInt == False:
                tableData[column[indexCol]] = column
            else:
                tableData[int(column[indexCol])] = column
        else:
            # No index column requested: key rows by position.
            tableData[index] = column
        datacol = datacol.findNextSibling()
        index = index + 1
    return title, tableData
#Removes all the tags we don't want to see
def sanitize_html(value):
    """Parse *value* and hide every tag whose name is not in VALID_TAGS.

    Hidden tags keep their contents but are not rendered, which reduces
    the document to just tables, rows and cells.
    """
    soup = BeautifulSoup(value)
    unwanted = [tag for tag in soup.findAll(True) if tag.name not in VALID_TAGS]
    for tag in unwanted:
        tag.hidden = True
    return soup
#strips off blanks
def remove_blank_lines(value):
    """Strip leading and trailing whitespace (including newlines) from *value*."""
    # The original used string.strip(value); that module-level function is
    # Python-2-only and was removed in Python 3.  The str method is the
    # equivalent, portable spelling.
    return value.strip()
#puts the array of things into a nice clean string, makes stuff look pretty.
def array_to_string(value):
string = ''
for row in value:
string+= str(row)
return remove_blank_lines(string)
| Python |
#This change is just to see if Subversion is set up right.
import FRCParser
import re
import time
import cgi
from django.utils import simplejson
from xml.dom.minidom import Document
from string import Template
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
from google.appengine.api import memcache
default_year = "2009"
rankings_cache = 60
awards_cache = 600
schedule_cache = 60
results_cache = 60
#Stores the event codes and their common names.
event_codes = {
"arizona":"az",
"az":"az",
"autodesk oregon":"or",
"or":"or",
"bae granite state":"nh",
"nh":"nh",
"bayou":"la",
"la":"la",
"boilermaker":"in",
"in":"in",
"boston":"ma",
"ma":"ma",
"buckeye":"oh",
"oh":"oh",
"chesapeake":"md",
"ma":"ma",
"colorado":"co",
"co":"co",
"connecticut":"ct",
"ct":"ct",
"dallas":"da",
"da":"da",
"finger lakes":"roc",
"roc":"roc",
"greater kansas city":"kc",
"kc":"kc",
"greater toronto":"on",
"on":"on",
"hawaii":"hi",
"hi":"hi",
"isreal":"is",
"is":"is",
"las vegas":"nv",
"nv":"nv",
"lone star":"tx",
"tx":"tx",
"los angeles":"ca",
"ca":"ca",
"microsoft seattle":"wa",
"wa":"wa",
"midwest":"il",
"il":"il",
"minnesota 10000 lakes":"mn",
"mn":"mn",
"minnesota north star":"mn2",
"mn2":"mn2",
"new jersey":"nj",
"nj":"nj",
"new york city":"ny",
"ny":"ny",
"north carolina":"nc",
"nc":"nc",
"oklahoma":"ok",
"ok":"ok",
"palmetto":"ga",
"ga":"ga",
"pittsburgh":"pit",
"pit":"pit",
"sacramento":"sac",
"sac":"sac",
"san diego":"sdc",
"sdc":"sdc",
"sbpli long island":"li",
"li":"li",
"silicon valley":"sj",
"sj":"sj",
"st. louis":"mo",
"mo":"mo",
"utah":"ut",
"ut":"ut",
"virginia":"va",
"va":"va",
"washington dc":"dc",
"dc":"dc",
"waterloo":"wat",
"wat":"wat",
"wisconsin":"wi",
"wi":"wi",
"wpi":"wor",
"wor":"wor",
"ann arbor":"wc",
"wc":"wc",
"cass tech":"dt1",
"dt1":"dt1",
"detroit":"dt",
"dt":"dt",
"kettering":"gg",
"gg":"gg",
"traverse city":"gt",
"gt":"gt",
"troy":"oc",
"oc":"oc",
"west michigan":"mi",
"mi":"mi",
"michigan state championship":"gl",
"gl":"gl",
"championship":"cmp",
"cmp":"cmp",
"archimedes":"arc",
"arc":"arc",
"curie":"cur",
"cur":"cur",
"galileo":"gal",
"gal":"gal",
"newton":"new",
"new":"new",
"einstein":"ein",
"ein":"ein"
}
# Request-type configuration table.  Each entry describes how to fetch and
# parse one kind of data:
#   url           -- string.Template URL, filled from the parsed request
#   Table         -- Template for the parsed table's title to return
#   Index_Column  -- column used to key each row
#   Skip_Title    -- True when the source table has no title row
#   Custom_Title  -- unused here (parse() supplies the table position)
#   As_Int        -- cast the index column to int
#   Require_When  -- request must say qualification/elimination
#   Cache_Time    -- memcache TTL in seconds
what = {
    "schedule":{"url":"http://www2.usfirst.org/${Year}comp/events/${Event}/schedule${When_Short}.html", "Table":"${When} Schedule","Index_Column":"Match","Skip_Title":False,"Custom_Title":None,"As_Int":True,"Require_When":True,"Cache_Time":schedule_cache},
    "rankings":{"url":"http://www2.usfirst.org/${Year}comp/events/${Event}/rankings.html", "Table":"2","Index_Column":"Rank","Skip_Title":True,"Custom_Title":None,"As_Int":True,"Require_When":False,"Cache_Time":rankings_cache},
    "awards":{"url":"http://www2.usfirst.org/${Year}comp/events/${Event}/awards.html", "Table":"2","Index_Column":"Award","Skip_Title":True,"Custom_Title":None,"As_Int":False,"Require_When":False,"Cache_Time":awards_cache},
    "results":{"url":"http://www2.usfirst.org/${Year}comp/events/${Event}/matchresults.html", "Table":"${When} Matches","Index_Column":"Match","Skip_Title":False,"Custom_Title":None,"As_Int":True,"Require_When":True,"Cache_Time":results_cache}
}
# Recognized URL-path keywords.  First element of each list is the default.
when = ["qualification","elimination"]
# NOTE(review): `format` shadows the builtin of the same name at module
# scope; left as-is because parse_request() references it by this name.
format = ["xml","json","human"]
year = ["2009","2008","2007"]
# Canned error fragments (currently unused by the handler below).
what_error = "You did not specify what you wanted.<br/>"
event_error = "You did not specify an event.<br/>"
def parse_request(handler):
    """Extract a request dict from the URL path of *handler*.

    Path segments are matched, in any order, against the known event
    codes, years, request types (`what`), when-keywords, output formats,
    and an optional "begin-end" range.  Unmatched settings fall back to
    the first entry of each list.  Sets request["Error"] when required
    pieces are missing (but see the NOTE on the final check).
    """
    request = {}
    args = handler.request.path.lower().split("/")
    have_year = False
    have_event = False
    have_what = False
    have_when = False
    need_when = False
    have_format = False
    index = 1  # NOTE(review): never used after this assignment
    # Defaults: first element of each option list.
    request["When"] = when[0]
    request["Year"] = year[0]
    request["Format"] = format[0]
    # NOTE(review): on Python 2 dict key order is arbitrary, so this
    # default "What" is nondeterministic (and .keys()[0] breaks on
    # Python 3 — would need list(what)[0]).  Verify intended default.
    request["What"] = what.keys()[0]
    for el in args:
        if el in event_codes:
            request["Event"] = event_codes[el]
            have_event = True
        if el in year:
            request["Year"] = el
            have_year = True
        if el in what.keys() and not have_what:
            request["What"] = el
            need_when = what[el]["Require_When"]
            have_what = True
        if el in when and not have_when:
            request["When"] = el
            # URL pages use a 4-letter abbreviation (qual/elim).
            request["When_Short"] = el[:4]
            have_when = True
        if el in format and not have_format:
            request["Format"] = el
            have_format = True
        if len(el.split("-"))>1:
            # A "begin-end" segment selects a slice of the results;
            # either side may be empty (open-ended range).
            request["Begin"] = el.split("-")[0]
            request["End"] = el.split("-")[1]
            if request["Begin"] == "":
                request["Begin"] = None
            if request["End"] == "":
                request["End"] = None
            request["Split"] = True
    # NOTE(review): this flags an error only when BOTH a required "when"
    # is missing AND year/event/what are incomplete; an `or` between the
    # two groups looks like the intent — confirm before changing.
    if (need_when and not have_when) and not (have_year and have_event and have_what) :
        request["Error"] = True
    return request
def slice(data, beg=None, end=None):
    """Return the sub-dict of *data* covering positions beg..end.

    *beg* and *end* are 1-based, inclusive, and may be None (or any
    int()-able string) to leave that side of the range open.

    Fixes over the original:
    - keys are sorted first, so the slice selects by match number rather
      than by arbitrary dict insertion/hash order;
    - an open *end* no longer collapses the slice to empty (on Python 2,
      ``None <= beg`` was True, which clamped end to beg);
    - ``sorted(...)`` works on Python 3, where ``dict.keys()[a:b]`` fails.
    """
    data2 = {}
    if beg != None:
        beg = int(beg) - 1  # convert 1-based position to 0-based index
    if beg == None:
        beg = 0
    if end != None:
        end = int(end)  # inclusive 1-based end == exclusive 0-based end
    if beg < 0:
        beg = 0
    if end is not None and end <= beg:
        end = beg  # empty range when the bounds are inverted
    for el in sorted(data.keys())[beg:end]:
        data2[el] = data[el]
    return data2
def XML_escape(value):
    """Make *value* safe for use as an XML tag/attribute name by turning
    spaces into underscores."""
    return str(value).replace(" ", "_")
def remove_tags(value):
    """Strip HTML/XML tags from *value*, then remove every space."""
    without_markup = re.sub("<.*?>", "", value)
    return without_markup.replace(" ", "")
class FRCFeed(webapp.RequestHandler):
    """Catch-all App Engine handler: fetches FIRST event pages, parses the
    tables with FRCParser, caches the result in memcache, and renders it
    as JSON, XML or a bare HTML table."""

    def display_index(self):
        """Write the usage string (shown for any request we can't serve)."""
        string = "Please use frcfeed.appspot.com/[Year]/[Event]/[Qualification or Elimination]/[Awards or Schedule or Rankings or Results]"
        self.response.out.write(string)

    def generate_response(self, request):
        """Fetch (or reuse cached) parsed data for *request* and render it.

        Raises KeyError/Template errors for malformed requests; get()
        catches those and falls back to the index page.
        """
        # Cache key is the full request identity.
        Memcache_Name = Template("${Year} ${Event} ${When} ${What}").substitute(request)
        data = memcache.get(Memcache_Name)
        What_Dictionary = what[request["What"]]
        Index_Column = What_Dictionary["Index_Column"]
        if data == None:
            # Cache miss: fetch the page from usfirst.org and parse it.
            url = Template(What_Dictionary["url"]).substitute(request)
            HTML = urlfetch.fetch(url).content
            Skip_Title = What_Dictionary["Skip_Title"]
            As_Int = What_Dictionary["As_Int"]
            data = FRCParser.parse(HTML,Index_Column,Skip_Title,As_Int)
            memcache.add(Memcache_Name,data,What_Dictionary["Cache_Time"])
        # Titles are stored title-cased; numeric titles (rankings/awards
        # tables are keyed by position) must be looked up as ints.
        Table_Name = Template(What_Dictionary["Table"]).substitute(request).title()
        if Table_Name.isdigit():
            Table_Name= int(Table_Name)
        # Apply an optional begin-end row slice (only meaningful when rows
        # are keyed by integer match numbers).
        if "Split" in request and What_Dictionary["As_Int"]:
            data[Table_Name] = slice(data[Table_Name],request["Begin"],request["End"])
        self.print_response(request,data[Table_Name],Index_Column)

    def print_response(self,request,data,indexValue):
        """Serialize *data* ({row key: {column: value}}) in the requested
        format.  *indexValue* names the XML element used per row."""
        if request["Format"] == "json":
            # Strip any leftover markup from the serialized payload.
            self.response.out.write(re.sub("<.*?>","",simplejson.dumps(data)))
        if request["Format"] == "xml":
            doc = Document()
            Event = doc.createElement("Event")
            doc.appendChild(Event)
            # NOTE(review): keys()/sort() is Python-2-only list behavior.
            Sorted_Keys = data.keys()
            Sorted_Keys.sort()
            for element in Sorted_Keys:
                xml_element = doc.createElement(indexValue)
                xml_element.setAttribute("id",XML_escape(element))
                Event.appendChild(xml_element)
                for content in data[element].keys():
                    # Column name becomes the element tag, value its text.
                    xml_content = doc.createElement(XML_escape(content))
                    info = doc.createTextNode(str(data[element][content]))
                    xml_content.appendChild(info)
                    xml_element.appendChild(xml_content)
            self.response.out.write(doc.toprettyxml(indent = " "))
        if request["Format"] == "human":
            # Same traversal as XML, but rendered as an HTML table.
            doc = Document()
            Event = doc.createElement("table")
            doc.appendChild(Event)
            Sorted_Keys = data.keys()
            Sorted_Keys.sort()
            for element in Sorted_Keys:
                xml_element = doc.createElement("tr")
                xml_element.setAttribute("id",XML_escape(element))
                Event.appendChild(xml_element)
                for content in data[element].keys():
                    xml_content = doc.createElement("td")
                    info = doc.createTextNode(str(data[element][content]))
                    xml_content.appendChild(info)
                    xml_element.appendChild(xml_content)
            self.response.out.write(doc.toprettyxml(indent = " "))

    def get(self):
        """Entry point: parse the URL and respond; any failure (bad event,
        fetch error, parse error) falls back to the usage page."""
        request = parse_request(self)
        try:
            self.generate_response(request)
        except:
            # NOTE(review): bare except also hides programming errors —
            # consider narrowing to Exception and logging.
            self.display_index()
# Route every path to FRCFeed; parse_request() does its own dispatching.
application = webapp.WSGIApplication(
    [('.*', FRCFeed)], debug=True)

def main():
    """Start the WSGI app; startup failures are deliberately suppressed."""
    try:
        run_wsgi_app(application)
    except Exception:
        # A bare except here also swallowed SystemExit/KeyboardInterrupt;
        # catching Exception keeps the original best-effort behavior while
        # letting interpreter-level signals propagate.
        pass

if __name__ == "__main__":
    main()
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2009, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.1.0.1"
__copyright__ = "Copyright (c) 2004-2009 Leonard Richardson"
__license__ = "New-style BSD"
import codecs
import markupbase
import types
import re
from HTMLParser import HTMLParser, HTMLParseError
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
DEFAULT_OUTPUT_ENCODING = "utf-8"
# First, the classes that represent markup elements.
def sob(unicode, encoding):
    """Return the given Unicode string itself, or its byte encoding when
    *encoding* is given."""
    return unicode if encoding is None else unicode.encode(encoding)
class PageElement:
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text).

    Every element keeps five links into the parse tree: parent,
    previous/next (document order over ALL elements, including text), and
    previousSibling/nextSibling (same-parent order).  The mutation methods
    below maintain all five consistently.
    """

    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            # We are being appended to the parent, so the parent's current
            # last child becomes our previous sibling.
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self

    def replaceWith(self, replaceWith):
        """Replace this element in the tree with *replaceWith*."""
        oldParent = self.parent
        myIndex = self.parent.contents.index(self)
        if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
            # We're replacing this element with one of its siblings.
            index = self.parent.contents.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)

    def extract(self):
        """Destructively rips this element out of the tree.

        Returns self, now detached (no parent/sibling links, and the
        surrounding elements' previous/next chain spliced around it).
        """
        if self.parent:
            try:
                self.parent.contents.remove(self)
            except ValueError:
                # Already absent from the parent's contents; tolerate it.
                pass
        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next
        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None
        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        return self

    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild

    def insert(self, position, newChild):
        """Insert *newChild* (Tag or string) at *position* in this
        element's contents, rewiring all navigation links."""
        if (isinstance(newChild, basestring)
            or isinstance(newChild, unicode)) \
            and not isinstance(newChild, NavigableString):
            # Plain strings are promoted so they carry navigation links.
            newChild = NavigableString(newChild)
        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent != None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent == self:
                index = self.find(newChild)
                if index and index < position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()
        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild
        newChildsLastElement = newChild._lastRecursiveChild()
        if position >= len(self.contents):
            # Appending at the end: the element after newChild in document
            # order is the next sibling of the nearest ancestor that has one.
            newChild.nextSibling = None
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild
        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)

    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)

    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x

    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)

    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r

    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x

    #These methods do the real heavy lifting.

    def _findOne(self, method, name, attrs, text, **kwargs):
        """Run *method* with limit=1 and return the single hit (or None)."""
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        else:
            # Build a SoupStrainer
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                i = g.next()
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    # Each generator yields one trailing None when the chain runs out
    # (the `if i:` guard in _findAll skips it).
    def nextGenerator(self):
        i = self
        while i:
            i = i.next
            yield i

    def nextSiblingGenerator(self):
        i = self
        while i:
            i = i.nextSibling
            yield i

    def previousGenerator(self):
        i = self
        while i:
            i = i.previous
            yield i

    def previousSiblingGenerator(self):
        i = self
        while i:
            i = i.previousSibling
            yield i

    def parentGenerator(self):
        i = self
        while i:
            i = i.parent
            yield i

    # Utility methods

    def substituteEncoding(self, str, encoding=None):
        """Replace the %SOUP-ENCODING% placeholder with *encoding*."""
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)

    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode.
        ."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            # Not a string at all: stringify first, then recurse.
            if encoding:
                s  = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    """A piece of text that knows its place in the parse tree: a unicode
    string carrying PageElement's navigation links."""

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickling support: reconstruct from the plain unicode value.
        return (unicode(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)

    def encode(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return self.decode().encode(encoding)

    def decodeGivenEventualEncoding(self, eventualEncoding):
        # Plain text renders as itself; subclasses add their wrappers.
        return self
class CData(NavigableString):
    """A CDATA section: rendered wrapped in the CDATA markers."""
    def decodeGivenEventualEncoding(self, eventualEncoding):
        return u'<![CDATA[%s]]>' % (self,)
class ProcessingInstruction(NavigableString):
    """A processing instruction, rendered as <?...?>; the special
    %SOUP-ENCODING% placeholder is substituted at output time."""
    def decodeGivenEventualEncoding(self, eventualEncoding):
        text = self
        if u'%SOUP-ENCODING%' in text:
            text = self.substituteEncoding(text, eventualEncoding)
        return u'<?%s?>' % (text,)
class Comment(NavigableString):
    """An HTML/XML comment, rendered wrapped in comment markers."""
    def decodeGivenEventualEncoding(self, eventualEncoding):
        return u'<!--%s-->' % (self,)
class Declaration(NavigableString):
    """A declaration such as DOCTYPE, rendered wrapped in <! and >."""
    def decodeGivenEventualEncoding(self, eventualEncoding):
        return u'<!%s>' % (self,)
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs == None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
def convert(kval):
"Converts HTML, XML and numeric entities in the attribute value."
k, val = kval
if val is None:
return kval
return (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities, val))
self.attrs = map(convert, self.attrs)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.decode(eventualEncoding=encoding)
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
return self.decode(prettyPrint, indentLevel, encoding).encode(encoding)
def decode(self, prettyPrint=False, indentLevel=0,
eventualEncoding=DEFAULT_OUTPUT_ENCODING):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding."""
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isString(val):
if (self.containsSubstitutions
and eventualEncoding is not None
and '%SOUP-ENCODING%' in val):
val = self.substituteEncoding(val, eventualEncoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
# TODO: replace with apos when
# appropriate.
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
if val is None:
# Handle boolean attributes.
decoded = key
else:
decoded = fmt % (key, val)
attrs.append(decoded)
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % self.name
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.decodeContents(prettyPrint, indentContents,
eventualEncoding)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (self.name, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def decompose(self):
"""Recursively destroys the contents of this tree."""
contents = [i for i in self.contents]
for i in contents:
if isinstance(i, Tag):
i.decompose()
else:
i.extract()
self.extract()
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.encode(encoding, True)
def encodeContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
return self.decodeContents(prettyPrint, indentLevel).encode(encoding)
def decodeContents(self, prettyPrint=False, indentLevel=0,
eventualEncoding=DEFAULT_OUTPUT_ENCODING):
"""Renders the contents of this tag as a string in the given
encoding. If encoding is None, returns a Unicode string.."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.decodeGivenEventualEncoding(eventualEncoding)
elif isinstance(c, Tag):
s.append(c.decode(prettyPrint, indentLevel, eventualEncoding))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
            limit=None, **kwargs):
    """Extracts a list of Tag objects that match the given
    criteria. You can specify the name of the Tag and any
    attributes you want the Tag to have.

    The value of a key-value pair in the 'attrs' map can be a
    string, a list of strings, a regular expression object, or a
    callable that takes a string and returns whether or not the
    string matches for some custom definition of 'matches'. The
    same is true of the tag name."""
    # Descend recursively unless the caller restricted the search
    # to direct children.
    if recursive:
        generator = self.recursiveChildGenerator
    else:
        generator = self.childGenerator
    return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods. Will go away in 4.0.
first = find
fetch = findAll

def fetchText(self, text=None, recursive=True, limit=None):
    # Deprecated alias: collect all text nodes matching 'text'.
    return self.findAll(text=text, recursive=recursive, limit=limit)

def firstText(self, text=None, recursive=True):
    # Deprecated alias: first text node matching 'text'.
    return self.find(text=text, recursive=recursive)

# 3.x compatibility methods. Will go away in 4.0.
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                   prettyPrint=False, indentLevel=0):
    # Deprecated: encoding=None yields a Unicode string via
    # decodeContents; otherwise an encoded string via encodeContents.
    if encoding is None:
        return self.decodeContents(prettyPrint, indentLevel, encoding)
    else:
        return self.encodeContents(encoding, prettyPrint, indentLevel)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def recursiveChildGenerator(self):
    """Yields every descendant of this tag, in document order,
    by following the .next chain up to (but not including) the
    node just past this tag's last recursive child."""
    if not len(self.contents):
        # Bug fix (PEP 479): raising StopIteration inside a generator
        # is an error on modern Pythons; a bare return terminates the
        # generator identically on Python 2 and 3.
        return
    stopNode = self._lastRecursiveChild().next
    current = self.contents[0]
    while current is not stopNode:
        yield current
        current = current.next
def childGenerator(self):
    """Yields each direct child of this tag, in order, by following
    the .nextSibling chain from the first child."""
    if not len(self.contents):
        # Bug fix (PEP 479): 'raise StopIteration' inside a generator
        # is an error on modern Pythons; 'return' behaves identically.
        return
    current = self.contents[0]
    while current:
        yield current
        current = current.nextSibling
    # The original ended with a second 'raise StopIteration' here;
    # falling off the end of a generator has the same effect.
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        # 'name', each attrs value, and 'text' may be a string, a list,
        # a regexp object, or a callable; see _matches for the rules.
        self.name = name
        if isString(attrs):
            # A bare string for attrs is shorthand for the CSS class.
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        """Returns the matching tag (or tag name) if this strainer
        matches the given tag name/attributes, otherwise None."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            # Called with an actual Tag object: the Tag serves as its
            # own attribute source below.
            markup = markupName
            markupAttrs = markup
        callFunctionWithTagData = callable(self.name) \
                                  and not isinstance(markupName, Tag)
        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                # A callable 'name' gets both the tag name and attrs
                # and decides the whole match by itself.
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        # Lazily build a dict view of the attributes
                        # (they may arrive as a list of (k, v) pairs).
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        """Runs this strainer against a tag, a text element, or a list
        of either; returns the matching element or None."""
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found

    def _matches(self, markup, matchAgainst):
        """Core matching rule: supports True (anything non-None),
        callables, regexps (via .match attr), lists, dicts, and
        plain string equality."""
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
            # A literal True matches any non-None markup.
            result = markup != None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup is not None and not isString(markup):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif (isList(matchAgainst)
                  and (markup is not None or not isString(matchAgainst))):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): 'markup' is a string or None here, and
                # strings have no has_key; this looks like it should be
                # matchAgainst.has_key(markup) — confirm before relying
                # on dict-valued criteria.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                # Normalize matchAgainst to the same string type as
                # markup before the equality fallback below.
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)
            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # Bug fix: the original called list.__init__([]), which
        # initialized a throwaway list rather than this instance.
        # Pass self so the base class is initialized properly.
        list.__init__(self)
        self.source = source
# Now, some helper functions.
def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike."""
    # Anything iterable that isn't string-like counts as a list;
    # otherwise fall back to an exact list/tuple type check.
    if hasattr(l, '__iter__') and not isString(l):
        return True
    return type(l) in (types.ListType, types.TupleType)
def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike."""
    try:
        # On Pythons that define unicode/basestring, accept either.
        return isinstance(s, (unicode, basestring))
    except NameError:
        # Otherwise (no unicode builtin), fall back to plain str.
        return isinstance(s, str)
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # A map: merge its entries wholesale.
            built.update(portion)
        elif isList(portion) and not isString(portion):
            # A list: every item maps to the default.
            built.update(dict.fromkeys(portion, default))
        else:
            # A scalar: map it directly to the default.
            built[portion] = default
    return built
# Now, the parser classes.
class HTMLParserBuilder(HTMLParser):
    """Adapter between HTMLParser's event callbacks and the soup
    object that builds the parse tree: every handler delegates to
    self.soup."""

    def __init__(self, soup):
        HTMLParser.__init__(self)
        self.soup = soup

    # We inherit feed() and reset().

    def handle_starttag(self, name, attrs):
        if name == 'meta':
            # <meta> may declare the document charset; let the soup
            # inspect it (and possibly trigger a re-parse).
            self.soup.extractCharsetFromMeta(attrs)
        else:
            self.soup.unknown_starttag(name, attrs)

    def handle_endtag(self, name):
        self.soup.unknown_endtag(name)

    def handle_data(self, content):
        self.soup.handle_data(content)

    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        # Flush pending text, push this text, then flush again wrapped
        # in the requested subclass (Comment, CData, Declaration, ...).
        self.soup.endData()
        self.handle_data(text)
        self.soup.endData(subclass)

    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)

    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)

    def handle_charref(self, ref):
        "Handle character references as data."
        if self.soup.convertEntities:
            data = unichr(int(ref))
        else:
            # Leave the reference untouched in the output.
            data = '&#%s;' % ref
        self.handle_data(data)

    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.soup.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass
        if not data and self.soup.convertXMLEntities:
            data = self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
        if not data and self.soup.convertHTMLEntities and \
            not self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
            # TODO: We've got a problem here. We're told this is
            # an entity reference, but it's not an XML entity
            # reference or an HTML entity reference. Nonetheless,
            # the logical thing to do is to pass it through as an
            # unrecognized entity reference.
            #
            # Except: when the input is "&carol;" this function
            # will be called with input "carol". When the input is
            # "AT&T", this function will be called with input
            # "T". We have no way of knowing whether a semicolon
            # was present originally, so we don't know whether
            # this is an unknown entity or just a misplaced
            # ampersand.
            #
            # The more common case is a misplaced ampersand, so I
            # escape the ampersand and omit the trailing semicolon.
            data = "&%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)

    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)

    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # Unterminated CDATA: consume to end of input.
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = HTMLParser.parse_declaration(self, i)
            except HTMLParseError:
                # Malformed declaration: fall back to raw text.
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulStoneSoup(Tag):
    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:

      You can't close a tag without closing all the tags it encloses.
      That is, "<foo><bar></foo>" actually means
      "<foo><bar></bar></foo>".

    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]

    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""

    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}
    PRESERVE_WHITESPACE_TAGS = []

    # Regex fixups applied to the raw markup before parsing; see the
    # markupMassage argument to __init__ below.
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]

    ROOT_TAG_NAME = u'[document]'

    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES

    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }

    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False,
                 builder=HTMLParserBuilder):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        HTMLParser will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        HTMLParser, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.

        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.

        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke HTMLParser:

         <br/> (No space between name of closing tag and tag close)
         <! --Comment--> (Extraneous whitespace in declaration)

        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False

        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        self.builder = builder(self)
        self.reset()

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            self._feed(isHTML=isHTML)
        except StopParsing:
            # Raised (e.g. by extractCharsetFromMeta) once a re-parse
            # with the discovered encoding has completed.
            pass
        self.markup = None                 # The markup can now be GCed.
        self.builder = None                # So can the builder.

    def _feed(self, inDocumentEncoding=None, isHTML=False):
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            # UnicodeDammit sniffs/declares the encoding and converts.
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                if not isList(self.markupMassage):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.builder.reset()

        self.builder.feed(markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def isSelfClosingTag(self, name):
        """Returns true iff the given string is the name of a
        self-closing tag according to this parser."""
        return self.SELF_CLOSING_TAGS.has_key(name) \
               or self.instanceSelfClosingTags.has_key(name)

    def reset(self):
        # The soup object itself is the root Tag of the tree.
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        self.builder.reset()
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        self.pushTag(self)

    def popTag(self):
        tag = self.tagStack.pop()
        # Tags with just one string-owning child get the child as a
        # 'string' property, so that soup.tag.string is shorthand for
        # soup.tag.contents[0]
        if len(self.currentTag.contents) == 1 and \
           isinstance(self.currentTag.contents[0], NavigableString):
            self.currentTag.string = self.currentTag.contents[0]

        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]

    def endData(self, containerClass=NavigableString):
        """Flushes accumulated character data into the tree as a
        containerClass node, collapsing pure-whitespace runs unless
        inside a PRESERVE_WHITESPACE_TAGS element."""
        if self.currentData:
            currentData = u''.join(self.currentData)
            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
                not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                # All-ASCII-whitespace text collapses to one char.
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)

    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instqance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # Never pop the root document tag.
            return

        numPops = 0
        mostRecentTag = None
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1

        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag

    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.

        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        # Walk the open-tag stack from the top down, looking for the
        # tag (or reset trigger) that decides where to pop to.
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurance.
                popTo = name
                break
            if (nestingResetTriggers != None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers == None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):
                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)

    def unknown_starttag(self, name, attrs, selfClosing=0):
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)

        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            # Inside e.g. <script>, subsequent "tags" are literal text.
            #print "Beginning quote (%s)" % name
            self.quoteStack.append(name)
            self.literal = 1
        return tag

    def unknown_endtag(self, name):
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)

    def handle_data(self, data):
        # Character data is buffered until endData() flushes it.
        self.currentData.append(data)

    def extractCharsetFromMeta(self, attrs):
        # Base class ignores charset hints; subclasses (BeautifulSoup)
        # override this to sniff <meta> charset declarations.
        self.unknown_starttag('meta', attrs)
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurance of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurance
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
       but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        # HTML input: default smart-quote conversion to HTML entities
        # and tell the base class this is HTML.
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ['br' , 'hr', 'input', 'img', 'meta',
                                     'spacer', 'link', 'frame', 'base'])

    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    QUOTE_TAGS = {'script' : None, 'textarea' : None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center']

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def extractCharsetFromMeta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        # Locate the http-equiv and content attributes.
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        # Abort the current (wrong-encoding) parse;
                        # caught in BeautifulStoneSoup.__init__.
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised to abort the current parse; caught in
    BeautifulStoneSoup.__init__ (e.g. after a charset-triggered
    re-parse has already completed)."""
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-co-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # NOTE(review): 'strong' and 'big' each appear twice in this list.
    # Harmless (buildTagMap deduplicates keys) but worth tidying.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big']

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']

    # Extend BeautifulSoup's nesting map with the tags above.
    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # NOTE(review): buildTagMap's signature is (default, *args), so
    # buildTagMap('noscript') passes 'noscript' as the *default* with
    # no tag arguments and evaluates to {}. Confirm whether
    # buildTagMap(None, 'noscript') was intended.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        # Before the normal pop, mirror a single-string child tag into
        # its parent as an attribute (see class docstring), unless the
        # parent already has an attribute by that name.
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
    import chardet
    # import chardet.constants
    # chardet.constants._debug = 1
except ImportError:
    # chardet is optional; UnicodeDammit checks for None and falls
    # back to declared/sniffed encodings only.
    chardet = None

# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
    import cjkcodecs.aliases
except ImportError:
    pass
try:
    import iconv_codec
except ImportError:
    pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec (defined later in this class).
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }
# NOTE(review): overrideEncodings=[] is a mutable default; it is only
# iterated here, never mutated, so it is harmless — but worth confirming
# before refactoring.
def __init__(self, markup, overrideEncodings=[],
             smartQuotesTo='xml', isHTML=False):
    """Tries a cascade of encodings until one converts the markup:
    caller overrides, then the document-declared and byte-sniffed
    encodings, then chardet (if installed), then utf-8 and
    windows-1252 as a last resort."""
    self.declaredHTMLEncoding = None
    self.markup, documentEncoding, sniffedEncoding = \
                 self._detectEncoding(markup, isHTML)
    self.smartQuotesTo = smartQuotesTo
    self.triedEncodings = []
    if markup == '' or isinstance(markup, unicode):
        # Already Unicode (or empty): nothing to convert.
        self.originalEncoding = None
        self.unicode = unicode(markup)
        return

    u = None
    for proposedEncoding in overrideEncodings:
        u = self._convertFrom(proposedEncoding)
        if u: break
    if not u:
        for proposedEncoding in (documentEncoding, sniffedEncoding):
            u = self._convertFrom(proposedEncoding)
            if u: break

    # If no luck and we have auto-detection library, try that:
    if not u and chardet and not isinstance(self.markup, unicode):
        u = self._convertFrom(chardet.detect(self.markup)['encoding'])

    # As a last resort, try utf-8 and windows-1252:
    if not u:
        for proposed_encoding in ("utf-8", "windows-1252"):
            u = self._convertFrom(proposed_encoding)
            if u: break

    self.unicode = u
    if not u: self.originalEncoding = None
def _subMSChar(self, match):
    """Changes a MS smart quote character to an XML or HTML
    entity."""
    orig = match.group(1)
    sub = self.MS_CHARS.get(orig)
    # A tuple value carries (entity-name, hex-codepoint); choose the
    # numeric XML reference or the named HTML entity accordingly.
    if type(sub) == types.TupleType:
        if self.smartQuotesTo == 'xml':
            sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
        else:
            sub = '&'.encode() + sub[0].encode() + ';'.encode()
    else:
        sub = sub.encode()
    return sub
def _convertFrom(self, proposed):
    """Attempts to decode self.markup with the proposed encoding.
    Returns the Unicode markup on success, or None if the encoding
    is unknown, already tried, or fails to decode."""
    proposed = self.find_codec(proposed)
    if not proposed or proposed in self.triedEncodings:
        return None
    self.triedEncodings.append(proposed)
    markup = self.markup

    # Convert smart quotes to HTML if coming from an encoding
    # that might have them.
    if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                  "iso-8859-1",
                                                  "iso-8859-2"):
        smart_quotes_re = "([\x80-\x9f])"
        smart_quotes_compiled = re.compile(smart_quotes_re)
        markup = smart_quotes_compiled.sub(self._subMSChar, markup)

    try:
        # print "Trying to convert document to %s" % proposed
        u = self._toUnicode(markup, proposed)
        self.markup = u
        self.originalEncoding = proposed
    except Exception, e:
        # Decoding failed; signal the caller to try the next encoding.
        # print "That didn't work!"
        # print e
        return None
    #print "Correct encoding: %s" % proposed
    return self.markup
def _toUnicode(self, data, encoding):
    '''Given a string and its encoding, decodes the string into Unicode.
    %encoding is a string recognized by encodings.aliases.

    If the data carries a byte order mark, the BOM is stripped and the
    encoding it implies overrides the caller-supplied one.'''
    # strip Byte Order Mark (if present)
    # The first two branches require >= 4 bytes and check bytes 2-3 so
    # that a UTF-32 BOM (\xff\xfe\x00\x00 / \x00\x00\xfe\xff) is not
    # mistaken for a UTF-16 one.
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
           and (data[2:4] != '\x00\x00'):
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
             and (data[2:4] != '\x00\x00'):
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        encoding = 'utf-32le'
        data = data[4:]
    newdata = unicode(data, encoding)
    return newdata
def _detectEncoding(self, xml_data, isHTML=False):
    """Given a document, tries to detect its XML encoding.

    Returns a (data, declared_encoding, sniffed_encoding) triple. The
    sniffed encoding comes from byte order marks or characteristic
    byte patterns at the start of the data; the declared encoding is
    read from the XML declaration (or, for HTML, a <meta charset>
    tag). When the data is sniffed as UTF-16/32, the bytes are
    re-encoded to UTF-8 before being returned.
    """
    xml_encoding = sniffed_xml_encoding = None
    try:
        # Sniff BOMs and the byte patterns of '<?' in various encodings.
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = self._ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                 and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM (bytes 2-3 checked so a UTF-32 BOM
            # does not match here)
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                 (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            sniffed_xml_encoding = 'ascii'
            pass
    except:
        # Transcoding failed; fall through with the data as-is and only
        # the declared-encoding scan below.
        xml_encoding_match = None
    xml_encoding_re = '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode()
    xml_encoding_match = re.compile(xml_encoding_re).match(xml_data)
    if not xml_encoding_match and isHTML:
        meta_re = '<\s*meta[^>]+charset=([^>]*?)[;\'">]'.encode()
        regexp = re.compile(meta_re, re.I)
        xml_encoding_match = regexp.search(xml_data)
    if xml_encoding_match is not None:
        xml_encoding = xml_encoding_match.groups()[0].decode(
            'ascii').lower()
        if isHTML:
            self.declaredHTMLEncoding = xml_encoding
        # A BOM outranks a generic multi-byte declaration: if the
        # document claims one of these endianness-ambiguous names, the
        # sniffed encoding wins.
        if sniffed_xml_encoding and \
           (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                             'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                             'utf-16', 'utf-32', 'utf_16', 'utf_32',
                             'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
    """Maps a declared charset name to a name Python's codecs accept.

    Tries, in order: the CHARSET_ALIASES table, the raw name, the name
    with dashes removed, and the name with dashes turned into
    underscores. Falls back to the original charset value when nothing
    resolves to a known codec.
    """
    codec = self._codec(self.CHARSET_ALIASES.get(charset, charset))
    if codec:
        return codec
    if charset:
        for candidate in (charset.replace("-", ""),
                          charset.replace("-", "_")):
            codec = self._codec(candidate)
            if codec:
                return codec
    return charset
def _codec(self, charset):
    """Returns charset unchanged if Python has a codec for it, else None.

    A falsy charset (None or '') is passed straight back to the caller.
    """
    if not charset:
        return charset
    try:
        codecs.lookup(charset)
    except (LookupError, ValueError):
        # Unknown or malformed codec name.
        return None
    return charset
# Lazily-built 256-byte translation table from EBCDIC to ASCII; filled
# in on first use by _ebcdic_to_ascii and shared by all instances.
EBCDIC_TO_ASCII_MAP = None

def _ebcdic_to_ascii(self, s):
    """Translates an EBCDIC byte string to its ASCII equivalent."""
    c = self.__class__
    if not c.EBCDIC_TO_ASCII_MAP:
        # emap[i] is the ASCII byte value for EBCDIC byte value i.
        emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                250,251,252,253,254,255)
        import string
        c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(c.EBCDIC_TO_ASCII_MAP)
# Map from Windows-1252 "smart" punctuation bytes (\x80-\x9f) to their
# replacements. Values are either an (html-entity-name, hex-codepoint)
# tuple, or a literal fallback string for bytes with no mapping; see
# _subMSChar for how each form is substituted.
MS_CHARS = { '\x80' : ('euro', '20AC'),
             '\x81' : ' ',
             '\x82' : ('sbquo', '201A'),
             '\x83' : ('fnof', '192'),
             '\x84' : ('bdquo', '201E'),
             '\x85' : ('hellip', '2026'),
             '\x86' : ('dagger', '2020'),
             '\x87' : ('Dagger', '2021'),
             '\x88' : ('circ', '2C6'),
             '\x89' : ('permil', '2030'),
             '\x8A' : ('Scaron', '160'),
             '\x8B' : ('lsaquo', '2039'),
             '\x8C' : ('OElig', '152'),
             '\x8D' : '?',
             '\x8E' : ('#x17D', '17D'),
             '\x8F' : '?',
             '\x90' : '?',
             '\x91' : ('lsquo', '2018'),
             '\x92' : ('rsquo', '2019'),
             '\x93' : ('ldquo', '201C'),
             '\x94' : ('rdquo', '201D'),
             '\x95' : ('bull', '2022'),
             '\x96' : ('ndash', '2013'),
             '\x97' : ('mdash', '2014'),
             '\x98' : ('tilde', '2DC'),
             '\x99' : ('trade', '2122'),
             '\x9a' : ('scaron', '161'),
             '\x9b' : ('rsaquo', '203A'),
             '\x9c' : ('oelig', '153'),
             '\x9d' : '?',
             '\x9e' : ('#x17E', '17E'),
             '\x9f' : ('Yuml', ''),}
#######################################################################
# By default, act as an HTML pretty-printer: read a document on stdin
# and write a re-indented version of it to stdout.
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
FRCParser.py
Created by Andrew Schreiber on 2009-10-17.
"""
from BeautifulSoup import BeautifulSoup
import re
import string
# Whitelist of tags kept by sanitize_html; everything else is hidden so
# the table-parsing code only ever sees table structure.
VALID_TAGS = ['table', 'tr', 'td']
#content is the contents, as a string.
#all other options merely pass them through to the parse_table function.
def parse(content, indexCol='Match', skipTitle=False, asInt=False):
    """Parses every <table> in the given HTML string.

    Args:
        content: the page contents, as a string.
        indexCol: name of the column used to key each row; passed
            through to parse_table.
        skipTitle: whether tables lack a title row; passed through.
        asInt: whether to cast the index column to int; passed through.

    Returns:
        A dict mapping each table's title to its parsed row dict.
        Tables that fail to parse are skipped.
    """
    # '!' is markup noise in the scraped pages; a plain string replace
    # is sufficient (no regex needed).
    content = content.replace("!", "")
    data = {}
    for position, table in enumerate(sanitize_html(content).findAll('table')):
        try:
            # The table's position doubles as its custom title when the
            # table has no title row of its own.
            title, rows = parse_table(table, indexCol, skipTitle, position,
                                      asInt)
            data[title] = rows
        except Exception:
            # Best-effort: a malformed table is skipped rather than
            # aborting the page. (Unlike the old bare 'except:', this
            # no longer swallows KeyboardInterrupt/SystemExit.)
            pass
    return data
# Return an associative array of each element in the table.
# tData is the table data; it is actually a BeautifulSoup node.
# indexCol names the column used to key rows; if None, ints are used.
# skipTitle tells the parser the table has no title row (e.g. Awards
# has no title but Qualification Schedule does).
# customtitle lets you supply the table's title when skipTitle is set.
# asInt tells us to cast the indexCol value to an int.
def parse_table(tData,indexCol=None,skipTitle = False, customtitle='Unknown',asInt=False):
    """Parses one <table> node into (title, {row_key: {col_name: text}})."""
    column_names = {}
    tableData = {}
    # First row is assumed to hold the title; second row the headers.
    title = array_to_string(tData.tr.td.contents)
    headercolumns = tData.tr.findNextSibling()
    if(skipTitle):
        # No title row: the first row is the header row and the caller
        # supplies the title.
        headercolumns = tData.findAll('tr')[0]
        title= customtitle
    i = 0
    for col in headercolumns.findAll('td'):
        column_names[i] = array_to_string(col.contents)
        i=i+1
    # Walk the remaining sibling rows, turning each into a dict keyed
    # by column name.
    datacol = headercolumns.findNextSibling()
    index = 0
    while True:
        if (datacol == None):
            break
        column = {}
        i = 0
        for col in datacol.findAll('td'):
            column[column_names[i]] = array_to_string(col.contents)
            i=i+1
        if(indexCol != None):
            if asInt == False:
                tableData[column[indexCol]] = column
            else:
                tableData[int(column[indexCol])] = column
        else:
            # No index column requested: key rows by position.
            tableData[index] = column
        datacol = datacol.findNextSibling()
        index = index + 1
    return title,tableData
# Removes all the tags we don't want to see.
def sanitize_html(value):
    """Parses value into a soup, hiding every tag not in VALID_TAGS."""
    soup = BeautifulSoup(value)
    unwanted = [node for node in soup.findAll(True)
                if node.name not in VALID_TAGS]
    for node in unwanted:
        node.hidden = True
    return soup
# strips off surrounding blanks
def remove_blank_lines(value):
    """Strips leading and trailing whitespace from value.

    Uses the str method instead of the deprecated module function
    string.strip (removed in Python 3); this also sidesteps the fact
    that the name 'string' is shadowed by a local in array_to_string.
    """
    return value.strip()
# puts the array of things into a nice clean string; makes stuff look pretty.
def array_to_string(value):
    """Joins a list of BeautifulSoup nodes into one stripped string.

    Each element is str()-ed and concatenated; the result is passed
    through remove_blank_lines to trim surrounding whitespace.
    """
    # ''.join avoids the quadratic '+=' concatenation of the original
    # and no longer shadows the imported 'string' module with a local.
    joined = ''.join(str(row) for row in value)
    return remove_blank_lines(joined)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCB-provided custom tags."""
__author__ = 'John Orr (jorr@google.com)', 'Aparna Kadakia (akadakia@google.com)'
import urllib
import urlparse
from common import schema_fields
from common import tags
from models import courses
from xml.etree import cElementTree
def _escape_url(url):
    """Escapes/quotes url parts to sane user input; force https.

    Splits the URL, rewrites the scheme to https, percent-quotes the
    path and query components, and reassembles the result.
    """
    scheme, netloc, path, query, unused_fragment = urlparse.urlsplit(url)
    scheme = 'https'
    path = urllib.quote(path)
    # '=?&;' stay literal so existing query parameters survive quoting.
    query = urllib.quote_plus(query, '=?&;')
    return urlparse.urlunsplit((scheme, netloc, path, query, unused_fragment))
class GoogleDoc(tags.BaseTag):
    """Custom tag that embeds a published Google Doc in an iframe."""

    @classmethod
    def name(cls):
        return 'Google Doc'

    def render(self, node):
        """Builds the <iframe> element for the doc referenced by the tag."""
        doc_height = node.attrib.get('height') or '300'
        doc_link = node.attrib.get('link')
        embed_url = _escape_url('%s?embedded=true' % doc_link)
        frame = cElementTree.XML("""
<iframe class="google-doc" title="Google Doc" type="text/html" frameborder="0">
</iframe>""")
        frame.set('src', embed_url)
        frame.set('style', 'width: %spx; height: %spx' % (700, doc_height))
        return frame

    def get_icon_url(self):
        """Icon shown for this tag in the rich text editor."""
        return '/extensions/tags/gcb/resources/docs.png'

    def get_schema(self, unused_handler):
        """Editor form: the published-document link and the iframe height."""
        reg = schema_fields.FieldRegistry(GoogleDoc.name())
        # To get this value, users do File > Publish to the web..., click
        # 'Start publishing', and then copy and paste the Document link.
        # Changes to the publication status of a document or to its
        # contents do not appear instantly.
        reg.add_property(schema_fields.SchemaField(
            'link', 'Document Link', 'string',
            optional=True,
            description=('Provide the "Document Link" from the Google Docs '
                         '"Publish to the web" dialog')))
        reg.add_property(schema_fields.SchemaField(
            'height', 'Height', 'string',
            optional=True,
            extra_schema_dict_values={'value': '300'},
            description=('Height of the document, in pixels. Width will be '
                         'set automatically')))
        return reg
class GoogleSpreadsheet(tags.BaseTag):
    """Custom tag that embeds a published Google Spreadsheet in an iframe."""

    @classmethod
    def name(cls):
        return 'Google Spreadsheet'

    def render(self, node):
        """Builds the <iframe> element for the referenced spreadsheet."""
        sheet_height = node.attrib.get('height') or '300'
        sheet_link = node.attrib.get('link')
        # Drop any '&output=...' suffix from the published link, then
        # request the chromeless view.
        base_link = sheet_link.split('&output')[0]
        embed_url = _escape_url('%s&chrome=false' % base_link)
        frame = cElementTree.XML("""
<iframe class="google-spreadsheet" title="Google Spreadsheet" type="text/html"
frameborder="0">
</iframe>""")
        frame.set('src', embed_url)
        frame.set('style', 'width: %spx; height: %spx' % (700, sheet_height))
        return frame

    def get_icon_url(self):
        """Icon shown for this tag in the rich text editor."""
        return '/extensions/tags/gcb/resources/spreadsheets.png'

    def get_schema(self, unused_handler):
        """Editor form: the published-sheet link and the iframe height."""
        reg = schema_fields.FieldRegistry(GoogleSpreadsheet.name())
        # To get this value, users do File > Publish to the web..., click
        # 'Start publishing', and then copy and paste the link shown.
        # Changes to the publication status of a document or to its
        # contents do not appear instantly.
        reg.add_property(schema_fields.SchemaField(
            'link', 'Link', 'string',
            optional=True,
            description=('Provide the link from the Google Spreadsheets '
                         '"Publish to the web" dialog')))
        reg.add_property(schema_fields.SchemaField(
            'height', 'Height', 'string',
            optional=True,
            extra_schema_dict_values={'value': '300'},
            description=('Height of the spreadsheet, in pixels. Width will '
                         'be set automatically')))
        return reg
class YouTube(tags.BaseTag):
    """Custom tag embedding a YouTube video player."""

    @classmethod
    def name(cls):
        return 'YouTube Video'

    def render(self, node):
        """Builds the container markup for an embedded YouTube player."""
        video_id = node.attrib.get('videoid')
        embed_url = (
            'https://www.youtube.com/embed/%s'
            '?feature=player_embedded&rel=0') % video_id
        container = cElementTree.XML("""
<p class="gcb-video-container">
<iframe class="youtube-player" title="YouTube Video Player"
type="text/html" width="650" height="400" frameborder="0"
allowfullscreen="allowfullscreen">
</iframe>
</p>""")
        # The iframe is the container's first (only) child.
        container[0].set('src', embed_url)
        return container

    def get_icon_url(self):
        """Returns the URL for the icon displayed in the rich text editor.

        Images should be placed in a folder called 'resources' inside the
        main package for the tag definitions.
        """
        return '/extensions/tags/gcb/resources/youtube.png'

    def get_schema(self, unused_handler):
        """Returns the fields displayed when the tag is edited.

        The fields are SchemaField objects in a FieldRegistry container;
        each carries the attribute name as used in the tag, the display
        name for the form, and the type.
        """
        reg = schema_fields.FieldRegistry(YouTube.name())
        reg.add_property(schema_fields.SchemaField(
            'videoid', 'Video Id', 'string',
            optional=True,
            description='Provide YouTube video ID (e.g. Kdg2drcUjYI)'))
        return reg
class GoogleGroup(tags.BaseTag):
    """Custom tag embedding a Google Groups forum category."""

    @classmethod
    def name(cls):
        return 'Google Group'

    def render(self, node):
        """Builds the container markup for an embedded forum view."""
        group_name = node.attrib.get('group')
        category_name = node.attrib.get('category')
        forum_url = (
            'https://groups.google.com/forum/embed/?place=forum/?'
            'fromgroups&hl=en#!categories/%s/%s') % (
                urllib.quote(group_name), urllib.quote(category_name))
        container = cElementTree.XML("""
<p>
<iframe class="forum-embed" title="Google Group Embed"
type="text/html" width="700" height="300" frameborder="0">
</iframe>
</p>""")
        # The iframe is the container's first (only) child.
        container[0].set('src', forum_url)
        return container

    def get_icon_url(self):
        """Icon shown for this tag in the rich text editor."""
        return '/extensions/tags/gcb/resources/forumembed.png'

    def get_schema(self, unused_handler):
        """Editor form: the group name and the category name."""
        reg = schema_fields.FieldRegistry(GoogleGroup.name())
        reg.add_property(schema_fields.SchemaField(
            'group', 'Group Name', 'string', optional=True,
            description='Name of the Google Group (e.g. mapping-with-google)'))
        reg.add_property(schema_fields.SchemaField(
            'category', 'Category Name', 'string', optional=True,
            description='Name of the Category (e.g. unit5-2-annotation)'))
        return reg
class Activity(tags.BaseTag):
    """Custom tag embedding a JavaScript activity inside a lesson."""

    def render(self, node):
        """Builds a <script> loader plus a container div for the activity."""
        activity_id = node.attrib.get('activityid')
        script = cElementTree.XML("""
<div>
<script></script>
<div style="width: 785px;" id="activityContents"></div>
</div>""")
        # Point the script tag at the activity file under assets/js/.
        script[0].set('src', 'assets/js/%s' % activity_id)
        return script

    def get_icon_url(self):
        """Icon shown for this tag in the rich text editor."""
        return '/extensions/tags/gcb/resources/activity.png'

    def get_schema(self, handler):
        """Builds a select field listing the course's activity files."""
        course = courses.Course(handler)
        lesson_id = handler.request.get('lesson_id')
        activity_list = []
        for unit in course.get_units():
            for lesson in course.get_lessons(unit.unit_id):
                filename = 'activity-%s.js' % lesson.lesson_id
                if lesson.has_activity:
                    if lesson.activity_title:
                        title = lesson.activity_title
                    else:
                        title = filename
                    name = '%s - %s (%s) ' % (unit.title, lesson.title, title)
                    activity_list.append((filename, name))
                elif str(lesson.lesson_id) == lesson_id:
                    # NOTE(review): the lesson being edited is offered even
                    # when it has no activity yet — presumably so one can be
                    # attached; confirm against the editor flow.
                    name = 'Current Lesson (%s)' % filename
                    activity_list.append((filename, name))
        reg = schema_fields.FieldRegistry('Activity')
        reg.add_property(
            schema_fields.SchemaField(
                'activityid', 'Activity Id', 'select', optional=True,
                select_data=activity_list,
                description=(
                    'The ID of the activity (e.g. activity-2.4.js). '
                    'Note /assets/js/ is not required')))
        return reg
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic object editor view that uses REST services."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
import urllib
import appengine_config
from common import jinja_filters
from common import schema_fields
from common import tags
from controllers import utils
import jinja2
from models import custom_modules
from models import transforms
import webapp2
# a set of YUI and inputex modules required by the editor in all cases
COMMON_REQUIRED_MODULES = [
    'inputex-group', 'inputex-form', 'inputex-jsonschema']

# the full set of optional editor modules; used as the default when a
# caller does not pass its own required_modules to get_html_for
ALL_MODULES = [
    'querystring-stringify-simple', 'inputex-select', 'inputex-string',
    'inputex-radio', 'inputex-date', 'inputex-datepicker', 'inputex-checkbox',
    'inputex-list', 'inputex-color', 'gcb-rte', 'inputex-textarea',
    'inputex-url', 'inputex-uneditable', 'inputex-integer', 'inputex-hidden',
    'inputex-file', 'io-upload-iframe']
class ObjectEditor(object):
    """Generic object editor powered by jsonschema."""

    @classmethod
    def get_html_for(
        cls, handler, schema_json, annotations, object_key,
        rest_url, exit_url,
        extra_args=None,
        save_method='put',
        delete_url=None, delete_method='post',
        auto_return=False, read_only=False,
        required_modules=None,
        extra_js_files=None,
        save_button_caption='Save',
        exit_button_caption='Close'):
        """Creates an HTML code needed to embed and operate this form.

        This method creates an HTML, JS and CSS required to embed JSON
        schema-based object editor into a view.

        Args:
            handler: a BaseHandler class, which will host this HTML, JS and CSS
            schema_json: a text of JSON schema for the object being edited
            annotations: schema annotations dictionary
            object_key: a key of an object being edited
            rest_url: a REST endpoint for object GET/PUT operation
            exit_url: a URL to go to after the editor form is dismissed
            extra_args: extra request params passed back in GET and POST
            save_method: how the data should be saved to the server (put|upload)
            delete_url: optional URL for delete operation
            delete_method: optional HTTP method for delete operation
            auto_return: whether to return to the exit_url on successful save
            read_only: optional flag; if set, removes Save and Delete operations
            required_modules: list of inputex modules required for this editor
            extra_js_files: list of extra JS files to be included
            save_button_caption: a caption for the 'Save' button
            exit_button_caption: a caption for the 'Close' button

        Returns:
            The HTML, JS and CSS text that will instantiate an object editor.
        """
        required_modules = required_modules or ALL_MODULES
        # extract label from the schema's description, with a fallback
        type_label = transforms.loads(schema_json).get('description')
        if not type_label:
            type_label = 'Generic Object'
        # construct GET/POST parameters; both address the same REST endpoint
        get_url = rest_url
        get_args = {'key': object_key}
        post_url = rest_url
        post_args = {'key': object_key}
        if extra_args:
            get_args.update(extra_args)
            post_args.update(extra_args)
        if read_only:
            # No save target at all in read-only mode.
            post_url = ''
            post_args = ''
        # Collect toolbar icons for every registered custom RTE tag.
        custom_rte_tag_icons = []
        for tag, tag_class in tags.get_tag_bindings().items():
            custom_rte_tag_icons.append({
                'name': tag,
                'iconUrl': tag_class().get_icon_url()})
        template_values = {
            'enabled': custom_module.enabled,
            'schema': schema_json,
            'type_label': type_label,
            'get_url': '%s?%s' % (get_url, urllib.urlencode(get_args, True)),
            'save_url': post_url,
            'save_args': transforms.dumps(post_args),
            'exit_button_caption': exit_button_caption,
            'exit_url': exit_url,
            'required_modules': COMMON_REQUIRED_MODULES + required_modules,
            'extra_js_files': extra_js_files or [],
            'schema_annotations': [
                (item[0], transforms.dumps(item[1])) for item in annotations],
            'save_method': save_method,
            'auto_return': auto_return,
            'save_button_caption': save_button_caption,
            'custom_rte_tag_icons': transforms.dumps(custom_rte_tag_icons)
        }
        if delete_url and not read_only:
            template_values['delete_url'] = delete_url
        if delete_method:
            template_values['delete_method'] = delete_method
        if appengine_config.BUNDLE_LIB_FILES:
            template_values['bundle_lib_files'] = True
        return jinja2.utils.Markup(handler.get_template(
            'oeditor.html', [os.path.dirname(__file__)]
        ).render(template_values))
class PopupHandler(webapp2.RequestHandler, utils.ReflectiveRequestHandler):
    """A handler to serve the content of the popup subeditor."""

    default_action = 'custom_tag'
    get_actions = ['edit_custom_tag', 'add_custom_tag']
    post_actions = []

    def get_template(self, template_name, dirs):
        """Sets up an environment and Gets jinja template."""
        jinja_environment = jinja2.Environment(
            autoescape=True, finalize=jinja_filters.finalize,
            loader=jinja2.FileSystemLoader(dirs + [os.path.dirname(__file__)]))
        jinja_environment.filters['js_string'] = jinja_filters.js_string
        return jinja_environment.get_template(template_name)

    def get_edit_custom_tag(self):
        """Return the page used to edit a custom HTML tag in a popup."""
        tag_name = self.request.get('tag_name')
        tag_bindings = tags.get_tag_bindings()
        tag_class = tag_bindings[tag_name]
        schema = tag_class().get_schema(self)
        if schema.has_subregistries():
            # Nested registries are not representable in the popup form.
            raise NotImplementedError()
        template_values = {}
        template_values['form_html'] = ObjectEditor.get_html_for(
            self, schema.get_json_schema(), schema.get_schema_dict(), None,
            None, None)
        self.response.out.write(
            self.get_template('popup.html', []).render(template_values))

    def get_add_custom_tag(self):
        """Return the page for the popup used to add a custom HTML tag."""
        tag_name = self.request.get('tag_name')
        tag_bindings = tags.get_tag_bindings()
        # Build the component-type dropdown, sorted by display name.
        select_data = []
        for name in tag_bindings.keys():
            clazz = tag_bindings[name]
            select_data.append((name, '%s: %s' % (
                clazz.vendor(), clazz.name())))
        select_data = sorted(select_data, key=lambda pair: pair[1])
        # Default to the requested tag, or the first one alphabetically.
        if tag_name:
            tag_class = tag_bindings[tag_name]
        else:
            tag_class = tag_bindings[select_data[0][0]]
        tag_schema = tag_class().get_schema(self)
        schema = schema_fields.FieldRegistry('Add a Component')
        type_select = schema.add_sub_registry('type', 'Component Type')
        type_select.add_property(schema_fields.SchemaField(
            'tag', 'Name', 'select', select_data=select_data))
        schema.add_sub_registry('attributes', registry=tag_schema)
        template_values = {}
        template_values['form_html'] = ObjectEditor.get_html_for(
            self, schema.get_json_schema(), schema.get_schema_dict(), None,
            None, None, extra_js_files=['add_custom_tag.js'])
        self.response.out.write(
            self.get_template('popup.html', []).render(template_values))
def create_bool_select_annotation(
    keys_list, label, true_label, false_label, class_name=None,
    description=None):
    """Creates an inputex annotation rendering a bool as a select box.

    Args:
        keys_list: list of schema keys the annotation applies to.
        label: display label for the field.
        true_label: text shown for the True choice.
        false_label: text shown for the False choice.
        class_name: optional CSS class name for the field.
        description: optional help text for the field.

    Returns:
        A (keys_list, inputex_dict) annotation tuple.
    """
    choices = [{'value': True, 'label': true_label},
               {'value': False, 'label': false_label}]
    properties = {'label': label, 'choices': choices}
    # Optional attributes are added only when truthy, matching inputex
    # expectations.
    for key, value in (('className', class_name),
                       ('description', description)):
        if value:
            properties[key] = value
    return (keys_list, {'type': 'select', '_inputex': properties})
# None or custom_modules.Module; set by register_module at startup.
custom_module = None


def register_module():
    """Registers this module in the registry."""
    from controllers import sites  # pylint: disable-msg=g-import-not-at-top

    # Static handlers that serve the bundled YUI/inputex libraries
    # straight out of their zip archives.
    yui_handlers = [
        ('/static/inputex-3.1.0/(.*)', sites.make_zip_handler(
            os.path.join(
                appengine_config.BUNDLE_ROOT, 'lib/inputex-3.1.0.zip'))),
        ('/static/yui_3.6.0/(.*)', sites.make_zip_handler(
            os.path.join(
                appengine_config.BUNDLE_ROOT, 'lib/yui_3.6.0.zip'))),
        ('/static/2in3/(.*)', sites.make_zip_handler(
            os.path.join(
                appengine_config.BUNDLE_ROOT, 'lib/yui_2in3-2.9.0.zip')))]
    if appengine_config.BUNDLE_LIB_FILES:
        # Combined-CSS endpoints used when serving bundled library files.
        yui_handlers += [
            ('/static/combo/inputex', sites.make_css_combo_zip_handler(
                os.path.join(
                    appengine_config.BUNDLE_ROOT, 'lib/inputex-3.1.0.zip'),
                '/static/inputex-3.1.0/')),
            ('/static/combo/yui', sites.make_css_combo_zip_handler(
                os.path.join(appengine_config.BUNDLE_ROOT, 'lib/yui_3.6.0.zip'),
                '/yui/')),
            ('/static/combo/2in3', sites.make_css_combo_zip_handler(
                os.path.join(
                    appengine_config.BUNDLE_ROOT, 'lib/yui_2in3-2.9.0.zip'),
                '/static/2in3/'))]
    oeditor_handlers = [('/oeditorpopup', PopupHandler)]
    global custom_module
    custom_module = custom_modules.Module(
        'Object Editor',
        'A visual editor for editing various types of objects.',
        yui_handlers, oeditor_handlers)
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oauth2 module implementation.
In order to use this module with your app you must enable it in main.py by
changing
modules.oauth2.oauth2.register_module()
to
modules.oauth2.oauth2.register_module().enable()
Additionally, you must:
1. Visit https://code.google.com/apis/console. Click on API Access and create a
client id for your web app with redirect URI set to:
https://<appid>.appspot|googleplex.com/<callback_uri>
and optionally include
http://localhost:<port>/<callback_uri>
where <appid> is your app id, <callback_uri> is the oauth2 callback URI you'd
like to use, and <port> is the port you'd like to use for localhost. You can
set <port> and <callback_uri> to basically whatever you want as long as they
are unique.
2. Once you've created the client id, click Download JSON. Take the file you get
and overwrite client_secrets.json in this directory.
3. In https://code.google.com/apis/console, click on Services and enable the
services your app requires. For these demos, you'll need to enable Drive API
and Google+.
Whenever you change scopes you'll need to revoke your access tokens. You can do
this at https://accounts.google.com/b/0/IssuedAuthSubTokens.
You can find a list of the available APIs at
http://api-python-client-doc.appspot.com/.
Finally, a note about dependencies. Oauth2 requires google-api-python-client,
which you can find at https://code.google.com/p/google-api-python-client/. We
bundle version 1.1 with Course Builder. It requires httplib2, which you can find
at https://code.google.com/p/httplib2/. We bundle version 0.8 with Course
Builder.
It also requires python-gflags from https://code.google.com/p/python-gflags/. We
bundle 2.0 with Course Builder, and we've repackaged the downloadable .tar.gz as
a .zip so Python can load its contents directly from sys.path.
Good luck!
"""
__author__ = [
'johncox@google.com (John Cox)',
]
import os
import traceback
from apiclient import discovery
from common import safe_dom
from models import custom_modules
from oauth2client import appengine
import webapp2
# In real life we'd check in a blank file and set up the code to error with a
# message pointing people to https://code.google.com/apis/console.
# Path of the oauth2 client configuration bundled next to this module.
_CLIENTSECRETS_JSON_PATH = os.path.join(
    os.path.dirname(__file__), 'client_secrets.json')
class _ErrorDecorator(object):
    """Stand-in used when a real oauth2 decorator cannot be created.

    Most often this is because there is no valid client_secrets.json.
    Wrapped handlers become a no-op, or, if an error message was
    supplied, a page that displays the error with a 500 status.
    """

    def __init__(self, **kwargs):
        self.callback_path = 'not_enabled'
        self.error = kwargs.pop('error', '')

    def callback_handler(self):
        """Stub for API compatibility."""
        pass

    def oauth_required(self, unused_method):
        """Replaces the wrapped handler with one that 500s with the error."""

        def print_error_and_return_500(
                request_handler, *unused_args, **unused_kwargs):
            message = safe_dom.NodeList()
            message.append(
                safe_dom.Element('h1').add_text('500 internal server error'))
            message.append(safe_dom.Element('pre').add_text(self.error))
            request_handler.response.write(message.sanitized)
            request_handler.response.status = 500

        return print_error_and_return_500
# In real life we'd want to make one decorator per service because we wouldn't
# want users to have to give so many permissions.
def _build_decorator():
    """Builds a decorator for using oauth2 with webapp2.RequestHandlers.

    Returns:
        An oauth2 decorator built from client_secrets.json, or an
        _ErrorDecorator that displays the load failure if that file
        cannot be read or parsed.
    """
    try:
        return appengine.oauth2decorator_from_clientsecrets(
            _CLIENTSECRETS_JSON_PATH,
            scope=[
                'https://www.googleapis.com/auth/drive.readonly',
                'https://www.googleapis.com/auth/plus.login',
                'https://www.googleapis.com/auth/userinfo.email',
                'https://www.googleapis.com/auth/userinfo.profile',
            ],
            message='client_secrets.json missing')
    # Deliberately catch everything. pylint: disable-msg=broad-except
    except Exception:
        # Bug fix: traceback.format_exc takes an optional *limit* int,
        # not the exception object; it always formats the exception
        # currently being handled, so it needs no argument here.
        display_error = (
            'oauth2 module enabled, but unable to load client_secrets.json. '
            'See docs in modules/oauth2.py. Original exception was:\n\n%s') % (
                traceback.format_exc())
        return _ErrorDecorator(error=display_error)
# Module-level decorator shared by all handlers below; built once at
# import time.
_DECORATOR = _build_decorator()
class ServiceHandler(webapp2.RequestHandler):
    """Base handler that can build authorized Google API clients."""

    def build_service(self, oauth2_decorator, name, version):
        """Returns a discovery-built client for the named API and version."""
        authorized_http = oauth2_decorator.credentials.authorize(
            oauth2_decorator.http())
        return discovery.build(name, version, http=authorized_http)
class GoogleDriveHandler(ServiceHandler):
    """Demo: reports the current user's display name via the Drive API."""

    @_DECORATOR.oauth_required
    def get(self):
        drive = self.build_service(_DECORATOR, 'drive', 'v2')
        about_info = drive.about().get().execute()
        self.response.write(
            'Drive sees you as ' + about_info['user']['displayName'])
class GoogleOauth2Handler(ServiceHandler):
    """Demo: reports the current user's name via the Oauth2 userinfo API."""

    @_DECORATOR.oauth_required
    def get(self):
        service = self.build_service(_DECORATOR, 'oauth2', 'v2')
        info = service.userinfo().get().execute()
        self.response.write('Oauth2 sees you as ' + info['name'])
class GooglePlusHandler(ServiceHandler):
    """Demo: reports the current user's display name via the Google+ API."""

    @_DECORATOR.oauth_required
    def get(self):
        plus = self.build_service(_DECORATOR, 'plus', 'v1')
        # This call will barf if you're logged in as @google.com because
        # your profile will not be fetchable. Log in as @gmail.com and
        # you'll be fine.
        profile = plus.people().get(userId='me').execute()
        self.response.write('Plus sees you as ' + profile['displayName'])
# None or custom_modules.Module. Placeholder for the module created by
# register_module.
module = None


def register_module():
    """Adds this module to the registry."""
    global module
    # Demo routes plus the oauth2 redirect route the decorator requires.
    handlers = [
        ('/oauth2_google_drive', GoogleDriveHandler),
        ('/oauth2_google_oauth2', GoogleOauth2Handler),
        ('/oauth2_google_plus', GooglePlusHandler),
        (_DECORATOR.callback_path, _DECORATOR.callback_handler()),
    ]
    module = custom_modules.Module('Oauth2', 'Oauth2 pages', handlers, [])
    return module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the admin panel."""
__author__ = 'John Orr (jorr@google.com)'
from common import safe_dom
def assemble_sanitized_message(text, link):
    """Builds a safe_dom NodeList: optional text plus a 'Learn more...' link.

    Args:
        text: optional plain text shown before the link; skipped when falsy.
        link: optional URL for a new-window 'Learn more...' anchor.

    Returns:
        A safe_dom.NodeList holding the assembled nodes.
    """
    parts = safe_dom.NodeList()
    if text:
        parts.append(safe_dom.Text(text))
    if link:
        anchor = safe_dom.Element('a', href=link, target='_blank')
        parts.append(anchor.add_text('Learn more...'))
    return parts
# Blurb for the admin "Courses" page: link-only, no leading text.
COURSES_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/CreateNewCourse')

# Blurb for the admin "Deployment" page: explanatory text plus a link.
DEPLOYMENT_DESCRIPTION = assemble_sanitized_message("""
These deployment settings are configurable by editing the Course Builder code
before uploading it to Google App Engine.
""", 'https://code.google.com/p/course-builder/wiki/AdminPage')

# Blurb for the admin "Metrics" page: link-only.
METRICS_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/AdminPage')

# Blurb for the admin "Settings" page: link-only.
SETTINGS_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/AdminPage')
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting configuration property editor and REST operations."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import cgi
import urllib
from controllers import sites
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import config
from models import courses
from models import models
from models import roles
from models import transforms
from modules.oeditor import oeditor
from google.appengine.api import users
from google.appengine.ext import db
# This is a template because the value type is not yet known.
SCHEMA_JSON_TEMPLATE = """
{
"id": "Configuration Property",
"type": "object",
"description": "Configuration Property Override",
"properties": {
"name" : {"type": "string"},
"value": {"optional": true, "type": "%s"},
"is_draft": {"type": "boolean"}
}
}
"""
# This is a template because the doc_string is not yet known;
# ConfigPropertyEditor.get_schema_annotations() copies this list and appends
# the 'value' field annotation carrying the property's documentation.
SCHEMA_ANNOTATIONS_TEMPLATE = [
    (['title'], 'Configuration Property Override'),
    (['properties', 'name', '_inputex'], {
        'label': 'Name', '_type': 'uneditable'}),
    oeditor.create_bool_select_annotation(
        ['properties', 'is_draft'], 'Status', 'Pending', 'Active',
        description='<strong>Active</strong>: This value is active and '
        'overrides all other defaults.<br/><strong>Pending</strong>: This '
        'value is not active yet, and the default settings still apply.')]
class ConfigPropertyRights(object):
    """Manages view/edit rights for configuration properties."""

    @classmethod
    def can_edit(cls):
        # Only full site admins may work with property overrides; every
        # other permission below delegates to this check.
        return roles.Roles.is_super_admin()

    @classmethod
    def can_view(cls):
        return cls.can_edit()

    @classmethod
    def can_add(cls):
        return cls.can_edit()

    @classmethod
    def can_delete(cls):
        return cls.can_edit()
class ConfigPropertyEditor(object):
    """An editor for any configuration property.

    Mixed into the admin request handler; the get_*/post_* methods below are
    dispatched as admin page actions.
    """

    # Map of configuration property type into inputex type.
    type_map = {str: 'string', int: 'integer', bool: 'boolean'}

    @classmethod
    def get_schema_annotations(cls, config_property):
        """Gets editor specific schema annotations."""
        doc_string = '%s Default: \'%s\'.' % (
            config_property.doc_string, config_property.default_value)
        # Copy the shared template so we never mutate the module-level list.
        item_dict = [] + SCHEMA_ANNOTATIONS_TEMPLATE
        item_dict.append((
            ['properties', 'value', '_inputex'], {
                'label': 'Value', '_type': '%s' % cls.get_value_type(
                    config_property),
                'description': doc_string}))
        return item_dict

    @classmethod
    def get_value_type(cls, config_property):
        """Gets an editor specific type for the property.

        Raises:
            Exception: if the property's value type is not in type_map.
        """
        # Bug fix: use .get() so an unsupported type reaches the explicit
        # error below instead of raising a bare KeyError on the lookup.
        value_type = cls.type_map.get(config_property.value_type)
        if not value_type:
            # Bug fix: the message was passed as a second argument instead of
            # being %-formatted into the string.
            raise Exception('Unknown type: %s' % config_property.value_type)
        # Multi-line string properties get a textarea instead of a text box.
        if config_property.value_type == str and config_property.multiline:
            return 'text'
        return value_type

    @classmethod
    def get_schema_json(cls, config_property):
        """Gets JSON schema for configuration property."""
        return SCHEMA_JSON_TEMPLATE % cls.get_value_type(config_property)

    def get_add_course(self):
        """Handles 'add_course' action and renders new course entry editor."""
        exit_url = '/admin?action=courses'
        rest_url = CoursesItemRESTHandler.URI

        template_values = {}
        template_values['page_title'] = 'Course Builder - Add Course'
        template_values['main_content'] = oeditor.ObjectEditor.get_html_for(
            self, CoursesItemRESTHandler.SCHEMA_JSON,
            CoursesItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            None, rest_url, exit_url,
            auto_return=True,
            save_button_caption='Add New Course')
        self.render_page(template_values)

    def get_config_edit(self):
        """Handles 'edit' property action."""
        key = self.request.get('name')
        if not key:
            self.redirect('/admin?action=settings')
            # Bug fix: redirect() only sets the response; without the return,
            # execution fell through and crashed on the lookup below.
            return

        # Bug fix: .get() instead of [] so an unknown key redirects rather
        # than raising KeyError.
        item = config.Registry.registered.get(key)
        if not item:
            self.redirect('/admin?action=settings')
            return

        template_values = {}
        template_values['page_title'] = 'Course Builder - Edit Settings'

        exit_url = '/admin?action=settings#%s' % cgi.escape(key)
        rest_url = '/rest/config/item'
        delete_url = '/admin?%s' % urllib.urlencode({
            'action': 'config_reset',
            'name': key,
            'xsrf_token': cgi.escape(self.create_xsrf_token('config_reset'))})

        template_values['main_content'] = oeditor.ObjectEditor.get_html_for(
            self, ConfigPropertyEditor.get_schema_json(item),
            ConfigPropertyEditor.get_schema_annotations(item),
            key, rest_url, exit_url, delete_url=delete_url)

        self.render_page(template_values)

    def post_config_override(self):
        """Handles 'override' property action."""
        name = self.request.get('name')

        # Find item in registry.
        item = None
        if name and name in config.Registry.registered.keys():
            item = config.Registry.registered[name]
        if not item:
            self.redirect('/admin?action=settings')
            # Bug fix: stop here; previously execution continued and created
            # an override entity for an unregistered property name.
            return

        # Add new entity if does not exist.
        try:
            entity = config.ConfigPropertyEntity.get_by_key_name(name)
        except db.BadKeyError:
            entity = None
        if not entity:
            entity = config.ConfigPropertyEntity(key_name=name)
            # Seed the override from the current value, pending activation.
            entity.value = str(item.value)
            entity.is_draft = True
            entity.put()

        # Audit trail of who overrode what.
        models.EventEntity.record(
            'override-property', users.get_current_user(), transforms.dumps({
                'name': name, 'value': str(entity.value)}))

        self.redirect('/admin?%s' % urllib.urlencode(
            {'action': 'config_edit', 'name': name}))

    def post_config_reset(self):
        """Handles 'reset' property action."""
        name = self.request.get('name')

        # Find item in registry.
        item = None
        if name and name in config.Registry.registered.keys():
            item = config.Registry.registered[name]
        if not item:
            self.redirect('/admin?action=settings')
            # Bug fix: stop here; previously execution continued and could
            # delete an entity for an unregistered property name.
            return

        # Delete if exists.
        try:
            entity = config.ConfigPropertyEntity.get_by_key_name(name)
            if entity:
                old_value = entity.value
                entity.delete()

                # Audit trail of the deleted override value.
                models.EventEntity.record(
                    'delete-property', users.get_current_user(),
                    transforms.dumps({
                        'name': name, 'value': str(old_value)}))
        except db.BadKeyError:
            pass

        self.redirect('/admin?action=settings')
class CoursesItemRESTHandler(BaseRESTHandler):
    """Provides REST API for course entries."""

    # Route where this handler is mounted; also referenced by the
    # add-course editor page.
    URI = '/rest/courses/item'

    # Editor schema for a new course entry.
    SCHEMA_JSON = """
    {
        "id": "Course Entry",
        "type": "object",
        "description": "Course Entry",
        "properties": {
            "name": {"type": "string"},
            "title": {"type": "string"},
            "admin_email": {"type": "string"}
            }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'New Course Entry'),
        (['properties', 'name', '_inputex'], {'label': 'Unique Name'}),
        (['properties', 'title', '_inputex'], {'label': 'Course Title'}),
        (['properties', 'admin_email', '_inputex'], {
            'label': 'Course Admin Email'})]

    def get(self):
        """Handles HTTP GET verb.

        Returns placeholder values that seed the new-course editor form,
        along with the XSRF token required by put().
        """
        if not ConfigPropertyRights.can_view():
            transforms.send_json_response(
                self, 401, 'Access denied.')
            return

        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict={
                'name': 'new_course',
                'title': 'My New Course',
                'admin_email': self.get_user().email()},
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'add-course-put'))

    def put(self):
        """Handles HTTP PUT verb.

        Creates a new course entry, then initializes its title and admin
        email; responds 412 with error text on any validation failure.
        """
        request = transforms.loads(self.request.get('request'))

        # XSRF is checked before rights so forged requests fail fast.
        if not self.assert_xsrf_token_or_fail(
                request, 'add-course-put', {}):
            return

        if not ConfigPropertyRights.can_edit():
            transforms.send_json_response(
                self, 401, 'Access denied.')
            return

        payload = request.get('payload')
        json_object = transforms.loads(payload)
        name = json_object.get('name')
        title = json_object.get('title')
        admin_email = json_object.get('admin_email')

        # Add the new course entry.
        errors = []
        entry = sites.add_new_course_entry(name, title, admin_email, errors)
        if not entry:
            errors.append('Error adding a new course entry.')
        if errors:
            # 412 carries the human-readable error list back to the editor.
            transforms.send_json_response(self, 412, '\n'.join(errors))
            return

        # We can't expect our new configuration being immediately available due
        # to datastore queries consistency limitations. So we will instantiate
        # our new course here and not use the normal sites.get_all_courses().
        app_context = sites.get_all_courses(entry)[0]

        # Update course with a new title and admin email.
        new_course = courses.Course(None, app_context=app_context)
        if not new_course.init_new_course_settings(title, admin_email):
            transforms.send_json_response(
                self, 412,
                'Added new course entry, but failed to update title and/or '
                'admin email. The course.yaml file already exists and must be '
                'updated manually.')
            return

        transforms.send_json_response(
            self, 200, 'Added.', {'entry': entry})
class ConfigPropertyItemRESTHandler(BaseRESTHandler):
    """Provides REST API for a configuration property."""

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        key = self.request.get('key')
        if not ConfigPropertyRights.can_view():
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        item = None
        if key and key in config.Registry.registered.keys():
            item = config.Registry.registered[key]
        if not item:
            self.redirect('/admin?action=settings')
            # Bug fix: redirect() only sets the response; without the return,
            # execution fell through and crashed on item.value_type below.
            return

        try:
            entity = config.ConfigPropertyEntity.get_by_key_name(key)
        except db.BadKeyError:
            entity = None
        if not entity:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
        else:
            # Stored values are strings; convert back to the declared type.
            entity_dict = {'name': key, 'is_draft': entity.is_draft}
            entity_dict['value'] = transforms.string_to_value(
                entity.value, item.value_type)
            json_payload = transforms.dict_to_json(
                entity_dict,
                transforms.loads(
                    ConfigPropertyEditor.get_schema_json(item)))
            transforms.send_json_response(
                self, 200, 'Success.',
                payload_dict=json_payload,
                xsrf_token=XsrfTokenManager.create_xsrf_token(
                    'config-property-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'config-property-put', {'key': key}):
            return

        if not ConfigPropertyRights.can_edit():
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        item = None
        if key and key in config.Registry.registered.keys():
            item = config.Registry.registered[key]
        if not item:
            self.redirect('/admin?action=settings')
            # Bug fix: stop here; previously execution continued and could
            # update an override entity for an unregistered property name.
            return

        try:
            entity = config.ConfigPropertyEntity.get_by_key_name(key)
        except db.BadKeyError:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        payload = request.get('payload')
        json_object = transforms.loads(payload)
        new_value = item.value_type(json_object['value'])

        # Validate the value.
        errors = []
        if item.validator:
            item.validator(new_value, errors)
        if errors:
            transforms.send_json_response(self, 412, '\n'.join(errors))
            return

        # Update entity.
        old_value = entity.value
        entity.value = str(new_value)
        entity.is_draft = json_object['is_draft']
        entity.put()

        # Audit trail capturing before/after values.
        models.EventEntity.record(
            'put-property', users.get_current_user(), transforms.dumps({
                'name': key,
                'before': str(old_value), 'after': str(entity.value)}))

        transforms.send_json_response(self, 200, 'Saved.')
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Site administration functionality."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import cgi
import cStringIO
import datetime
import os
import sys
import time
import urllib
from appengine_config import PRODUCTION_MODE
from common import jinja_filters
from common import safe_dom
from common import tags
from controllers import sites
from controllers.utils import ReflectiveRequestHandler
import jinja2
from models import config
from models import counters
from models import custom_modules
from models import roles
from models.config import ConfigProperty
import modules.admin.config
from modules.admin.config import ConfigPropertyEditor
import webapp2
import messages
from google.appengine.api import users
import google.appengine.api.app_identity as app
# Gates the interactive Python console actions in the admin UI; the console
# executes arbitrary code on the server (see get_console's warning), so it
# is disabled by default.
DIRECT_CODE_EXECUTION_UI_ENABLED = False

# A time this module was initialized; used to compute admin uptime counters.
BEGINNING_OF_TIME = time.time()

# Message shown when a delegated (non-direct) admin hits a console page.
DELEGATED_ACCESS_IS_NOT_ALLOWED = """
You must be an actual admin user to continue.
Users with the delegated admin rights are not allowed."""
def escape(text):
    """HTML-escapes *text*; falsy values pass through unchanged."""
    return cgi.escape(text) if text else text
def evaluate_python_code(code):
    """Compiles and evaluates a Python script in a restricted environment.

    Returns:
        A (captured_stdout, success_flag) tuple; on failure the output
        ends with the error description.
    """
    normalized = code.replace('\r\n', '\n')

    captured = cStringIO.StringIO()
    original_stdout = sys.stdout
    try:
        # Capture everything the script prints.
        sys.stdout = captured
        try:
            compiled_code = compile(normalized, '<string>', 'exec')
            exec(compiled_code, globals())  # pylint: disable-msg=exec-statement
        except Exception as e:  # pylint: disable-msg=broad-except
            captured.write('Error: %s' % e)
            return captured.getvalue(), False
    finally:
        sys.stdout = original_stdout
    return captured.getvalue(), True
class AdminHandler(
        webapp2.RequestHandler, ReflectiveRequestHandler, ConfigPropertyEditor):
    """Handles all pages and actions required for administration of site."""

    # Action served when /admin is requested with no explicit ?action=.
    default_action = 'courses'

    @property
    def get_actions(self):
        """GET actions ReflectiveRequestHandler may dispatch to get_<action>."""
        actions = [
            self.default_action, 'settings', 'deployment', 'perf',
            'config_edit', 'add_course']
        if DIRECT_CODE_EXECUTION_UI_ENABLED:
            actions.append('console')
        return actions

    @property
    def post_actions(self):
        """POST actions ReflectiveRequestHandler may dispatch to post_<action>."""
        actions = ['config_reset', 'config_override']
        if DIRECT_CODE_EXECUTION_UI_ENABLED:
            actions.append('console_run')
        return actions
    def can_view(self):
        """Checks if current user has viewing rights."""
        return roles.Roles.is_super_admin()

    def can_edit(self):
        """Checks if current user has editing rights."""
        # Editing requires exactly the same super-admin right as viewing.
        return self.can_view()

    def get(self):
        """Enforces rights to all GET operations."""
        if not self.can_view():
            self.redirect('/')
            return
        # Force reload of properties. It is expensive, but admin deserves it!
        config.Registry.get_overrides(force_update=True)
        # Delegate to ReflectiveRequestHandler's action dispatch.
        return super(AdminHandler, self).get()

    def post(self):
        """Enforces rights to all POST operations."""
        if not self.can_edit():
            self.redirect('/')
            return
        return super(AdminHandler, self).post()
def get_template(self, template_name, dirs):
"""Sets up an environment and Gets jinja template."""
jinja_environment = jinja2.Environment(
autoescape=True, finalize=jinja_filters.finalize,
loader=jinja2.FileSystemLoader(dirs + [os.path.dirname(__file__)]))
jinja_environment.filters['js_string'] = jinja_filters.js_string
return jinja_environment.get_template(template_name)
    def _get_user_nav(self):
        """Builds the admin top navigation, highlighting the current action."""
        current_action = self.request.get('action')
        nav_mappings = [
            ('', 'Courses'),
            ('settings', 'Settings'),
            ('perf', 'Metrics'),
            ('deployment', 'Deployment')]
        if DIRECT_CODE_EXECUTION_UI_ENABLED:
            nav_mappings.append(('console', 'Console'))
        nav = safe_dom.NodeList()
        for action, title in nav_mappings:
            # The tab matching the action being served gets the
            # 'selected' CSS class.
            if action == current_action:
                elt = safe_dom.Element(
                    'a', href='/admin?action=%s' % action,
                    className='selected')
            else:
                elt = safe_dom.Element('a', href='/admin?action=%s' % action)
            elt.add_text(title)
            nav.append(elt).append(safe_dom.Text(' '))

        if PRODUCTION_MODE:
            # Production: deep-link to this app's App Engine dashboard.
            app_id = app.get_application_id()
            nav.append(safe_dom.Element(
                'a', target='_blank',
                href=(
                    'https://appengine.google.com/'
                    'dashboard?app_id=s~%s' % app_id)
            ).add_text('Google App Engine'))
        else:
            # Development: link to the locally served console instead.
            nav.append(safe_dom.Element(
                'a', target='_blank', href='http://localhost:8000/'
            ).add_text('Google App Engine')).append(safe_dom.Text(' '))

        nav.append(safe_dom.Element(
            'a', target='_blank',
            href='https://code.google.com/p/course-builder/wiki/AdminPage'
        ).add_text('Help'))
        return nav
    def render_page(self, template_values):
        """Renders a page using provided template values."""
        # Standard page chrome shared by all admin views.
        template_values['top_nav'] = self._get_user_nav()
        template_values['user_nav'] = safe_dom.NodeList().append(
            safe_dom.Text('%s | ' % users.get_current_user().email())
        ).append(
            safe_dom.Element(
                'a', href=users.create_logout_url(self.request.uri)
            ).add_text('Logout')
        )
        template_values[
            'page_footer'] = 'Created on: %s' % datetime.datetime.now()
        self.response.write(
            self.get_template('view.html', []).render(template_values))
def render_dict(self, source_dict, title):
"""Renders a dictionary ordered by keys."""
keys = sorted(source_dict.keys())
content = safe_dom.NodeList()
content.append(safe_dom.Element('h3').add_text(title))
ol = safe_dom.Element('ol')
content.append(ol)
for key in keys:
value = source_dict[key]
if isinstance(value, ConfigProperty):
value = value.value
ol.add_child(
safe_dom.Element('li').add_text('%s: %s' % (key, value)))
return content
def format_title(self, text):
"""Formats standard title."""
return safe_dom.NodeList().append(
safe_dom.Text('Course Builder ')
).append(
safe_dom.Entity('>')
).append(
safe_dom.Text(' Admin ')
).append(
safe_dom.Entity('>')
).append(
safe_dom.Text(' %s' % text))
    def get_perf(self):
        """Shows server performance counters page."""
        template_values = {}
        template_values['page_title'] = self.format_title('Metrics')
        template_values['page_description'] = messages.METRICS_DESCRIPTION

        perf_counters = {}

        # built in counters
        perf_counters['gcb-admin-uptime-sec'] = long(
            time.time() - BEGINNING_OF_TIME)

        # config counters
        perf_counters['gcb-config-overrides'] = len(
            config.Registry.get_overrides())
        perf_counters['gcb-config-age-sec'] = (
            long(time.time()) - config.Registry.last_update_time)
        perf_counters['gcb-config-update-time-sec'] = (
            config.Registry.last_update_time)
        perf_counters['gcb-config-update-index'] = config.Registry.update_index

        # add all registered counters; shown as 'local / global' pairs.
        all_counters = counters.Registry.registered.copy()
        for name in all_counters.keys():
            global_value = all_counters[name].global_value
            # 'NA' when no global value is available for this counter.
            if not global_value:
                global_value = 'NA'
            perf_counters[name] = '%s / %s' % (
                all_counters[name].value, global_value)

        template_values['main_content'] = self.render_dict(
            perf_counters, 'In-process Performance Counters (local/global)')
        self.render_page(template_values)
def _make_routes_dom(self, parent_element, routes, caption):
"""Renders routes as DOM."""
if routes:
# sort routes
all_routes = []
for route in routes:
if route:
all_routes.append(str(route))
# render as DOM
ul = safe_dom.Element('ul')
parent_element.add_child(ul)
ul.add_child(safe_dom.Element('li').add_text(caption))
ul2 = safe_dom.Element('ul')
ul.add_child(ul2)
for route in sorted(all_routes):
if route:
ul2.add_child(safe_dom.Element('li').add_text(route))
def get_deployment(self):
"""Shows server environment and deployment information page."""
template_values = {}
template_values['page_title'] = self.format_title('Deployment')
template_values['page_description'] = messages.DEPLOYMENT_DESCRIPTION
# modules
module_content = safe_dom.NodeList()
module_content.append(
safe_dom.Element('h3').add_text('Custom Modules'))
ol = safe_dom.Element('ol')
module_content.append(ol)
for name in sorted(custom_modules.Registry.registered_modules.keys()):
enabled_text = ''
if name not in custom_modules.Registry.enabled_module_names:
enabled_text = ' (disabled)'
li = safe_dom.Element('li').add_text('%s%s' % (name, enabled_text))
ol.add_child(li)
amodule = custom_modules.Registry.registered_modules.get(name)
self._make_routes_dom(
li, amodule.global_routes, 'Global Routes')
self._make_routes_dom(
li, amodule.namespaced_routes, 'Namespaced Routes')
# Custom tags.
tag_content = safe_dom.NodeList()
tag_content.append(
safe_dom.Element('h3').add_text('Custom Tags'))
ol = safe_dom.Element('ol')
tag_content.append(ol)
tag_bindings = tags.get_tag_bindings()
for name in sorted(tag_bindings.keys()):
clazz = tag_bindings.get(name)
tag = clazz()
vendor = tag.vendor()
ol.add_child(safe_dom.Element('li').add_text(
'%s: %s: %s' % (name, tag.__class__.__name__, vendor)))
# Yaml file content.
yaml_content = safe_dom.NodeList()
yaml_content.append(
safe_dom.Element('h3').add_text('Contents of ').add_child(
safe_dom.Element('code').add_text('app.yaml')))
ol = safe_dom.Element('ol')
yaml_content.append(ol)
yaml_lines = open(os.path.join(os.path.dirname(
__file__), '../../app.yaml'), 'r').readlines()
for line in yaml_lines:
ol.add_child(safe_dom.Element('li').add_text(line))
# Application identity.
app_id = app.get_application_id()
app_dict = {}
app_dict['application_id'] = escape(app_id)
app_dict['default_ver_hostname'] = escape(
app.get_default_version_hostname())
template_values['main_content'] = safe_dom.NodeList().append(
self.render_dict(app_dict, 'About the Application')
).append(
module_content
).append(
tag_content
).append(
yaml_content
).append(
self.render_dict(os.environ, 'Server Environment Variables'))
self.render_page(template_values)
    def get_settings(self):
        """Shows configuration properties information page."""
        template_values = {}
        template_values['page_title'] = self.format_title('Settings')
        template_values['page_description'] = messages.SETTINGS_DESCRIPTION

        content = safe_dom.NodeList()
        table = safe_dom.Element('table', className='gcb-config').add_child(
            safe_dom.Element('tr').add_child(
                safe_dom.Element('th').add_text('Name')
            ).add_child(
                safe_dom.Element('th').add_text('Current Value')
            ).add_child(
                safe_dom.Element('th').add_text('Actions')
            ).add_child(
                safe_dom.Element('th').add_text('Description')
            ))
        content.append(
            safe_dom.Element('h3').add_text('All Settings')
        ).append(table)

        def get_style_for(value, value_type):
            """Formats CSS style for given value."""
            style = ''
            # Short (numeric/boolean/empty) values are centered in the cell.
            if not value or value_type in [int, long, bool]:
                style = 'text-align: center;'
            return style

        def get_action_html(caption, args, onclick=None):
            """Formats actions <a> link."""
            a = safe_dom.Element(
                'a', href='/admin?%s' % urllib.urlencode(args),
                className='gcb-button'
            ).add_text(caption)
            if onclick:
                a.add_attribute(onclick=onclick)
            return a

        def get_actions(name, override):
            """Creates actions appropriate to an item."""
            if override:
                # An override exists: offer the Edit link.
                return get_action_html('Edit', {
                    'action': 'config_edit', 'name': name})
            else:
                # No override yet: offer an XSRF-protected Override form.
                return safe_dom.Element(
                    'form',
                    action='/admin?%s' % urllib.urlencode(
                        {'action': 'config_override', 'name': name}),
                    method='POST'
                ).add_child(
                    safe_dom.Element(
                        'input', type='hidden', name='xsrf_token',
                        value=self.create_xsrf_token('config_override'))
                ).add_child(
                    safe_dom.Element(
                        'button', className='gcb-button', type='submit'
                    ).add_text('Override'))

        def get_doc_string(item, default_value):
            """Formats an item documentation string for display."""
            doc_string = item.doc_string
            if not doc_string:
                doc_string = 'No documentation available.'
            # doc_string may already be safe_dom content; append the default
            # value note without re-escaping it.
            if isinstance(doc_string, safe_dom.NodeList) or isinstance(
                    doc_string, safe_dom.Node):
                return safe_dom.NodeList().append(doc_string).append(
                    safe_dom.Text(' Default: \'%s\'.' % default_value))
            doc_string = ' %s Default: \'%s\'.' % (doc_string, default_value)
            return safe_dom.Text(doc_string)

        def get_lines(value):
            """Convert \\n line breaks into <br> and escape the lines."""
            escaped_value = safe_dom.NodeList()
            for line in str(value).split('\n'):
                escaped_value.append(
                    safe_dom.Text(line)).append(safe_dom.Element('br'))
            return escaped_value

        # get fresh properties and their overrides
        unused_overrides = config.Registry.get_overrides(force_update=True)
        registered = config.Registry.registered.copy()
        db_overrides = config.Registry.db_overrides.copy()
        names_with_draft = config.Registry.names_with_draft.copy()

        count = 0
        for name in sorted(registered.keys()):
            count += 1
            item = registered[name]
            has_environ_value, unused_environ_value = item.get_environ_value()

            # figure out what kind of override this is; later checks take
            # precedence (draft > db override > environment value)
            class_current = ''
            if has_environ_value:
                class_current = 'gcb-env-diff'
            if item.name in db_overrides:
                class_current = 'gcb-db-diff'
            if item.name in names_with_draft:
                class_current = 'gcb-db-draft'

            # figure out default and current value
            default_value = item.default_value
            value = item.value
            if default_value:
                default_value = str(default_value)
            if value:
                value = str(value)

            style_current = get_style_for(value, item.value_type)

            tr = safe_dom.Element('tr')
            table.add_child(tr)

            tr.add_child(
                safe_dom.Element(
                    'td', style='white-space: nowrap;').add_text(item.name))

            td_value = safe_dom.Element('td').add_child(get_lines(value))
            if style_current:
                td_value.add_attribute(style=style_current)
            if class_current:
                td_value.add_attribute(className=class_current)
            tr.add_child(td_value)

            tr.add_child(
                safe_dom.Element(
                    'td', style='white-space: nowrap;', align='center'
                ).add_child(get_actions(
                    name, name in db_overrides or name in names_with_draft)))

            tr.add_child(
                safe_dom.Element(
                    'td').add_child(get_doc_string(item, default_value)))

        table.add_child(
            safe_dom.Element('tr').add_child(
                safe_dom.Element(
                    'td', colspan='4', align='right'
                ).add_text('Total: %s item(s)' % count)))

        # Legend explaining the value-priority CSS classes used above.
        content.append(
            safe_dom.Element('p').add_child(
                safe_dom.Element('strong').add_text('Legend')
            ).add_text(':').add_text("""
            For each property, the value shown corresponds to, in
            descending order of priority:
            """).add_child(
                safe_dom.Element('span', className='gcb-db-diff').add_child(
                    safe_dom.Entity(' ')
                ).add_text(
                    '[ the value override set via this page ]'
                ).add_child(safe_dom.Entity(' '))
            ).add_text(', ').add_child(
                safe_dom.Element('span', className='gcb-db-draft').add_child(
                    safe_dom.Entity(' ')
                ).add_text(
                    '[ the default value with pending value override ]'
                ).add_child(safe_dom.Entity(' '))
            ).add_text(', ').add_child(
                safe_dom.Element('span', className='gcb-env-diff').add_child(
                    safe_dom.Entity(' ')
                ).add_text(
                    '[ the environment value in app.yaml ]'
                ).add_child(safe_dom.Entity(' '))
            ).add_text(', ').add_text("""
            and the [ default value ] in the Course Builder codebase.
            """))

        template_values['main_content'] = content
        self.render_page(template_values)
    def get_courses(self):
        """Shows a list of all courses available on this site."""
        template_values = {}
        template_values['page_title'] = self.format_title('Courses')
        template_values['page_description'] = messages.COURSES_DESCRIPTION

        content = safe_dom.NodeList()
        content.append(
            safe_dom.Element(
                'a', id='add_course', className='gcb-button gcb-pull-right',
                role='button', href='admin?action=add_course'
            ).add_text('Add Course')
        ).append(
            safe_dom.Element('div', style='clear: both; padding-top: 2px;')
        ).append(
            safe_dom.Element('h3').add_text('All Courses')
        )
        table = safe_dom.Element('table')
        content.append(table)
        table.add_child(
            safe_dom.Element('tr').add_child(
                safe_dom.Element('th').add_text('Course Title')
            ).add_child(
                safe_dom.Element('th').add_text('Context Path')
            ).add_child(
                safe_dom.Element('th').add_text('Content Location')
            ).add_child(
                safe_dom.Element('th').add_text('Student Data Location')
            )
        )
        courses = sites.get_all_courses()
        count = 0
        for course in courses:
            count += 1
            error = safe_dom.Text('')
            slug = course.get_slug()
            # Best-effort: a broken course.yaml must not take down the whole
            # listing, so render the parse error inline instead.
            try:
                name = course.get_title()
            except Exception as e:  # pylint: disable-msg=broad-except
                name = 'UNKNOWN COURSE'
                error = safe_dom.Element('p').add_text('Error in ').add_child(
                    safe_dom.Element('strong').add_text('course.yaml')
                ).add_text(' file. ').add_child(
                    safe_dom.Element('br')
                ).add_child(
                    safe_dom.Element('pre').add_text('\n%s\n%s\n' % (
                        e.__class__.__name__, str(e)))
                )

            # Read-write courses live in a datastore namespace; read-only
            # ones are served from disk.
            if course.fs.is_read_write():
                location = 'namespace: %s' % course.get_namespace_name()
            else:
                location = 'disk: %s' % sites.abspath(
                    course.get_home_folder(), '/')
            if slug == '/':
                link = '/dashboard'
            else:
                link = '%s/dashboard' % slug
            link = safe_dom.Element('a', href=link).add_text(name)

            table.add_child(
                safe_dom.Element('tr').add_child(
                    safe_dom.Element('td').add_child(link).add_child(error)
                ).add_child(
                    safe_dom.Element('td').add_text(slug)
                ).add_child(
                    safe_dom.Element('td').add_text(location)
                ).add_child(
                    safe_dom.Element('td').add_text(
                        'namespace: %s' % course.get_namespace_name())
                ))

        table.add_child(
            safe_dom.Element('tr').add_child(
                safe_dom.Element('td', colspan='4', align='right').add_text(
                    'Total: %s item(s)' % count)))
        template_values['main_content'] = content

        self.render_page(template_values)
    def get_console(self):
        """Shows interactive Python console page."""
        template_values = {}
        template_values['page_title'] = self.format_title('Console')

        # Check rights. Only direct (non-delegated) super admins may use the
        # console, since it executes arbitrary code on the server.
        if not roles.Roles.is_direct_super_admin():
            template_values['main_content'] = DELEGATED_ACCESS_IS_NOT_ALLOWED
            self.render_page(template_values)
            return

        content = safe_dom.NodeList()
        content.append(
            safe_dom.Element('p').add_child(
                safe_dom.Element('i').add_child(
                    safe_dom.Element('strong').add_text('WARNING!')
                ).add_text("""
 The Interactive Console has the same
access to the application's environment and services as a .py file
inside the application itself. Be careful, because this means writes
to your data store will be executed for real!""")
            )
        ).append(
            safe_dom.Element('p').add_child(
                safe_dom.Element('strong').add_text("""
Input your Python code below and press "Run Program" to execute.""")
            )
        ).append(
            # Submission is XSRF-protected; handled by post_console_run.
            safe_dom.Element(
                'form', action='/admin?action=console_run', method='POST'
            ).add_child(
                safe_dom.Element(
                    'input', type='hidden', name='xsrf_token',
                    value=self.create_xsrf_token('console_run'))
            ).add_child(
                safe_dom.Element(
                    'textarea', style='width: 95%; height: 200px;',
                    name='code')
            ).add_child(
                safe_dom.Element('p', align='center').add_child(
                    safe_dom.Element(
                        'button', className='gcb-button', type='submit'
                    ).add_text('Run Program')
                )
            )
        )

        template_values['main_content'] = content
        self.render_page(template_values)
    def post_console_run(self):
        """Executes dynamically submitted Python code."""
        template_values = {}
        template_values['page_title'] = self.format_title('Execution Results')

        # Check rights; same direct-super-admin gate as get_console.
        if not roles.Roles.is_direct_super_admin():
            template_values['main_content'] = DELEGATED_ACCESS_IS_NOT_ALLOWED
            self.render_page(template_values)
            return

        # Execute code.
        code = self.request.get('code')
        time_before = time.time()
        output, results = evaluate_python_code(code)
        duration = long(time.time() - time_before)
        status = 'FAILURE'
        if results:
            status = 'SUCCESS'

        # Render results: the submitted code, status/duration, then the
        # captured stdout.
        content = safe_dom.NodeList()
        content.append(
            safe_dom.Element('h3').add_text('Submitted Python Code'))
        ol = safe_dom.Element('ol')
        content.append(ol)
        for line in code.split('\n'):
            ol.add_child(safe_dom.Element('li').add_text(line))

        content.append(
            safe_dom.Element('h3').add_text('Execution Results')
        ).append(
            safe_dom.Element('ol').add_child(
                safe_dom.Element('li').add_text('Status: %s' % status)
            ).add_child(
                safe_dom.Element('li').add_text('Duration (sec): %s' % duration)
            )
        ).append(
            safe_dom.Element('h3').add_text('Program Output')
        ).append(
            safe_dom.Element('blockquote').add_child(
                safe_dom.Element('pre').add_text(output))
        )

        template_values['main_content'] = content
        self.render_page(template_values)
# Populated by register_module(); None until registration runs.
custom_module = None


def register_module():
    """Registers this module in the registry."""
    global custom_module

    routes = [
        ('/admin', AdminHandler),
        ('/rest/config/item', (
            modules.admin.config.ConfigPropertyItemRESTHandler)),
        ('/rest/courses/item', modules.admin.config.CoursesItemRESTHandler),
    ]
    custom_module = custom_modules.Module(
        'Site Admin',
        'A set of pages for Course Builder site administrator.',
        routes, [])
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courses module."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from controllers import assessments
from controllers import lessons
from controllers import utils
from models import content
from models import custom_modules
from tools import verify
custom_module = None


def register_module():
    """Registers this module in the registry."""
    global custom_module

    # Hook the content parser into the verifier.
    verify.parse_content = content.parse_string_in_scope

    # Student-facing routes for course delivery.
    routes = [
        ('/', lessons.CourseHandler),
        ('/activity', lessons.ActivityHandler),
        ('/answer', assessments.AnswerHandler),
        ('/assessment', lessons.AssessmentHandler),
        ('/course', lessons.CourseHandler),
        ('/forum', utils.ForumHandler),
        ('/preview', utils.PreviewHandler),
        ('/register', utils.RegisterHandler),
        ('/review', lessons.ReviewHandler),
        ('/reviewdashboard', lessons.ReviewDashboardHandler),
        ('/student/editstudent', utils.StudentEditStudentHandler),
        ('/student/home', utils.StudentProfileHandler),
        ('/student/unenroll', utils.StudentUnenrollHandler),
        ('/unit', lessons.UnitHandler)]

    custom_module = custom_modules.Module(
        'Course',
        'A set of pages for delivering an online course.',
        [], routes)
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage Announcements."""
__author__ = 'Saifu Angto (saifu@google.com)'
import datetime
import urllib
from common import tags
from controllers.utils import BaseHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import ReflectiveRequestHandler
from controllers.utils import XsrfTokenManager
from models import custom_modules
from models import entities
from models import notify
from models import roles
from models import transforms
from models.models import MemcacheManager
import modules.announcements.samples as samples
from modules.oeditor import oeditor
from google.appengine.ext import db
class AnnouncementsRights(object):
    """Manages view/edit rights for announcements."""

    @classmethod
    def can_view(cls, unused_handler):
        # Announcements are world-readable; drafts are filtered elsewhere.
        return True

    @classmethod
    def can_edit(cls, handler):
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        # Deleting requires the same rights as editing.
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        # Adding requires the same rights as editing.
        return cls.can_edit(handler)

    @classmethod
    def apply_rights(cls, handler, items):
        """Filter out items that current user can't see."""
        if cls.can_edit(handler):
            return items
        # Non-editors only see published items.
        return [item for item in items if not item.is_draft]
class AnnouncementsHandler(BaseHandler, ReflectiveRequestHandler):
    """Handler for announcements."""

    default_action = 'list'
    get_actions = [default_action, 'edit']
    post_actions = ['add', 'delete']

    @classmethod
    def get_child_routes(cls):
        """Add child handlers for REST."""
        return [('/rest/announcements/item', AnnouncementsItemRESTHandler)]

    def get_action_url(self, action, key=None):
        params = {'action': action}
        if key:
            params['key'] = key
        return self.canonicalize_url(
            '/announcements?%s' % urllib.urlencode(params))

    def format_items_for_template(self, items):
        """Formats a list of entities into template values."""
        editable = AnnouncementsRights.can_edit(self)
        children = []
        for item in items:
            item = transforms.entity_to_dict(item)
            if editable:
                # Attach per-item edit/delete actions for admins.
                item['edit_action'] = self.get_action_url(
                    'edit', key=item['key'])
                item['delete_xsrf_token'] = self.create_xsrf_token('delete')
                item['delete_action'] = self.get_action_url(
                    'delete', key=item['key'])
            children.append(item)

        output = {'children': children}
        if editable:
            # Admins also get a top-level 'add' action.
            output['add_xsrf_token'] = self.create_xsrf_token('add')
            output['add_action'] = self.get_action_url('add')
        return output

    def put_sample_announcements(self):
        """Loads sample data into a database."""
        created = []
        for sample in samples.SAMPLE_ANNOUNCEMENTS:
            entity = AnnouncementEntity()
            transforms.dict_to_entity(entity, sample)
            entity.put()
            created.append(entity)
        return created

    def get_list(self):
        """Shows a list of announcements."""
        if not self.personalize_page_and_get_enrolled():
            return

        items = AnnouncementEntity.get_announcements()
        if not items and AnnouncementsRights.can_edit(self):
            # Seed an empty course with sample announcements for admins.
            items = self.put_sample_announcements()
        items = AnnouncementsRights.apply_rights(self, items)

        self.template_value['announcements'] = self.format_items_for_template(
            items)
        self.template_value['navbar'] = {'announcements': True}
        self.render('announcements.html')

    def get_edit(self):
        """Shows an editor for an announcement."""
        if not AnnouncementsRights.can_edit(self):
            self.error(401)
            return

        key = self.request.get('key')
        exit_url = self.canonicalize_url(
            '/announcements#%s' % urllib.quote(key, safe=''))
        rest_url = self.canonicalize_url('/rest/announcements/item')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            AnnouncementsItemRESTHandler.SCHEMA_JSON,
            AnnouncementsItemRESTHandler.get_schema_annotation_dict(
                self.get_course().get_course_announcement_list_email()),
            key, rest_url, exit_url,
            required_modules=AnnouncementsItemRESTHandler.REQUIRED_MODULES)
        self.template_value['navbar'] = {'announcements': True}
        self.template_value['content'] = form_html
        self.render('bare.html')

    def post_delete(self):
        """Deletes an announcement."""
        if not AnnouncementsRights.can_delete(self):
            self.error(401)
            return

        entity = AnnouncementEntity.get(self.request.get('key'))
        if entity:
            entity.delete()
        self.redirect('/announcements')

    def post_add(self):
        """Adds a new announcement and redirects to an editor for it."""
        if not AnnouncementsRights.can_add(self):
            self.error(401)
            return

        entity = AnnouncementEntity()
        entity.title = 'Sample Announcement'
        entity.date = datetime.datetime.now().date()
        entity.html = 'Here is my announcement!'
        entity.is_draft = True
        entity.put()
        self.redirect(self.get_action_url('edit', key=entity.key()))
class AnnouncementsItemRESTHandler(BaseRESTHandler):
    """Provides REST API for an announcement."""

    # TODO(psimakov): we should really use an ordered dictionary, not plain
    # text; it can't be just a normal dict because a dict iterates its items in
    # undefined order; thus when we render a dict to JSON an order of fields
    # will not match what we specify here; the final editor will also show the
    # fields in an undefined order; for now we use the raw JSON, rather than the
    # dict, but will move to an ordered dict late.
    SCHEMA_JSON = """
        {
            "id": "Announcement Entity",
            "type": "object",
            "description": "Announcement",
            "properties": {
                "key" : {"type": "string"},
                "title": {"optional": true, "type": "string"},
                "date": {"optional": true, "type": "date"},
                "html": {"optional": true, "type": "html"},
                "is_draft": {"type": "boolean"},
                "send_email": {"type": "boolean"}
                }
        }
        """

    # Parsed form of SCHEMA_JSON; used below to convert entities to/from JSON.
    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    # InputEx editor widgets required to render the edit form for this schema.
    REQUIRED_MODULES = [
        'inputex-date', 'gcb-rte', 'inputex-select', 'inputex-string',
        'inputex-uneditable', 'inputex-checkbox']

    @staticmethod
    def get_send_email_description(announcement_email):
        """Get the description for Send Email field."""
        if announcement_email:
            return 'Email will be sent to : ' + announcement_email
        return 'Announcement list not configured.'

    @staticmethod
    def get_schema_annotation_dict(announcement_email):
        """Utility to get schema annotation dict for this course.

        Args:
            announcement_email: string or None; the course's announcement
                mailing-list address, used only in the Send Email description.

        Returns:
            A list of (path, annotation) pairs consumed by the object editor.
        """
        schema_dict = [
            (['title'], 'Announcement'),
            (['properties', 'key', '_inputex'], {
                'label': 'ID', '_type': 'uneditable'}),
            (['properties', 'date', '_inputex'], {
                'label': 'Date', '_type': 'date', 'dateFormat': 'Y/m/d',
                'valueFormat': 'Y/m/d'}),
            (['properties', 'title', '_inputex'], {'label': 'Title'}),
            (['properties', 'html', '_inputex'], {
                'label': 'Body', '_type': 'html',
                'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value}),
            oeditor.create_bool_select_annotation(
                ['properties', 'is_draft'], 'Status', 'Draft', 'Published'),
            (['properties', 'send_email', '_inputex'], {
                'label': 'Send Email', '_type': 'boolean',
                'description':
                    AnnouncementsItemRESTHandler.get_send_email_description(
                        announcement_email)})]
        return schema_dict

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        key = self.request.get('key')

        # An ill-formed key raises BadKeyError rather than returning None;
        # treat it the same as a missing entity (404).
        try:
            entity = AnnouncementEntity.get(key)
        except db.BadKeyError:
            entity = None
        if not entity:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        # Reuse the list-view visibility rules: a non-admin may not fetch a
        # draft announcement.
        viewable = AnnouncementsRights.apply_rights(self, [entity])
        if not viewable:
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        entity = viewable[0]

        json_payload = transforms.dict_to_json(transforms.entity_to_dict(
            entity), AnnouncementsItemRESTHandler.SCHEMA_DICT)
        # Include an XSRF token so the editor can subsequently PUT.
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'announcement-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        # Reject requests with a missing or stale XSRF token.
        if not self.assert_xsrf_token_or_fail(
                request, 'announcement-put', {'key': key}):
            return

        if not AnnouncementsRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        entity = AnnouncementEntity.get(key)
        if not entity:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        # Copy the schema-validated payload fields onto the entity and save.
        payload = request.get('payload')
        transforms.dict_to_entity(entity, transforms.json_to_dict(
            transforms.loads(payload),
            AnnouncementsItemRESTHandler.SCHEMA_DICT))
        entity.put()

        # Optionally email the announcement; send failures are reported in
        # the response message rather than failing the save itself.
        email_sent = False
        if entity.send_email:
            email_manager = notify.EmailManager(self.get_course())
            email_sent = email_manager.send_announcement(
                entity.title, entity.html)

        if entity.send_email and not email_sent:
            if not self.get_course().get_course_announcement_list_email():
                message = 'Saved. Announcement list not configured.'
            else:
                message = 'Saved, but there was an error sending email.'
        else:
            message = 'Saved.'
        transforms.send_json_response(self, 200, message)
class AnnouncementEntity(entities.BaseEntity):
    """A class that represents a persistent database entity of announcement."""

    title = db.StringProperty(indexed=False)
    date = db.DateProperty()
    html = db.TextProperty(indexed=False)
    is_draft = db.BooleanProperty()
    send_email = db.BooleanProperty()

    # Single memcache slot holding the full announcements list.
    memcache_key = 'announcements'

    @classmethod
    def get_announcements(cls, allow_cached=True):
        cached = MemcacheManager.get(cls.memcache_key)
        if allow_cached and cached is not None:
            return cached
        items = AnnouncementEntity.all().order('-date').fetch(1000)

        # TODO(psimakov): prepare to exceed 1MB max item size
        # read more here: http://stackoverflow.com
        # /questions/5081502/memcache-1-mb-limit-in-google-app-engine
        MemcacheManager.set(cls.memcache_key, items)
        return items

    def put(self):
        """Do the normal put() and also invalidate memcache."""
        result = super(AnnouncementEntity, self).put()
        MemcacheManager.delete(self.memcache_key)
        return result

    def delete(self):
        """Do the normal delete() and invalidate memcache."""
        super(AnnouncementEntity, self).delete()
        MemcacheManager.delete(self.memcache_key)
custom_module = None


def register_module():
    """Registers this module in the registry."""
    global custom_module

    handlers = [('/announcements', AnnouncementsHandler)]
    custom_module = custom_modules.Module(
        'Course Announcements',
        'A set of pages for managing course announcements.',
        [], handlers)
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample announcements."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
SAMPLE_ANNOUNCEMENT_1 = {
'edit_url': None,
'title': 'Example Announcement',
'date': datetime.date(2012, 10, 6),
'is_draft': False,
'html': """
<br>Certificates will be e-mailed to qualifying participants by
Friday, October 12.
<br>
<br>Do you want to check your assessment scores? Visit the
<a href="student/home">"My profile"</a> page!</p>
"""}
SAMPLE_ANNOUNCEMENT_2 = {
'edit_url': None,
'title': 'Welcome to Class 6 and the Post-class Assessment',
'date': datetime.date(2012, 10, 5),
'is_draft': True,
'html': """
<br>Welcome to the final class! <a href="class?class=6"> Class 6</a>
focuses on combining the skills you have learned throughout the class
to maximize the effectiveness of your searches.
<br>
<br><b>Customize Your Experience</b>
<br>You can customize your experience in several ways:
<ul>
<li>You can watch the videos multiple times for a deeper understanding
of each lesson. </li>
<li>You can read the text version for each lesson. Click the button
above the video to access it.</li>
<li>Lesson activities are designed for multiple levels of experience.
The first question checks your recall of the material in the video;
the second question lets you verify your mastery of the lesson; the
third question is an opportunity to apply your skills and share your
experiences in the class forums. You can answer some or all of the
questions depending on your familiarity and interest in the topic.
Activities are not graded and do not affect your final grade. </li>
<li>We'll also post extra challenges in the forums for people who seek
additional opportunities to practice and test their new skills!</li>
</ul>
<br><b>Forum</b>
<br>Apply your skills, share with others, and connect with your peers
and course staff in the <a href="forum">forum.</a> Discuss your favorite
search tips and troubleshoot technical issues. We'll also post bonus
videos and challenges there!
<p> </p>
<p>For an optimal learning experience, please plan to use the most
recent version of your browser, as well as a desktop, laptop or a tablet
computer instead of your mobile phone.</p>
"""}
SAMPLE_ANNOUNCEMENTS = [SAMPLE_ANNOUNCEMENT_1, SAMPLE_ANNOUNCEMENT_2]
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cron job definitions for the review subsystem."""
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
from controllers import sites
from controllers import utils
from models import courses
from modules.review import review
from google.appengine.api import namespace_manager
# Module-level logger for the review-subsystem cron jobs; basicConfig()
# ensures a handler exists even when run outside the normal app setup.
_LOG = logging.getLogger('modules.reviews.cron')
logging.basicConfig()
class ExpireOldAssignedReviewsHandler(utils.BaseHandler):
    """Iterates through all units in all courses, expiring old review steps.

    The system will run a maximum of one of these jobs at any given time. This
    is enforced by the 10 minute execution time limit on cron jobs plus the
    scheduler, which is configured to run this every 15 minutes.

    Write operations done by this handler must be atomic since admins may visit
    this page at any time, kicking off any number of runs.
    """

    def get(self):
        """Runs the expiry operation once for each peer-reviewed unit."""
        try:
            self.response.headers['Content-Type'] = 'text/plain'

            # Gather the units to process, grouped by course namespace:
            # namespace_string -> [{
            #     'id': unit_id_string, 'review_window_mins': int}]
            namespace_to_units = {}
            for context in sites.get_all_courses():
                namespace = context.get_namespace_name()
                namespace_to_units[namespace] = []
                course = courses.Course(None, context)
                for unit in course.get_peer_reviewed_units():
                    namespace_to_units[namespace].append({
                        'review_window_mins': (
                            unit.workflow.get_review_window_mins()),
                        'id': str(unit.unit_id),
                    })

            total_count = 0
            total_expired_count = 0
            total_exception_count = 0
            _LOG.info('Begin expire_old_assigned_reviews cron')
            for namespace, units in namespace_to_units.iteritems():
                start_namespace_message = (
                    ('Begin processing course in namespace "%s"; %s unit%s '
                     'found') % (
                         namespace, len(units), '' if len(units) == 1 else 's'))
                _LOG.info(start_namespace_message)

                for unit in units:
                    begin_unit_message = 'Begin processing unit %s' % unit['id']
                    _LOG.info(begin_unit_message)
                    # Expiry writes must happen in the unit's own namespace.
                    namespace_manager.set_namespace(namespace)
                    expired_keys, exception_keys = (
                        review.Manager.expire_old_reviews_for_unit(
                            unit['review_window_mins'], unit['id']))

                    unit_expired_count = len(expired_keys)
                    unit_exception_count = len(exception_keys)
                    unit_total_count = unit_expired_count + unit_exception_count
                    total_expired_count += unit_expired_count
                    # Bug fix: this previously added total_exception_count to
                    # itself, so per-unit exceptions were never tallied.
                    total_exception_count += unit_exception_count
                    total_count += unit_total_count

                    end_unit_message = (
                        'End processing unit %s. Expired: %s, Exceptions: %s, '
                        'Total: %s' % (
                            unit['id'], unit_expired_count,
                            unit_exception_count, unit_total_count))
                    _LOG.info(end_unit_message)
                _LOG.info('Done processing namespace "%s"', namespace)

            end_message = (
                ('End expire_old_assigned_reviews cron. Expired: %s, '
                 'Exceptions : %s, Total: %s') % (
                     total_expired_count, total_exception_count, total_count))
            _LOG.info(end_message)
            self.response.write('OK\n')
        except:  # Hide all errors. pylint: disable-msg=bare-except
            # Deliberately swallow everything so the cron scheduler never sees
            # a failure, but record the problem so it remains diagnosable.
            _LOG.exception('expire_old_assigned_reviews cron failed')
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for displaying peer review analytics."""
__author__ = 'Sean Lip (sll@google.com)'
import os
from common import safe_dom
from controllers.utils import ApplicationHandler
from controllers.utils import HUMAN_READABLE_TIME_FORMAT
import jinja2
from models import courses
from models import jobs
from models import transforms
from models import utils
from modules.review import peer
class ReviewStatsAggregator(object):
    """Aggregates peer review statistics."""

    def __init__(self):
        # This dict records, for each unit, how many submissions have a given
        # number of completed reviews. The format of each key-value pair is
        #     unit_id: {num_reviews: count_of_submissions}
        self.counts_by_completed_reviews = {}

    def visit(self, review_summary):
        """Folds one ReviewSummary into the per-unit histogram."""
        per_unit = self.counts_by_completed_reviews.setdefault(
            review_summary.unit_id, {})
        num_completed = review_summary.completed_count
        per_unit[num_completed] = per_unit.get(num_completed, 0) + 1
class ComputeReviewStats(jobs.DurableJob):
    """A job for computing peer review statistics."""

    def run(self):
        """Computes peer review statistics."""
        aggregator = ReviewStatsAggregator()
        mapper = utils.QueryMapper(
            peer.ReviewSummary.all(), batch_size=500, report_every=1000)
        mapper.run(aggregator.visit)

        # Convert each unit's sparse {num_reviews: count} map into a dense
        # array indexed by number of completed reviews (missing counts -> 0).
        completed_arrays_by_unit = {}
        for unit_id, counts in aggregator.counts_by_completed_reviews.items():
            highest = max(counts.keys())
            completed_arrays_by_unit[unit_id] = [
                counts.get(i, 0) for i in range(highest + 1)]

        return {'counts_by_completed_reviews': completed_arrays_by_unit}
class PeerReviewStatsHandler(ApplicationHandler):
    """Shows peer review analytics on the dashboard."""

    # The key used in the statistics dict that generates the dashboard page.
    # Must be unique.
    name = 'peer_review_stats'

    # The class that generates the data to be displayed. It should have a
    # get_stats() method.
    stats_computer = ComputeReviewStats

    def get_markup(self, job):
        """Returns Jinja markup for peer review analytics.

        Args:
            job: the most recent statistics job record (presumably a
                ComputeReviewStats run with status_code/output/updated_on
                fields), or None if statistics were never computed.

        Returns:
            jinja2.utils.Markup rendering stats.html for the dashboard.
        """
        errors = []
        stats_calculated = False
        update_message = safe_dom.Text('')

        course = courses.Course(self)
        serialized_units = []

        if not job:
            update_message = safe_dom.Text(
                'Peer review statistics have not been calculated yet.')
        else:
            if job.status_code == jobs.STATUS_CODE_COMPLETED:
                stats = transforms.loads(job.output)
                stats_calculated = True

                # Pair each peer-reviewed unit with its completed-review
                # histogram, skipping units that have no recorded stats.
                # NOTE(review): after the JSON round-trip the keys of
                # counts_by_completed_reviews are strings — this membership
                # test assumes unit.unit_id is also a string; confirm.
                for unit in course.get_peer_reviewed_units():
                    if unit.unit_id in stats['counts_by_completed_reviews']:
                        unit_stats = (
                            stats['counts_by_completed_reviews'][unit.unit_id])
                        serialized_units.append({
                            'stats': unit_stats,
                            'title': unit.title,
                            'unit_id': unit.unit_id,
                        })
                update_message = safe_dom.Text("""
                    Peer review statistics were last updated at
                    %s in about %s second(s).""" % (
                        job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT),
                        job.execution_time_sec))
            elif job.status_code == jobs.STATUS_CODE_FAILED:
                # Surface the failed job's output as the error message.
                update_message = safe_dom.NodeList().append(
                    safe_dom.Text("""
                        There was an error updating peer review statistics.
                        Here is the message:""")
                ).append(
                    safe_dom.Element('br')
                ).append(
                    safe_dom.Element('blockquote').add_child(
                        safe_dom.Element('pre').add_text('\n%s' % job.output)))
            else:
                # Job is still pending or running.
                update_message = safe_dom.Text("""
                    Peer review statistics update started at %s and is running
                    now. Please come back shortly.""" % job.updated_on.strftime(
                        HUMAN_READABLE_TIME_FORMAT))

        return jinja2.utils.Markup(self.get_template(
            'stats.html', [os.path.dirname(__file__)]
        ).render({
            'errors': errors,
            'serialized_units': serialized_units,
            'serialized_units_json': transforms.dumps(serialized_units),
            'stats_calculated': stats_calculated,
            'update_message': update_message,
        }, autoescape=True))
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the review subsystem."""
__author__ = [
'johncox@google.com (John Cox)',
]
import datetime
import random
from models import counters
from models import custom_modules
from models import entities
from models import student_work
from models import utils
import models.review
from modules.review import domain
from modules.review import peer
from google.appengine.ext import db
# In-process increment-only performance counters.
#
# Each counter instruments one operation of the review subsystem (the
# Manager methods defined later in this module). Counters are registered
# at import time; the counter name string is the externally reported
# identifier, so renaming a string changes the reported metric.

# --- add_reviewer() counters ---
COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY = counters.PerfCounter(
    'gcb-pr-add-reviewer-bad-summary-key',
    'number of times add_reviewer() failed due to a bad review summary key')
COUNTER_ADD_REVIEWER_SET_ASSIGNER_KIND_HUMAN = counters.PerfCounter(
    'gcb-pr-add-reviewer-set-assigner-kind-human',
    ("number of times add_reviewer() changed an existing step's assigner_kind "
     'to ASSIGNER_KIND_HUMAN'))
COUNTER_ADD_REVIEWER_CREATE_REVIEW_STEP = counters.PerfCounter(
    'gcb-pr-add-reviewer-create-review-step',
    'number of times add_reviewer() created a new review step')
COUNTER_ADD_REVIEWER_EXPIRED_STEP_REASSIGNED = counters.PerfCounter(
    'gcb-pr-add-reviewer-expired-step-reassigned',
    'number of times add_reviewer() reassigned an expired step')
COUNTER_ADD_REVIEWER_FAILED = counters.PerfCounter(
    'gcb-pr-add-reviewer-failed',
    'number of times add_reviewer() had a fatal error')
COUNTER_ADD_REVIEWER_REMOVED_STEP_UNREMOVED = counters.PerfCounter(
    'gcb-pr-add-reviewer-removed-step-unremoved',
    'number of times add_reviewer() unremoved a removed review step')
COUNTER_ADD_REVIEWER_START = counters.PerfCounter(
    'gcb-pr-add-reviewer-start',
    'number of times add_reviewer() has started processing')
COUNTER_ADD_REVIEWER_SUCCESS = counters.PerfCounter(
    'gcb-pr-add-reviewer-success',
    'number of times add_reviewer() completed successfully')
COUNTER_ADD_REVIEWER_UNREMOVED_STEP_FAILED = counters.PerfCounter(
    'gcb-pr-add-reviewer-unremoved-step-failed',
    ('number of times add_reviewer() failed on an unremoved step with a fatal '
     'error'))

COUNTER_ASSIGNMENT_CANDIDATES_QUERY_RESULTS_RETURNED = counters.PerfCounter(
    'gcb-pr-assignment-candidates-query-results-returned',
    ('number of results returned by the query returned by '
     'get_assignment_candidates_query()'))

# --- delete_reviewer() counters ---
COUNTER_DELETE_REVIEWER_ALREADY_REMOVED = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-already-removed',
    ('number of times delete_reviewer() called on review step with removed '
     'already True'))
COUNTER_DELETE_REVIEWER_FAILED = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-failed',
    'number of times delete_reviewer() had a fatal error')
COUNTER_DELETE_REVIEWER_START = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-start',
    'number of times delete_reviewer() has started processing')
COUNTER_DELETE_REVIEWER_STEP_MISS = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-step-miss',
    'number of times delete_reviewer() found a missing review step')
COUNTER_DELETE_REVIEWER_SUCCESS = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-success',
    'number of times delete_reviewer() completed successfully')
COUNTER_DELETE_REVIEWER_SUMMARY_MISS = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-summary-miss',
    'number of times delete_reviewer() found a missing review summary')

# --- expire_review() / expire_old_reviews_for_unit() counters ---
COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION = counters.PerfCounter(
    'gcb-pr-expire-review-cannot-transition',
    ('number of times expire_review() was called on a review step that could '
     'not be transitioned to REVIEW_STATE_EXPIRED'))
COUNTER_EXPIRE_REVIEW_FAILED = counters.PerfCounter(
    'gcb-pr-expire-review-failed',
    'number of times expire_review() had a fatal error')
COUNTER_EXPIRE_REVIEW_START = counters.PerfCounter(
    'gcb-pr-expire-review-start',
    'number of times expire_review() has started processing')
COUNTER_EXPIRE_REVIEW_STEP_MISS = counters.PerfCounter(
    'gcb-pr-expire-review-step-miss',
    'number of times expire_review() found a missing review step')
COUNTER_EXPIRE_REVIEW_SUCCESS = counters.PerfCounter(
    'gcb-pr-expire-review-success',
    'number of times expire_review() completed successfully')
COUNTER_EXPIRE_REVIEW_SUMMARY_MISS = counters.PerfCounter(
    'gcb-pr-expire-review-summary-miss',
    'number of times expire_review() found a missing review summary')
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_EXPIRE = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-expire',
    'number of records expire_old_reviews_for_unit() has expired')
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SKIP = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-skip',
    ('number of times expire_old_reviews_for_unit() skipped a record due to an '
     'error'))
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_START = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-start',
    'number of times expire_old_reviews_for_unit() has started processing')
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SUCCESS = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-success',
    'number of times expire_old_reviews_for_unit() completed successfully')
COUNTER_EXPIRY_QUERY_KEYS_RETURNED = counters.PerfCounter(
    'gcb-pr-expiry-query-keys-returned',
    'number of keys returned by the query returned by get_expiry_query()')

# --- get_new_review() counters ---
COUNTER_GET_NEW_REVIEW_ALREADY_ASSIGNED = counters.PerfCounter(
    'gcb-pr-get-new-review-already-assigned',
    ('number of times get_new_review() rejected a candidate because the '
     'reviewer is already assigned to or has already completed it'))
COUNTER_GET_NEW_REVIEW_ASSIGNMENT_ATTEMPTED = counters.PerfCounter(
    'gcb-pr-get-new-review-assignment-attempted',
    'number of times get_new_review() attempted to assign a candidate')
COUNTER_GET_NEW_REVIEW_CANNOT_UNREMOVE_COMPLETED = counters.PerfCounter(
    'gcb-pr-get-new-review-cannot-unremove-completed',
    ('number of times get_new_review() failed because the reviewer already had '
     'a completed, removed review step'))
COUNTER_GET_NEW_REVIEW_FAILED = counters.PerfCounter(
    'gcb-pr-get-new-review-failed',
    'number of times get_new_review() had a fatal error')
# NOTE(review): the reported name says 'none-assignable' while the constant
# says NOT_ASSIGNABLE — confirm whether this is intentional before renaming
# (the string is the externally reported metric name).
COUNTER_GET_NEW_REVIEW_NOT_ASSIGNABLE = counters.PerfCounter(
    'gcb-pr-get-new-review-none-assignable',
    'number of times get_new_review() failed to find an assignable review')
COUNTER_GET_NEW_REVIEW_REASSIGN_EXISTING = counters.PerfCounter(
    'gcb-pr-get-new-review-reassign-existing',
    ('number of times get_new_review() unremoved and reassigned an existing '
     'review step'))
COUNTER_GET_NEW_REVIEW_START = counters.PerfCounter(
    'gcb-pr-get-new-review-start',
    'number of times get_new_review() has started processing')
COUNTER_GET_NEW_REVIEW_SUCCESS = counters.PerfCounter(
    'gcb-pr-get-new-review-success',
    'number of times get_new_review() found and assigned a new review')
COUNTER_GET_NEW_REVIEW_SUMMARY_CHANGED = counters.PerfCounter(
    'gcb-pr-get-new-review-summary-changed',
    ('number of times get_new_review() rejected a candidate because the review '
     'summary changed during processing'))

# --- get_review_step_keys_by() counters ---
COUNTER_GET_REVIEW_STEP_KEYS_BY_KEYS_RETURNED = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-keys-returned',
    'number of keys get_review_step_keys_by() returned')
COUNTER_GET_REVIEW_STEP_KEYS_BY_FAILED = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-failed',
    'number of times get_review_step_keys_by() had a fatal error')
COUNTER_GET_REVIEW_STEP_KEYS_BY_START = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-start',
    'number of times get_review_step_keys_by() started processing')
COUNTER_GET_REVIEW_STEP_KEYS_BY_SUCCESS = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-success',
    'number of times get_review_step_keys_by() completed successfully')

# --- get_submission_and_review_step_keys() counters ---
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_FAILED = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step-keys-failed',
    'number of times get_submission_and_review_step_keys() had a fatal error')
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_RETURNED = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step-keys-keys-returned',
    'number of keys get_submission_and_review_step_keys() returned')
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_START = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step-keys-start',
    ('number of times get_submission_and_review_step_keys() has begun '
     'processing'))
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUBMISSION_MISS = (
    counters.PerfCounter(
        'gcb-pr-get-submission-and-review-step-keys-submission-miss',
        ('number of times get_submission_and_review_step_keys() failed to find '
         'a submission_key')))
# NOTE(review): this name mixes '-' and '_' separators ('step_keys') unlike
# its siblings — confirm before normalizing, as the string is reported as-is.
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUCCESS = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step_keys-success',
    ('number of times get_submission-and-review-step-keys() completed '
     'successfully'))

# --- start_review_process_for() counters ---
COUNTER_START_REVIEW_PROCESS_FOR_ALREADY_STARTED = counters.PerfCounter(
    'gcb-pr-start-review-process-for-already-started',
    ('number of times start_review_process_for() called when review already '
     'started'))
COUNTER_START_REVIEW_PROCESS_FOR_FAILED = counters.PerfCounter(
    'gcb-pr-start-review-process-for-failed',
    'number of times start_review_process_for() had a fatal error')
COUNTER_START_REVIEW_PROCESS_FOR_START = counters.PerfCounter(
    'gcb-pr-start-review-process-for-start',
    'number of times start_review_process_for() has started processing')
COUNTER_START_REVIEW_PROCESS_FOR_SUCCESS = counters.PerfCounter(
    'gcb-pr-start-review-process-for-success',
    'number of times start_review_process_for() completed successfully')

# --- write_review() counters ---
COUNTER_WRITE_REVIEW_COMPLETED_ASSIGNED_STEP = counters.PerfCounter(
    'gcb-pr-write-review-completed-assigned-step',
    'number of times write_review() transitioned an assigned step to completed')
COUNTER_WRITE_REVIEW_COMPLETED_EXPIRED_STEP = counters.PerfCounter(
    'gcb-pr-write-review-completed-expired-step',
    'number of times write_review() transitioned an expired step to completed')
COUNTER_WRITE_REVIEW_CREATED_NEW_REVIEW = counters.PerfCounter(
    'gcb-pr-write-review-created-new-review',
    'number of times write_review() created a new review')
COUNTER_WRITE_REVIEW_FAILED = counters.PerfCounter(
    'gcb-pr-write-review-failed',
    'number of times write_review() had a fatal error')
COUNTER_WRITE_REVIEW_REVIEW_MISS = counters.PerfCounter(
    'gcb-pr-write-review-review-miss',
    'number of times write_review() found a missing review')
COUNTER_WRITE_REVIEW_START = counters.PerfCounter(
    'gcb-pr-write-review-start',
    'number of times write_review() started processing')
COUNTER_WRITE_REVIEW_STEP_MISS = counters.PerfCounter(
    'gcb-pr-write-review-step-miss',
    'number of times write_review() found a missing review step')
COUNTER_WRITE_REVIEW_SUMMARY_MISS = counters.PerfCounter(
    'gcb-pr-write-review-summary-miss',
    'number of times write_review() found a missing review summary')
COUNTER_WRITE_REVIEW_SUCCESS = counters.PerfCounter(
    'gcb-pr-write-review-success',
    'number of times write_review() completed successfully')
COUNTER_WRITE_REVIEW_UPDATED_EXISTING_REVIEW = counters.PerfCounter(
    'gcb-pr-write-review-updated-existing-review',
    'number of times write_review() updated an existing review')

# Number of entities to fetch when querying for all review steps that meet
# given criteria. Ideally we'd cursor through results rather than setting a
# ceiling, but for now let's allow as many removed results as unremoved.
_REVIEW_STEP_QUERY_LIMIT = 2 * domain.MAX_UNREMOVED_REVIEW_STEPS
class Manager(object):
    """Object that manages the review subsystem."""

    @classmethod
    def add_reviewer(cls, unit_id, submission_key, reviewee_key, reviewer_key):
        """Adds a reviewer for a submission.

        If there is no pre-existing review step, one will be created.

        Attempting to add an existing unremoved step in REVIEW_STATE_ASSIGNED or
        REVIEW_STATE_COMPLETED is an error.

        If there is an existing unremoved review in REVIEW_STATE_EXPIRED, it
        will be put in REVIEW_STATE_ASSIGNED. If there is a removed review in
        REVIEW_STATE_ASSIGNED or REVIEW_STATE_EXPIRED, it will be put in
        REVIEW_STATE_ASSIGNED and unremoved. If it is in REVIEW_STATE_COMPLETED,
        it will be unremoved but its state will not change. In all these cases
        the assigner kind will be set to ASSIGNER_KIND_HUMAN.

        Args:
            unit_id: string. Unique identifier for a unit.
            submission_key: db.Key of models.student_work.Submission. The
                submission being registered.
            reviewee_key: db.Key of models.models.Student. The student who
                authored the submission.
            reviewer_key: db.Key of models.models.Student. The student to add as
                a reviewer.

        Raises:
            domain.TransitionError: if there is a pre-existing review step found
                in domain.REVIEW_STATE_ASSIGNED|COMPLETED.

        Returns:
            db.Key of written review step.
        """
        try:
            COUNTER_ADD_REVIEWER_START.inc()
            key = cls._add_reviewer(
                unit_id, submission_key, reviewee_key, reviewer_key)
            COUNTER_ADD_REVIEWER_SUCCESS.inc()
            return key
        except Exception as e:
            COUNTER_ADD_REVIEWER_FAILED.inc()
            raise e

    @classmethod
    @db.transactional(xg=True)
    def _add_reviewer(cls, unit_id, submission_key, reviewee_key, reviewer_key):
        """Transactional implementation of add_reviewer()."""
        found = peer.ReviewStep.get_by_key_name(
            peer.ReviewStep.key_name(submission_key, reviewer_key))
        if not found:
            return cls._add_new_reviewer(
                unit_id, submission_key, reviewee_key, reviewer_key)
        else:
            return cls._add_reviewer_update_step(found)

    @classmethod
    def _add_new_reviewer(
        cls, unit_id, submission_key, reviewee_key, reviewer_key):
        """Creates a new summary and assigned step for a first-time reviewer."""
        summary = peer.ReviewSummary(
            assigned_count=1, reviewee_key=reviewee_key,
            submission_key=submission_key, unit_id=unit_id)
        # Synthesize summary key to avoid a second synchronous put op.
        summary_key = db.Key.from_path(
            peer.ReviewSummary.kind(),
            peer.ReviewSummary.key_name(submission_key))
        step = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_summary_key=summary_key, reviewee_key=reviewee_key,
            reviewer_key=reviewer_key, state=domain.REVIEW_STATE_ASSIGNED,
            submission_key=submission_key, unit_id=unit_id)
        step_key, written_summary_key = entities.put([step, summary])
        # Sanity-check that the synthesized key matches the key the datastore
        # actually wrote the summary under.
        if summary_key != written_summary_key:
            COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY.inc()
            raise AssertionError(
                'Synthesized invalid review summary key %s' % repr(summary_key))
        COUNTER_ADD_REVIEWER_CREATE_REVIEW_STEP.inc()
        return step_key

    @classmethod
    def _add_reviewer_update_step(cls, step):
        """Re-assigns an existing review step to a human-added reviewer.

        See add_reviewer() for the full state-transition contract.
        """
        should_increment_human = False
        should_increment_reassigned = False
        should_increment_unremoved = False
        summary = entities.get(step.review_summary_key)

        if not summary:
            COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY.inc()
            raise AssertionError(
                'Found invalid review summary key %s' % repr(
                    step.review_summary_key))

        if not step.removed:
            if step.state == domain.REVIEW_STATE_EXPIRED:
                should_increment_reassigned = True
                step.state = domain.REVIEW_STATE_ASSIGNED
                summary.decrement_count(domain.REVIEW_STATE_EXPIRED)
                summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
            elif (step.state == domain.REVIEW_STATE_ASSIGNED or
                  step.state == domain.REVIEW_STATE_COMPLETED):
                # Unremoved and already assigned or done: nothing to reassign.
                COUNTER_ADD_REVIEWER_UNREMOVED_STEP_FAILED.inc()
                raise domain.TransitionError(
                    'Unable to add new reviewer to step %s' % (
                        repr(step.key())),
                    step.state, domain.REVIEW_STATE_ASSIGNED)
        else:
            should_increment_unremoved = True
            step.removed = False
            if step.state != domain.REVIEW_STATE_EXPIRED:
                # COMPLETED stays completed; ASSIGNED stays assigned.
                summary.increment_count(step.state)
            else:
                should_increment_reassigned = True
                step.state = domain.REVIEW_STATE_ASSIGNED
                summary.decrement_count(domain.REVIEW_STATE_EXPIRED)
                summary.increment_count(domain.REVIEW_STATE_ASSIGNED)

        if step.assigner_kind != domain.ASSIGNER_KIND_HUMAN:
            should_increment_human = True
            step.assigner_kind = domain.ASSIGNER_KIND_HUMAN

        step_key = entities.put([step, summary])[0]

        # Only bump counters after the put succeeds.
        if should_increment_human:
            COUNTER_ADD_REVIEWER_SET_ASSIGNER_KIND_HUMAN.inc()
        if should_increment_reassigned:
            COUNTER_ADD_REVIEWER_EXPIRED_STEP_REASSIGNED.inc()
        if should_increment_unremoved:
            COUNTER_ADD_REVIEWER_REMOVED_STEP_UNREMOVED.inc()

        return step_key

    @classmethod
    def delete_reviewer(cls, review_step_key):
        """Deletes the given review step.

        We do not physically delete the review step; we mark it as removed,
        meaning it will be ignored from most queries and the associated review
        summary will have its corresponding state count decremented. Calling
        this method on a removed review step is an error.

        Args:
            review_step_key: db.Key of models.student_work.ReviewStep. The
                review step to delete.

        Raises:
            domain.RemovedError: if called on a review step that has already
                been marked removed.
            KeyError: if there is no review step with the given key, or if the
                step references a review summary that does not exist.

        Returns:
            db.Key of deleted review step.
        """
        try:
            COUNTER_DELETE_REVIEWER_START.inc()
            key = cls._mark_review_step_removed(review_step_key)
            COUNTER_DELETE_REVIEWER_SUCCESS.inc()
            return key
        except Exception as e:
            COUNTER_DELETE_REVIEWER_FAILED.inc()
            raise e

    @classmethod
    @db.transactional(xg=True)
    def _mark_review_step_removed(cls, review_step_key):
        """Transactional implementation of delete_reviewer()."""
        step = entities.get(review_step_key)
        if not step:
            COUNTER_DELETE_REVIEWER_STEP_MISS.inc()
            raise KeyError(
                'No review step found with key %s' % repr(review_step_key))
        if step.removed:
            COUNTER_DELETE_REVIEWER_ALREADY_REMOVED.inc()
            raise domain.RemovedError(
                'Cannot remove step %s' % repr(review_step_key), step.removed)

        summary = entities.get(step.review_summary_key)
        if not summary:
            COUNTER_DELETE_REVIEWER_SUMMARY_MISS.inc()
            raise KeyError(
                'No review summary found with key %s' % repr(
                    step.review_summary_key))

        step.removed = True
        summary.decrement_count(step.state)
        return entities.put([step, summary])[0]

    @classmethod
    def expire_review(cls, review_step_key):
        """Puts a review step in state REVIEW_STATE_EXPIRED.

        Args:
            review_step_key: db.Key of models.student_work.ReviewStep. The
                review step to expire.

        Raises:
            domain.RemovedError: if called on a step that is removed.
            domain.TransitionError: if called on a review step that cannot be
                transitioned to REVIEW_STATE_EXPIRED (that is, it is already in
                REVIEW_STATE_COMPLETED or REVIEW_STATE_EXPIRED).
            KeyError: if there is no review with the given key, or the step
                references a review summary that does not exist.

        Returns:
            db.Key of the expired review step.
        """
        try:
            COUNTER_EXPIRE_REVIEW_START.inc()
            key = cls._transition_state_to_expired(review_step_key)
            COUNTER_EXPIRE_REVIEW_SUCCESS.inc()
            return key
        except Exception as e:
            COUNTER_EXPIRE_REVIEW_FAILED.inc()
            raise e

    @classmethod
    @db.transactional(xg=True)
    def _transition_state_to_expired(cls, review_step_key):
        """Transactional implementation of expire_review()."""
        step = entities.get(review_step_key)
        if not step:
            COUNTER_EXPIRE_REVIEW_STEP_MISS.inc()
            raise KeyError(
                'No review step found with key %s' % repr(review_step_key))

        if step.removed:
            COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION.inc()
            raise domain.RemovedError(
                'Cannot transition step %s' % repr(review_step_key),
                step.removed)

        if step.state in (
                domain.REVIEW_STATE_COMPLETED, domain.REVIEW_STATE_EXPIRED):
            COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION.inc()
            raise domain.TransitionError(
                'Cannot transition step %s' % repr(review_step_key),
                step.state, domain.REVIEW_STATE_EXPIRED)

        summary = entities.get(step.review_summary_key)
        if not summary:
            COUNTER_EXPIRE_REVIEW_SUMMARY_MISS.inc()
            raise KeyError(
                'No review summary found with key %s' % repr(
                    step.review_summary_key))

        summary.decrement_count(step.state)
        step.state = domain.REVIEW_STATE_EXPIRED
        summary.increment_count(step.state)
        return entities.put([step, summary])[0]

    @classmethod
    def expire_old_reviews_for_unit(cls, review_window_mins, unit_id):
        """Finds and expires all old review steps for a single unit.

        Args:
            review_window_mins: int. Number of minutes before we expire reviews
                assigned by domain.ASSIGNER_KIND_AUTO.
            unit_id: string. Id of the unit to restrict the query to.

        Returns:
            2-tuple of list of db.Key of peer.ReviewStep. 0th element is keys
            that were written successfully; 1st element is keys that we failed
            to update.
        """
        query = cls.get_expiry_query(review_window_mins, unit_id)
        mapper = utils.QueryMapper(
            query, counter=COUNTER_EXPIRY_QUERY_KEYS_RETURNED, report_every=100)
        expired_keys = []
        exception_keys = []

        def map_fn(review_step_key, expired_keys, exception_keys):
            try:
                expired_keys.append(cls.expire_review(review_step_key))
            except:  # All errors are the same. pylint: disable-msg=bare-except
                # Skip. Either the entity was updated between the query and
                # the update, meaning we don't need to expire it; or we ran into
                # a transient datastore error, meaning we'll expire it next
                # time.
                COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SKIP.inc()
                exception_keys.append(review_step_key)

        COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_START.inc()
        mapper.run(map_fn, expired_keys, exception_keys)
        COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_EXPIRE.inc(
            increment=len(expired_keys))
        COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SUCCESS.inc()
        return expired_keys, exception_keys

    @classmethod
    def get_assignment_candidates_query(cls, unit_id):
        """Gets query that returns candidates for new review assignment.

        New assignment candidates are scoped to a unit. We prefer first items
        that have the smallest number of completed reviews, then those that have
        the smallest number of assigned reviews, then those that were created
        most recently.

        The results of the query are user-independent.

        Args:
            unit_id: string. Id of the unit to restrict the query to.

        Returns:
            db.Query that will return [peer.ReviewSummary].
        """
        return peer.ReviewSummary.all(
        ).filter(
            peer.ReviewSummary.unit_id.name, unit_id
        ).order(
            peer.ReviewSummary.completed_count.name
        ).order(
            peer.ReviewSummary.assigned_count.name
        ).order(
            peer.ReviewSummary.create_date.name)

    @classmethod
    def get_expiry_query(
        cls, review_window_mins, unit_id, now_fn=datetime.datetime.now):
        """Gets a db.Query that returns review steps to mark expired.

        Results are items that were assigned by machine, are currently assigned,
        are not removed, were last updated more than review_window_mins ago,
        and are ordered by change date ascending.

        Args:
            review_window_mins: int. Number of minutes before we expire reviews
                assigned by domain.ASSIGNER_KIND_AUTO.
            unit_id: string. Id of the unit to restrict the query to.
            now_fn: function that returns the current UTC datetime. Injectable
                for tests only.

        Returns:
            db.Query.
        """
        get_before = now_fn() - datetime.timedelta(
            minutes=review_window_mins)
        return peer.ReviewStep.all(keys_only=True).filter(
            peer.ReviewStep.unit_id.name, unit_id,
        ).filter(
            peer.ReviewStep.assigner_kind.name, domain.ASSIGNER_KIND_AUTO
        ).filter(
            peer.ReviewStep.state.name, domain.REVIEW_STATE_ASSIGNED
        ).filter(
            peer.ReviewStep.removed.name, False
        ).filter(
            '%s <=' % peer.ReviewStep.change_date.name, get_before
        ).order(
            peer.ReviewStep.change_date.name)

    @classmethod
    def get_new_review(
        cls, unit_id, reviewer_key, candidate_count=20, max_retries=5):
        """Attempts to assign a review to a reviewer.

        We prioritize possible reviews by querying review summary objects,
        finding those that best satisfy cls.get_assignment_candidates_query.

        To minimize write contention, we nontransactionally grab candidate_count
        candidates from the head of the query results. Post-query we filter out
        any candidates that are for the prospective reviewer's own work.

        Then we randomly select one. We transactionally attempt to assign that
        review. If assignment fails because the candidate is updated between
        selection and assignment or the assignment is for a submission the
        reviewer already has or has already done, we remove the candidate from
        the list. We then retry assignment up to max_retries times. If we run
        out of retries or candidates, we raise domain.NotAssignableError.

        This is a naive implementation because it scales only to relatively low
        new review assignments per second and because it can raise
        domain.NotAssignableError when there are in fact assignable reviews.

        Args:
            unit_id: string. The unit to assign work from.
            reviewer_key: db.Key of models.models.Student. The reviewer to
                attempt to assign the review to.
            candidate_count: int. The number of candidate keys to fetch and
                attempt to assign from. Increasing this decreases the chance
                that we will have write contention on reviews, but it costs 1 +
                num_results datastore reads and can get expensive for large
                courses.
            max_retries: int. Number of times to retry failed assignment
                attempts. Careful not to set this too high as a) datastore
                throughput is slow and latency from this method is user-facing,
                and b) if you encounter a few failures it is likely that all
                candidates are now failures, so each retry past the first few is
                of questionable value.

        Raises:
            domain.NotAssignableError: if no review can currently be assigned
                for the given unit_id.

        Returns:
            db.Key of peer.ReviewStep. The newly created assigned review step.
        """
        try:
            COUNTER_GET_NEW_REVIEW_START.inc()
            # Filter out candidates that are for submissions by the reviewer.
            raw_candidates = cls.get_assignment_candidates_query(unit_id).fetch(
                candidate_count)
            COUNTER_ASSIGNMENT_CANDIDATES_QUERY_RESULTS_RETURNED.inc(
                increment=len(raw_candidates))
            candidates = [
                candidate for candidate in raw_candidates
                if candidate.reviewee_key != reviewer_key]

            retries = 0
            while True:
                if not candidates or retries >= max_retries:
                    COUNTER_GET_NEW_REVIEW_NOT_ASSIGNABLE.inc()
                    raise domain.NotAssignableError(
                        'No reviews assignable for unit %s and reviewer %s' % (
                            unit_id, repr(reviewer_key)))
                candidate = cls._choose_assignment_candidate(candidates)
                candidates.remove(candidate)
                assigned_key = cls._attempt_review_assignment(
                    candidate.key(), reviewer_key, candidate.change_date)

                if not assigned_key:
                    retries += 1
                else:
                    COUNTER_GET_NEW_REVIEW_SUCCESS.inc()
                    return assigned_key
        # NOTE: was `except Exception, e:` (Python-2-only syntax); changed to
        # the `as` form used by every other method in this class.
        except Exception as e:
            COUNTER_GET_NEW_REVIEW_FAILED.inc()
            raise e

    @classmethod
    def _choose_assignment_candidate(cls, candidates):
        """Seam that allows different choice functions in tests."""
        return random.choice(candidates)

    @classmethod
    @db.transactional(xg=True)
    def _attempt_review_assignment(
        cls, review_summary_key, reviewer_key, last_change_date):
        """Tries once to assign the summary's submission to the reviewer.

        Returns the written step key on success, or None when the candidate
        should be skipped (summary changed concurrently, step already assigned,
        or step completed-and-deleted).
        """
        COUNTER_GET_NEW_REVIEW_ASSIGNMENT_ATTEMPTED.inc()
        summary = entities.get(review_summary_key)
        if not summary:
            raise KeyError('No review summary found with key %s' % repr(
                review_summary_key))
        if summary.change_date != last_change_date:
            # The summary has changed since we queried it. We cannot know for
            # sure what the edit was, but let's skip to the next one because it
            # was probably a review assignment.
            COUNTER_GET_NEW_REVIEW_SUMMARY_CHANGED.inc()
            return

        step = peer.ReviewStep.get_by_key_name(
            peer.ReviewStep.key_name(summary.submission_key, reviewer_key))
        if not step:
            step = peer.ReviewStep(
                assigner_kind=domain.ASSIGNER_KIND_AUTO,
                review_summary_key=summary.key(),
                reviewee_key=summary.reviewee_key, reviewer_key=reviewer_key,
                state=domain.REVIEW_STATE_ASSIGNED,
                submission_key=summary.submission_key, unit_id=summary.unit_id)
        else:
            if step.state == domain.REVIEW_STATE_COMPLETED:
                # Reviewer has previously done this review and the review
                # has been deleted. Skip to the next one.
                COUNTER_GET_NEW_REVIEW_CANNOT_UNREMOVE_COMPLETED.inc()
                return

            if step.removed:
                # We can reassign the existing review step.
                COUNTER_GET_NEW_REVIEW_REASSIGN_EXISTING.inc()
                step.removed = False
                step.assigner_kind = domain.ASSIGNER_KIND_AUTO
                step.state = domain.REVIEW_STATE_ASSIGNED
            else:
                # Reviewee has already reviewed or is already assigned to review
                # this submission, so we cannot reassign the step.
                COUNTER_GET_NEW_REVIEW_ALREADY_ASSIGNED.inc()
                return

        summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
        return entities.put([step, summary])[0]

    @classmethod
    def get_review_step_keys_by(cls, unit_id, reviewer_key):
        """Gets the keys of all review steps in a unit for a reviewer.

        Note that keys for review steps marked removed are included in the
        result set.

        Args:
            unit_id: string. Id of the unit to restrict the query to.
            reviewer_key: db.Key of models.models.Student. The author of the
                requested reviews.

        Returns:
            [db.Key of peer.ReviewStep].
        """
        COUNTER_GET_REVIEW_STEP_KEYS_BY_START.inc()

        try:
            query = peer.ReviewStep.all(keys_only=True).filter(
                peer.ReviewStep.reviewer_key.name, reviewer_key
            ).filter(
                peer.ReviewStep.unit_id.name, unit_id
            ).order(
                peer.ReviewStep.create_date.name,
            )
            # fetch() already returns a list; no need to re-wrap it.
            keys = query.fetch(_REVIEW_STEP_QUERY_LIMIT)
        except Exception as e:
            COUNTER_GET_REVIEW_STEP_KEYS_BY_FAILED.inc()
            raise e

        COUNTER_GET_REVIEW_STEP_KEYS_BY_SUCCESS.inc()
        COUNTER_GET_REVIEW_STEP_KEYS_BY_KEYS_RETURNED.inc(increment=len(keys))
        return keys

    @classmethod
    def get_review_steps_by_keys(cls, keys):
        """Gets review steps by their keys.

        Args:
            keys: [db.Key of peer.ReviewStep]. Keys to fetch.

        Returns:
            [domain.ReviewStep or None]. Missed keys return None in place in
            result list.
        """
        return [
            cls._make_domain_review_step(model) for model in entities.get(keys)]

    @classmethod
    def _make_domain_review_step(cls, model):
        """Converts a peer.ReviewStep entity to a domain object (None-safe)."""
        if model is None:
            return

        return domain.ReviewStep(
            assigner_kind=model.assigner_kind, change_date=model.change_date,
            create_date=model.create_date, key=model.key(),
            removed=model.removed, review_key=model.review_key,
            review_summary_key=model.review_summary_key,
            reviewee_key=model.reviewee_key, reviewer_key=model.reviewer_key,
            state=model.state, submission_key=model.submission_key,
            unit_id=model.unit_id
        )

    @classmethod
    def get_reviews_by_keys(cls, keys):
        """Gets reviews by their keys.

        Args:
            keys: [db.Key of review.Review]. Keys to fetch.

        Returns:
            [domain.Review or None]. Missed keys return None in place in result
            list.
        """
        return [cls._make_domain_review(model) for model in entities.get(keys)]

    @classmethod
    def _make_domain_review(cls, model):
        """Converts a review entity to a domain.Review (None-safe)."""
        if model is None:
            return

        return domain.Review(contents=model.contents, key=model.key())

    @classmethod
    def get_submission_and_review_step_keys(cls, unit_id, reviewee_key):
        """Gets the submission key/review step keys for the given pair.

        Note that keys for review steps marked removed are included in the
        result set.

        Args:
            unit_id: string. Id of the unit to restrict the query to.
            reviewee_key: db.Key of models.models.Student. The student who
                authored the submission.

        Raises:
            domain.ConstraintError: if multiple review summary keys were found
                for the given unit_id, reviewee_key pair.
            KeyError: if there is no review summary for the given unit_id,
                reviewee pair.

        Returns:
            (db.Key of Submission, [db.Key of peer.ReviewStep]) if submission
            found for given unit_id, reviewee_key pair; None otherwise.
        """
        COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_START.inc()

        try:
            submission_key = db.Key.from_path(
                student_work.Submission.kind(),
                student_work.Submission.key_name(unit_id, reviewee_key))
            submission = entities.get(submission_key)
            if not submission:
                COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUBMISSION_MISS.inc(
                )
                return

            step_keys_query = peer.ReviewStep.all(
                keys_only=True
            ).filter(
                peer.ReviewStep.submission_key.name, submission_key
            )
            step_keys = step_keys_query.fetch(_REVIEW_STEP_QUERY_LIMIT)
            results = (submission_key, step_keys)
        except Exception as e:
            COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_FAILED.inc()
            raise e

        COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUCCESS.inc()
        COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_RETURNED.inc(
            increment=len(step_keys))
        return results

    @classmethod
    def get_submissions_by_keys(cls, keys):
        """Gets submissions by their keys.

        Args:
            keys: [db.Key of review.Submission]. Keys to fetch.

        Returns:
            [domain.Submission or None]. Missed keys return None in place in
            result list.
        """
        return [
            cls._make_domain_submission(model) for model in entities.get(keys)]

    @classmethod
    def _make_domain_submission(cls, model):
        """Converts a submission entity to a domain.Submission (None-safe)."""
        if model is None:
            return

        return domain.Submission(contents=model.contents, key=model.key())

    @classmethod
    def start_review_process_for(cls, unit_id, submission_key, reviewee_key):
        """Registers a new submission with the review subsystem.

        Once registered, reviews can be assigned against a given submission,
        either by humans or by machine. No reviews are assigned during
        registration -- this method merely makes them assignable.

        Args:
            unit_id: string. Unique identifier for a unit.
            submission_key: db.Key of models.student_work.Submission. The
                submission being registered.
            reviewee_key: db.Key of models.models.Student. The student who
                authored the submission.

        Raises:
            db.BadValueError: if passed args are invalid.
            domain.ReviewProcessAlreadyStartedError: if the review process has
                already been started for this student's submission.

        Returns:
            db.Key of created ReviewSummary.
        """
        try:
            COUNTER_START_REVIEW_PROCESS_FOR_START.inc()
            key = cls._create_review_summary(
                reviewee_key, submission_key, unit_id)
            COUNTER_START_REVIEW_PROCESS_FOR_SUCCESS.inc()
            return key
        except Exception as e:
            COUNTER_START_REVIEW_PROCESS_FOR_FAILED.inc()
            raise e

    @classmethod
    @db.transactional(xg=True)
    def _create_review_summary(cls, reviewee_key, submission_key, unit_id):
        """Transactional implementation of start_review_process_for()."""
        collision = peer.ReviewSummary.get_by_key_name(
            peer.ReviewSummary.key_name(submission_key))

        if collision:
            COUNTER_START_REVIEW_PROCESS_FOR_ALREADY_STARTED.inc()
            raise domain.ReviewProcessAlreadyStartedError()

        return peer.ReviewSummary(
            reviewee_key=reviewee_key, submission_key=submission_key,
            unit_id=unit_id,
        ).put()

    @classmethod
    def write_review(
        cls, review_step_key, review_payload, mark_completed=True):
        """Writes a review, updating associated internal state.

        If the passed step already has a review, that review will be updated. If
        it does not have a review, a new one will be created with the passed
        payload.

        Args:
            review_step_key: db.Key of peer.ReviewStep. The key of the review
                step to update.
            review_payload: string. New contents of the review.
            mark_completed: boolean. If True, set the state of the review to
                domain.REVIEW_STATE_COMPLETED. If False, leave the state as it
                was.

        Raises:
            domain.ConstraintError: if no review found for the review step.
            domain.RemovedError: if the step for the review is removed.
            domain.TransitionError: if mark_completed was True but the step was
                already in domain.REVIEW_STATE_COMPLETED.
            KeyError: if no review step was found with review_step_key.

        Returns:
            db.Key of peer.ReviewStep: key of the written review step.
        """
        COUNTER_WRITE_REVIEW_START.inc()
        try:
            step_key = cls._update_review_contents_and_change_state(
                review_step_key, review_payload, mark_completed)
        except Exception as e:
            COUNTER_WRITE_REVIEW_FAILED.inc()
            raise e
        COUNTER_WRITE_REVIEW_SUCCESS.inc()
        return step_key

    @classmethod
    @db.transactional(xg=True)
    def _update_review_contents_and_change_state(
        cls, review_step_key, review_payload, mark_completed):
        """Transactional implementation of write_review()."""
        should_increment_created_new_review = False
        should_increment_updated_existing_review = False
        should_increment_assigned_to_completed = False
        should_increment_expired_to_completed = False

        step = entities.get(review_step_key)
        if not step:
            COUNTER_WRITE_REVIEW_STEP_MISS.inc()
            raise KeyError(
                'No review step found with key %s' % repr(review_step_key))
        elif step.removed:
            raise domain.RemovedError(
                'Unable to process step %s' % repr(step.key()), step.removed)
        elif mark_completed and step.state == domain.REVIEW_STATE_COMPLETED:
            raise domain.TransitionError(
                'Unable to transition step %s' % repr(step.key()),
                step.state, domain.REVIEW_STATE_COMPLETED)

        if step.review_key:
            review_to_update = entities.get(step.review_key)
            if review_to_update:
                should_increment_updated_existing_review = True
        else:
            # First write: create the review and synthesize its key on the
            # step so both can be written in one put below.
            review_to_update = student_work.Review(
                contents=review_payload, reviewer_key=step.reviewer_key,
                unit_id=step.unit_id)
            step.review_key = db.Key.from_path(
                student_work.Review.kind(),
                student_work.Review.key_name(step.unit_id, step.reviewer_key))
            should_increment_created_new_review = True

        if not review_to_update:
            COUNTER_WRITE_REVIEW_REVIEW_MISS.inc()
            raise domain.ConstraintError(
                'No review found with key %s' % repr(step.review_key))

        summary = entities.get(step.review_summary_key)
        if not summary:
            COUNTER_WRITE_REVIEW_SUMMARY_MISS.inc()
            raise domain.ConstraintError(
                'No review summary found with key %s' % repr(
                    step.review_summary_key))

        review_to_update.contents = review_payload
        updated_step_key = None
        if not mark_completed:
            _, updated_step_key = entities.put([review_to_update, step])
        else:
            if step.state == domain.REVIEW_STATE_ASSIGNED:
                should_increment_assigned_to_completed = True
            elif step.state == domain.REVIEW_STATE_EXPIRED:
                should_increment_expired_to_completed = True

            summary.decrement_count(step.state)
            step.state = domain.REVIEW_STATE_COMPLETED
            summary.increment_count(step.state)
            _, updated_step_key, _ = entities.put(
                [review_to_update, step, summary])

        # Only bump counters after the put succeeds.
        if should_increment_created_new_review:
            COUNTER_WRITE_REVIEW_CREATED_NEW_REVIEW.inc()
        elif should_increment_updated_existing_review:
            COUNTER_WRITE_REVIEW_UPDATED_EXISTING_REVIEW.inc()

        if should_increment_assigned_to_completed:
            COUNTER_WRITE_REVIEW_COMPLETED_ASSIGNED_STEP.inc()
        elif should_increment_expired_to_completed:
            COUNTER_WRITE_REVIEW_COMPLETED_EXPIRED_STEP.inc()

        return updated_step_key
custom_module = None


def register_module():
    """Registers this module in the registry."""
    # Imports deliberately deferred to registration time (see pylint disables).
    import modules.dashboard  # pylint: disable-msg=g-import-not-at-top
    from modules.review import stats  # pylint: disable-msg=g-import-not-at-top
    from modules.review import cron  # pylint: disable-msg=g-import-not-at-top

    # Custom dashboard analytics section.
    modules.dashboard.dashboard.DashboardRegistry.add_custom_analytics_section(
        stats.PeerReviewStatsHandler)

    # Install this implementation as the peer review matcher.
    models.review.ReviewsProcessor.set_peer_matcher(Manager)

    # Cron handler that expires stale assigned reviews.
    handlers = [(
        '/cron/expire_old_assigned_reviews',
        cron.ExpireOldAssignedReviewsHandler)]

    global custom_module
    custom_module = custom_modules.Module(
        'Peer Review Engine',
        'A set of classes for managing peer review process.',
        handlers, [])
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects and constants for use by internal and external clients."""
__author__ = [
'johncox@google.com (John Cox)',
]
# Identifier for reviews that have been computer-assigned.
ASSIGNER_KIND_AUTO = 'AUTO'
# Identifier for reviews that have been assigned by a human.
ASSIGNER_KIND_HUMAN = 'HUMAN'
# All valid assigner kinds.
ASSIGNER_KINDS = (
    ASSIGNER_KIND_AUTO,
    ASSIGNER_KIND_HUMAN,
)

# Maximum number of ReviewSteps with removed = False, in any REVIEW_STATE, that
# can exist in the backend at a given time.
MAX_UNREMOVED_REVIEW_STEPS = 100

# State of a review that is currently assigned, either by a human or by machine.
REVIEW_STATE_ASSIGNED = 'ASSIGNED'
# State of a review that is complete and may be shown to the reviewee, provided
# the reviewee is themself in a state to see their reviews.
REVIEW_STATE_COMPLETED = 'COMPLETED'
# State of a review that used to be assigned but the assignment has been
# expired. Only machine-assigned reviews can be expired.
REVIEW_STATE_EXPIRED = 'EXPIRED'
# All valid review states.
REVIEW_STATES = (
    REVIEW_STATE_ASSIGNED,
    REVIEW_STATE_COMPLETED,
    REVIEW_STATE_EXPIRED,
)
class Error(Exception):
    """Base error class; all errors raised by this module derive from it."""
class ConstraintError(Error):
    """Raised when data is found indicating a constraint is violated."""
class NotAssignableError(Error):
    """Raised when review assignment is requested but cannot be satisfied."""
class RemovedError(Error):
    """Raised when an op cannot be performed on a step because it is removed."""

    def __init__(self, message, value):
        """Constructs a new RemovedError.

        Args:
            message: string. Exception message.
            value: value of the offending step's removed flag.
        """
        super(RemovedError, self).__init__(message)
        # Stored explicitly: __str__ reads self.message, and the implicit
        # BaseException.message attribute is deprecated (PEP 352) and gone in
        # Python 3. Setting it ourselves keeps __str__ working everywhere.
        self.message = message
        self.value = value

    def __str__(self):
        return '%s: removed is %s' % (self.message, self.value)
class ReviewProcessAlreadyStartedError(Error):
    """Raised when someone attempts to start a review process in progress."""
class TransitionError(Error):
    """Raised when an invalid state transition is attempted."""

    def __init__(self, message, before, after):
        """Constructs a new TransitionError.

        Args:
            message: string. Exception message.
            before: string in peer.ReviewStates (though this is unenforced).
                State we attempted to transition from.
            after: string in peer.ReviewStates (though this is unenforced).
                State we attempted to transition to.
        """
        super(TransitionError, self).__init__(message)
        # Stored explicitly: __str__ reads self.message, and the implicit
        # BaseException.message attribute is deprecated (PEP 352) and gone in
        # Python 3. Setting it ourselves keeps __str__ working everywhere.
        self.message = message
        self.after = after
        self.before = before

    def __str__(self):
        return '%s: attempted to transition from %s to %s' % (
            self.message, self.before, self.after)
class Review(object):
    """Domain object for a single review."""
    # NOTE(review): original docstring said "student work submission" —
    # evidently copy-pasted from Submission below; this class wraps a review.

    def __init__(self, contents=None, key=None):
        self._contents = contents  # contents of the review
        self._key = key  # db.Key of the backing entity, if any

    @property
    def contents(self):
        return self._contents

    @property
    def key(self):
        return self._key
class ReviewStep(object):
    """Domain object for the status of a single review at a point in time.

    Immutable snapshot of a peer.ReviewStep entity: all values are captured at
    construction time and exposed through read-only properties.
    """

    def __init__(
        self, assigner_kind=None, change_date=None, create_date=None, key=None,
        removed=None, review_key=None, review_summary_key=None,
        reviewee_key=None, reviewer_key=None, state=None, submission_key=None,
        unit_id=None):
        self._assigner_kind = assigner_kind  # ASSIGNER_KIND_AUTO|HUMAN
        self._change_date = change_date  # last modification timestamp
        self._create_date = create_date  # creation timestamp
        self._key = key  # db.Key of the backing entity
        self._removed = removed  # True if the step was soft-deleted
        self._review_key = review_key  # db.Key of the written review, if any
        self._review_summary_key = review_summary_key  # key of the aggregate
        self._reviewee_key = reviewee_key  # student who authored the work
        self._reviewer_key = reviewer_key  # student performing the review
        self._state = state  # one of REVIEW_STATES
        self._submission_key = submission_key  # key of the reviewed submission
        self._unit_id = unit_id  # unit the submission belongs to

    @property
    def assigner_kind(self):
        return self._assigner_kind

    @property
    def change_date(self):
        return self._change_date

    @property
    def create_date(self):
        return self._create_date

    @property
    def is_assigned(self):
        """Predicate for whether the step is in REVIEW_STATE_ASSIGNED."""
        return self.state == REVIEW_STATE_ASSIGNED

    @property
    def is_completed(self):
        """Predicate for whether the step is in REVIEW_STATE_COMPLETED."""
        return self.state == REVIEW_STATE_COMPLETED

    @property
    def is_expired(self):
        """Predicate for whether the step is in REVIEW_STATE_EXPIRED."""
        return self.state == REVIEW_STATE_EXPIRED

    @property
    def key(self):
        return self._key

    @property
    def removed(self):
        return self._removed

    @property
    def review_key(self):
        return self._review_key

    @property
    def review_summary_key(self):
        return self._review_summary_key

    @property
    def reviewee_key(self):
        return self._reviewee_key

    @property
    def reviewer_key(self):
        return self._reviewer_key

    @property
    def state(self):
        return self._state

    @property
    def submission_key(self):
        return self._submission_key

    @property
    def unit_id(self):
        return self._unit_id
class ReviewSummary(object):
    """Domain object for review state aggregate entities.

    Immutable snapshot of the per-submission aggregate counts; all values are
    captured at construction time and exposed through read-only properties.
    """

    def __init__(
        self, assigned_count=None, completed_count=None, change_date=None,
        create_date=None, key=None, reviewee_key=None, submission_key=None,
        unit_id=None):
        self._assigned_count = assigned_count  # steps in REVIEW_STATE_ASSIGNED
        self._completed_count = completed_count  # steps in
                                                 # REVIEW_STATE_COMPLETED
        self._change_date = change_date  # last modification timestamp
        self._create_date = create_date  # creation timestamp
        self._key = key  # db.Key of the backing entity
        self._reviewee_key = reviewee_key  # student who authored the work
        self._submission_key = submission_key  # key of the summarized
                                               # submission
        self._unit_id = unit_id  # unit the submission belongs to

    @property
    def assigned_count(self):
        return self._assigned_count

    @property
    def completed_count(self):
        return self._completed_count

    @property
    def change_date(self):
        return self._change_date

    @property
    def create_date(self):
        return self._create_date

    @property
    def key(self):
        return self._key

    @property
    def reviewee_key(self):
        return self._reviewee_key

    @property
    def submission_key(self):
        return self._submission_key

    @property
    def unit_id(self):
        return self._unit_id
class Submission(object):
    """Domain object for a student work submission.

    Immutable pairing of a submission's contents with its key.
    """

    def __init__(self, contents=None, key=None):
        self._key = key
        self._contents = contents

    @property
    def contents(self):
        """The submitted work itself."""
        return self._contents

    @property
    def key(self):
        """Key identifying this submission."""
        return self._key
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal implementation details of the peer review subsystem.
Public classes, including domain objects, can be found in domain.py and
models/student_work.py. Entities declared here should not be used by external
clients.
"""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import counters
from models import models
from models import student_work
from modules.review import domain
from google.appengine.ext import db
# Performance counter bumped each time ReviewSummary.increment_count()
# refuses an increment because the per-submission step total would exceed
# domain.MAX_UNREMOVED_REVIEW_STEPS.
COUNTER_INCREMENT_COUNT_COUNT_AGGREGATE_EXCEEDED_MAX = counters.PerfCounter(
    'gcb-pr-increment-count-count-aggregate-exceeded-max',
    ('number of times increment_count() failed because the new aggregate of '
     'the counts would have exceeded domain.MAX_UNREMOVED_REVIEW_STEPS'))
class ReviewSummary(student_work.BaseEntity):
    """Object that tracks the aggregate state of reviews for a submission.

    One entity exists per submission (enforced via key_name); it caches the
    number of review steps in each state so assignment logic does not have to
    run counting queries.
    """

    # UTC last modification timestamp.
    change_date = db.DateTimeProperty(auto_now=True, required=True)
    # UTC create date.
    create_date = db.DateTimeProperty(auto_now_add=True, required=True)

    # Strong counters. Callers should never manipulate these directly. Instead,
    # use decrement|increment_count.
    # Count of ReviewStep entities for this submission currently in state
    # STATE_ASSIGNED.
    assigned_count = db.IntegerProperty(default=0, required=True)
    # Count of ReviewStep entities for this submission currently in state
    # STATE_COMPLETED.
    completed_count = db.IntegerProperty(default=0, required=True)
    # Count of ReviewStep entities for this submission currently in state
    # STATE_EXPIRED.
    expired_count = db.IntegerProperty(default=0, required=True)

    # Key of the student who wrote the submission being reviewed.
    reviewee_key = student_work.KeyProperty(
        kind=models.Student.kind(), required=True)
    # Key of the submission being reviewed.
    submission_key = student_work.KeyProperty(
        kind=student_work.Submission.kind(), required=True)
    # Identifier of the unit this review is a part of.
    unit_id = db.StringProperty(required=True)

    def __init__(self, *args, **kwargs):
        """Constructs a new ReviewSummary.

        The key_name is derived from submission_key, which guarantees at most
        one summary entity per submission.

        Raises:
            AssertionError: if key_name is passed, or submission_key missing.
        """
        assert not kwargs.get('key_name'), (
            'Setting key_name manually not supported')
        submission_key = kwargs.get('submission_key')
        assert submission_key, 'Missing required submission_key property'
        kwargs['key_name'] = self.key_name(submission_key)
        super(ReviewSummary, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, submission_key):
        """Creates a key_name string for datastore operations."""
        return '(review_summary:%s)' % submission_key.id_or_name()

    def _check_count(self):
        """Raises db.BadValueError if the counters are already at the max."""
        count_sum = (
            self.assigned_count + self.completed_count + self.expired_count)
        if count_sum >= domain.MAX_UNREMOVED_REVIEW_STEPS:
            COUNTER_INCREMENT_COUNT_COUNT_AGGREGATE_EXCEEDED_MAX.inc()
            raise db.BadValueError(
                'Unable to increment %s to %s; max is %s' % (
                    self.kind(), count_sum, domain.MAX_UNREMOVED_REVIEW_STEPS))

    def decrement_count(self, state):
        """Decrements the count for the given state enum; does not save.

        Args:
            state: string. State indicating counter to decrement; must be one of
                domain.REVIEW_STATES.

        Raises:
            ValueError: if state not in domain.REVIEW_STATES.
        """
        # NOTE(review): counts are not guarded against going negative here;
        # callers are trusted to pair decrements with prior increments.
        if state == domain.REVIEW_STATE_ASSIGNED:
            self.assigned_count -= 1
        elif state == domain.REVIEW_STATE_COMPLETED:
            self.completed_count -= 1
        elif state == domain.REVIEW_STATE_EXPIRED:
            self.expired_count -= 1
        else:
            raise ValueError('%s not in %s' % (state, domain.REVIEW_STATES))

    def increment_count(self, state):
        """Increments the count for the given state enum; does not save.

        Args:
            state: string. State indicating counter to increment; must be one of
                domain.REVIEW_STATES.

        Raises:
            db.BadValueError: if incrementing the counter would cause the sum of
                all *_counts to exceed domain.MAX_UNREMOVED_REVIEW_STEPS.
            ValueError: if state not in domain.REVIEW_STATES
        """
        if state not in domain.REVIEW_STATES:
            raise ValueError('%s not in %s' % (state, domain.REVIEW_STATES))
        # Validate the aggregate before touching any counter.
        self._check_count()
        if state == domain.REVIEW_STATE_ASSIGNED:
            self.assigned_count += 1
        elif state == domain.REVIEW_STATE_COMPLETED:
            self.completed_count += 1
        elif state == domain.REVIEW_STATE_EXPIRED:
            self.expired_count += 1
class ReviewStep(student_work.BaseEntity):
    """Object that represents a single state of a review."""

    # Audit trail information.
    # Identifier for the kind of thing that did the assignment. Used to
    # distinguish between assignments done by humans and those done by the
    # review subsystem.
    assigner_kind = db.StringProperty(
        choices=domain.ASSIGNER_KINDS, required=True)
    # UTC last modification timestamp.
    change_date = db.DateTimeProperty(auto_now=True, required=True)
    # UTC create date.
    create_date = db.DateTimeProperty(auto_now_add=True, required=True)

    # Repeated data to allow filtering/ordering in queries.
    # Key of the submission being reviewed.
    submission_key = student_work.KeyProperty(
        kind=student_work.Submission.kind(), required=True)
    # Unit this review step is part of.
    unit_id = db.StringProperty(required=True)

    # State information.
    # State of this review step.
    state = db.StringProperty(choices=domain.REVIEW_STATES, required=True)
    # Whether or not the review has been removed. By default removed entities
    # are ignored for most queries.
    removed = db.BooleanProperty(default=False)

    # Pointers that tie the work and people involved together.
    # Key of the Review associated with this step.
    review_key = student_work.KeyProperty(kind=student_work.Review.kind())
    # Key of the associated ReviewSummary.
    review_summary_key = student_work.KeyProperty(kind=ReviewSummary.kind())
    # Key of the Student being reviewed.
    reviewee_key = student_work.KeyProperty(kind=models.Student.kind())
    # Key of the Student doing this review.
    reviewer_key = student_work.KeyProperty(kind=models.Student.kind())

    def __init__(self, *args, **kwargs):
        """Constructs a new ReviewStep.

        Raises:
            AssertionError: if key_name is passed, or if reviewer_key or
                submission_key is missing.
        """
        assert not kwargs.get('key_name'), (
            'Setting key_name manually not supported')
        reviewer_key = kwargs.get('reviewer_key')
        submission_key = kwargs.get('submission_key')
        assert reviewer_key, 'Missing required reviewer_key property'
        assert submission_key, 'Missing required submission_key property'
        # Deriving key_name from (submission, reviewer) means the datastore
        # holds at most one step per reviewer per submission.
        kwargs['key_name'] = self.key_name(submission_key, reviewer_key)
        super(ReviewStep, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, submission_key, reviewer_key):
        """Creates a key_name string for datastore operations."""
        return '(review_step:%s:%s)' % (
            submission_key.id_or_name(), reviewer_key.id_or_name())
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting updates to basic course settings."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from models import vfs
from modules.oeditor import oeditor
import yaml
import messages
from google.appengine.api import users
def is_editable_fs(app_context):
    """Tells whether the course file system is the editable, datastore one."""
    # Exact class comparison, as in the original: subclasses do not qualify.
    fs_class = app_context.fs.impl.__class__
    return fs_class == vfs.DatastoreBackedFileSystem
class CourseSettingsRights(object):
    """Manages view/edit rights for files."""

    @classmethod
    def can_edit(cls, handler):
        """Only course admins may edit settings."""
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_view(cls, handler):
        """Viewing is gated on the same right as editing."""
        return cls.can_edit(handler)

    @classmethod
    def can_delete(cls, handler):
        """Deleting is gated on the edit right."""
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        """Adding is gated on the edit right."""
        return cls.can_edit(handler)
class CourseSettingsHandler(ApplicationHandler):
    """Dashboard handler that routes to the course.yaml settings editor."""

    def post_edit_basic_course_settings(self):
        """Handles editing of course.yaml."""
        assert is_editable_fs(self.app_context)

        # Make sure course.yaml exists before the editor opens; seed it with
        # the empty-course template attributed to the current user.
        fs = self.app_context.fs.impl
        course_yaml = fs.physical_to_logical('/course.yaml')
        if not fs.isfile(course_yaml):
            seed = courses.EMPTY_COURSE_YAML % users.get_current_user().email()
            fs.put(course_yaml, vfs.string_to_stream(seed))

        self.redirect(self.get_action_url(
            'edit_basic_settings', key='/course.yaml'))

    def get_edit_basic_settings(self):
        """Shows editor for course.yaml."""
        settings_key = self.request.get('key')
        exit_url = self.canonicalize_url('/dashboard?action=settings')
        rest_url = self.canonicalize_url('/rest/course/settings')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            CourseSettingsRESTHandler.REGISTORY.get_json_schema(),
            CourseSettingsRESTHandler.REGISTORY.get_schema_dict(),
            settings_key, rest_url, exit_url,
            required_modules=CourseSettingsRESTHandler.REQUIRED_MODULES)

        self.render_page({
            'page_title': self.format_title('Edit Settings'),
            'page_description': messages.EDIT_SETTINGS_DESCRIPTION,
            'main_content': form_html,
        })
class CourseSettingsRESTHandler(BaseRESTHandler):
    """Provides REST API for a file."""

    # [sic] 'REGISTORY' — misspelled but part of the public interface; other
    # classes reference it by this name, so it cannot be renamed safely.
    REGISTORY = courses.create_course_registry()

    REQUIRED_MODULES = [
        'inputex-date', 'inputex-string', 'inputex-textarea', 'inputex-url',
        'inputex-checkbox', 'inputex-select', 'inputex-uneditable', 'gcb-rte']

    URI = '/rest/course/settings'

    @classmethod
    def validate_content(cls, content):
        """Raises yaml.YAMLError if content is not parseable YAML."""
        yaml.safe_load(content)

    def get_course_dict(self):
        """Returns the current course environment as a dict."""
        return self.get_course().get_environ(self.app_context)

    def get_group_id(self, email):
        """Extracts the Google Groups id from a googlegroups.com address."""
        if not email or '@googlegroups.com' not in email:
            return None
        return email.split('@')[0]

    def get_groups_web_url(self, email):
        """Returns the group's web URL, or None if email is not a group."""
        group_id = self.get_group_id(email)
        if not group_id:
            return None
        return 'https://groups.google.com/group/' + group_id

    def get_groups_embed_url(self, email):
        """Returns the group's embeddable forum URL, or None."""
        group_id = self.get_group_id(email)
        if not group_id:
            return None
        return 'https://groups.google.com/forum/embed/?place=forum/' + group_id

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        assert is_editable_fs(self.app_context)

        key = self.request.get('key')

        if not CourseSettingsRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        # Load data if possible.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        try:
            stream = fs.get(filename)
        except:  # pylint: disable=bare-except
            stream = None
        if not stream:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        # Prepare data.
        entity = {}
        CourseSettingsRESTHandler.REGISTORY.convert_entity_to_json_entity(
            self.get_course_dict(), entity)

        # Render JSON response.
        json_payload = transforms.dict_to_json(
            entity,
            CourseSettingsRESTHandler.REGISTORY.get_json_schema_dict())
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'basic-course-settings-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        assert is_editable_fs(self.app_context)

        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'basic-course-settings-put', {'key': key}):
            return

        if not CourseSettingsRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        payload = request.get('payload')
        request_data = {}
        CourseSettingsRESTHandler.REGISTORY.convert_json_to_entity(
            transforms.loads(payload), request_data)

        course_data = request_data['course']
        # Derive the forum URLs from the forum email, when one is set.
        if 'forum_email' in course_data.keys():
            forum_email = course_data['forum_email']
            forum_web_url = self.get_groups_web_url(forum_email)
            if forum_web_url:
                course_data['forum_url'] = forum_web_url
            forum_web_url = self.get_groups_embed_url(forum_email)
            if forum_web_url:
                course_data['forum_embed_url'] = forum_web_url

        # Derive the announcement list URL from its email, when one is set.
        if 'announcement_list_email' in course_data.keys():
            announcement_email = course_data['announcement_list_email']
            announcement_web_url = self.get_groups_web_url(announcement_email)
            if announcement_web_url:
                course_data['announcement_list_url'] = announcement_web_url

        # Incoming settings are merged over the existing course environment.
        entity = courses.deep_dict_merge(request_data, self.get_course_dict())
        content = yaml.safe_dump(entity)

        try:
            self.validate_content(content)
            content_stream = vfs.string_to_stream(unicode(content))
        except Exception as e:  # pylint: disable=W0703
            transforms.send_json_response(self, 412, 'Validation error: %s' % e)
            return

        # Store new file content.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        fs.put(filename, content_stream)

        # Send reply.
        transforms.send_json_response(self, 200, 'Saved.')

    def delete(self):
        """Handles REST DELETE verb."""
        # Course settings can never be deleted; always deny.
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        transforms.send_json_response(
            self, 401, 'Access denied.', {'key': key})
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting online file editing."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import base64
import cgi
import os
import urllib
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from models import vfs
from modules.oeditor import oeditor
import yaml
import messages
from google.appengine.api import users
# Course-relative base path under which asset uploads are allowed.
ALLOWED_ASSET_UPLOAD_BASE = 'assets/img'
# Maximum accepted upload size, in kilobytes.
MAX_ASSET_UPLOAD_SIZE_K = 500
def is_editable_fs(app_context):
    """True iff the course file system is the editable, datastore-backed one.

    NOTE(review): duplicated in the course-settings module; consider sharing.
    """
    # Exact class comparison (not isinstance): subclasses do not qualify.
    return app_context.fs.impl.__class__ == vfs.DatastoreBackedFileSystem
class FilesRights(object):
    """Manages view/edit rights for files."""

    @classmethod
    def can_edit(cls, handler):
        """Only course admins may edit files."""
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_view(cls, handler):
        """Viewing is gated on the same right as editing."""
        return cls.can_edit(handler)

    @classmethod
    def can_delete(cls, handler):
        """Deleting is gated on the edit right."""
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        """Adding is gated on the edit right."""
        return cls.can_edit(handler)
class FileManagerAndEditor(ApplicationHandler):
    """An editor for editing and managing files."""

    def post_create_or_edit_settings(self):
        """Handles creation or/and editing of course.yaml."""
        assert is_editable_fs(self.app_context)

        # Check if course.yaml exists; create if not. New files are seeded
        # with the empty-course template attributed to the current user.
        fs = self.app_context.fs.impl
        course_yaml = fs.physical_to_logical('/course.yaml')
        if not fs.isfile(course_yaml):
            fs.put(course_yaml, vfs.string_to_stream(
                courses.EMPTY_COURSE_YAML % users.get_current_user().email()))

        self.redirect(self.get_action_url('edit_settings', key='/course.yaml'))

    def get_edit_settings(self):
        """Shows editor for course.yaml."""
        key = self.request.get('key')
        exit_url = self.canonicalize_url('/dashboard?action=settings')
        rest_url = self.canonicalize_url('/rest/files/item')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            FilesItemRESTHandler.SCHEMA_JSON,
            FilesItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            key, rest_url, exit_url,
            required_modules=FilesItemRESTHandler.REQUIRED_MODULES)

        template_values = {}
        template_values['page_title'] = self.format_title('Edit Settings')
        template_values['page_description'] = messages.EDIT_SETTINGS_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def get_add_asset(self):
        """Show an upload dialog for assets."""
        exit_url = self.canonicalize_url('/dashboard?action=assets')
        rest_url = self.canonicalize_url(
            AssetItemRESTHandler.URI)
        # save_method='upload' + auto_return renders an upload form that
        # posts the file and navigates back on success.
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            AssetItemRESTHandler.SCHEMA_JSON,
            AssetItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            '', rest_url, exit_url, save_method='upload', auto_return=True,
            required_modules=AssetItemRESTHandler.REQUIRED_MODULES,
            save_button_caption='Upload')
        template_values = {}
        template_values['page_title'] = self.format_title('Upload Asset')
        template_values['page_description'] = messages.UPLOAD_ASSET_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def get_delete_asset(self):
        """Show an review/delete page for assets."""
        uri = self.request.get('uri')
        exit_url = self.canonicalize_url('/dashboard?action=assets')
        rest_url = self.canonicalize_url(
            AssetUriRESTHandler.URI)
        # Deletion is routed to the files REST handler with an XSRF token
        # baked into the URL.
        delete_url = '%s?%s' % (
            self.canonicalize_url(FilesItemRESTHandler.URI),
            urllib.urlencode({
                'key': uri,
                'xsrf_token': cgi.escape(self.create_xsrf_token('delete-asset'))
            }))
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            AssetUriRESTHandler.SCHEMA_JSON,
            AssetUriRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            uri, rest_url, exit_url, save_method='',
            delete_url=delete_url, delete_method='delete')
        template_values = {}
        template_values['page_title'] = self.format_title('View Asset')
        template_values['main_content'] = form_html
        self.render_page(template_values)
class FilesItemRESTHandler(BaseRESTHandler):
    """Provides REST API for a file."""

    SCHEMA_JSON = """
        {
            "id": "Text File",
            "type": "object",
            "description": "Text File",
            "properties": {
                "key" : {"type": "string"},
                "encoding" : {"type": "string"},
                "content": {"type": "text"}
                }
        }
        """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Text File'),
        (['properties', 'key', '_inputex'], {
            'label': 'ID', '_type': 'uneditable'}),
        (['properties', 'encoding', '_inputex'], {
            'label': 'Encoding', '_type': 'uneditable'}),
        (['properties', 'content', '_inputex'], {
            'label': 'Content', '_type': 'text'})]

    REQUIRED_MODULES = [
        'inputex-string', 'inputex-textarea', 'inputex-select',
        'inputex-uneditable']

    URI = '/rest/files/item'

    # Encodings used in the 'encoding' field of the JSON payload.
    FILE_ENCODING_TEXT = 'text/utf-8'
    FILE_ENCODING_BINARY = 'binary/base64'
    # [sic] 'EXTENTION' — misspelled but part of the class interface.
    FILE_EXTENTION_TEXT = ['.js', '.css', '.yaml', '.html', '.csv']

    @classmethod
    def is_text_file(cls, filename):
        """True iff filename's extension marks it as a text file."""
        # TODO(psimakov): this needs to be better and not use linear search
        for extention in cls.FILE_EXTENTION_TEXT:
            if filename.endswith(extention):
                return True
        return False

    @classmethod
    def validate_content(cls, filename, content):
        """Raises on invalid content; currently only YAML is checked."""
        # TODO(psimakov): handle more file types here
        if filename.endswith('.yaml'):
            yaml.safe_load(content)

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        assert is_editable_fs(self.app_context)

        key = self.request.get('key')

        if not FilesRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        # Load data if possible.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        try:
            stream = fs.get(filename)
        except:  # pylint: disable=bare-except
            stream = None
        if not stream:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        # Prepare data: text content goes out verbatim, binary as base64.
        entity = {'key': key}
        if self.is_text_file(key):
            entity['encoding'] = self.FILE_ENCODING_TEXT
            entity['content'] = vfs.stream_to_string(stream)
        else:
            entity['encoding'] = self.FILE_ENCODING_BINARY
            entity['content'] = base64.b64encode(stream.read())

        # Render JSON response.
        json_payload = transforms.dict_to_json(
            entity,
            FilesItemRESTHandler.SCHEMA_DICT)
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'file-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        assert is_editable_fs(self.app_context)

        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'file-put', {'key': key}):
            return

        # TODO(psimakov): we don't allow editing of all files; restrict further
        if not FilesRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        payload = request.get('payload')
        entity = transforms.loads(payload)
        encoding = entity['encoding']
        content = entity['content']

        # Validate the file content.
        errors = []
        try:
            if encoding == self.FILE_ENCODING_TEXT:
                content_stream = vfs.string_to_stream(content)
            elif encoding == self.FILE_ENCODING_BINARY:
                # NOTE(review): b64decode yields a raw byte string while the
                # text branch yields a stream; presumably fs.put accepts both
                # — confirm against the vfs implementation.
                content_stream = base64.b64decode(content)
            else:
                errors.append('Unknown encoding: %s.' % encoding)

            self.validate_content(key, content)
        except Exception as e:  # pylint: disable=W0703
            errors.append('Validation error: %s' % e)
        if errors:
            transforms.send_json_response(self, 412, ''.join(errors))
            return

        # Store new file content.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        fs.put(filename, content_stream)

        # Send reply.
        transforms.send_json_response(self, 200, 'Saved.')

    def delete(self):
        """Handles REST DELETE verb."""
        key = self.request.get('key')

        if not self.assert_xsrf_token_or_fail(
                self.request, 'delete-asset', {'key': key}):
            return

        if not FilesRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        fs = self.app_context.fs.impl
        path = fs.physical_to_logical(key)
        if not fs.isfile(path):
            transforms.send_json_response(
                self, 403, 'File does not exist.', None)
            return
        fs.delete(path)

        transforms.send_json_response(self, 200, 'Deleted.')
class AssetItemRESTHandler(BaseRESTHandler):
    """Provides REST API for managing assets."""

    URI = '/rest/assets/item'

    SCHEMA_JSON = """
        {
            "id": "Asset",
            "type": "object",
            "description": "Asset",
            "properties": {
                "base": {"type": "string"},
                "file": {"type": "string", "optional": true}
                }
        }
        """

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Upload Asset'),
        (['properties', 'base', '_inputex'], {
            'label': 'Base', '_type': 'uneditable'}),
        (['properties', 'file', '_inputex'], {
            'label': 'File', '_type': 'file'})]

    REQUIRED_MODULES = [
        'inputex-string', 'inputex-uneditable', 'inputex-file',
        'io-upload-iframe']

    def get(self):
        """Provides empty initial content for asset upload editor."""
        # TODO(jorr): Pass base URI through as request param when generalized.
        json_payload = {'file': '', 'base': ALLOWED_ASSET_UPLOAD_BASE}
        transforms.send_json_response(
            self, 200, 'Success.', payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token('asset-upload'))

    def post(self):
        """Handles asset uploads.

        All responses use send_json_file_upload_response because the form is
        posted through an io-upload-iframe; a plain JSON response would not
        be delivered back to the form correctly.
        """
        assert is_editable_fs(self.app_context)
        if not FilesRights.can_add(self):
            transforms.send_json_file_upload_response(
                self, 401, 'Access denied.')
            return

        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(request, 'asset-upload', None):
            return

        # Uploads are only accepted under the fixed image-assets base path.
        payload = transforms.loads(request['payload'])
        base = payload['base']
        assert base == ALLOWED_ASSET_UPLOAD_BASE

        upload = self.request.POST['file']
        # Keep only the basename; discard any client-supplied directories.
        filename = os.path.split(upload.filename)[1]
        assert filename
        physical_path = os.path.join(base, filename)
        fs = self.app_context.fs.impl
        path = fs.physical_to_logical(physical_path)
        if fs.isfile(path):
            transforms.send_json_file_upload_response(
                self, 403, 'Cannot overwrite existing file.')
            return

        content = upload.file.read()
        upload.file.seek(0)  # rewind so fs.put can re-read the stream
        if len(content) > MAX_ASSET_UPLOAD_SIZE_K * 1024:
            # Bug fix: this error previously used transforms.send_json_response,
            # unlike every other response in this iframe-upload flow; the
            # file-upload variant is required for the form to receive it.
            transforms.send_json_file_upload_response(
                self, 403,
                'Max allowed file upload size is %dK' % MAX_ASSET_UPLOAD_SIZE_K)
            return
        fs.put(path, upload.file)

        transforms.send_json_file_upload_response(self, 200, 'Saved.')
class AssetUriRESTHandler(BaseRESTHandler):
    """Provides REST API for managing asserts by means of their URIs."""

    # TODO(jorr): Refactor the asset management classes to have more meaningful
    # REST URI's and class names

    URI = '/rest/assets/uri'

    SCHEMA_JSON = """
        {
            "id": "Asset",
            "type": "object",
            "description": "Asset",
            "properties": {
                "uri": {"type": "string"}
                }
        }
        """

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Image or Document'),
        (['properties', 'uri', '_inputex'], {
            'label': 'Asset',
            '_type': 'uneditable',
            # Rendered client-side by the 'renderAsset' JS function.
            'visu': {
                'visuType': 'funcName',
                'funcName': 'renderAsset'}})]

    def get(self):
        """Handles REST GET verb and returns the uri of the asset."""
        uri = self.request.get('key')
        if not FilesRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': uri})
            return

        # The returned xsrf token authorizes a subsequent delete request.
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict={'uri': uri},
            xsrf_token=XsrfTokenManager.create_xsrf_token('asset-delete'))
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the dashboard."""
__author__ = 'John Orr (jorr@google.com)'
from common import safe_dom
def assemble_sanitized_message(text, link):
    """Builds a sanitized node list from optional text and a wiki link.

    Args:
        text: string or None. Message body, shown first when present.
        link: string or None. URL for a trailing 'Learn more...' anchor
            opened in a new tab.

    Returns:
        safe_dom.NodeList with the requested pieces appended in order.
    """
    nodes = safe_dom.NodeList()
    if text:
        nodes.append(safe_dom.Text(text))
        nodes.append(safe_dom.Entity('&nbsp;'))
    if link:
        anchor = safe_dom.Element('a', href=link, target='_blank')
        nodes.append(anchor.add_text('Learn more...'))
    return nodes
# Descriptions shown on dashboard pages and editors. Most append a
# 'Learn more...' link pointing at the Course Builder wiki; the plain-string
# constants are rendered without a link.
ABOUT_THE_COURSE_DESCRIPTION = assemble_sanitized_message("""
This information is configured by an administrator from the Admin pages.
""", None)

ASSESSMENT_CONTENT_DESCRIPTION = assemble_sanitized_message("""
Assessment questions and answers (JavaScript format).
""", 'https://code.google.com/p/course-builder/wiki/CreateAssessments')

ASSESSMENT_DETAILS_DESCRIPTION = assemble_sanitized_message("""
Properties and restrictions of your assessment (YAML format).
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')

ASSESSMENT_EDITOR_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/CreateAssessments')

ASSETS_DESCRIPTION = assemble_sanitized_message("""
These are all the assets for your course. You can upload new images and
documents here, after which you can use them in your lessons and activities.
You may create, edit, and delete activities and assessments from the Outline
page. All other assets must be edited by an administrator.
""", None)

ASSIGNMENTS_MENU_DESCRIPTION = assemble_sanitized_message("""
Select a peer-reviewed assignment and enter a student's email address to view
their assignment submission and any associated reviews.
""", None)

CONTENTS_OF_THE_COURSE_DESCRIPTION = assemble_sanitized_message("""
The course.yaml file contains many course settings. Edit it using the buttons
at the right.
""", 'https://code.google.com/p/course-builder/wiki/CourseSettings')

COURSE_OUTLINE_DESCRIPTION = assemble_sanitized_message(
    'Build, organize and preview your course here.',
    'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

COURSE_OUTLINE_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Click up/down arrows to re-order units, or lessons within units. To move a
lesson between units, edit that lesson from the outline page and change its
parent unit.
""", None)

DATA_FILES_DESCRIPTION = assemble_sanitized_message("""
The lesson.csv file contains the contents of your lesson. The unit.csv file
contains the course related content shown on the homepage. These files are
located in your Course Builder installation. Edit them directly with an editor
like Notepad++. Be careful, some editors will add extra characters, which may
prevent the uploading of these files.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

EDIT_SETTINGS_DESCRIPTION = assemble_sanitized_message("""
The course.yaml file contains many course settings.
""", 'https://code.google.com/p/course-builder/wiki/CourseSettings')

IMPORT_COURSE_DESCRIPTION = assemble_sanitized_message("""
Import the contents of another course into this course. Both courses must be on
the same Google App Engine instance.
""", None)

LESSON_ACTIVITY_DESCRIPTION = assemble_sanitized_message("""
Create an activity by entering the correct syntax above.
""", ('https://code.google.com/p/course-builder/wiki/CreateActivities'
      '#Writing_activities'))

LESSON_ACTIVITY_LISTED_DESCRIPTION = """
Whether the activity should be viewable as a stand-alone item in the unit index.
"""

LESSON_ACTIVITY_TITLE_DESCRIPTION = """
This appears above your activity.
"""

LESSON_OBJECTIVES_DESCRIPTION = """
The lesson body is displayed to students above the video in the default
template.
"""

LESSON_VIDEO_ID_DESCRIPTION = """
Provide a YouTube video ID to embed a video.
"""

LESSON_NOTES_DESCRIPTION = """
Provide a URL that points to the notes for this lesson (if applicable). These
notes can be accessed by clicking on the 'Text Version' button on the lesson
page.
"""

LINK_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Links will appear in your outline and will take students directly to the URL.
""", None)
# Help text for the link editor's URL field. Typo fix: the original read
# "'http' or https'." with a missing opening quote around https.
LINK_EDITOR_URL_DESCRIPTION = """
Links to external sites must start with 'http' or 'https'.
"""
# Link-only descriptions (no body text) for dashboard pages, plus the
# peer-review feedback form description.
PAGES_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

REVIEWER_FEEDBACK_FORM_DESCRIPTION = assemble_sanitized_message("""
Review form questions and answers (JavaScript format).
""", 'https://code.google.com/p/course-builder/wiki/PeerReview')

SETTINGS_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/Dashboard#Settings')
# Help text for the unit editor. Typo fix: 'acitivities' -> 'activities'.
UNIT_EDITOR_DESCRIPTION = assemble_sanitized_message("""
Units contain lessons and activities.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')
# Help text for the asset-upload dialog.
UPLOAD_ASSET_DESCRIPTION = assemble_sanitized_message("""
Choose a file to upload to this Google App Engine instance. Learn more about
file storage and hosting.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Assets')
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting unit and lesson editing."""
__author__ = 'John Orr (jorr@google.com)'
import cgi
import logging
import urllib
from common import tags
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from modules.oeditor import oeditor
from tools import verify
import filer
import messages
# Human-readable labels for a unit/lesson availability flag:
# is_draft == True renders as 'Private', False as 'Public'.
DRAFT_TEXT = 'Private'
PUBLISHED_TEXT = 'Public'

# The editor has severe limitations for editing nested lists of objects. First,
# it does not allow one to move a lesson from one unit to another. We need a way
# of doing that. Second, JSON schema specification does not seem to support a
# type-safe array, which has objects of different types. We also want that
# badly :). All in all - using generic schema-based object editor for editing
# nested arrayable polymorphic attributes is a pain...

# Shared oeditor annotation that renders the boolean 'is_draft' property as a
# Private/Public drop-down, displayed apart from the main field group.
STATUS_ANNOTATION = oeditor.create_bool_select_annotation(
    ['properties', 'is_draft'], 'Status', DRAFT_TEXT,
    PUBLISHED_TEXT, class_name='split-from-main-group')
class CourseOutlineRights(object):
    """Manages view/edit rights for course outline."""

    @classmethod
    def can_edit(cls, handler):
        # Editing the outline requires course-admin rights; every other
        # permission below is defined in terms of this one.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_view(cls, handler):
        return cls.can_edit(handler)

    @classmethod
    def can_delete(cls, handler):
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        return cls.can_edit(handler)
class UnitLessonEditor(ApplicationHandler):
    """An editor for the unit and lesson titles."""

    def get_import_course(self):
        """Shows setup form for course import."""
        template_values = {}
        template_values['page_title'] = self.format_title('Import Course')

        # SCHEMA_ANNOTATIONS_DICT is a callable for this handler: the
        # annotations embed the list of importable courses and are None
        # when there is nothing to import from.
        annotations = ImportCourseRESTHandler.SCHEMA_ANNOTATIONS_DICT()
        if not annotations:
            template_values['main_content'] = 'No courses to import from.'
            self.render_page(template_values)
            return

        exit_url = self.canonicalize_url('/dashboard')
        rest_url = self.canonicalize_url(ImportCourseRESTHandler.URI)
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            ImportCourseRESTHandler.SCHEMA_JSON,
            annotations,
            None, rest_url, exit_url,
            auto_return=True,
            save_button_caption='Import',
            required_modules=ImportCourseRESTHandler.REQUIRED_MODULES)

        template_values = {}
        template_values['page_title'] = self.format_title('Import Course')
        template_values['page_description'] = messages.IMPORT_COURSE_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def get_edit_unit_lesson(self):
        """Shows editor for the list of unit and lesson titles."""
        key = self.request.get('key')

        exit_url = self.canonicalize_url('/dashboard')
        rest_url = self.canonicalize_url(UnitLessonTitleRESTHandler.URI)
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            UnitLessonTitleRESTHandler.SCHEMA_JSON,
            UnitLessonTitleRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            key, rest_url, exit_url,
            required_modules=UnitLessonTitleRESTHandler.REQUIRED_MODULES)

        template_values = {}
        template_values['page_title'] = self.format_title('Edit Course Outline')
        template_values[
            'page_description'] = messages.COURSE_OUTLINE_EDITOR_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def post_add_lesson(self):
        """Adds new lesson to a first unit of the course."""
        course = courses.Course(self)

        # Only units of type 'U' can hold lessons; pick the first one.
        first_unit = None
        for unit in course.get_units():
            if unit.type == verify.UNIT_TYPE_UNIT:
                first_unit = unit
                break

        if first_unit:
            lesson = course.add_lesson(first_unit)
            course.save()
            # TODO(psimakov): complete 'edit_lesson' view
            self.redirect(self.get_action_url(
                'edit_lesson', key=lesson.lesson_id,
                extra_args={'is_newly_created': 1}))
        else:
            # No unit exists yet to attach the lesson to; return to outline.
            self.redirect('/dashboard')

    def post_add_unit(self):
        """Adds new unit to a course."""
        course = courses.Course(self)
        unit = course.add_unit()
        course.save()
        self.redirect(self.get_action_url(
            'edit_unit', key=unit.unit_id, extra_args={'is_newly_created': 1}))

    def post_add_link(self):
        """Adds new link to a course."""
        course = courses.Course(self)
        link = course.add_link()
        link.href = ''
        course.save()
        self.redirect(self.get_action_url(
            'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1}))

    def post_add_assessment(self):
        """Adds new assessment to a course."""
        course = courses.Course(self)
        assessment = course.add_assessment()
        course.save()
        self.redirect(self.get_action_url(
            'edit_assessment', key=assessment.unit_id,
            extra_args={'is_newly_created': 1}))

    def _render_edit_form_for(
        self, rest_handler_cls, title, annotations_dict=None,
        delete_xsrf_token='delete-unit', page_description=None):
        """Renders an editor form for a given REST handler class.

        Args:
            rest_handler_cls: class exposing URI, SCHEMA_JSON,
                SCHEMA_ANNOTATIONS_DICT and REQUIRED_MODULES.
            title: human-readable entity name used in the page title.
            annotations_dict: optional override for the handler's static
                schema annotations (used when annotations are dynamic).
            delete_xsrf_token: XSRF action name protecting the delete URL.
            page_description: optional help text shown above the form.
        """
        if not annotations_dict:
            annotations_dict = rest_handler_cls.SCHEMA_ANNOTATIONS_DICT

        key = self.request.get('key')

        extra_args = {}
        if self.request.get('is_newly_created'):
            extra_args['is_newly_created'] = 1

        exit_url = self.canonicalize_url('/dashboard')
        rest_url = self.canonicalize_url(rest_handler_cls.URI)
        # The delete URL carries both the entity key and an XSRF token so
        # the REST handler can authorize the DELETE verb.
        delete_url = '%s?%s' % (
            self.canonicalize_url(rest_handler_cls.URI),
            urllib.urlencode({
                'key': key,
                'xsrf_token': cgi.escape(
                    self.create_xsrf_token(delete_xsrf_token))
            }))

        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            rest_handler_cls.SCHEMA_JSON,
            annotations_dict,
            key, rest_url, exit_url,
            extra_args=extra_args,
            delete_url=delete_url, delete_method='delete',
            # Read-only courses still render the form, but non-editable.
            read_only=not filer.is_editable_fs(self.app_context),
            required_modules=rest_handler_cls.REQUIRED_MODULES)

        template_values = {}
        template_values['page_title'] = self.format_title('Edit %s' % title)
        if page_description:
            template_values['page_description'] = page_description
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def get_edit_unit(self):
        """Shows unit editor."""
        self._render_edit_form_for(
            UnitRESTHandler, 'Unit',
            page_description=messages.UNIT_EDITOR_DESCRIPTION)

    def get_edit_link(self):
        """Shows link editor."""
        self._render_edit_form_for(
            LinkRESTHandler, 'Link',
            page_description=messages.LINK_EDITOR_DESCRIPTION)

    def get_edit_assessment(self):
        """Shows assessment editor."""
        self._render_edit_form_for(
            AssessmentRESTHandler, 'Assessment',
            page_description=messages.ASSESSMENT_EDITOR_DESCRIPTION)

    def get_edit_lesson(self):
        """Shows the lesson/activity editor."""
        # Lesson annotations are dynamic: the 'Parent Unit' drop-down must
        # reflect the course's current list of units.
        self._render_edit_form_for(
            LessonRESTHandler, 'Lessons and Activities',
            annotations_dict=LessonRESTHandler.get_schema_annotations_dict(
                courses.Course(self).get_units()),
            delete_xsrf_token='delete-lesson')
class CommonUnitRESTHandler(BaseRESTHandler):
    """A common super class for all unit REST handlers.

    Subclasses must override unit_to_dict() and apply_updates() and must
    define SCHEMA_DICT for payload validation.
    """

    def unit_to_dict(self, unused_unit):
        """Converts a unit to a dictionary representation."""
        # NotImplementedError (rather than a bare Exception) signals an
        # abstract method explicitly to both readers and tooling.
        raise NotImplementedError()

    def apply_updates(
        self, unused_unit, unused_updated_unit_dict, unused_errors):
        """Applies changes to a unit; modifies unit input argument."""
        raise NotImplementedError()

    def get(self):
        """A GET REST method shared by all unit types."""
        key = self.request.get('key')

        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        unit = courses.Course(self).find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        message = ['Success.']
        if self.request.get('is_newly_created'):
            unit_type = verify.UNIT_TYPE_NAMES[unit.type].lower()
            message.append(
                'New %s has been created and saved.' % unit_type)

        transforms.send_json_response(
            self, 200, '\n'.join(message),
            payload_dict=self.unit_to_dict(unit),
            xsrf_token=XsrfTokenManager.create_xsrf_token('put-unit'))

    def put(self):
        """A PUT REST method shared by all unit types."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'put-unit', {'key': key}):
            return

        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        # Use one Course instance for both lookup and update so the unit we
        # mutate is the same object that update_unit()/save() operate on.
        course = courses.Course(self)
        unit = course.find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        payload = request.get('payload')
        updated_unit_dict = transforms.json_to_dict(
            transforms.loads(payload), self.SCHEMA_DICT)

        errors = []
        self.apply_updates(unit, updated_unit_dict, errors)
        if not errors:
            assert course.update_unit(unit)
            course.save()
            transforms.send_json_response(self, 200, 'Saved.')
        else:
            # 412 carries the validation errors back to the editor.
            transforms.send_json_response(self, 412, '\n'.join(errors))

    def delete(self):
        """Handles REST DELETE verb with JSON payload."""
        key = self.request.get('key')

        if not self.assert_xsrf_token_or_fail(
                self.request, 'delete-unit', {'key': key}):
            return

        if not CourseOutlineRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        course = courses.Course(self)
        unit = course.find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        course.delete_unit(unit)
        course.save()

        transforms.send_json_response(self, 200, 'Deleted.')
class UnitRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to unit."""

    URI = '/rest/course/unit'

    SCHEMA_JSON = """
    {
        "id": "Unit Entity",
        "type": "object",
        "description": "Unit",
        "properties": {
            "key" : {"type": "string"},
            "type": {"type": "string"},
            "title": {"optional": true, "type": "string"},
            "is_draft": {"type": "boolean"}
        }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Unit'),
        (['properties', 'key', '_inputex'], {
            'label': 'ID', '_type': 'uneditable'}),
        (['properties', 'type', '_inputex'], {
            'label': 'Type', '_type': 'uneditable'}),
        (['properties', 'title', '_inputex'], {'label': 'Title'}),
        STATUS_ANNOTATION]

    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-uneditable']

    def unit_to_dict(self, unit):
        """Converts a unit to the editor's dict representation."""
        # Use the shared type constant instead of the magic 'U' literal so
        # this check stays consistent with the rest of the module.
        assert unit.type == verify.UNIT_TYPE_UNIT
        return {
            'key': unit.unit_id,
            'type': verify.UNIT_TYPE_NAMES[unit.type],
            'title': unit.title,
            'is_draft': not unit.now_available}

    def apply_updates(self, unit, updated_unit_dict, unused_errors):
        """Copies editable fields from the editor dict onto the unit."""
        unit.title = updated_unit_dict.get('title')
        unit.now_available = not updated_unit_dict.get('is_draft')
class LinkRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to link."""

    URI = '/rest/course/link'

    SCHEMA_JSON = """
    {
        "id": "Link Entity",
        "type": "object",
        "description": "Link",
        "properties": {
            "key" : {"type": "string"},
            "type": {"type": "string"},
            "title": {"optional": true, "type": "string"},
            "url": {"optional": true, "type": "string"},
            "is_draft": {"type": "boolean"}
        }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Link'),
        (['properties', 'key', '_inputex'], {
            'label': 'ID', '_type': 'uneditable'}),
        (['properties', 'type', '_inputex'], {
            'label': 'Type', '_type': 'uneditable'}),
        (['properties', 'title', '_inputex'], {'label': 'Title'}),
        (['properties', 'url', '_inputex'], {
            'label': 'URL',
            'description': messages.LINK_EDITOR_URL_DESCRIPTION}),
        STATUS_ANNOTATION]

    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-uneditable']

    def unit_to_dict(self, unit):
        """Serializes a link unit into the editor's payload format."""
        assert unit.type == 'O'
        payload = {
            'key': unit.unit_id,
            'type': verify.UNIT_TYPE_NAMES[unit.type],
            'title': unit.title,
            'url': unit.href,
        }
        payload['is_draft'] = not unit.now_available
        return payload

    def apply_updates(self, unit, updated_unit_dict, unused_errors):
        """Copies editable fields from the editor dict onto the link unit."""
        unit.title = updated_unit_dict.get('title')
        unit.href = updated_unit_dict.get('url')
        unit.now_available = not updated_unit_dict.get('is_draft')
class ImportCourseRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to course import."""

    URI = '/rest/course/import'

    SCHEMA_JSON = """
    {
        "id": "Import Course Entity",
        "type": "object",
        "description": "Import Course",
        "properties": {
            "course" : {"type": "string"}
        }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-uneditable']

    @classmethod
    def _get_course_list(cls):
        """Returns [{value, label}] choices for courses the user may import."""
        course_list = []
        for acourse in sites.get_all_courses():
            # Only courses the user administers are importable, and a
            # course cannot import from itself.
            if not roles.Roles.is_course_admin(acourse):
                continue
            if acourse == sites.get_course_for_current_request():
                continue
            course_list.append({
                'value': acourse.raw,
                'label': cgi.escape(acourse.get_title())})
        return course_list

    @classmethod
    def SCHEMA_ANNOTATIONS_DICT(cls):  # pylint: disable-msg=g-bad-name
        """Schema annotations are dynamic and include a list of courses."""
        course_list = cls._get_course_list()
        if not course_list:
            return None

        # Format annotations.
        return [
            (['title'], 'Import Course'),
            (
                ['properties', 'course', '_inputex'],
                {
                    'label': 'Available Courses',
                    '_type': 'select',
                    'choices': course_list})]

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        # Guard against an empty course list; indexing [0] unconditionally
        # raised IndexError and surfaced as a 500 to the client.
        course_list = self._get_course_list()
        if not course_list:
            transforms.send_json_response(
                self, 404, 'Object not found.', {})
            return

        first_course_in_dropdown = course_list[0]['value']

        transforms.send_json_response(
            self, 200, None,
            payload_dict={'course': first_course_in_dropdown},
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'import-course'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        request = transforms.loads(self.request.get('request'))

        if not self.assert_xsrf_token_or_fail(
                request, 'import-course', {'key': None}):
            return

        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        payload = request.get('payload')
        course_raw = transforms.json_to_dict(
            transforms.loads(payload), self.SCHEMA_DICT)['course']

        # Resolve the raw course descriptor back to a course instance.
        source = None
        for acourse in sites.get_all_courses():
            if acourse.raw == course_raw:
                source = acourse
                break

        if not source:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'raw': course_raw})
            return

        course = courses.Course(self)
        errors = []
        try:
            course.import_from(source, errors)
        except Exception as e:  # pylint: disable-msg=broad-except
            logging.exception(e)
            errors.append('Import failed: %s' % e)

        if errors:
            transforms.send_json_response(self, 412, '\n'.join(errors))
            return

        course.save()
        transforms.send_json_response(self, 200, 'Imported.')
class AssessmentRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to assessment."""

    URI = '/rest/course/assessment'

    SCHEMA_JSON = """
    {
        "id": "Assessment Entity",
        "type": "object",
        "description": "Assessment",
        "properties": {
            "key" : {"type": "string"},
            "type": {"type": "string"},
            "title": {"optional": true, "type": "string"},
            "weight": {"optional": true, "type": "string"},
            "content": {"optional": true, "type": "text"},
            "workflow_yaml": {"optional": true, "type": "text"},
            "review_form": {"optional": true, "type": "text"},
            "is_draft": {"type": "boolean"}
        }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Assessment'),
        (['properties', 'key', '_inputex'], {
            'label': 'ID', '_type': 'uneditable'}),
        (['properties', 'type', '_inputex'], {
            'label': 'Type', '_type': 'uneditable'}),
        (['properties', 'title', '_inputex'], {'label': 'Title'}),
        (['properties', 'weight', '_inputex'], {'label': 'Weight'}),
        (['properties', 'content', '_inputex'], {
            'label': 'Assessment Content',
            'description': str(messages.ASSESSMENT_CONTENT_DESCRIPTION)}),
        (['properties', 'workflow_yaml', '_inputex'], {
            'label': 'Assessment Details',
            'description': str(messages.ASSESSMENT_DETAILS_DESCRIPTION)}),
        (['properties', 'review_form', '_inputex'], {
            'label': 'Reviewer Feedback Form',
            'description': str(messages.REVIEWER_FEEDBACK_FORM_DESCRIPTION)}),
        STATUS_ANNOTATION]

    REQUIRED_MODULES = [
        'inputex-select', 'inputex-string', 'inputex-textarea',
        'inputex-uneditable']

    def _get_assessment_path(self, unit):
        """Returns the logical VFS path of the assessment's content file."""
        return self.app_context.fs.impl.physical_to_logical(
            courses.Course(self).get_assessment_filename(unit.unit_id))

    def _get_review_form_path(self, unit):
        """Returns the logical VFS path of the assessment's review form."""
        return self.app_context.fs.impl.physical_to_logical(
            courses.Course(self).get_review_form_filename(unit.unit_id))

    def unit_to_dict(self, unit):
        """Assemble a dict with the unit data fields."""
        assert unit.type == 'A'

        path = self._get_assessment_path(unit)
        fs = self.app_context.fs
        if fs.isfile(path):
            content = fs.get(path)
        else:
            content = ''

        review_form_path = self._get_review_form_path(unit)
        if review_form_path and fs.isfile(review_form_path):
            review_form = fs.get(review_form_path)
        else:
            review_form = ''

        return {
            'key': unit.unit_id,
            'type': verify.UNIT_TYPE_NAMES[unit.type],
            'title': unit.title,
            # Older units may predate the weight attribute; default to 0.
            'weight': str(unit.weight if hasattr(unit, 'weight') else 0),
            'content': content,
            'workflow_yaml': unit.workflow_yaml,
            'review_form': review_form,
            'is_draft': not unit.now_available,
        }

    def apply_updates(self, unit, updated_unit_dict, errors):
        """Store the updated assessment."""
        unit.title = updated_unit_dict.get('title')

        try:
            unit.weight = int(updated_unit_dict.get('weight'))
            if unit.weight < 0:
                errors.append('The weight must be a non-negative integer.')
        except (TypeError, ValueError):
            # int(None) raises TypeError when the 'weight' field is absent;
            # int('abc') raises ValueError. Both are user-input errors, so
            # report them instead of letting TypeError escape as a 500.
            errors.append('The weight must be an integer.')

        unit.now_available = not updated_unit_dict.get('is_draft')
        course = courses.Course(self)
        course.set_assessment_content(
            unit, updated_unit_dict.get('content'), errors=errors)

        unit.workflow_yaml = updated_unit_dict.get('workflow_yaml')
        unit.workflow.validate(errors=errors)

        # Only save the review form if the assessment needs human grading.
        if not errors:
            if course.needs_human_grader(unit):
                course.set_review_form(
                    unit, updated_unit_dict.get('review_form'), errors=errors)
            elif updated_unit_dict.get('review_form'):
                errors.append(
                    'Review forms for auto-graded assessments should be empty.')
class UnitLessonTitleRESTHandler(BaseRESTHandler):
    """Provides REST API to unit and lesson titles."""

    URI = '/rest/course/outline'

    SCHEMA_JSON = """
    {
        "type": "object",
        "description": "Course Outline",
        "properties": {
            "outline": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "id": {"type": "string"},
                        "title": {"type": "string"},
                        "lessons": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {
                                    "id": {"type": "string"},
                                    "title": {"type": "string"}
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Course Outline'),
        (['properties', 'outline', '_inputex'], {
            'sortable': 'true',
            'label': ''}),
        ([
            'properties', 'outline', 'items',
            'properties', 'title', '_inputex'], {
                '_type': 'uneditable',
                'label': ''}),
        (['properties', 'outline', 'items', 'properties', 'id', '_inputex'], {
            '_type': 'hidden'}),
        (['properties', 'outline', 'items', 'properties', 'lessons',
          '_inputex'], {
              'sortable': 'true',
              'label': '',
              'listAddLabel': 'Add  a new lesson',
              'listRemoveLabel': 'Delete'}),
        (['properties', 'outline', 'items', 'properties', 'lessons', 'items',
          'properties', 'title', '_inputex'], {
              '_type': 'uneditable',
              'label': ''}),
        (['properties', 'outline', 'items', 'properties', 'lessons', 'items',
          'properties', 'id', '_inputex'], {
              '_type': 'hidden'})
        ]

    REQUIRED_MODULES = [
        'inputex-hidden', 'inputex-list', 'inputex-string',
        'inputex-uneditable']

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        course = courses.Course(self)
        outline_data = []
        for unit in course.get_units():
            # Each outline entry pairs a display title with its id and a
            # (possibly empty) list of the unit's lessons.
            lessons = [
                {'title': lesson.title, 'id': lesson.lesson_id}
                for lesson in course.get_lessons(unit.unit_id)]
            if unit.type == verify.UNIT_TYPE_UNIT:
                display_title = 'Unit %s - %s' % (unit.index, unit.title)
            else:
                display_title = unit.title
            outline_data.append({
                'title': display_title,
                'id': unit.unit_id,
                'lessons': lessons})

        transforms.send_json_response(
            self, 200, None,
            payload_dict={'outline': outline_data},
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'unit-lesson-reorder'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(
                request, 'unit-lesson-reorder', {'key': None}):
            return
        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        # Validate the submitted outline against the schema, then apply
        # the new ordering and persist it.
        new_order = transforms.json_to_dict(
            transforms.loads(request.get('payload')), self.SCHEMA_DICT)
        course = courses.Course(self)
        course.reorder_units(new_order['outline'])
        course.save()
        transforms.send_json_response(self, 200, 'Saved.')
class LessonRESTHandler(BaseRESTHandler):
    """Provides REST API to handle lessons and activities."""

    URI = '/rest/course/lesson'

    # Note GcbRte relies on the structure of this schema. Do not change without
    # checking the dependency.
    SCHEMA_JSON = """
    {
        "id": "Lesson Entity",
        "type": "object",
        "description": "Lesson",
        "properties": {
            "key" : {"type": "string"},
            "title" : {"type": "string"},
            "unit_id": {"type": "string"},
            "video" : {"type": "string", "optional": true},
            "objectives" : {
                "type": "string", "format": "html", "optional": true},
            "notes" : {"type": "string", "optional": true},
            "activity_title" : {"type": "string", "optional": true},
            "activity_listed" : {"type": "boolean", "optional": true},
            "activity": {"type": "string", "format": "text", "optional": true},
            "is_draft": {"type": "boolean"}
        }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    REQUIRED_MODULES = [
        'inputex-string', 'gcb-rte', 'inputex-select', 'inputex-textarea',
        'inputex-uneditable', 'inputex-checkbox']

    @classmethod
    def get_schema_annotations_dict(cls, units):
        """Builds annotations embedding the units for the parent drop-down."""
        # Use the shared constant instead of the magic 'U' literal, matching
        # the type checks elsewhere in this module.
        unit_list = []
        for unit in units:
            if unit.type == verify.UNIT_TYPE_UNIT:
                unit_list.append({
                    'label': cgi.escape(
                        'Unit %s - %s' % (unit.index, unit.title)),
                    'value': unit.unit_id})

        return [
            (['title'], 'Lesson'),
            (['properties', 'key', '_inputex'], {
                'label': 'ID', '_type': 'uneditable'}),
            (['properties', 'title', '_inputex'], {'label': 'Title'}),
            (['properties', 'unit_id', '_inputex'], {
                'label': 'Parent Unit', '_type': 'select',
                'choices': unit_list}),
            # TODO(sll): The internal 'objectives' property should also be
            # renamed.
            (['properties', 'objectives', '_inputex'], {
                'label': 'Lesson Body',
                'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
                'description': messages.LESSON_OBJECTIVES_DESCRIPTION}),
            (['properties', 'video', '_inputex'], {
                'label': 'Video ID',
                'description': messages.LESSON_VIDEO_ID_DESCRIPTION}),
            (['properties', 'notes', '_inputex'], {
                'label': 'Notes',
                'description': messages.LESSON_NOTES_DESCRIPTION}),
            (['properties', 'activity_title', '_inputex'], {
                'label': 'Activity Title',
                'description': messages.LESSON_ACTIVITY_TITLE_DESCRIPTION}),
            (['properties', 'activity_listed', '_inputex'], {
                'label': 'Activity Listed',
                'description': messages.LESSON_ACTIVITY_LISTED_DESCRIPTION}),
            (['properties', 'activity', '_inputex'], {
                'label': 'Activity',
                'description': str(messages.LESSON_ACTIVITY_DESCRIPTION)}),
            STATUS_ANNOTATION]

    def get(self):
        """Handles GET REST verb and returns lesson object as JSON payload."""
        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        key = self.request.get('key')
        course = courses.Course(self)
        lesson = course.find_lesson_by_id(None, key)
        if not lesson:
            # Return a proper 404 instead of the former 'assert lesson',
            # which crashed with a 500 on an unknown key; this matches the
            # behavior of put() and delete() below.
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        fs = self.app_context.fs
        path = fs.impl.physical_to_logical(course.get_activity_filename(
            lesson.unit_id, lesson.lesson_id))
        if lesson.has_activity and fs.isfile(path):
            activity = fs.get(path)
        else:
            activity = ''

        payload_dict = {
            'key': key,
            'title': lesson.title,
            'unit_id': lesson.unit_id,
            'objectives': lesson.objectives,
            'video': lesson.video,
            'notes': lesson.notes,
            'activity_title': lesson.activity_title,
            'activity_listed': lesson.activity_listed,
            'activity': activity,
            'is_draft': not lesson.now_available
        }

        message = ['Success.']
        if self.request.get('is_newly_created'):
            message.append('New lesson has been created and saved.')

        transforms.send_json_response(
            self, 200, '\n'.join(message),
            payload_dict=payload_dict,
            xsrf_token=XsrfTokenManager.create_xsrf_token('lesson-edit'))

    def put(self):
        """Handles PUT REST verb to save lesson and associated activity."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'lesson-edit', {'key': key}):
            return

        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        course = courses.Course(self)
        lesson = course.find_lesson_by_id(None, key)
        if not lesson:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        payload = request.get('payload')
        updates_dict = transforms.json_to_dict(
            transforms.loads(payload), self.SCHEMA_DICT)

        lesson.title = updates_dict['title']
        lesson.unit_id = updates_dict['unit_id']
        lesson.objectives = updates_dict['objectives']
        lesson.video = updates_dict['video']
        lesson.notes = updates_dict['notes']
        lesson.activity_title = updates_dict['activity_title']
        lesson.activity_listed = updates_dict['activity_listed']
        lesson.now_available = not updates_dict['is_draft']

        activity = updates_dict.get('activity', '').strip()
        errors = []
        if activity:
            lesson.has_activity = True
            course.set_activity_content(lesson, activity, errors=errors)
        else:
            # No activity text was submitted: clear the flag and remove any
            # previously stored activity file.
            lesson.has_activity = False
            fs = self.app_context.fs
            path = fs.impl.physical_to_logical(course.get_activity_filename(
                lesson.unit_id, lesson.lesson_id))
            if fs.isfile(path):
                fs.delete(path)

        if not errors:
            assert course.update_lesson(lesson)
            course.save()
            transforms.send_json_response(self, 200, 'Saved.')
        else:
            transforms.send_json_response(self, 412, '\n'.join(errors))

    def delete(self):
        """Handles REST DELETE verb with JSON payload."""
        key = self.request.get('key')

        if not self.assert_xsrf_token_or_fail(
                self.request, 'delete-lesson', {'key': key}):
            return

        if not CourseOutlineRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        course = courses.Course(self)
        lesson = course.find_lesson_by_id(None, key)
        if not lesson:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        assert course.delete_lesson(lesson)
        course.save()

        transforms.send_json_response(self, 200, 'Deleted.')
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage Courses."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
import os
import urllib
from common import jinja_filters
from common import safe_dom
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import HUMAN_READABLE_TIME_FORMAT
from controllers.utils import ReflectiveRequestHandler
import jinja2
import jinja2.exceptions
from models import config
from models import courses
from models import custom_modules
from models import jobs
from models import roles
from models import transforms
from models import utils
from models import vfs
from models.models import Student
from course_settings import CourseSettingsHandler
from course_settings import CourseSettingsRESTHandler
import filer
from filer import AssetItemRESTHandler
from filer import AssetUriRESTHandler
from filer import FileManagerAndEditor
from filer import FilesItemRESTHandler
import messages
from peer_review import AssignmentManager
import unit_lesson_editor
from unit_lesson_editor import AssessmentRESTHandler
from unit_lesson_editor import ImportCourseRESTHandler
from unit_lesson_editor import LessonRESTHandler
from unit_lesson_editor import LinkRESTHandler
from unit_lesson_editor import UnitLessonEditor
from unit_lesson_editor import UnitLessonTitleRESTHandler
from unit_lesson_editor import UnitRESTHandler
from google.appengine.api import users
class DashboardHandler(
CourseSettingsHandler, FileManagerAndEditor, UnitLessonEditor,
AssignmentManager, ApplicationHandler, ReflectiveRequestHandler):
"""Handles all pages and actions required for managing a course."""
default_action = 'outline'
get_actions = [
default_action, 'assets', 'settings', 'analytics',
'edit_basic_settings', 'edit_settings', 'edit_unit_lesson',
'edit_unit', 'edit_link', 'edit_lesson', 'edit_assessment',
'add_asset', 'delete_asset', 'import_course', 'edit_assignment']
# Requests to these handlers automatically go through an XSRF token check
# that is implemented in ReflectiveRequestHandler.
post_actions = [
'compute_student_stats', 'create_or_edit_settings', 'add_unit',
'add_link', 'add_assessment', 'add_lesson',
'edit_basic_course_settings', 'add_reviewer', 'delete_reviewer']
@classmethod
def get_child_routes(cls):
    """Add child handlers for REST.

    Returns:
        A list of (URI, handler class) pairs to register with the router.
    """
    # The original list registered AssetItemRESTHandler twice; each
    # handler is listed exactly once here.
    return [
        (AssessmentRESTHandler.URI, AssessmentRESTHandler),
        (AssetItemRESTHandler.URI, AssetItemRESTHandler),
        (AssetUriRESTHandler.URI, AssetUriRESTHandler),
        (CourseSettingsRESTHandler.URI, CourseSettingsRESTHandler),
        (FilesItemRESTHandler.URI, FilesItemRESTHandler),
        (ImportCourseRESTHandler.URI, ImportCourseRESTHandler),
        (LessonRESTHandler.URI, LessonRESTHandler),
        (LinkRESTHandler.URI, LinkRESTHandler),
        (UnitLessonTitleRESTHandler.URI, UnitLessonTitleRESTHandler),
        (UnitRESTHandler.URI, UnitRESTHandler),
    ]
def can_view(self):
    """Checks if current user has viewing rights."""
    # The dashboard has no read-only role: viewing requires course admin.
    return roles.Roles.is_course_admin(self.app_context)
def can_edit(self):
    """Checks if current user has editing rights."""
    # Same requirement as can_view(): course-admin rights.
    return roles.Roles.is_course_admin(self.app_context)
def get(self):
    """Enforces rights to all GET operations."""
    # Unauthorized users are bounced back to the course's public page.
    if not self.can_view():
        self.redirect(self.app_context.get_slug())
        return
    # Force reload of properties. It is expensive, but admin deserves it!
    config.Registry.get_overrides(force_update=True)
    return super(DashboardHandler, self).get()
def post(self):
    """Enforces rights to all POST operations."""
    # Unauthorized users are bounced back to the course's public page.
    if not self.can_edit():
        self.redirect(self.app_context.get_slug())
        return
    return super(DashboardHandler, self).post()
def get_template(self, template_name, dirs):
    """Sets up a Jinja environment and returns the named template.

    Templates are looked up in 'dirs' first, then in this module's own
    directory.
    """
    search_path = dirs + [os.path.dirname(__file__)]
    env = jinja2.Environment(
        autoescape=True, finalize=jinja_filters.finalize,
        loader=jinja2.FileSystemLoader(search_path))
    env.filters['js_string'] = jinja_filters.js_string
    return env.get_template(template_name)
def _get_alerts(self):
    """Returns newline-joined warnings about the current course state."""
    # Each entry pairs an "all is well" condition with the warning to show
    # when that condition is False.
    checks = [
        (courses.is_editable_fs(self.app_context), 'Read-only course.'),
        (self.app_context.now_available,
         'The course is not publicly available.'),
    ]
    return '\n'.join(text for ok, text in checks if not ok)
def _get_top_nav(self):
    """Builds the dashboard's top navigation bar as a safe_dom node list."""
    current_action = self.request.get('action')
    nav = safe_dom.NodeList()

    # Main dashboard tabs; the tab matching the current action is
    # highlighted via the 'selected' CSS class.
    for action, title in (
            ('', 'Outline'),
            ('assets', 'Assets'),
            ('settings', 'Settings'),
            ('analytics', 'Analytics'),
            ('edit_assignment', 'Peer Review')):
        highlight = 'selected' if action == current_action else ''
        nav.append(safe_dom.Element(
            'a', href='dashboard?action=%s' % action,
            className=highlight).add_text(title))

    # Super-admins additionally get a link to the site-wide admin console.
    if roles.Roles.is_super_admin():
        nav.append(safe_dom.Element('a', href='/admin').add_text('Admin'))

    nav.append(safe_dom.Element(
        'a', href='https://code.google.com/p/course-builder/wiki/Dashboard',
        target='_blank').add_text('Help'))
    return nav
def render_page(self, template_values):
    """Renders a page using provided template values."""
    template_values['top_nav'] = self._get_top_nav()
    template_values['gcb_course_base'] = self.get_base_href(self)
    # Header shows the signed-in user's email plus a logout link.
    template_values['user_nav'] = safe_dom.NodeList().append(
        safe_dom.Text('%s | ' % users.get_current_user().email())
    ).append(
        safe_dom.Element(
            'a', href=users.create_logout_url(self.request.uri)
        ).add_text('Logout'))
    template_values[
        'page_footer'] = 'Created on: %s' % datetime.datetime.now()
    # The view template iterates over 'sections'; guarantee it exists.
    if not template_values.get('sections'):
        template_values['sections'] = []

    self.response.write(
        self.get_template('view.html', []).render(template_values))
def format_title(self, text):
    """Formats standard breadcrumb-style page title.

    Produces: Course Builder > <course title> > Dashboard > <text>.
    """
    course_title = self.app_context.get_environ()['course']['title']
    crumbs = [
        safe_dom.Text('Course Builder '),
        safe_dom.Entity('&gt;'),
        safe_dom.Text(' %s ' % course_title),
        safe_dom.Entity('&gt;'),
        safe_dom.Text(' Dashboard '),
        safe_dom.Entity('&gt;'),
        safe_dom.Text(' %s' % text),
    ]
    title = safe_dom.NodeList()
    for node in crumbs:
        title.append(node)
    return title
def _get_edit_link(self, url):
    """Returns a leading-space 'Edit' anchor pointing at *url*."""
    nodes = safe_dom.NodeList()
    nodes.append(safe_dom.Text(' '))
    nodes.append(safe_dom.Element('a', href=url).add_text('Edit'))
    return nodes
def _get_availability(self, resource):
    """Returns a draft label for unpublished resources, else empty text.

    Resources without a 'now_available' attribute are treated as
    available and produce no label.
    """
    if getattr(resource, 'now_available', True):
        return safe_dom.Text('')
    label = safe_dom.Element(
        'span', className='draft-label'
    ).add_text('(%s)' % unit_lesson_editor.DRAFT_TEXT)
    return safe_dom.NodeList().append(safe_dom.Text(' ')).append(label)
def render_course_outline_to_html(self):
    """Renders course outline to HTML.

    Builds a nested <ul>/<ol> structure with one top-level entry per
    unit; 'U' units additionally nest one entry per lesson. When the
    file system is editable, every entry also gets an 'Edit' link.

    Returns:
        A safe_dom.Element, or [] when the course has no units.
    """
    course = courses.Course(self)
    if not course.get_units():
        return []
    is_editable = filer.is_editable_fs(self.app_context)
    lines = safe_dom.Element('ul', style='list-style: none;')
    for unit in course.get_units():
        # 'A' units are assessments.
        if unit.type == 'A':
            li = safe_dom.Element('li').add_child(
                safe_dom.Element(
                    'a', href='assessment?name=%s' % unit.unit_id,
                    className='strong'
                ).add_text(unit.title)
            ).add_child(self._get_availability(unit))
            if is_editable:
                url = self.canonicalize_url(
                    '/dashboard?%s') % urllib.urlencode({
                        'action': 'edit_assessment',
                        'key': unit.unit_id})
                li.add_child(self._get_edit_link(url))
            lines.add_child(li)
            continue
        # 'O' units are external links (they carry their own href).
        if unit.type == 'O':
            li = safe_dom.Element('li').add_child(
                safe_dom.Element(
                    'a', href=unit.href, className='strong'
                ).add_text(unit.title)
            ).add_child(self._get_availability(unit))
            if is_editable:
                url = self.canonicalize_url(
                    '/dashboard?%s') % urllib.urlencode({
                        'action': 'edit_link',
                        'key': unit.unit_id})
                li.add_child(self._get_edit_link(url))
            lines.add_child(li)
            continue
        # 'U' units are regular units with nested lessons.
        if unit.type == 'U':
            li = safe_dom.Element('li').add_child(
                safe_dom.Element(
                    'a', href='unit?unit=%s' % unit.unit_id,
                    className='strong').add_text(
                        'Unit %s - %s' % (unit.index, unit.title))
            ).add_child(self._get_availability(unit))
            if is_editable:
                url = self.canonicalize_url(
                    '/dashboard?%s') % urllib.urlencode({
                        'action': 'edit_unit',
                        'key': unit.unit_id})
                li.add_child(self._get_edit_link(url))
            ol = safe_dom.Element('ol')
            for lesson in course.get_lessons(unit.unit_id):
                li2 = safe_dom.Element('li').add_child(
                    safe_dom.Element(
                        'a',
                        href='unit?unit=%s&lesson=%s' % (
                            unit.unit_id, lesson.lesson_id),
                    ).add_text(lesson.title)
                ).add_child(self._get_availability(lesson))
                if is_editable:
                    url = self.get_action_url(
                        'edit_lesson', key=lesson.lesson_id)
                    li2.add_child(self._get_edit_link(url))
                ol.add_child(li2)
            li.add_child(ol)
            lines.add_child(li)
            continue
        raise Exception('Unknown unit type: %s.' % unit.type)
    return lines
def get_outline(self):
    """Renders course outline view."""
    # Links to the student-facing pages of the course.
    pages_info = [
        safe_dom.Element(
            'a', href=self.canonicalize_url('/announcements')
        ).add_text('Announcements'),
        safe_dom.Element(
            'a', href=self.canonicalize_url('/course')
        ).add_text('Course')]
    outline_actions = []
    # Content-editing actions are only offered on a writable file system.
    if filer.is_editable_fs(self.app_context):
        outline_actions.append({
            'id': 'edit_unit_lesson',
            'caption': 'Organize',
            'href': self.get_action_url('edit_unit_lesson')})
        outline_actions.append({
            'id': 'add_lesson',
            'caption': 'Add Lesson',
            'action': self.get_action_url('add_lesson'),
            'xsrf_token': self.create_xsrf_token('add_lesson')})
        outline_actions.append({
            'id': 'add_unit',
            'caption': 'Add Unit',
            'action': self.get_action_url('add_unit'),
            'xsrf_token': self.create_xsrf_token('add_unit')})
        outline_actions.append({
            'id': 'add_link',
            'caption': 'Add Link',
            'action': self.get_action_url('add_link'),
            'xsrf_token': self.create_xsrf_token('add_link')})
        outline_actions.append({
            'id': 'add_assessment',
            'caption': 'Add Assessment',
            'action': self.get_action_url('add_assessment'),
            'xsrf_token': self.create_xsrf_token('add_assessment')})
        # Importing another course is only offered while this one is empty.
        if not courses.Course(self).get_units():
            outline_actions.append({
                'id': 'import_course',
                'caption': 'Import',
                'href': self.get_action_url('import_course')
            })
    data_info = self.list_files('/data/')
    sections = [
        {
            'title': 'Pages',
            'description': messages.PAGES_DESCRIPTION,
            'children': pages_info},
        {
            'title': 'Course Outline',
            'description': messages.COURSE_OUTLINE_DESCRIPTION,
            'actions': outline_actions,
            'pre': self.render_course_outline_to_html()},
        {
            'title': 'Data Files',
            'description': messages.DATA_FILES_DESCRIPTION,
            'children': data_info}]
    template_values = {}
    template_values['page_title'] = self.format_title('Outline')
    template_values['alerts'] = self._get_alerts()
    template_values['sections'] = sections
    self.render_page(template_values)
def get_action_url(self, action, key=None, extra_args=None):
    """Builds a canonical /dashboard URL for *action*.

    Args:
        action: str. Value of the 'action' query parameter.
        key: optional value for the 'key' query parameter.
        extra_args: optional dict of additional query parameters.

    Returns:
        The canonicalized dashboard URL string.
    """
    params = {'action': action}
    if key:
        params['key'] = key
    if extra_args:
        params.update(extra_args)
    return self.canonicalize_url('/dashboard?%s' % urllib.urlencode(params))
def get_settings(self):
    """Renders course settings view."""
    yaml_actions = []
    basic_setting_actions = []
    # Basic course info.
    course_info = [
        'Course Title: %s' % self.app_context.get_environ()['course'][
            'title'],
        'Context Path: %s' % self.app_context.get_slug(),
        'Datastore Namespace: %s' % self.app_context.get_namespace_name()]
    # Course file system; show the home folder only for local read-only fs.
    fs = self.app_context.fs.impl
    course_info.append(('File System: %s' % fs.__class__.__name__))
    if fs.__class__ == vfs.LocalReadOnlyFileSystem:
        course_info.append(('Home Folder: %s' % sites.abspath(
            self.app_context.get_home_folder(), '/')))
    # Enable editing if supported by the file system.
    if filer.is_editable_fs(self.app_context):
        yaml_actions.append({
            'id': 'edit_course_yaml',
            'caption': 'Advanced Edit',
            'action': self.get_action_url('create_or_edit_settings'),
            'xsrf_token': self.create_xsrf_token(
                'create_or_edit_settings')})
        yaml_actions.append({
            'id': 'edit_basic_course_settings',
            'caption': 'Edit',
            'action': self.get_action_url('edit_basic_course_settings'),
            'xsrf_token': self.create_xsrf_token(
                'edit_basic_course_settings')})
    # Yaml file content, shown one line per child entry.
    yaml_info = []
    yaml_stream = self.app_context.fs.open(
        self.app_context.get_config_filename())
    if yaml_stream:
        yaml_lines = yaml_stream.read().decode('utf-8')
        for line in yaml_lines.split('\n'):
            yaml_info.append(line)
    else:
        yaml_info.append('< empty file >')
    # Prepare template values.
    template_values = {}
    template_values['page_title'] = self.format_title('Settings')
    template_values['page_description'] = messages.SETTINGS_DESCRIPTION
    template_values['sections'] = [
        {
            'title': 'About the Course',
            'description': messages.ABOUT_THE_COURSE_DESCRIPTION,
            'actions': basic_setting_actions,
            'children': course_info},
        {
            'title': 'Contents of course.yaml file',
            'description': messages.CONTENTS_OF_THE_COURSE_DESCRIPTION,
            'actions': yaml_actions,
            'children': yaml_info}]
    self.render_page(template_values)
def list_files(self, subfolder):
    """Lists files under *subfolder* as normalized course-relative paths."""
    home = sites.abspath(self.app_context.get_home_folder(), '/')
    abs_filenames = self.app_context.fs.list(
        sites.abspath(self.app_context.get_home_folder(), subfolder))
    return [
        vfs.AbstractFileSystem.normpath(os.path.relpath(name, home))
        for name in sorted(abs_filenames)]
def list_and_format_file_list(
    self, title, subfolder,
    links=False, upload=False, prefix=None, caption_if_empty='< none >',
    edit_url_template=None, sub_title=None):
    """Walks files in folders and renders their names in a section.

    Args:
        title: section heading text; rendered with a count when non-zero.
        subfolder: course-relative folder whose files are listed.
        links: if True, render each file name as a link to the file.
        upload: if True (and the fs is editable), add an 'Upload' button.
        prefix: if set, only files whose names start with it are shown.
        caption_if_empty: text shown when no files match; None hides it.
        edit_url_template: optional '%s' template producing a per-file
            '[Edit]' link URL.
        sub_title: optional blockquote text shown under the heading.

    Returns:
        A safe_dom.NodeList with the rendered section.

    NOTE(review): the separator before '[Edit]' was restored to the
    '&nbsp;' entity form — a literal space is not a valid HTML entity
    and appears to be an extraction artifact; confirm against
    common.safe_dom.Entity.
    """
    items = safe_dom.NodeList()
    count = 0
    for filename in self.list_files(subfolder):
        if prefix and not filename.startswith(prefix):
            continue
        li = safe_dom.Element('li')
        if links:
            li.add_child(safe_dom.Element(
                'a', href=urllib.quote(filename)).add_text(filename))
            if edit_url_template:
                edit_url = edit_url_template % urllib.quote(filename)
                li.add_child(
                    safe_dom.Entity('&nbsp;')
                ).add_child(
                    safe_dom.Element('a', href=edit_url).add_text('[Edit]'))
        else:
            li.add_text(filename)
        count += 1
        items.append(li)
    output = safe_dom.NodeList()
    if filer.is_editable_fs(self.app_context) and upload:
        output.append(
            safe_dom.Element(
                'a', className='gcb-button gcb-pull-right',
                href='dashboard?%s' % urllib.urlencode(
                    {'action': 'add_asset', 'base': subfolder})
            ).add_text('Upload')
        ).append(
            safe_dom.Element('div', style='clear: both; padding-top: 2px;'))
    if title:
        h3 = safe_dom.Element('h3')
        if count:
            h3.add_text('%s (%s)' % (title, count))
        else:
            h3.add_text(title)
        output.append(h3)
    if sub_title:
        output.append(safe_dom.Element('blockquote').add_text(sub_title))
    if items:
        output.append(safe_dom.Element('ol').add_children(items))
    elif caption_if_empty:
        output.append(
            safe_dom.Element('blockquote').add_text(caption_if_empty))
    return output
def get_assets(self):
    """Renders course assets view."""

    def inherits_from(folder):
        # Caption shown when a folder has no course-local overrides.
        return '< inherited from %s >' % folder

    items = safe_dom.NodeList()
    items.append(self.list_and_format_file_list(
        'Assessments', '/assets/js/', links=True,
        prefix='assets/js/assessment-'))
    items.append(self.list_and_format_file_list(
        'Activities', '/assets/js/', links=True,
        prefix='assets/js/activity-'))
    items.append(self.list_and_format_file_list(
        'Images & Documents', '/assets/img/', links=True, upload=True,
        edit_url_template='dashboard?action=delete_asset&uri=%s',
        sub_title='< inherited from /assets/img/ >',
        caption_if_empty=None))
    items.append(self.list_and_format_file_list(
        'Cascading Style Sheets', '/assets/css/', links=True,
        caption_if_empty=inherits_from('/assets/css/')))
    items.append(self.list_and_format_file_list(
        'JavaScript Libraries', '/assets/lib/', links=True,
        caption_if_empty=inherits_from('/assets/lib/')))
    items.append(self.list_and_format_file_list(
        'View Templates', '/views/',
        caption_if_empty=inherits_from('/views/')))

    template_values = {
        'page_title': self.format_title('Assets'),
        'page_description': messages.ASSETS_DESCRIPTION,
        'main_content': items,
    }
    self.render_page(template_values)
def get_markup_for_basic_analytics(self, job):
    """Renders markup for basic enrollment and assessment analytics.

    Args:
        job: the last ComputeStudentStats job record, or None when the
            statistics have never been computed.

    Returns:
        jinja2 Markup rendered from 'basic_analytics.html'.
    """
    subtemplate_values = {}
    errors = []
    stats_calculated = False
    update_message = safe_dom.Text('')
    if not job:
        update_message = safe_dom.Text(
            'Enrollment/assessment statistics have not been calculated '
            'yet.')
    else:
        if job.status_code == jobs.STATUS_CODE_COMPLETED:
            stats = transforms.loads(job.output)
            stats_calculated = True
            subtemplate_values['enrolled'] = stats['enrollment']['enrolled']
            subtemplate_values['unenrolled'] = (
                stats['enrollment']['unenrolled'])
            # Each 'scores' value is a (count, sum) pair per assessment;
            # show the count and a one-decimal average.
            scores = []
            total_records = 0
            for key, value in stats['scores'].items():
                total_records += value[0]
                avg = round(value[1] / value[0], 1) if value[0] else 0
                scores.append({'key': key, 'completed': value[0],
                               'avg': avg})
            subtemplate_values['scores'] = scores
            subtemplate_values['total_records'] = total_records
            update_message = safe_dom.Text("""
                Enrollment and assessment statistics were last updated at
                %s in about %s second(s).""" % (
                    job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT),
                    job.execution_time_sec))
        elif job.status_code == jobs.STATUS_CODE_FAILED:
            update_message = safe_dom.NodeList().append(
                safe_dom.Text("""
                    There was an error updating enrollment/assessment
                    statistics. Here is the message:""")
            ).append(
                safe_dom.Element('br')
            ).append(
                safe_dom.Element('blockquote').add_child(
                    safe_dom.Element('pre').add_text('\n%s' % job.output)))
        else:
            # Job exists but is neither completed nor failed: still running.
            update_message = safe_dom.Text(
                'Enrollment and assessment statistics update started at %s'
                ' and is running now. Please come back shortly.' %
                job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT))
    subtemplate_values['stats_calculated'] = stats_calculated
    subtemplate_values['errors'] = errors
    subtemplate_values['update_message'] = update_message
    return jinja2.utils.Markup(self.get_template(
        'basic_analytics.html', [os.path.dirname(__file__)]
    ).render(subtemplate_values, autoescape=True))
def get_analytics(self):
    """Renders course analytics view."""
    template_values = {}
    template_values['page_title'] = self.format_title('Analytics')
    at_least_one_job_exists = False
    at_least_one_job_finished = False
    # Built-in enrollment/assessment statistics come first.
    basic_analytics_job = ComputeStudentStats(self.app_context).load()
    stats_html = self.get_markup_for_basic_analytics(basic_analytics_job)
    if basic_analytics_job:
        at_least_one_job_exists = True
        if basic_analytics_job.status_code == jobs.STATUS_CODE_COMPLETED:
            at_least_one_job_finished = True
    # Then any custom sections registered via DashboardRegistry.
    for callback in DashboardRegistry.analytics_handlers:
        handler = callback()
        handler.app_context = self.app_context
        handler.request = self.request
        handler.response = self.response
        job = handler.stats_computer(self.app_context).load()
        stats_html += handler.get_markup(job)
        if job:
            at_least_one_job_exists = True
            if job.status_code == jobs.STATUS_CODE_COMPLETED:
                at_least_one_job_finished = True
    # The recalculate button is hidden while any job is still running.
    template_values['main_content'] = jinja2.utils.Markup(self.get_template(
        'analytics.html', [os.path.dirname(__file__)]
    ).render({
        'show_recalculate_button': (
            at_least_one_job_finished or not at_least_one_job_exists),
        'stats_html': stats_html,
        'xsrf_token': self.create_xsrf_token('compute_student_stats'),
    }, autoescape=True))
    self.render_page(template_values)
def post_compute_student_stats(self):
    """Submits a new student statistics calculation task."""
    # Kick off the built-in job, then one job per registered handler.
    ComputeStudentStats(self.app_context).submit()
    for callback in DashboardRegistry.analytics_handlers:
        callback().stats_computer(self.app_context).submit()
    self.redirect('/dashboard?action=analytics')
class ScoresAggregator(object):
    """Aggregates scores statistics.

    For each assessment name we keep a tuple of
    (student_count, sum_of_scores), stored in name_to_tuple.
    """

    def __init__(self):
        self.name_to_tuple = {}

    def visit(self, student):
        """Folds one student's serialized scores into the running totals."""
        if not student.scores:
            return
        for name, value in transforms.loads(student.scores).items():
            count, score_sum = self.name_to_tuple.get(name, (0, 0))
            self.name_to_tuple[name] = (count + 1, score_sum + float(value))
class EnrollmentAggregator(object):
    """Aggregates enrollment statistics."""

    def __init__(self):
        # Running totals, updated once per visit() call.
        self.enrolled = 0
        self.unenrolled = 0

    def visit(self, student):
        """Tallies one student into the appropriate counter."""
        counter = 'enrolled' if student.is_enrolled else 'unenrolled'
        setattr(self, counter, getattr(self, counter) + 1)
class ComputeStudentStats(jobs.DurableJob):
    """A job that computes student statistics."""

    def run(self):
        """Computes student statistics.

        Streams every Student entity through both aggregators and
        returns a plain dict so the result can be serialized as the
        job output.
        """
        enrollment = EnrollmentAggregator()
        scores = ScoresAggregator()
        mapper = utils.QueryMapper(
            Student.all(), batch_size=500, report_every=1000)

        def map_fn(student):
            # Each student is visited once by both aggregators.
            enrollment.visit(student)
            scores.visit(student)
        mapper.run(map_fn)
        data = {
            'enrollment': {
                'enrolled': enrollment.enrolled,
                'unenrolled': enrollment.unenrolled},
            'scores': scores.name_to_tuple}
        return data
class DashboardRegistry(object):
    """Holds registered handlers that produce HTML code for the dashboard."""
    analytics_handlers = []

    @classmethod
    def add_custom_analytics_section(cls, handler):
        """Adds handlers that provide additional data for the Analytics page.

        Registering the same handler twice is a no-op; registering a
        different handler with an already-used name raises.
        """
        if handler in cls.analytics_handlers:
            return
        # 'enrollment' and 'scores' are reserved for the built-in stats.
        taken_names = set(h.name for h in cls.analytics_handlers)
        taken_names.update(['enrollment', 'scores'])
        if handler.name in taken_names:
            raise Exception('Stats handler name %s is being duplicated.'
                            % handler.name)
        cls.analytics_handlers.append(handler)
# Module instance; populated by register_module().
custom_module = None


def register_module():
    """Registers this module in the registry."""
    global custom_module
    custom_module = custom_modules.Module(
        'Course Dashboard',
        'A set of pages for managing Course Builder course.',
        [], [('/dashboard', DashboardHandler)])
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for management of individual students' peer review assignments."""
__author__ = 'Sean Lip (sll@google.com)'
import os
import urllib
from controllers.lessons import create_readonly_assessment_params
from controllers.utils import ApplicationHandler
import jinja2
from models import courses
from models import models
from models import review
from models import roles
from models import student_work
from modules.review import domain
import messages
class AssignmentsRights(object):
    """Manages view/edit rights for assignments and reviews."""

    @classmethod
    def can_view(cls, handler):
        """Course admins may view peer-review assignments."""
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_edit(cls, handler):
        """Course admins may edit reviewer assignments."""
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        """Deleting requires the same rights as editing."""
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        """Adding requires the same rights as editing."""
        return cls.can_edit(handler)
class AssignmentManager(ApplicationHandler):
    """A view for managing human-reviewed assignments."""

    def get_assignment_html(
        self, peer_reviewed_units, unit_id=None, reviewee_id=None,
        error_msg=None, readonly_assessment=None, review_steps=None,
        reviewers=None, reviews_params=None):
        """Renders a template allowing an admin to select an assignment."""
        edit_url = self.canonicalize_url('/dashboard')
        return jinja2.utils.Markup(self.get_template(
            'assignments_menu.html', [os.path.dirname(__file__)]
        ).render({
            'REVIEW_STATE_COMPLETED': domain.REVIEW_STATE_COMPLETED,
            'add_reviewer_action': self.get_action_url('add_reviewer'),
            'add_reviewer_xsrf_token': self.create_xsrf_token('add_reviewer'),
            'delete_reviewer_action': self.get_action_url('delete_reviewer'),
            'delete_reviewer_xsrf_token': self.create_xsrf_token(
                'delete_reviewer'),
            'edit_assignment_action': 'edit_assignment',
            'edit_url': edit_url,
            'error_msg': error_msg,
            'peer_reviewed_units': peer_reviewed_units,
            'readonly_student_assessment': readonly_assessment,
            'reviewee_id': reviewee_id or '',
            'reviewers': reviewers,
            'reviews_params': reviews_params,
            'review_steps': review_steps,
            'unit_id': unit_id,
        }, autoescape=True))

    def parse_request(self, course, unit_id, reviewee_id, reviewer_id=None):
        """Parses request parameters in a GET or POST request.

        Args:
            course: Course. A course object.
            unit_id: str. The id of the unit.
            reviewee_id: str. The email address of the reviewee.
            reviewer_id: str. The email address of the reviewer.

        Returns:
            - a dict containing some subset of the following keys: unit,
                reviewee, reviewer.
            - if necessary, an error message to be passed to the frontend.
        """
        request_params = {}
        # Check unit validity.
        if not unit_id:
            return request_params, ''
        unit = course.find_unit_by_id(unit_id)
        if not unit:
            return request_params, '404: Unit not found.'
        # Only human-graded, peer-matched units are managed on this page.
        if (unit.workflow.get_grader() != courses.HUMAN_GRADER or
            unit.workflow.get_matcher() != review.PEER_MATCHER):
            return request_params, '412: This unit is not peer-graded.'
        request_params['unit'] = unit
        # Check reviewee validity.
        if not reviewee_id:
            return request_params, '412: No student email supplied.'
        reviewee = models.Student.get_enrolled_student_by_email(reviewee_id)
        if not reviewee:
            return (request_params,
                    '412: No student with this email address exists.')
        request_params['reviewee'] = reviewee
        # Check reviewer validity, if applicable. None means the caller
        # did not ask for reviewer parsing; '' is a missing-value error.
        if reviewer_id is not None:
            if not reviewer_id:
                return request_params, '412: No reviewer email supplied.'
            reviewer = models.Student.get_enrolled_student_by_email(reviewer_id)
            if not reviewer:
                return (request_params,
                        '412: No reviewer with this email address exists.')
            request_params['reviewer'] = reviewer
        return request_params, ''

    def get_edit_assignment(self):
        """Shows interface for selecting and viewing a student assignment."""
        if not AssignmentsRights.can_view(self):
            self.error(401)
            return
        course = courses.Course(self)
        peer_reviewed_units = course.get_peer_reviewed_units()
        template_values = {}
        template_values['page_title'] = self.format_title('Peer Review')
        template_values['page_description'] = (
            messages.ASSIGNMENTS_MENU_DESCRIPTION)
        unit_id = self.request.get('unit_id')
        if not unit_id:
            # No unit has been set yet, so display an empty form.
            template_values['main_content'] = self.get_assignment_html(
                peer_reviewed_units)
            self.render_page(template_values)
            return
        reviewee_id = self.request.get('reviewee_id')
        # This field may be populated due to a redirect from a POST method.
        post_error_msg = self.request.get('post_error_msg')
        request_params, error_msg = self.parse_request(
            course, unit_id, reviewee_id)
        unit = request_params.get('unit')
        reviewee = request_params.get('reviewee')
        if error_msg:
            template_values['main_content'] = self.get_assignment_html(
                peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
                error_msg=error_msg)
            self.render_page(template_values)
            return
        # Render content.
        assessment_content = course.get_assessment_content(unit)
        rp = course.get_reviews_processor()
        submission_and_review_steps = rp.get_submission_and_review_steps(
            unit.unit_id, reviewee.get_key())
        if not submission_and_review_steps:
            template_values['main_content'] = self.get_assignment_html(
                peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
                error_msg='412: This student hasn\'t submitted the assignment.'
            )
            self.render_page(template_values)
            return
        # The student's own submission, rendered read-only.
        submission_contents = submission_and_review_steps[0]
        answer_list = student_work.StudentWorkUtils.get_answer_list(
            submission_contents)
        readonly_assessment = create_readonly_assessment_params(
            assessment_content, answer_list
        )
        # One read-only review form per review step, plus the reviewer's
        # email address for each.
        review_form = course.get_review_form_content(unit)
        review_steps = submission_and_review_steps[1]
        reviews = rp.get_reviews_by_keys(
            unit.unit_id,
            [review_step.review_key for review_step in review_steps],
            handle_empty_keys=True)
        reviews_params = []
        reviewers = []
        for idx, review_step in enumerate(review_steps):
            params = create_readonly_assessment_params(
                review_form,
                student_work.StudentWorkUtils.get_answer_list(reviews[idx])
            )
            reviews_params.append(params)
            reviewer = models.Student.get_student_by_user_id(
                review_step.reviewer_key.name()).key().name()
            reviewers.append(reviewer)
        assert len(reviewers) == len(review_steps)
        assert len(reviews_params) == len(review_steps)
        template_values['main_content'] = self.get_assignment_html(
            peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
            readonly_assessment=readonly_assessment, review_steps=review_steps,
            error_msg=post_error_msg, reviewers=reviewers,
            reviews_params=reviews_params)
        self.render_page(template_values)

    def post_add_reviewer(self):
        """Adds a new reviewer to a human-reviewed assignment."""
        if not AssignmentsRights.can_edit(self):
            self.error(401)
            return
        course = courses.Course(self)
        unit_id = self.request.get('unit_id')
        reviewee_id = self.request.get('reviewee_id')
        reviewer_id = self.request.get('reviewer_id')
        request_params, post_error_msg = self.parse_request(
            course, unit_id, reviewee_id, reviewer_id=reviewer_id)
        # Errors are reported by redirecting back to the GET view with a
        # 'post_error_msg' query parameter.
        redirect_params = {
            'action': 'edit_assignment',
            'reviewee_id': reviewee_id,
            'reviewer_id': reviewer_id,
            'unit_id': unit_id,
        }
        if post_error_msg:
            redirect_params['post_error_msg'] = post_error_msg
            self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
            return
        unit = request_params.get('unit')
        reviewee = request_params.get('reviewee')
        reviewer = request_params.get('reviewer')
        rp = course.get_reviews_processor()
        reviewee_key = reviewee.get_key()
        reviewer_key = reviewer.get_key()
        try:
            rp.add_reviewer(unit.unit_id, reviewee_key, reviewer_key)
        except domain.TransitionError:
            redirect_params['post_error_msg'] = (
                '412: The reviewer is already assigned to this submission.')
        self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))

    def post_delete_reviewer(self):
        """Deletes a reviewer from a human-reviewed assignment."""
        if not AssignmentsRights.can_edit(self):
            self.error(401)
            return
        course = courses.Course(self)
        unit_id = self.request.get('unit_id')
        reviewee_id = self.request.get('reviewee_id')
        review_step_key = self.request.get('key')
        request_params, post_error_msg = self.parse_request(
            course, unit_id, reviewee_id)
        redirect_params = {
            'action': 'edit_assignment',
            'reviewee_id': reviewee_id,
            'unit_id': unit_id,
        }
        if post_error_msg:
            redirect_params['post_error_msg'] = post_error_msg
            self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
            return
        rp = course.get_reviews_processor()
        unit = request_params.get('unit')
        rp.delete_reviewer(unit.unit_id, review_step_key)
        self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course Builder web application entry point."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
import webapp2
# The following import is needed in order to add third-party libraries.
import appengine_config # pylint: disable-msg=unused-import
from common import tags
from controllers import sites
from models import custom_modules
import modules.admin.admin
import modules.announcements.announcements
import modules.courses.courses
import modules.dashboard.dashboard
import modules.oauth2.oauth2
import modules.oeditor.oeditor
import modules.review.review
# Use this flag to control debug-only features; debug is on everywhere
# except production.
debug = not appengine_config.PRODUCTION_MODE

# Init and enable most known modules.
modules.oeditor.oeditor.register_module().enable()
modules.admin.admin.register_module().enable()
modules.dashboard.dashboard.register_module().enable()
modules.announcements.announcements.register_module().enable()
modules.review.review.register_module().enable()
modules.courses.courses.register_module().enable()

# Register modules that are not enabled by default.
modules.oauth2.oauth2.register_module()

# Compute all possible routes from the registered modules.
global_routes, namespaced_routes = custom_modules.Registry.get_all_routes()

# Routes available at '/%namespace%/' context paths are dispatched through
# a single catch-all handler.
sites.ApplicationRequestHandler.bind(namespaced_routes)
app_routes = [(r'(.*)', sites.ApplicationRequestHandler)]

# Tag extension resource routes.
extensions_tag_resource_routes = [(
    '/extensions/tags/.*/resources/.*', tags.ResourcesHandler)]

# i18n configuration for jinja2.
webapp2_i18n_config = {'translations_path': os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules/i18n/resources/locale')}

# Init application.
app = webapp2.WSGIApplication(
    global_routes + extensions_tag_resource_routes + app_routes,
    config={'webapp2_extras.i18n': webapp2_i18n_config},
    debug=debug)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom configurations and functions for Google App Engine."""
__author__ = 'psimakov@google.com (Pavel Simakov)'
import os
import sys
# Whether we are running in the production environment; the dev server
# reports SERVER_SOFTWARE starting with 'Development'.
PRODUCTION_MODE = not os.environ.get(
    'SERVER_SOFTWARE', 'Development').startswith('Development')

# Set this flag to true to enable bulk downloads of Javascript/CSS files in lib
BUNDLE_LIB_FILES = True

# this is the official location of this app for computing of all relative paths
BUNDLE_ROOT = os.path.dirname(__file__)

# make all Windows and Linux paths have the same separator '/'
BUNDLE_ROOT = BUNDLE_ROOT.replace('\\', '/')

# Default namespace name is '' and not None.
DEFAULT_NAMESPACE_NAME = ''
class _Library(object):
    """DDO that represents a Python library contained in a .zip file."""

    def __init__(self, zipfile, relative_path=None):
        self._zipfile = zipfile
        self._relative_path = relative_path

    @property
    def file_path(self):
        """Path to the library's file on disk."""
        return os.path.join(BUNDLE_ROOT, 'lib', self._zipfile)

    @property
    def full_path(self):
        """Full path for imports, containing archive-relative paths if any."""
        if not self._relative_path:
            return self.file_path
        return os.path.join(self.file_path, self._relative_path)
# Third-party library zip files; each is inserted onto sys.path by
# gcb_init_third_party() below.
THIRD_PARTY_LIBS = [
    _Library('babel-0.9.6.zip'),
    _Library('html5lib-0.95.zip'),
    _Library('httplib2-0.8.zip', relative_path='httplib2-0.8/python2'),
    _Library('gaepytz-2011h.zip'),
    _Library(
        'google-api-python-client-1.1.zip',
        relative_path='google-api-python-client-1.1'),
    # I repackaged this zip, which is available only as .tar.gz, because Python
    # can import directly from zips but not from tarballs. In real life we'd
    # need to find a better way of packaging this stuff.
    _Library('python-gflags-2.0.zip', relative_path='python-gflags-2.0'),
    _Library('pyparsing-1.5.7.zip'),
]
def gcb_force_default_encoding(encoding):
    """Force default encoding to a specific value.

    Eclipse silently sets default encoding to 'utf-8', while GAE forces
    'ascii'. We need to control this directly for consistency.
    """
    if sys.getdefaultencoding() == encoding:
        return
    # setdefaultencoding() is removed from sys during interpreter
    # startup; reloading the module restores it.
    reload(sys)
    sys.setdefaultencoding(encoding)
def gcb_init_third_party():
    """Add all third party libraries to system path."""
    for lib in THIRD_PARTY_LIBS:
        if not os.path.exists(lib.file_path):
            raise Exception('Library does not exist: %s' % lib.file_path)
        # Prepend so the bundled copy wins over any system-wide install.
        sys.path.insert(0, lib.full_path)


gcb_init_third_party()
| Python |
"""Unit tests for the common.sanitize module."""
__author__ = 'John Orr (jorr@google.com)'
import unittest
from common import safe_dom
class MockNode(safe_dom.Node):
    """Test double for Node whose sanitized form is a canned string."""

    def __init__(self, value):
        self._value = value

    @property
    def sanitized(self):
        # Returned verbatim; no escaping is performed by this mock.
        return self._value
class NodeListTests(unittest.TestCase):
    """Unit tests for common.safe_dom.NodeList."""

    def test_list(self):
        """NodeList should concatenate the sanitized form of its members."""
        node_list = safe_dom.NodeList()
        node_list.append(MockNode('a')).append(MockNode('b'))
        self.assertEqual('ab', node_list.sanitized)

    def test_len(self):
        """NodeList should support len."""
        node_list = safe_dom.NodeList().append(
            MockNode('a')).append(MockNode('b'))
        self.assertEqual(2, len(node_list))

    def test_append_node_list(self):
        """NodeList should support appending both Nodes and NodeLists."""
        # The appended NodeList's members contribute in order.
        node_list = safe_dom.NodeList().append(
            safe_dom.NodeList().append(MockNode('a')).append(MockNode('b'))
        ).append(MockNode('c'))
        self.assertEqual('abc', node_list.__str__())
class TextTests(unittest.TestCase):
    """Unit tests for common.safe_dom.Text.

    NOTE(review): the expected values were restored to the HTML-escaped
    form ('&lt;script&gt;'); comparing against the raw input contradicts
    these tests' stated purpose and looks like entity decoding during
    extraction. Confirm against common.safe_dom.Text's escaping.
    """

    def test_text_sanitizes(self):
        """Text should sanitize unsafe characters."""
        unsafe_string = '<script>'
        text = safe_dom.Text(unsafe_string)
        self.assertEqual('&lt;script&gt;', text.sanitized)

    def test_str_returns_sanitized(self):
        """The __str__ method should return sanitized text."""
        unsafe_string = '<script>'
        text = safe_dom.Text(unsafe_string)
        self.assertEqual('&lt;script&gt;', text.__str__())
class ElementTests(unittest.TestCase):
"""Unit tests for common.safe_dom.Element."""
def test_build_simple_element(self):
    """Element should build an element without attributes or children."""
    element = safe_dom.Element('p')
    # An empty element still renders both opening and closing tags.
    self.assertEqual('<p></p>', element.__str__())
def test_reject_bad_tag_names(self):
    """Element should reject bad tag names.

    Element validates names with an assert, so each invalid name must
    raise AssertionError. assertRaises makes the intent explicit and
    reports the failing name via the subtest-style loop.
    """
    bad_names = ['2a', 'a b', '@', 'a-b']
    for name in bad_names:
        with self.assertRaises(AssertionError):
            safe_dom.Element(name)
def test_reject_bad_attribute_names(self):
    """Element should reject bad attribute names.

    Attribute names are validated the same way as tag names, so each
    invalid name must raise AssertionError.
    """
    bad_names = ['2a', 'a b', '@', 'a-b']
    for name in bad_names:
        with self.assertRaises(AssertionError):
            safe_dom.Element('p', **{name: 'good value'})
def test_include_attributes(self):
"""Element should include tag attributes."""
element = safe_dom.Element('button', style='foo', onclick='action')
self.assertEqual(
'<button onclick="action" style="foo"></button>',
element.__str__())
def test_escape_quotes(self):
"""Element should escape single and double quote characters."""
element = safe_dom.Element('a', href='a\'b"c`d')
self.assertEqual(
'<a href="a'b"c`d"></a>', element.__str__())
def test_allow_parens(self):
"""Element should allow parentheses in attributes."""
element = safe_dom.Element('a', action='myAction()')
self.assertEqual('<a action="myAction()"></a>', element.__str__())
def test_allow_urls(self):
"""Element should allow urls with a method sepcified in an attribute."""
element = safe_dom.Element(
'a', action='http://a.b.com/d/e/f?var1=val1&var2=val2#fra')
self.assertEqual(
'<a action="http://a.b.com/d/e/f?var1=val1&var2=val2#fra"></a>',
element.__str__())
def test_url_query_chars(self):
"""Element should pass '?' and '=' characters in an attribute."""
element = safe_dom.Element('a', action='target?action=foo&value=bar')
self.assertEqual(
'<a action="target?action=foo&value=bar"></a>',
element.__str__())
def test_convert_none_to_empty(self):
"""An attribute with value None should render as empty."""
element = safe_dom.Element('a', action=None)
self.assertEqual('<a action=""></a>', element.__str__())
def test_coerce_className(self): # pylint: disable-msg=g-bad-name
"""Element should replace the 'className' attrib with 'class'."""
element = safe_dom.Element('p', className='foo')
self.assertEqual('<p class="foo"></p>', element.__str__())
def test_include_children(self):
"""Element should include child elements."""
element = safe_dom.Element('a').add_child(
safe_dom.Element('b').add_child(
safe_dom.Element('c'))
).add_child(
safe_dom.Element('d'))
self.assertEqual('<a><b><c></c></b><d></d></a>', element.__str__())
def test_include_node_list(self):
"""Element should include a list of children."""
element = safe_dom.Element('a').add_children(
safe_dom.NodeList().append(MockNode('b')).append(MockNode('c')))
self.assertEqual('<a>bc</a>', element.__str__())
def test_sanitize_children(self):
"""Element should sanitize child elements as they are included."""
element = safe_dom.Element('td').add_child(
safe_dom.Element('a', href='foo"bar').add_text('1<2'))
self.assertEqual(
'<td><a href="foo"bar">1<2</a></td>', element.__str__())
def test_add_text(self):
"""Adding text should add text which will be sanitized."""
self.assertEqual(
'<a>1<2</a>', safe_dom.Element('a').add_text('1<2').__str__())
def test_add_attribute(self):
"""Attributes can be added after initialization."""
self.assertEqual(
'<a b="c" d="e" f="g" h="i"></a>',
safe_dom.Element(
'a', b='c', d='e').add_attribute(f='g', h='i').__str__())
def test_void_elements_have_no_end_tags(self):
"""Void elements should have no end tag, e.g., <br/>."""
void_elements = [
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input',
'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track',
'wbr']
for elt in void_elements:
self.assertEqual('<%s/>' % elt, safe_dom.Element(elt).__str__())
def test_empty_non_void_elememnts_should_have_end_tags(self):
"""Non-void elements should have their end tags, even when empty."""
sample_elements = ['p', 'textarea', 'div']
for elt in sample_elements:
self.assertEqual(
'<%s></%s>' % (elt, elt), safe_dom.Element(elt).__str__())
class ScriptElementTests(unittest.TestCase):
    """Unit tests for common.safe_dom.ScriptElement."""

    def test_script_should_not_escape_body(self):
        """The body of the script tag should not be escaped."""
        script = safe_dom.ScriptElement()
        for snippet in ('alert("foo");', '1 < 2 && 2 > 1;'):
            script.add_text(snippet)
        self.assertEqual(
            '<script>alert("foo");1 < 2 && 2 > 1;</script>', script.__str__())

    def test_script_should_reject_close_script_tag_in_body(self):
        """Expect an error if the body of the script tag contains </script>."""
        script = safe_dom.ScriptElement()
        script.add_text('</script>')
        with self.assertRaises(ValueError):
            script.__str__()

    def test_script_should_not_allow_child_nodes_to_be_added(self):
        """Script should not allow child nodes to be added."""
        script = safe_dom.ScriptElement()
        with self.assertRaises(ValueError):
            script.add_child(safe_dom.Element('br'))
        with self.assertRaises(ValueError):
            script.add_children(
                safe_dom.NodeList().append(safe_dom.Element('br')))
class EntityTests(unittest.TestCase):
    """Unit tests for common.safe_dom.Entity.

    The entity literals below were restored to their escaped source forms
    (e.g. '&nbsp;', '&#38;', '&#x26AB;'); an earlier mangling had replaced
    them with the *rendered* characters, which made the tests meaningless.
    """

    def expect_pass(self, test_text):
        # A valid entity round-trips unchanged through __str__.
        entity = safe_dom.Entity(test_text)
        self.assertEqual(test_text, entity.__str__())

    def expect_fail(self, test_text):
        # An invalid entity must be rejected by the constructor's assertions.
        try:
            safe_dom.Entity(test_text)
        except AssertionError:
            return
        self.fail('Expected an assert exception')

    def test_should_pass_named_entities(self):
        self.expect_pass('&nbsp;')

    def test_should_pass_decimal_entities(self):
        self.expect_pass('&#38;')

    def test_should_pass_hex_entities(self):
        self.expect_pass('&#x26AB;')

    def test_entities_must_start_with_ampersand(self):
        self.expect_fail('nbsp;')

    def test_entities_must_end_with_semicolon(self):
        self.expect_fail('&nbsp')

    def test_named_entities_must_be_all_alpha(self):
        self.expect_fail('&qu2ot;')

    def test_decimal_entities_must_be_all_decimal_digits(self):
        self.expect_fail('&#A6;')

    def test_hex_entities_must_be_all_hex_digits(self):
        self.expect_fail('&#x26AG;')

    def test_entities_must_be_non_empty(self):
        self.expect_fail('&;')
        self.expect_fail('&#;')
        self.expect_fail('&#x;')

    def test_should_reject_extraneous_characters(self):
        self.expect_fail(' &nbsp;')
        self.expect_fail('&nbsp; ')

    def test_should_reject_tampering(self):
        # Mutating the private field after construction must still be caught
        # at render time.
        entity = safe_dom.Entity('&nbsp;')
        entity._entity = '<script/>'
        try:
            entity.__str__()
        except AssertionError:
            return
        self.fail('Expected an assert exception')
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for common/schema_fields.py."""
__author__ = 'John Orr (jorr@google.com)'
import json
import unittest
from common import schema_fields
def remove_whitespace(s):
    """Returns s with every whitespace character (spaces, tabs, newlines) removed."""
    return ''.join(part for part in s.split())
class BaseFieldTests(unittest.TestCase):
    """Base class for the tests on a schema field.

    Comparisons ignore all whitespace so expected JSON may be written in a
    readable multi-line form.
    """

    def assert_json_schema_value(self, expected, field):
        actual = json.dumps(field.get_json_schema_dict())
        self.assertEquals(
            remove_whitespace(expected), remove_whitespace(actual))

    def assert_schema_dict_value(self, expected, field):
        actual = json.dumps(field._get_schema_dict([]))
        self.assertEquals(
            remove_whitespace(expected), remove_whitespace(actual))
class SchemaFieldTests(BaseFieldTests):
    """Unit tests for common.schema_fields.SchemaField."""

    def test_simple_field(self):
        """A bare field exposes its type, label, and name."""
        simple = schema_fields.SchemaField('aName', 'aLabel', 'aType')
        self.assert_json_schema_value('{"type":"aType"}', simple)
        self.assert_schema_dict_value(
            '[[["_inputex"], {"label": "aLabel"}]]', simple)
        self.assertEquals('aName', simple.name)

    def test_extra_schema_dict(self):
        """Extra schema-dict values are merged into the inputex entry."""
        decorated = schema_fields.SchemaField(
            'aName', 'aLabel', 'aType',
            extra_schema_dict_values={'a': 'A', 'b': 'B'})
        self.assert_schema_dict_value(
            '[[["_inputex"], {"a": "A", "b": "B", "label": "aLabel"}]]',
            decorated)
class FieldArrayTests(BaseFieldTests):
    """Unit tests for common.schema_fields.FieldArray."""

    def test_field_array_with_simple_members(self):
        """An array of plain SchemaFields renders items by type/label only."""
        # Note: the member's own name ('unusedName') does not appear in the
        # generated schema; only its label and type do.
        array = schema_fields.FieldArray(
            'aName', 'aLabel',
            item_type=schema_fields.SchemaField(
                'unusedName', 'field_label', 'aType'))
        expected = """
            {
                "items": {"type": "aType"},
                "type": "array"
            }"""
        self.assert_json_schema_value(expected, array)
        expected = """
            [
                [["_inputex"],{"label":"aLabel"}],
                [["items","_inputex"],{"label":"field_label"}]
            ]
            """
        self.assert_schema_dict_value(expected, array)

    def test_field_array_with_object_members(self):
        """An array whose item_type is a FieldRegistry nests an object schema."""
        object_type = schema_fields.FieldRegistry('object_title')
        object_type.add_property(schema_fields.SchemaField(
            'prop_name', 'prop_label', 'prop_type'))
        field = schema_fields.FieldArray(
            'aName', 'aLabel', item_type=object_type)
        expected = """
            {
                "items": {
                    "type": "object",
                    "id": "object_title",
                    "properties": {
                        "prop_name": {"type":"prop_type"}
                    }
                },
                "type":"array"}
            """
        self.assert_json_schema_value(expected, field)
        expected = """
            [
                [["_inputex"],{"label":"aLabel"}],
                [["items","title"],"object_title"],
                [["items","properties","prop_name","_inputex"],{"label":"prop_label"}]
            ]
            """
        self.assert_schema_dict_value(expected, field)

    def test_extra_schema_dict(self):
        """extra_schema_dict_values are merged into the array's inputex entry."""
        array = schema_fields.FieldArray(
            'aName', 'aLabel',
            item_type=schema_fields.SchemaField(
                'unusedName', 'field_label', 'aType'),
            extra_schema_dict_values={'a': 'A', 'b': 'B'})
        expected = """
            [
                [["_inputex"],{"a":"A","b":"B","label":"aLabel"}],
                [["items","_inputex"],{"label":"field_label"}]]
            """
        self.assert_schema_dict_value(expected, array)
class FieldRegistryTests(BaseFieldTests):
    """Unit tests for common.schema_fields.FieldRegistry."""

    def test_single_property(self):
        """A registry with one property exposes it under 'properties'."""
        reg = schema_fields.FieldRegistry(
            'registry_name', 'registry_description')
        reg.add_property(schema_fields.SchemaField(
            'field_name', 'field_label', 'property_type',
            description='property_description'))
        expected = """
            {
                "properties": {
                    "field_name": {
                        "type": "property_type",
                        "description": "property_description"
                    }
                },
                "type": "object",
                "id": "registry_name",
                "description": "registry_description"
            }"""
        self.assert_json_schema_value(expected, reg)
        expected = """
            [
                [["title"], "registry_name"],
                [["properties","field_name","_inputex"], {
                    "description": "property_description",
                    "label":"field_label"
                }]
            ]
            """
        self.assert_schema_dict_value(expected, reg)

    def test_single_property_with_select_data(self):
        """select_data shows up as inputex 'choices', not in the JSON schema."""
        reg = schema_fields.FieldRegistry(
            'registry_name', 'registry_description')
        reg.add_property(schema_fields.SchemaField(
            'field_name', 'field_label', 'select',
            select_data=[('a', 'A'), ('b', 'B')]))
        expected = """
            {
                "properties": {
                    "field_name": {
                        "type": "select"
                    }
                },
                "type": "object",
                "id": "registry_name",
                "description": "registry_description"
            }"""
        self.assert_json_schema_value(expected, reg)
        expected = """
            [
                [["title"],"registry_name"],
                [["properties","field_name","_inputex"],{
                    "choices":[
                        {"value": "a", "label": "A"},
                        {"value": "b","label": "B"}],
                    "label":"field_label"
                }]
            ]
            """
        self.assert_schema_dict_value(expected, reg)

    def test_object_with_array_property(self):
        """A registry may mix scalar and array properties."""
        reg = schema_fields.FieldRegistry(
            'registry_name', 'registry_description')
        reg.add_property(schema_fields.SchemaField(
            'field_name', 'field_label', 'field_type',
            description='field_description'))
        reg.add_property(schema_fields.FieldArray(
            'array_name', 'array_label',
            item_type=schema_fields.SchemaField(
                'unusedName', 'unusedLabel', 'aType')))
        expected = """
            {
                "properties": {
                    "field_name": {
                        "type": "field_type",
                        "description": "field_description"
                    },
                    "array_name": {
                        "items": {"type": "aType"},
                        "type":"array"
                    }
                },
                "type": "object",
                "id": "registry_name",
                "description": "registry_description"
            }
            """
        self.assert_json_schema_value(expected, reg)

    def test_extra_schema_dict(self):
        """extra_schema_dict_values are emitted in the registry's inputex entry."""
        reg = schema_fields.FieldRegistry(
            'aName', 'aLabel',
            extra_schema_dict_values={'a': 'A', 'b': 'B'})
        expected = """
            [
                [["title"], "aName"],
                [["_inputex"], {"a": "A", "b": "B"}]]
            """
        self.assert_schema_dict_value(expected, reg)

    def test_mc_question_schema(self):
        """The multiple choice question schema is a good end-to-end example."""
        mc_question = schema_fields.FieldRegistry(
            'MC Question',
            extra_schema_dict_values={'className': 'mc-question'})
        mc_question.add_property(
            schema_fields.SchemaField('question', 'Question', 'string'))
        # Each choice is itself an object with text, score, and feedback.
        choice_type = schema_fields.FieldRegistry(
            'choice', extra_schema_dict_values={'className': 'mc-choice'})
        choice_type.add_property(
            schema_fields.SchemaField('text', 'Text', 'string'))
        choice_type.add_property(
            schema_fields.SchemaField('score', 'Score', 'string'))
        choice_type.add_property(
            schema_fields.SchemaField('feedback', 'Feedback', 'string'))
        choices_array = schema_fields.FieldArray(
            'choices', 'Choices', item_type=choice_type)
        mc_question.add_property(choices_array)
        # NOTE: the id is 'MCQuestion' — the registry apparently strips spaces
        # from the title when forming the schema id.
        expected = """
            {
                "type":"object",
                "id":"MCQuestion",
                "properties":{
                    "question":{"type":"string"},
                    "choices":{
                        "items":{
                            "type":"object",
                            "id":"choice",
                            "properties":{
                                "text":{"type":"string"},
                                "score":{"type":"string"},
                                "feedback":{"type":"string"}
                            }
                        },
                        "type":"array"
                    }
                }
            }
            """
        self.assert_json_schema_value(expected, mc_question)
        expected = """
            [
                [["title"],"MCQuestion"],
                [["_inputex"],{"className":"mc-question"}],
                [["properties","question","_inputex"],{"label":"Question"}],
                [["properties","choices","_inputex"],{"label":"Choices"}],
                [["properties","choices","items","title"],"choice"],
                [["properties","choices","items","_inputex"],{"className":"mc-choice"}],
                [["properties","choices","items","properties","text","_inputex"],{
                    "label":"Text"
                }],
                [["properties","choices","items","properties","score","_inputex"],{
                    "label":"Score"
                }],
                [["properties","choices","items","properties","feedback","_inputex"],{
                    "label":"Feedback"
                }]
            ]
            """
        self.assert_schema_dict_value(expected, mc_question)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Workflow class in models.courses."""
__author__ = 'Sean Lip (sll@google.com)'
import unittest
from models.courses import LEGACY_HUMAN_GRADER_WORKFLOW
from models.courses import Workflow
import yaml
# Canonical error strings emitted during workflow validation; the tests below
# compare against these so the exact wording lives in one place.
DATE_FORMAT_ERROR = (
    'dates should be formatted as YYYY-MM-DD hh:mm (e.g. 1997-07-16 19:20) and '
    'be specified in the UTC timezone.'
)
# Prefix prepended (by assert_matching_errors) to every expected message.
ERROR_HEADER = 'Error validating workflow specification: '
MISSING_KEYS_PREFIX = 'missing key(s) for a human-reviewed assessment:'
class DateTimeConversionTests(unittest.TestCase):
    """Unit tests for datetime conversion."""

    def test_valid_datetime(self):
        """A well-formed 'YYYY-MM-DD hh:mm' string should parse cleanly."""
        parsed = Workflow('')._convert_date_string_to_datetime(
            '2012-03-21 12:30')
        self.assertEqual(2012, parsed.year)
        self.assertEqual(3, parsed.month)
        self.assertEqual(21, parsed.day)
        self.assertEqual(12, parsed.hour)
        self.assertEqual(30, parsed.minute)

    def test_invalid_datetime(self):
        """Malformed or out-of-range date strings should raise."""
        workflow = Workflow('')
        for bad_value in ('abc', '2012-13-31 12:30', '2012-12-31T12:30',
                          '2012-13-31 12:30+0100'):
            with self.assertRaises(Exception):
                workflow._convert_date_string_to_datetime(bad_value)

    def test_no_timezone_set(self):
        """Parsed date strings should contain no timezone information."""
        parsed = Workflow('')._convert_date_string_to_datetime(
            '2012-03-21 12:30')
        self.assertIsNone(parsed.tzinfo)
class WorkflowValidationTests(unittest.TestCase):
    """Unit tests for workflow object validation."""

    def setUp(self):
        # Fresh error list and a known-good human-review workflow dict for
        # each test; tests mutate the dict to exercise individual failures.
        self.errors = []
        self.valid_human_review_workflow_dict = yaml.safe_load(
            LEGACY_HUMAN_GRADER_WORKFLOW)

    def assert_matching_errors(self, expected, actual):
        """Prepend the error prefix to the error messages, then compare them."""
        formatted_errors = []
        for error in expected:
            formatted_errors.append('%s%s' % (ERROR_HEADER, error))
        self.assertEqual(formatted_errors, actual)

    def to_yaml(self, adict):
        """Convert a dict to YAML."""
        return yaml.safe_dump(adict)

    def test_empty_string(self):
        """Validation should fail on an empty string."""
        workflow = Workflow('')
        workflow.validate(self.errors)
        self.assert_matching_errors(['missing key: grader.'], self.errors)

    def test_invalid_string(self):
        """Validation should fail for invalid YAML strings."""
        workflow = Workflow('(')
        workflow.validate(self.errors)
        # Only checks that *some* error is reported; the YAML parser's exact
        # message is not part of the contract.
        self.assertTrue(self.errors)

    def test_not_dict(self):
        """Validation should fail for non-dict YAML strings."""
        yaml_strs = ['- first\n- second', 'grader']
        for yaml_str in yaml_strs:
            self.errors = []
            workflow = Workflow(yaml_str)
            workflow.validate(self.errors)
            self.assert_matching_errors(
                ['expected the YAML representation of a dict'], self.errors)

    def test_missing_grader_key(self):
        """Validation should fail for missing grader key."""
        workflow = Workflow(self.to_yaml({'not_grader': 'human'}))
        workflow.validate(self.errors)
        self.assert_matching_errors(['missing key: grader.'], self.errors)

    def test_auto_grader(self):
        """Validation should pass for an auto-graded assessment."""
        workflow = Workflow(self.to_yaml({'grader': 'auto'}))
        workflow.validate(self.errors)
        self.assertFalse(self.errors)

    def test_invalid_human_grader(self):
        """Validation should fail for invalid human grading specifications."""
        # No keys beyond 'grader': all human-review keys reported missing.
        workflow = Workflow(self.to_yaml({'grader': 'human'}))
        workflow.validate(self.errors)
        self.assert_matching_errors([
            '%s matcher, review_min_count, review_window_mins, '
            'submission_due_date, review_due_date.' %
            MISSING_KEYS_PREFIX], self.errors)
        # With a matcher supplied, only the remaining keys are reported.
        self.errors = []
        workflow = Workflow(self.to_yaml(
            {'grader': 'human', 'matcher': 'peer'}
        ))
        workflow.validate(self.errors)
        self.assert_matching_errors([
            '%s review_min_count, review_window_mins, submission_due_date, '
            'review_due_date.' % MISSING_KEYS_PREFIX], self.errors)

    def test_invalid_review_min_count(self):
        """Validation should fail for bad review_min_count values."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['review_min_count'] = 'test_string'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['review_min_count should be an integer.'], self.errors)
        self.errors = []
        workflow_dict['review_min_count'] = -1
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['review_min_count should be a non-negative integer.'], self.errors)
        # Zero is explicitly allowed.
        self.errors = []
        workflow_dict['review_min_count'] = 0
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assertFalse(self.errors)

    def test_invalid_review_window_mins(self):
        """Validation should fail for bad review_window_mins values."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['review_window_mins'] = 'test_string'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['review_window_mins should be an integer.'], self.errors)
        self.errors = []
        workflow_dict['review_window_mins'] = -1
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['review_window_mins should be a non-negative integer.'],
            self.errors)
        # Zero is explicitly allowed.
        self.errors = []
        workflow_dict['review_window_mins'] = 0
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assertFalse(self.errors)

    def test_invalid_date(self):
        """Validation should fail for invalid dates."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['submission_due_date'] = 'test_string'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors([DATE_FORMAT_ERROR], self.errors)
        self.errors = []
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['review_due_date'] = 'test_string'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors([DATE_FORMAT_ERROR], self.errors)

    def test_submission_date_after_review_date_fails(self):
        """Validation should fail if review date precedes submission date."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['submission_due_date'] = '2013-03-14 12:00'
        workflow_dict['review_due_date'] = '2013-03-13 12:00'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['submission due date should be earlier than review due date.'],
            self.errors)

    def test_multiple_errors(self):
        """Validation should fail with multiple errors when appropriate."""
        # Multiple problems are joined with '; ' into one error entry.
        workflow_dict = self.valid_human_review_workflow_dict
        workflow_dict['submission_due_date'] = '2013-03-14 12:00'
        workflow_dict['review_due_date'] = '2013-03-13 12:00'
        workflow_dict['review_window_mins'] = 'hello'
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assert_matching_errors(
            ['review_window_mins should be an integer; submission due date '
             'should be earlier than review due date.'],
            self.errors)

    def test_valid_human_grader(self):
        """Validation should pass for valid human grading specifications."""
        workflow_dict = self.valid_human_review_workflow_dict
        workflow = Workflow(self.to_yaml(workflow_dict))
        workflow.validate(self.errors)
        self.assertFalse(self.errors)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for common.tags."""
__author__ = 'John Orr (jorr@google.com)'
import unittest
from xml.etree import cElementTree
from common import tags
class CustomTagTests(unittest.TestCase):
    """Unit tests for the custom tag functionality."""

    def setUp(self):
        """Installs a small set of fake tag bindings for the duration of a test."""

        class SimpleTag(tags.BaseTag):
            def render(self, unused_arg):
                return cElementTree.Element('SimpleTag')

        class ComplexTag(tags.BaseTag):
            def render(self, unused_arg):
                return cElementTree.XML(
                    '<Complex><Child>Text</Child></Complex>')

        class ReRootTag(tags.BaseTag):
            def render(self, node):
                elt = cElementTree.Element('Re')
                root = cElementTree.Element('Root')
                elt.append(root)
                for child in node:
                    root.append(child)
                return elt

        def new_get_tag_bindings():
            return {
                'simple': SimpleTag,
                'complex': ComplexTag,
                'reroot': ReRootTag}

        self.old_get_tag_bindings = tags.get_tag_bindings
        tags.get_tag_bindings = new_get_tag_bindings

    def tearDown(self):
        # Restore the real bindings so other tests are unaffected.
        tags.get_tag_bindings = self.old_get_tag_bindings

    def test_empty_text_is_passed(self):
        # Fixed: previously passed None, which duplicated
        # test_none_is_treated_as_empty; this case must exercise ''.
        safe_dom = tags.html_to_safe_dom('')
        self.assertEquals('', str(safe_dom))

    def test_none_is_treated_as_empty(self):
        safe_dom = tags.html_to_safe_dom(None)
        self.assertEquals('', str(safe_dom))

    def test_plain_text_is_passed(self):
        safe_dom = tags.html_to_safe_dom('This is plain text.')
        self.assertEquals('This is plain text.', str(safe_dom))

    def test_mix_of_plain_text_and_tags_is_passed(self):
        html = 'This is plain text<br/>on several<br/>lines'
        safe_dom = tags.html_to_safe_dom(html)
        self.assertEquals(html, str(safe_dom))

    def test_simple_tag_is_replaced(self):
        html = '<div><simple></simple></div>'
        safe_dom = tags.html_to_safe_dom(html)
        self.assertEquals('<div><SimpleTag></SimpleTag></div>', str(safe_dom))

    def test_replaced_tag_preserves_tail_text(self):
        html = '<div><simple></simple>Tail text</div>'
        safe_dom = tags.html_to_safe_dom(html)
        self.assertEquals(
            '<div><SimpleTag></SimpleTag>Tail text</div>', str(safe_dom))

    def test_simple_tag_consumes_children(self):
        html = '<div><simple><p>child1</p></simple></div>'
        safe_dom = tags.html_to_safe_dom(html)
        self.assertEquals(
            '<div><SimpleTag></SimpleTag></div>', str(safe_dom))

    def test_complex_tag_preserves_its_own_children(self):
        html = '<div><complex/></div>'
        safe_dom = tags.html_to_safe_dom(html)
        self.assertEquals(
            '<div><Complex><Child>Text</Child></Complex></div>', str(safe_dom))

    def test_reroot_tag_puts_children_in_new_root(self):
        html = '<div><reroot><p>one</p><p>two</p></reroot></div>'
        safe_dom = tags.html_to_safe_dom(html)
        self.assertEquals(
            '<div><Re><Root><p>one</p><p>two</p></Root></Re></div>',
            str(safe_dom))

    def test_chains_of_tags(self):
        html = '<div><reroot><p><simple></p></reroot></div>'
        safe_dom = tags.html_to_safe_dom(html)
        self.assertEquals(
            '<div><Re><Root><p><SimpleTag></SimpleTag></p></Root></Re></div>',
            str(safe_dom))

    def test_scripts_are_not_escaped(self):
        html = '<script>alert("2"); var a = (1 < 2 && 2 > 1);</script>'
        safe_dom = tags.html_to_safe_dom(html)
        self.assertEquals(html, str(safe_dom))
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance test for a peer review system.
WARNING! Use this script to test load Course Builder. This is very dangerous
feature, be careful, because anyone can impersonate super user of your Course
Builder instance; use only if you have to perform specific load testing
Keep in mind:
- when repeatedly running tests and creating new test namespaces,
flush memcache
Here is how to run:
- update /controllers/sites.py and enable CAN_IMPERSONATE
- navigate to the root directory of the app
- run a command line by typing:
python tests/integration/load_test.py \
--thread_count=5 \
--start_uid=1 \
http://mycourse.appspot.com
"""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import argparse
import cookielib
import json
import logging
import random
import re
import sys
import threading
import time
import urllib
import urllib2
# The unit id for the peer review assignment in the default course.
LEGACY_REVIEW_UNIT_ID = 'ReviewAssessmentExample'

# command line arguments parser; each (iteration, thread) pair acts as a
# distinct bot whose uid is derived from start_uid, thread_count, and the
# iteration index as described in --iteration_count's help below.
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    'base_url', help=('Base URL of the course you want to test'), type=str)
PARSER.add_argument(
    '--start_uid',
    help='Initial value for unique thread identifier.', default=1, type=int)
PARSER.add_argument(
    '--thread_count',
    help='Number of concurrent threads for executing the test.',
    default=1, type=int)
PARSER.add_argument(
    '--iteration_count',
    help='Number of iterations for executing the test. Each thread of each '
    'iteration acts as a unique user with the uid equal to:'
    'start_uid + thread_count * iteration_index.',
    default=1, type=int)
def assert_contains(needle, haystack):
    """Raises an Exception unless needle occurs within haystack.

    Fixed: the message arguments were previously passed to Exception as extra
    positional args ('%s', a, b) and were never interpolated; they are now
    %-formatted into the message.
    """
    if needle not in haystack:
        raise Exception(
            'Expected to find term: %s\n%s' % (needle, haystack))
def assert_does_not_contain(needle, haystack):
    """Raises an Exception if needle occurs within haystack.

    Fixed: the message is now %-formatted instead of passing the format
    arguments as extra Exception positional args (which never interpolated).
    """
    if needle in haystack:
        raise Exception(
            'Did not expect to find term: %s\n%s' % (needle, haystack))
def assert_equals(expected, actual):
    """Raises an Exception unless expected == actual.

    Fixed: the message is now %-formatted instead of passing the format
    arguments as extra Exception positional args (which never interpolated).
    """
    if expected != actual:
        raise Exception(
            'Expected equality of %s and %s.' % (expected, actual))
class WebSession(object):
    """A class that allows navigation of web pages keeping cookie session."""

    # Guards all the class-level counters/histogram below, which are shared
    # across every session in every thread.
    PROGRESS_LOCK = threading.Lock()
    MAX_RETRIES = 3
    RETRY_SLEEP_SEC = 3
    GET_COUNT = 0
    POST_COUNT = 0
    RETRY_COUNT = 0
    # Log a progress line every this many requests.
    PROGRESS_BATCH = 10
    # Buckets: >30s, >15s, >7s, >3s, >1s, <=1s (see update_duration).
    RESPONSE_TIME_HISTOGRAM = [0, 0, 0, 0, 0, 0]

    def __init__(self, uid, common_headers=None):
        """Creates a session identified by uid; common_headers go on every request."""
        if common_headers is None:
            common_headers = {}
        self.uid = uid
        self.common_headers = common_headers
        self.cj = cookielib.CookieJar()
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))

    @classmethod
    def increment_duration_bucket(cls, index):
        """Bumps one bucket of the shared response-time histogram."""
        cls.RESPONSE_TIME_HISTOGRAM[index] += 1

    @classmethod
    def update_duration(cls, duration):
        """Files a request duration (seconds) into the histogram bucket."""
        if duration > 30:
            cls.increment_duration_bucket(0)
        elif duration > 15:
            cls.increment_duration_bucket(1)
        elif duration > 7:
            cls.increment_duration_bucket(2)
        elif duration > 3:
            cls.increment_duration_bucket(3)
        elif duration > 1:
            cls.increment_duration_bucket(4)
        else:
            cls.increment_duration_bucket(5)

    @classmethod
    def log_progress(cls, force=False):
        """Logs aggregate counters every PROGRESS_BATCH requests (or if forced)."""
        update = ((cls.GET_COUNT + cls.POST_COUNT) % (
            cls.PROGRESS_BATCH) == 0)
        if update or force:
            logging.info(
                'GET/POST:[%s, %s], RETRIES:[%s], SLA:%s',
                cls.GET_COUNT, cls.POST_COUNT, cls.RETRY_COUNT,
                cls.RESPONSE_TIME_HISTOGRAM)

    def get_cookie_value(self, name):
        """Returns the value of the named cookie, or None if not present."""
        for cookie in self.cj:
            if cookie.name == name:
                return cookie.value
        return None

    def is_soft_error(self, http_error):
        """Checks if HTTPError is due to starvation of frontend instances."""
        body = http_error.fp.read()
        # this is the text specific to the front end instance starvation, which
        # is a retriable error for both GET and POST; normal HTTP error 500 has
        # this specific text '<h1>500 Internal Server Error</h1>'
        if http_error.code == 500 and '<h1>Error: Server Error</h1>' in body:
            return True
        logging.error(
            'Non-retriable HTTP %s error:\n%s', http_error.code, body)
        return False

    def open(self, request, hint):
        """Executes any HTTP request.

        Retries soft (instance-starvation) 500s up to MAX_RETRIES times with a
        fixed sleep; any other error propagates. The request duration —
        including retries — is always recorded in the histogram.
        """
        start_time = time.time()
        try:
            try_count = 0
            while True:
                try:
                    return self.opener.open(request)
                except urllib2.HTTPError as he:
                    if (
                            try_count < WebSession.MAX_RETRIES and
                            self.is_soft_error(he)):
                        try_count += 1
                        with WebSession.PROGRESS_LOCK:
                            WebSession.RETRY_COUNT += 1
                        time.sleep(WebSession.RETRY_SLEEP_SEC)
                        continue
                    raise he
        except Exception as e:
            logging.info(
                'Error in session %s executing: %s', self.uid, hint)
            raise e
        finally:
            with WebSession.PROGRESS_LOCK:
                self.update_duration(time.time() - start_time)

    def get(self, url, expected_code=200):
        """HTTP GET."""
        with WebSession.PROGRESS_LOCK:
            WebSession.GET_COUNT += 1
            self.log_progress()
        request = urllib2.Request(url)
        for key, value in self.common_headers.items():
            request.add_header(key, value)
        response = self.open(request, 'GET %s' % url)
        assert_equals(expected_code, response.code)
        return response.read()

    def post(self, url, args_dict, expected_code=200):
        """HTTP POST."""
        with WebSession.PROGRESS_LOCK:
            WebSession.POST_COUNT += 1
            self.log_progress()
        data = None
        if args_dict:
            data = urllib.urlencode(args_dict)
        # A non-None data argument makes urllib2 issue a POST.
        request = urllib2.Request(url, data)
        for key, value in self.common_headers.items():
            request.add_header(key, value)
        response = self.open(request, 'POST %s' % url)
        assert_equals(expected_code, response.code)
        return response.read()
class TaskThread(threading.Thread):
"""Runs a task in a separate thread."""
def __init__(self, func, name=None):
super(TaskThread, self).__init__()
self.func = func
self.exception = None
self.name = name
@classmethod
def start_all_tasks(cls, tasks):
"""Starts all tasks."""
for task in tasks:
task.start()
@classmethod
def check_all_tasks(cls, tasks):
"""Checks results of all tasks; fails on the first exception found."""
failed_count = 0
for task in tasks:
while True:
# Timeouts should happen after 30 seconds.
task.join(30)
if task.isAlive():
logging.info('Still waiting for: %s.', task.name)
continue
else:
break
if task.exception:
failed_count += 1
if failed_count:
raise Exception('Tasks failed: %s', failed_count)
@classmethod
def execute_task_list(cls, tasks):
"""Starts all tasks and checks the results."""
cls.start_all_tasks(tasks)
cls.check_all_tasks(tasks)
def run(self):
try:
self.func()
except Exception as e: # pylint: disable-msg=broad-except
logging.error('Error in %s: %s', self.name, e)
self.exc_info = sys.exc_info()
raise self.exc_info[1], None, self.exc_info[2]
class PeerReviewLoadTest(object):
    """A peer review load test.

    Each instance drives one simulated student through the full peer-review
    flow: register, submit the assessment, then keep requesting and writing
    reviews until two reviews are completed.
    """

    def __init__(self, base_url, uid):
        # uid is used to derive a distinct impersonated identity per bot.
        self.uid = uid
        self.host = base_url

        # this is an impersonation identity for the actor thread
        self.email = 'load_test_bot_%s@example.com' % self.uid
        self.name = 'Load Test Bot #%s' % self.uid

        # begin web session
        impersonate_header = {
            'email': self.email, 'user_id': u'impersonation-%s' % self.uid}
        self.session = WebSession(
            uid=uid,
            common_headers={'Gcb-Impersonate': json.dumps(impersonate_header)})

    def run(self):
        """Runs the whole scenario: register, submit, complete two reviews."""
        self.register_if_has_to()
        self.submit_peer_review_assessment_if_possible()
        while self.count_completed_reviews() < 2:
            self.request_and_do_a_review()

    def get_hidden_field(self, name, body):
        """Returns the value of the named hidden <input> found in body."""
        # The "\s*" denotes arbitrary whitespace; sometimes, this tag is split
        # across multiple lines in the HTML.
        # pylint: disable-msg=anomalous-backslash-in-string
        reg = re.compile(
            '<input type="hidden" name="%s"\s* value="([^"]*)">' % name)
        # pylint: enable-msg=anomalous-backslash-in-string
        return reg.search(body).group(1)

    def get_js_var(self, name, body):
        """Returns the value of a JavaScript string variable found in body."""
        reg = re.compile('%s = \'([^\']*)\';\n' % name)
        return reg.search(body).group(1)

    def get_draft_review_url(self, body):
        """Returns the URL of a draft review on the review dashboard."""
        # The "\s*" denotes arbitrary whitespace; sometimes, this tag is split
        # across multiple lines in the HTML.
        # pylint: disable-msg=anomalous-backslash-in-string
        reg = re.compile(
            '<a href="([^"]*)">Assignment [0-9]+</a>\s*\(Draft\)')
        # pylint: enable-msg=anomalous-backslash-in-string
        result = reg.search(body)
        if result is None:
            return None
        return result.group(1)

    def register_if_has_to(self):
        """Performs student registration action.

        Returns:
            bool. True if registration was performed here; False if the
            student was already registered.
        """
        body = self.session.get('%s/' % self.host)
        assert_contains('Logout', body)
        if 'href="register"' not in body:
            # Already registered: just sanity-check the student home page.
            body = self.session.get('%s/student/home' % self.host)
            assert_contains(self.email, body)
            assert_contains(self.name, body)
            return False

        body = self.session.get('%s/register' % self.host)
        xsrf_token = self.get_hidden_field('xsrf_token', body)

        data = {'xsrf_token': xsrf_token, 'form01': self.name}
        body = self.session.post('%s/register' % self.host, data)

        # Confirm registration took effect on the course home page.
        body = self.session.get('%s/' % self.host)
        assert_contains('Logout', body)
        assert_does_not_contain('href="register"', body)
        return True

    def submit_peer_review_assessment_if_possible(self):
        """Submits the peer review assessment unless it was already sent."""
        body = self.session.get(
            '%s/assessment?name=%s' % (self.host, LEGACY_REVIEW_UNIT_ID))
        assert_contains('You may only submit this assignment once', body)
        if 'Submitted assignment' in body:
            # The assignment was already submitted.
            return True

        assessment_xsrf_token = self.get_js_var('assessmentXsrfToken', body)
        answers = [
            {'index': 0, 'type': 'regex',
             'value': 'Answer 0 by %s' % self.email},
            {'index': 1, 'type': 'choices', 'value': self.uid},
            {'index': 2, 'type': 'regex',
             'value': 'Answer 2 by %s' % self.email},
        ]
        data = {
            'answers': json.dumps(answers),
            'assessment_type': LEGACY_REVIEW_UNIT_ID,
            'score': 0,
            'xsrf_token': assessment_xsrf_token,
        }
        body = self.session.post('%s/answer' % self.host, data)
        assert_contains('Review peer assignments', body)
        return True

    def request_and_do_a_review(self):
        """Request a new review, wait for it to be granted, then submit it."""
        review_dashboard_url = (
            '%s/reviewdashboard?unit=%s' % (self.host, LEGACY_REVIEW_UNIT_ID))

        # NOTE(review): 'completed' is never set; the loop only exits via
        # the return at the bottom once a review is actually submitted.
        completed = False
        while not completed:
            # Get peer review dashboard and inspect it.
            body = self.session.get(review_dashboard_url)
            assert_contains('Assignments for your review', body)
            assert_contains('Review a new assignment', body)

            # Pick first pending review if any or ask for a new review.
            draft_review_url = self.get_draft_review_url(body)
            if draft_review_url:  # There is a pending review. Choose it.
                body = self.session.get(
                    '%s/%s' % (self.host, draft_review_url))
            else:  # Request a new assignment to review.
                assert_contains('xsrf_token', body)
                xsrf_token = self.get_hidden_field('xsrf_token', body)
                data = {
                    'unit_id': LEGACY_REVIEW_UNIT_ID,
                    'xsrf_token': xsrf_token,
                }
                body = self.session.post(review_dashboard_url, data)

                # It is possible that we fail to get a new review because the
                # old one is now visible, but was not yet visible when we asked
                # for the dashboard page.
                if (
                        'You must complete all assigned reviews before you '
                        'can request a new one.' in body):
                    continue

            # It is possible that no submissions available for review yet.
            # Wait for a while until they become available on the dashboard
            # page.
            if 'Back to the review dashboard' not in body:
                assert_contains('Assignments for your review', body)
                # Sleep for a random number of seconds between 1 and 4.
                time.sleep(1.0 + random.random() * 3.0)
                continue

            # Submit the review.
            review_xsrf_token = self.get_js_var('assessmentXsrfToken', body)
            answers = [
                {'index': 0, 'type': 'choices', 'value': 0},
                {'index': 1, 'type': 'regex',
                 'value': 'Review 0 by %s' % self.email},
            ]
            data = {
                'answers': json.dumps(answers),
                # NOTE(review): urlencode turns None into the literal string
                # 'None'; presumably the handler ignores this field — confirm.
                'assessment_type': None,
                'is_draft': 'false',
                'key': self.get_js_var('assessmentGlobals.key', body),
                'score': 0,
                'unit_id': LEGACY_REVIEW_UNIT_ID,
                'xsrf_token': review_xsrf_token,
            }
            body = self.session.post('%s/review' % self.host, data)
            assert_contains('Your review has been submitted', body)
            return True

    def count_completed_reviews(self):
        """Counts the number of reviews that the actor has completed."""
        review_dashboard_url = (
            '%s/reviewdashboard?unit=%s' % (self.host, LEGACY_REVIEW_UNIT_ID))
        body = self.session.get(review_dashboard_url)
        num_completed = body.count('(Completed)')
        return num_completed
def run_all(args):
    """Runs test scenario in multiple threads.

    Args:
        args: argparse.Namespace with base_url, start_uid, thread_count and
            iteration_count attributes.

    Raises:
        Exception: if thread_count is out of range or any task failed.
    """
    if args.thread_count < 1 or args.thread_count > 256:
        raise Exception('Please use between 1 and 256 threads.')

    start_time = time.time()
    logging.info('Started testing: %s', args.base_url)
    logging.info('base_url: %s', args.base_url)
    logging.info('start_uid: %s', args.start_uid)
    logging.info('thread_count: %s', args.thread_count)
    logging.info('iteration_count: %s', args.iteration_count)
    logging.info('SLAs are [>30s, >15s, >7s, >3s, >1s, <1s]')

    try:
        for iteration_index in range(0, args.iteration_count):
            logging.info('Started iteration: %s', iteration_index)
            tasks = []
            WebSession.PROGRESS_BATCH = args.thread_count
            for index in range(0, args.thread_count):
                # Give each bot a globally unique uid so parallel iterations
                # never reuse a student identity.
                test = PeerReviewLoadTest(
                    args.base_url,
                    (
                        args.start_uid +
                        iteration_index * args.thread_count +
                        index))
                task = TaskThread(
                    test.run, name='PeerReviewLoadTest-%s' % index)
                tasks.append(task)
            try:
                TaskThread.execute_task_list(tasks)
            except Exception:
                logging.info('Failed iteration: %s', iteration_index)
                # Bug fix: bare raise instead of 'raise e' so the original
                # traceback is preserved for debugging.
                raise
    finally:
        # Always emit the final progress line, even on failure.
        WebSession.log_progress(force=True)

    logging.info('Done! Duration (s): %s', time.time() - start_time)
if __name__ == '__main__':
    # Configure root logging before any progress output, then run the
    # scenario with arguments parsed by the module-level PARSER.
    logging.basicConfig(level=logging.INFO)
    run_all(PARSER.parse_args())
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Page objects used in functional tests for Course Builder."""
__author__ = 'John Orr (jorr@google.com)'
from selenium.common import exceptions
from selenium.webdriver.common import action_chains
from selenium.webdriver.common import by
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support import select
from selenium.webdriver.support import wait
class PageObject(object):
    """Superclass holding logic shared by all page objects."""

    def __init__(self, tester):
        self._tester = tester

    def _driver(self):
        # Convenience accessor for the shared WebDriver instance.
        return self._tester.driver

    def find_element_by_css_selector(self, selector):
        return self._driver().find_element_by_css_selector(selector)

    def find_element_by_id(self, elt_id):
        return self._driver().find_element_by_id(elt_id)

    def find_element_by_link_text(self, text):
        return self._driver().find_element_by_link_text(text)

    def find_element_by_name(self, name):
        return self._driver().find_element_by_name(name)

    def expect_status_message_to_be(self, value):
        """Waits up to 15s for the status bar to contain the given text."""
        waiter = wait.WebDriverWait(self._tester.driver, 15)
        waiter.until(ec.text_to_be_present_in_element(
            (by.By.ID, 'formStatusMessage'), value))
class EditorPageObject(PageObject):
    """Page object for pages which wait for the editor to finish loading."""

    def __init__(self, tester):
        super(EditorPageObject, self).__init__(tester)

        def successful_butter_bar(driver):
            # The editor is ready once the butter bar reports success or
            # has been dismissed.
            status = driver.find_element_by_id('formStatusMessage')
            return 'Success.' in status.text or not status.is_displayed()

        wait.WebDriverWait(self._tester.driver, 15).until(
            successful_butter_bar)

    def set_status(self, status):
        """Selects the given visible text in the 'is_draft' dropdown."""
        dropdown = select.Select(self.find_element_by_name('is_draft'))
        dropdown.select_by_visible_text(status)
        return self

    def click_save(self, link_text='Save', status_message='Saved'):
        """Clicks the save link and waits for the status confirmation."""
        self.find_element_by_link_text(link_text).click()
        self.expect_status_message_to_be(status_message)
        return self

    def _close_and_return_to(self, continue_page):
        # Clicks 'Close' and wraps the landing page in continue_page.
        self.find_element_by_link_text('Close').click()
        return continue_page(self._tester)
class DashboardEditor(EditorPageObject):
    """A base class for the editors accessed from the Dashboard."""

    def click_close(self):
        # Closing a dashboard editor always returns to the dashboard page.
        return self._close_and_return_to(DashboardPage)
class RootPage(PageObject):
    """Page object to model the interactions with the root page."""

    def load(self, base_url):
        self._tester.driver.get(base_url + '/')
        return self

    def _follow_link(self, link_text, page_class):
        # Clicks a nav link and returns the page object for the target.
        self.find_element_by_link_text(link_text).click()
        return page_class(self._tester)

    def click_login(self):
        return self._follow_link('Login', LoginPage)

    def click_dashboard(self):
        return self._follow_link('Dashboard', DashboardPage)

    def click_admin(self):
        return self._follow_link('Admin', AdminPage)

    def click_announcements(self):
        return self._follow_link('Announcements', AnnouncementsPage)

    def click_register(self):
        return self._follow_link('Register', RegisterPage)
class RegisterPage(PageObject):
    """Page object to model the registration page."""

    def enroll(self, name):
        """Types the student name into the enrollment form and submits it."""
        name_field = self.find_element_by_name('form01')
        name_field.send_keys(name)
        name_field.submit()
        return RegisterPage(self._tester)

    def verify_enrollment(self):
        """Asserts that the registration confirmation text is shown."""
        confirmation = self.find_element_by_css_selector(
            '.gcb-top-content').text
        self._tester.assertTrue('Thank you for registering' in confirmation)
        return self

    def click_course(self):
        self.find_element_by_link_text('Course').click()
        return RootPage(self._tester)
class AnnouncementsPage(PageObject):
    """Page object to model the announcements page."""

    def click_add_new(self):
        self.find_element_by_css_selector(
            '#gcb-add-announcement > button').click()
        return AnnouncementsEditorPage(self._tester)

    def _aside_text(self, selector, index):
        # Returns the text of the index-th element matching selector.
        elements = self._tester.driver.find_elements_by_css_selector(selector)
        return elements[index].text

    def verify_announcement(self, title=None, date=None, body=None):
        """Verify that the announcement has the given fields."""
        if title:
            self._tester.assertEquals(
                title, self._aside_text('div.gcb-aside h2', 0))
        if date:
            self._tester.assertEquals(
                date, self._aside_text('div.gcb-aside p', 0))
        if body:
            self._tester.assertEquals(
                body, self._aside_text('div.gcb-aside p', 1))
        return self
class AnnouncementsEditorPage(EditorPageObject):
    """Page to model the announcements editor."""

    def _replace_field(self, field_name, text):
        # Clears the named form field, then types the replacement text.
        element = self.find_element_by_name(field_name)
        element.clear()
        element.send_keys(text)

    def enter_fields(self, title=None, date=None, body=None):
        """Enter title, date, and body into the announcement form."""
        if title:
            self._replace_field('title', title)
        if date:
            self._replace_field('date', date)
        if body:
            self._replace_field('html', body)
        return self

    def click_close(self):
        return self._close_and_return_to(AnnouncementsPage)
class LoginPage(PageObject):
    """Page object to model the interactions with the login page."""

    def login(self, login, admin=False):
        """Fills in the stub login form and submits it."""
        email_field = self.find_element_by_id('email')
        email_field.clear()
        email_field.send_keys(login)
        if admin:
            # Tick the 'log in as admin' checkbox on the dev login page.
            self.find_element_by_id('admin').click()
        self.find_element_by_id('submit-login').click()
        return RootPage(self._tester)
class DashboardPage(PageObject):
    """Page object to model the interactions with the dashboard landing page."""

    def load(self, base_url, name):
        self._tester.driver.get('%s/%s/dashboard' % (base_url, name))
        return self

    def verify_read_only_course(self):
        alert_text = self.find_element_by_id('gcb-alerts-bar').text
        self._tester.assertEquals('Read-only course.', alert_text)
        return self

    def verify_selected_tab(self, tab_text):
        tab = self.find_element_by_link_text(tab_text)
        self._tester.assertEquals('selected', tab.get_attribute('class'))

    def verify_not_publicly_available(self):
        alert_text = self.find_element_by_id('gcb-alerts-bar').text
        self._tester.assertEquals(
            'The course is not publicly available.', alert_text)
        return self

    def _click_selector(self, selector, page_class):
        # Clicks the element at selector and returns the target page object.
        self.find_element_by_css_selector(selector).click()
        return page_class(self._tester)

    def click_import(self):
        return self._click_selector('#import_course', Import)

    def click_add_unit(self):
        return self._click_selector('#add_unit > button', AddUnit)

    def click_add_assessment(self):
        return self._click_selector('#add_assessment > button', AddAssessment)

    def click_add_link(self):
        return self._click_selector('#add_link > button', AddLink)

    def click_add_lesson(self):
        return self._click_selector('#add_lesson > button', AddLesson)

    def click_organize(self):
        return self._click_selector('#edit_unit_lesson', Organize)

    def click_assets(self):
        self.find_element_by_link_text('Assets').click()
        return AssetsPage(self._tester)

    def verify_course_outline_contains_unit(self, unit_title):
        # Raises if the unit link is absent from the course outline.
        self.find_element_by_link_text(unit_title)
        return self
class AssetsPage(PageObject):
    """Page object for the dashboard's assets tab."""

    def click_upload(self):
        self.find_element_by_link_text('Upload').click()
        return AssetsEditorPage(self._tester)

    def verify_image_file_by_name(self, name):
        """Asserts that an image with the given name is listed."""
        self.find_element_by_link_text(name)  # throw exception if not found
        return self

    def verify_no_image_file_by_name(self, name):
        """Asserts that NO image with the given name is listed.

        Bug fix: the previous implementation was a copy of
        verify_image_file_by_name and verified presence, not absence.
        """
        self._tester.assertRaises(
            exceptions.NoSuchElementException,
            self.find_element_by_link_text, name)
        return self

    def click_edit_image(self, name):
        self.find_element_by_link_text(
            name).parent.find_element_by_link_text('[Edit]').click()
        return ImageEditorPage(self._tester)
class AssetsEditorPage(DashboardEditor):
    """Page object for upload image page."""

    def select_file(self, path):
        """Points the file input at the given local path."""
        file_input = self.find_element_by_name('file')
        file_input.send_keys(path)
        return self

    def click_upload_and_expect_saved(self):
        """Uploads the selected file and waits for the assets listing."""
        upload_link = self.find_element_by_link_text('Upload')
        upload_link.click()
        self.expect_status_message_to_be('Saved.')

        # Page automatically redirects after successful save.
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.title_contains('Assets'))
        return AssetsPage(self._tester)
class ImageEditorPage(EditorPageObject):
    """Page object for the dashboard's view/delete image page."""

    def click_delete(self):
        delete_link = self.find_element_by_link_text('Delete')
        delete_link.click()
        return self

    def confirm_delete(self):
        # Accept the browser-native confirmation dialog.
        self._tester.driver.switch_to_alert().accept()
        return AssetsPage(self._tester)
class AddUnit(DashboardEditor):
    """Page object to model the dashboard's add unit editor."""

    def __init__(self, tester):
        super(AddUnit, self).__init__(tester)
        # The editor is ready once the creation confirmation appears.
        self.expect_status_message_to_be(
            'New unit has been created and saved.')

    def set_title(self, title):
        """Replaces the unit title with the given text."""
        title_field = self.find_element_by_name('title')
        title_field.clear()
        title_field.send_keys(title)
        return self
class Import(DashboardEditor):
    """Page object to model the dashboard's course import editor."""
    # NOTE(review): the docstring previously said 'unit/lesson organizer',
    # an apparent copy-paste from Organize; this editor is reached via the
    # dashboard's '#import_course' control.
    pass
class AddAssessment(DashboardEditor):
    """Page object to model the dashboard's assessment editor."""

    def __init__(self, tester):
        super(AddAssessment, self).__init__(tester)
        # The editor is ready once the creation confirmation appears.
        self.expect_status_message_to_be(
            'New assessment has been created and saved.')
class AddLink(DashboardEditor):
    """Page object to model the dashboard's link editor."""

    def __init__(self, tester):
        super(AddLink, self).__init__(tester)
        # The editor is ready once the creation confirmation appears.
        self.expect_status_message_to_be(
            'New link has been created and saved.')
class AddLesson(DashboardEditor):
    """Page object to model the dashboard's lesson editor."""
    # DOM ids of the rich-text editor element and its backing textarea.
    RTE_EDITOR_ID = 'gcbRteField-0_editor'
    RTE_TEXTAREA_ID = 'gcbRteField-0'

    def __init__(self, tester):
        super(AddLesson, self).__init__(tester)
        self.expect_status_message_to_be(
            'New lesson has been created and saved.')

    def click_rich_text(self):
        """Switches the objectives field into rich-text editing mode."""
        el = self.find_element_by_css_selector('div.rte-control')
        self._tester.assertEqual('Rich Text', el.text)
        el.click()
        # Wait until the RTE is actually interactive before returning.
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.element_to_be_clickable((by.By.ID, AddLesson.RTE_EDITOR_ID)))
        return self

    def click_plain_text(self):
        """Switches the objectives field into plain-HTML editing mode."""
        el = self.find_element_by_css_selector('div.rte-control')
        self._tester.assertEqual('<HTML>', el.text)
        el.click()
        return self

    def send_rte_text(self, text):
        """Types text into the rich text editor."""
        self.find_element_by_id('gcbRteField-0_editor').send_keys(text)
        return self

    def select_rte_custom_tag_type(self, option_text):
        """Select the given option from the custom content type selector."""
        self._ensure_rte_iframe_ready_and_switch_to_it()
        select_tag = self.find_element_by_name('tag')
        for option in select_tag.find_elements_by_tag_name('option'):
            if option.text == option_text:
                option.click()
                break
        else:
            self._tester.fail('No option "%s" found' % option_text)
        # Wait for the lightbox to finish re-rendering before leaving the
        # iframe context.
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.element_to_be_clickable(
                (by.By.PARTIAL_LINK_TEXT, 'Close')))
        self._tester.driver.switch_to_default_content()
        return self

    def click_rte_add_custom_tag(self):
        """Opens the 'insert component' lightbox from the RTE toolbar."""
        self.find_element_by_link_text(
            'Insert Google Course Builder component').click()
        return self

    def doubleclick_rte_element(self, elt_css_selector):
        """Double-clicks an element inside the rich-text editor frame."""
        self._tester.driver.switch_to_frame(AddLesson.RTE_EDITOR_ID)
        target = self.find_element_by_css_selector(elt_css_selector)
        action_chains.ActionChains(
            self._tester.driver).double_click(target).perform()
        self._tester.driver.switch_to_default_content()
        return self

    def _ensure_rte_iframe_ready_and_switch_to_it(self):
        # Switches the driver into the RTE lightbox iframe once it loads;
        # callers must switch back to the default content when done.
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.frame_to_be_available_and_switch_to_it('modal-editor-iframe'))
        # Ensure inputEx has initialized too
        wait.WebDriverWait(self._tester.driver, 15).until(
            ec.element_to_be_clickable(
                (by.By.PARTIAL_LINK_TEXT, 'Close')))

    def set_rte_lightbox_field(self, field_css_selector, value):
        """Clears and sets a field inside the RTE lightbox."""
        self._ensure_rte_iframe_ready_and_switch_to_it()
        field = self.find_element_by_css_selector(field_css_selector)
        field.clear()
        field.send_keys(value)
        self._tester.driver.switch_to_default_content()
        return self

    def ensure_rte_lightbox_field_has_value(self, field_css_selector, value):
        """Asserts the given lightbox field holds exactly the given value."""
        self._ensure_rte_iframe_ready_and_switch_to_it()
        self._tester.assertEqual(
            value,
            self.find_element_by_css_selector(
                field_css_selector).get_attribute('value'))
        self._tester.driver.switch_to_default_content()
        return self

    def click_rte_save(self):
        """Clicks Save inside the RTE lightbox and leaves the iframe."""
        self._ensure_rte_iframe_ready_and_switch_to_it()
        self.find_element_by_link_text('Save').click()
        self._tester.driver.switch_to_default_content()
        return self

    def ensure_objectives_textarea_matches(self, text):
        """Asserts the backing textarea content equals the given text."""
        self._tester.assertEqual(text, self.find_element_by_id(
            AddLesson.RTE_TEXTAREA_ID).get_attribute('value'))
        return self
class Organize(DashboardEditor):
    """Page object to model the dashboard's unit/lesson organizer."""
    # Only the generic dashboard-editor behavior is needed here.
    pass
class AdminPage(PageObject):
    """Page object to model the interactions with the admin landing page."""

    def click_add_course(self):
        self.find_element_by_id('add_course').click()
        return AddCourseEditorPage(self._tester)

    def click_settings(self):
        self.find_element_by_link_text('Settings').click()
        return AdminSettingsPage(self._tester)
class AdminSettingsPage(PageObject):
    """Page object for the admin settings."""

    def click_override_admin_user_emails(self):
        # The first gcb-button on the settings page is the override control.
        buttons = self._tester.driver.find_elements_by_css_selector(
            'button.gcb-button')
        buttons[0].click()
        return ConfigPropertyOverridePage(self._tester)

    def verify_admin_user_emails_contains(self, email):
        """Asserts email appears in the admin-emails row of the config table."""
        rows = self._tester.driver.find_elements_by_css_selector(
            'table.gcb-config tr')
        cell_text = rows[1].find_elements_by_css_selector('td')[1].text
        self._tester.assertTrue(email in cell_text)
class ConfigPropertyOverridePage(EditorPageObject):
    """Page object for the admin property override editor."""

    def set_value(self, value):
        """Types the override value into the 'value' field."""
        value_field = self.find_element_by_name('value')
        value_field.send_keys(value)
        return self

    def click_close(self):
        return self._close_and_return_to(AdminSettingsPage)
class AddCourseEditorPage(EditorPageObject):
    """Page object for the dashboards' add course page."""

    def set_fields(self, name=None, title=None, email=None):
        """Populate the fields in the add course page."""
        name_field = self.find_element_by_name('name')
        title_field = self.find_element_by_name('title')
        email_field = self.find_element_by_name('admin_email')

        # Clear everything first, then fill only the fields supplied.
        for field in (name_field, title_field, email_field):
            field.clear()
        for field, text in (
                (name_field, name),
                (title_field, title),
                (email_field, email)):
            if text:
                field.send_keys(text)
        return self

    def click_close(self):
        return self._close_and_return_to(AdminPage)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for modules/review/peer.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import models
from models import student_work
from modules.review import domain
from modules.review import peer
from tests.functional import actions
from google.appengine.ext import db
class ReviewStepTest(actions.TestBase):

    def test_constructor_sets_key_name(self):
        """Tests construction of key_name, put of entity with key_name set."""
        unit_id = 'unit_id'
        reviewee_key = models.Student(key_name='reviewee@example.com').put()
        reviewer_key = models.Student(key_name='reviewer@example.com').put()
        submission_key = student_work.Submission(
            reviewee_key=reviewee_key, unit_id=unit_id).put()

        step = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            reviewee_key=reviewee_key, reviewer_key=reviewer_key,
            state=domain.REVIEW_STATE_ASSIGNED,
            submission_key=submission_key, unit_id=unit_id)
        step_key = step.put()

        expected_name = peer.ReviewStep.key_name(submission_key, reviewer_key)
        self.assertEqual(expected_name, step_key.name())
class ReviewSummaryTest(actions.TestBase):
    """Tests for ReviewSummary."""

    def _count_test_keys(self):
        # Builds the (reviewee, submission) key pair used by the count tests;
        # these keys do not need to reference stored entities.
        reviewee_key = db.Key.from_path(
            models.Student.kind(), 'reviewee@example.com')
        submission_key = db.Key.from_path(
            student_work.Submission.kind(), 'submission')
        return reviewee_key, submission_key

    def test_constructor_sets_key_name(self):
        unit_id = 'unit_id'
        reviewee_key = models.Student(key_name='reviewee@example.com').put()
        submission_key = student_work.Submission(
            reviewee_key=reviewee_key, unit_id=unit_id).put()

        summary = peer.ReviewSummary(
            reviewee_key=reviewee_key, submission_key=submission_key,
            unit_id=unit_id)
        summary_key = summary.put()

        self.assertEqual(
            peer.ReviewSummary.key_name(submission_key), summary_key.name())

    def test_decrement_count(self):
        """Tests decrement_count."""
        reviewee_key, submission_key = self._count_test_keys()
        summary = peer.ReviewSummary(
            assigned_count=1, completed_count=1, expired_count=1,
            reviewee_key=reviewee_key, submission_key=submission_key,
            unit_id='1')

        self.assertEqual(1, summary.assigned_count)
        summary.decrement_count(domain.REVIEW_STATE_ASSIGNED)
        self.assertEqual(0, summary.assigned_count)

        self.assertEqual(1, summary.completed_count)
        summary.decrement_count(domain.REVIEW_STATE_COMPLETED)
        self.assertEqual(0, summary.completed_count)

        self.assertEqual(1, summary.expired_count)
        summary.decrement_count(domain.REVIEW_STATE_EXPIRED)
        self.assertEqual(0, summary.expired_count)

        self.assertRaises(ValueError, summary.decrement_count, 'bad_state')

    def test_increment_count(self):
        """Tests increment_count."""
        reviewee_key, submission_key = self._count_test_keys()
        summary = peer.ReviewSummary(
            reviewee_key=reviewee_key, submission_key=submission_key,
            unit_id='1')

        self.assertRaises(ValueError, summary.increment_count, 'bad_state')

        self.assertEqual(0, summary.assigned_count)
        summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
        self.assertEqual(1, summary.assigned_count)

        self.assertEqual(0, summary.completed_count)
        summary.increment_count(domain.REVIEW_STATE_COMPLETED)
        self.assertEqual(1, summary.completed_count)

        self.assertEqual(0, summary.expired_count)
        summary.increment_count(domain.REVIEW_STATE_EXPIRED)
        self.assertEqual(1, summary.expired_count)

        check_overflow = peer.ReviewSummary(
            assigned_count=domain.MAX_UNREMOVED_REVIEW_STEPS - 1,
            reviewee_key=reviewee_key, submission_key=submission_key,
            unit_id='1')

        # Increment to the limit succeeds...
        check_overflow.increment_count(domain.REVIEW_STATE_ASSIGNED)
        # ...but not beyond.
        self.assertRaises(
            db.BadValueError,
            check_overflow.increment_count, domain.REVIEW_STATE_ASSIGNED)
| Python |
# coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/review/stats.py."""
__author__ = 'Sean Lip'
import actions
from actions import assert_contains
from actions import assert_equals
from controllers_review import get_review_payload
from controllers_review import get_review_step_key
from controllers_review import LEGACY_REVIEW_UNIT_ID
class PeerReviewAnalyticsTest(actions.TestBase):
    """Tests the peer review analytics page on the Course Author dashboard."""

    def test_peer_review_analytics(self):
        """Test analytics page on course dashboard."""
        student1 = 'student1@google.com'
        name1 = 'Test Student 1'
        student2 = 'student2@google.com'
        name2 = 'Test Student 2'

        peer = {'assessment_type': 'ReviewAssessmentExample'}

        # Student 1 submits a peer review assessment.
        actions.login(student1)
        actions.register(self, name1)
        actions.submit_assessment(self, 'ReviewAssessmentExample', peer)
        actions.logout()

        # Student 2 submits the same peer review assessment.
        actions.login(student2)
        actions.register(self, name2)
        actions.submit_assessment(self, 'ReviewAssessmentExample', peer)
        actions.logout()

        email = 'admin@google.com'

        # The admin looks at the analytics page on the dashboard.
        actions.login(email, is_admin=True)
        response = self.get('dashboard?action=analytics')
        assert_contains(
            'Google &gt; Dashboard &gt; Analytics', response.body)
        assert_contains('have not been calculated yet', response.body)

        # Kick off the background job that computes student stats.
        compute_form = response.forms['gcb-compute-student-stats']
        response = self.submit(compute_form)
        assert_equals(response.status_int, 302)
        # The computation enqueues two background tasks.
        assert len(self.taskq.GetTasks('default')) == 2

        response = self.get('dashboard?action=analytics')
        assert_contains('is running', response.body)

        # Run the queued tasks to completion, then re-check the dashboard.
        self.execute_all_deferred_tasks()

        response = self.get('dashboard?action=analytics')
        assert_contains('were last updated at', response.body)
        assert_contains('currently enrolled: 2', response.body)
        assert_contains('total: 2', response.body)
        assert_contains('Peer Review Analytics', response.body)
        assert_contains('Sample peer review assignment', response.body)

        # JSON code for the completion statistics.
        assert_contains('"[{\\"stats\\": [2]', response.body)

        actions.logout()

        # Student2 requests a review.
        actions.login(student2)
        response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
        review_step_key_2_for_1 = get_review_step_key(response)
        assert_contains('Assignment to review', response.body)

        # Student2 submits the review.
        response = actions.submit_review(
            self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
            get_review_payload('R2for1'))
        assert_contains(
            'Your review has been submitted successfully', response.body)

        actions.logout()

        # The admin recomputes stats and sees the completed review counted.
        actions.login(email, is_admin=True)
        response = self.get('dashboard?action=analytics')
        assert_contains(
            'Google &gt; Dashboard &gt; Analytics', response.body)
        compute_form = response.forms['gcb-compute-student-stats']
        response = self.submit(compute_form)
        self.execute_all_deferred_tasks()

        response = self.get('dashboard?action=analytics')
        assert_contains('Peer Review Analytics', response.body)

        # JSON code for the completion statistics.
        assert_contains('"[{\\"stats\\": [1, 1]', response.body)
        actions.logout()
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models/review.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import entities
from models import models
from models import student_work
from tests.functional import actions
from google.appengine.ext import db
class ReferencedModel(entities.BaseEntity):
    """Trivial entity used as a KeyProperty reference target in tests."""
    pass
class UnvalidatedReference(entities.BaseEntity):
    """Entity whose key property does not validate the referenced kind."""
    # No kind= argument, so any db.Key value is accepted.
    referenced_model_key = student_work.KeyProperty()
class ValidatedReference(entities.BaseEntity):
    """Entity whose key property only accepts ReferencedModel keys."""
    referenced_model_key = student_work.KeyProperty(kind=ReferencedModel.kind())
class KeyPropertyTest(actions.TestBase):
    """Tests KeyProperty."""

    def setUp(self):  # From superclass. pylint: disable-msg=g-bad-name
        super(KeyPropertyTest, self).setUp()
        self.referenced_model_key = ReferencedModel().put()

    def test_validation_and_datastore_round_trip_of_keys_succeeds(self):
        """Tests happy path for both validation and (de)serialization."""
        holder = ValidatedReference(
            referenced_model_key=self.referenced_model_key)
        holder_key = holder.put()

        loaded = db.get(holder_key)
        # The key survives the datastore round trip unchanged...
        self.assertEqual(
            self.referenced_model_key, loaded.referenced_model_key)
        # ...still resolves to the referenced entity...
        referenced = db.get(loaded.referenced_model_key)
        self.assertEqual(self.referenced_model_key, referenced.key())
        # ...and deserializes as a real db.Key, not a string.
        self.assertTrue(isinstance(loaded.referenced_model_key, db.Key))

    def test_type_not_validated_if_kind_not_passed(self):
        model_key = db.Model().put()
        unvalidated = UnvalidatedReference(referenced_model_key=model_key)
        self.assertEqual(model_key, unvalidated.referenced_model_key)

    def test_validation_fails(self):
        model_key = db.Model().put()
        # Non-key values are rejected.
        self.assertRaises(
            db.BadValueError, ValidatedReference,
            referenced_model_key='not_a_key')
        # Keys of the wrong kind are rejected too.
        self.assertRaises(
            db.BadValueError, ValidatedReference,
            referenced_model_key=model_key)
class ReviewTest(actions.TestBase):

    def test_constructor_sets_key_name(self):
        """Tests construction of key_name, put of entity with key_name set."""
        unit_id = 'unit_id'
        reviewer_key = models.Student(key_name='reviewer@example.com').put()

        review = student_work.Review(
            reviewer_key=reviewer_key, unit_id=unit_id)
        saved_key = review.put()

        expected_name = student_work.Review.key_name(unit_id, reviewer_key)
        self.assertEqual(expected_name, saved_key.name())
class SubmissionTest(actions.TestBase):

    def test_constructor_sets_key_name(self):
        """Tests construction of key_name, put of entity with key_name set."""
        unit_id = 'unit_id'
        reviewee_key = models.Student(key_name='reviewee@example.com').put()

        submission = student_work.Submission(
            reviewee_key=reviewee_key, unit_id=unit_id)
        saved_key = submission.put()

        expected_name = student_work.Submission.key_name(
            unit_id, reviewee_key)
        self.assertEqual(expected_name, saved_key.name())
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for modules/review/review.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
import types
from models import models
from models import student_work
from modules.review import domain
from modules.review import peer
from modules.review import review as review_module
from tests.functional import actions
from google.appengine.ext import db
class ManagerTest(actions.TestBase):
"""Tests for review.Manager."""
# Don't require documentation for self-describing test methods.
# pylint: disable-msg=g-missing-docstring
def setUp(self):  # Name set by parent. pylint: disable-msg=g-bad-name
    super(ManagerTest, self).setUp()
    # Fixture: one reviewee, one reviewer, and the key a Submission for
    # (reviewee, unit) would carry. Note the Submission entity itself is
    # deliberately NOT put(); only its key is computed.
    self.reviewee = models.Student(key_name='reviewee@example.com')
    self.reviewee_key = self.reviewee.put()
    self.reviewer = models.Student(key_name='reviewer@example.com')
    self.reviewer_key = self.reviewer.put()
    self.unit_id = '1'
    self.submission_key = db.Key.from_path(
        student_work.Submission.kind(),
        student_work.Submission.key_name(
            reviewee_key=self.reviewee_key, unit_id=self.unit_id))
def test_add_reviewer_adds_new_step_and_summary(self):
    """A fresh add_reviewer creates both a step and its summary."""
    step_key = review_module.Manager.add_reviewer(
        self.unit_id, self.submission_key, self.reviewee_key,
        self.reviewer_key)
    step = db.get(step_key)
    summary = db.get(step.review_summary_key)
    # The step records who reviews what, assigned by a human.
    self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
    self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
    self.assertEqual(self.reviewee_key, step.reviewee_key)
    self.assertEqual(self.reviewer_key, step.reviewer_key)
    self.assertEqual(self.submission_key, step.submission_key)
    self.assertEqual(self.unit_id, step.unit_id)
    # The summary counts exactly one assigned review and nothing else.
    self.assertEqual(
        (1, 0, 0),
        (summary.assigned_count, summary.completed_count,
         summary.expired_count))
    self.assertEqual(self.reviewee_key, summary.reviewee_key)
    self.assertEqual(self.submission_key, summary.submission_key)
    self.assertEqual(self.unit_id, summary.unit_id)
def test_add_reviewer_existing_raises_assertion_when_summary_missing(self):
    """An existing step that points at a missing summary is an error."""
    dangling_summary_key = db.Key.from_path(
        peer.ReviewSummary.kind(), 'no_summary_found_for_key')
    peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=dangling_summary_key,
        reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
        submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id).put()
    self.assertRaises(
        AssertionError, review_module.Manager.add_reviewer, self.unit_id,
        self.submission_key, self.reviewee_key, self.reviewer_key)
def test_add_reviewer_existing_raises_transition_error_when_assigned(self):
    """Re-adding a reviewer who already has an active assigned step fails."""
    summary_key = peer.ReviewSummary(
        assigned_count=1, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id)
    peer.ReviewStep(**step_fields).put()
    self.assertRaises(
        domain.TransitionError, review_module.Manager.add_reviewer,
        self.unit_id, self.submission_key, self.reviewee_key,
        self.reviewer_key)
def test_add_reviewer_existing_raises_transition_error_when_completed(self):
    """Re-adding a reviewer whose review is already done fails."""
    summary_key = peer.ReviewSummary(
        completed_count=1, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id)
    peer.ReviewStep(**step_fields).put()
    self.assertRaises(
        domain.TransitionError, review_module.Manager.add_reviewer,
        self.unit_id, self.submission_key, self.reviewee_key,
        self.reviewer_key)
def test_add_reviewer_unremoved_existing_changes_expired_to_assigned(self):
    """Re-adding over a live expired step reassigns it in place."""
    summary_key = peer.ReviewSummary(
        expired_count=1, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id)
    step_key = peer.ReviewStep(**step_fields).put()
    review_module.Manager.add_reviewer(
        self.unit_id, self.submission_key, self.reviewee_key,
        self.reviewer_key)
    step, summary = db.get([step_key, summary_key])
    self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
    self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
    self.assertFalse(step.removed)
    # The expired slot was converted into an assigned one.
    self.assertEqual(1, summary.assigned_count)
    self.assertEqual(0, summary.expired_count)
def test_add_reviewer_removed_unremoves_assigned_step(self):
    """A soft-deleted assigned step is revived rather than duplicated."""
    summary_key = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
        submission_key=self.submission_key, unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id)
    step_key = peer.ReviewStep(**step_fields).put()
    review_module.Manager.add_reviewer(
        self.unit_id, self.submission_key, self.reviewee_key,
        self.reviewer_key)
    step, summary = db.get([step_key, summary_key])
    self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
    self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
    self.assertFalse(step.removed)
    self.assertEqual(1, summary.assigned_count)
def test_add_reviewer_removed_unremoves_completed_step(self):
    """A soft-deleted completed step is revived and stays completed."""
    summary_key = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
        submission_key=self.submission_key, unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id)
    step_key = peer.ReviewStep(**step_fields).put()
    review_module.Manager.add_reviewer(
        self.unit_id, self.submission_key, self.reviewee_key,
        self.reviewer_key)
    step, summary = db.get([step_key, summary_key])
    self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
    # Completed work is not thrown away: the state stays COMPLETED.
    self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
    self.assertFalse(step.removed)
    self.assertEqual(1, summary.completed_count)
def test_add_reviewer_removed_unremoves_and_assigns_expired_step(self):
    """A soft-deleted expired step is revived and reassigned."""
    summary_key = peer.ReviewSummary(
        expired_count=1, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id)
    step_key = peer.ReviewStep(**step_fields).put()
    review_module.Manager.add_reviewer(
        self.unit_id, self.submission_key, self.reviewee_key,
        self.reviewer_key)
    step, summary = db.get([step_key, summary_key])
    self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
    self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
    self.assertFalse(step.removed)
    self.assertEqual(1, summary.assigned_count)
    self.assertEqual(0, summary.expired_count)
def test_delete_reviewer_marks_step_removed_and_decrements_summary(self):
    """delete_reviewer soft-deletes the step and fixes the counters."""
    summary_key = peer.ReviewSummary(
        assigned_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id)
    step_key = peer.ReviewStep(**step_fields).put()
    # Precondition: the step is live and counted.
    step, summary = db.get([step_key, summary_key])
    self.assertFalse(step.removed)
    self.assertEqual(1, summary.assigned_count)
    deleted_key = review_module.Manager.delete_reviewer(step_key)
    step, summary = db.get([step_key, summary_key])
    self.assertEqual(step_key, deleted_key)
    self.assertTrue(step.removed)
    self.assertEqual(0, summary.assigned_count)
def test_delete_reviewer_raises_key_error_when_step_missing(self):
    """Deleting a step that was never stored raises KeyError."""
    missing_step_key = db.Key.from_path(
        peer.ReviewStep.kind(), 'missing_step_key')
    self.assertRaises(
        KeyError, review_module.Manager.delete_reviewer, missing_step_key)
def test_delete_reviewer_raises_key_error_when_summary_missing(self):
    """A step whose summary entity is gone cannot be deleted."""
    dangling_summary_key = db.Key.from_path(
        peer.ReviewSummary.kind(), 'missing_review_summary_key')
    step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=dangling_summary_key,
        reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
        submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id).put()
    self.assertRaises(
        KeyError, review_module.Manager.delete_reviewer, step_key)
def test_delete_reviewer_raises_removed_error_if_already_removed(self):
    """Double-deleting a step raises RemovedError."""
    summary_key = peer.ReviewSummary(
        assigned_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id).put()
    step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id).put()
    self.assertRaises(
        domain.RemovedError, review_module.Manager.delete_reviewer,
        step_key)
def test_expire_review_raises_key_error_when_step_missing(self):
    """Expiring a step that was never stored raises KeyError."""
    missing_step_key = db.Key.from_path(
        peer.ReviewStep.kind(), 'missing_step_key')
    self.assertRaises(
        KeyError, review_module.Manager.expire_review, missing_step_key)
def test_expire_review_raises_key_error_when_summary_missing(self):
    """A step whose summary entity is gone cannot be expired."""
    dangling_summary_key = db.Key.from_path(
        peer.ReviewSummary.kind(), 'missing_review_summary_key')
    step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=dangling_summary_key,
        reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
        submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id).put()
    self.assertRaises(
        KeyError, review_module.Manager.expire_review, step_key)
def test_expire_review_raises_transition_error_when_state_completed(self):
    """expire_review must refuse to expire an already-completed step."""
    # Bug fix: the summary was built with a nonexistent 'completed' kwarg
    # (the property, used everywhere else in this file, is
    # 'completed_count'); old db.Model silently accepted the stray kwarg.
    summary_key = peer.ReviewSummary(
        completed_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id
    ).put()
    step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
    ).put()
    self.assertRaises(
        domain.TransitionError, review_module.Manager.expire_review,
        step_key)
def test_expire_review_raises_transition_error_when_state_expired(self):
    """Expiring an already-expired step raises TransitionError."""
    summary_key = peer.ReviewSummary(
        expired_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id).put()
    step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id).put()
    self.assertRaises(
        domain.TransitionError, review_module.Manager.expire_review,
        step_key)
def test_expire_review_raises_removed_error_when_step_removed(self):
    """Expiring a soft-deleted step raises RemovedError."""
    summary_key = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=self.unit_id).put()
    step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id).put()
    self.assertRaises(
        domain.RemovedError, review_module.Manager.expire_review, step_key)
def test_expire_review_transitions_state_and_updates_summary(self):
    """A successful expire flips ASSIGNED -> EXPIRED and moves the count."""
    summary_key = peer.ReviewSummary(
        assigned_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id)
    step_key = peer.ReviewStep(**step_fields).put()
    # Precondition: one assigned, none expired.
    step, summary = db.get([step_key, summary_key])
    self.assertEqual(1, summary.assigned_count)
    self.assertEqual(0, summary.expired_count)
    self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
    expired_key = review_module.Manager.expire_review(step_key)
    step, summary = db.get([expired_key, summary_key])
    self.assertEqual(0, summary.assigned_count)
    self.assertEqual(1, summary.expired_count)
    self.assertEqual(domain.REVIEW_STATE_EXPIRED, step.state)
def test_expire_old_reviews_for_unit_expires_found_reviews(self):
    """All assigned steps in the unit expire; the summary is updated."""
    summary_key = peer.ReviewSummary(
        assigned_count=2, completed_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id
    ).put()
    first_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
    ).put()
    second_reviewee_key = models.Student(
        key_name='reviewee2@example.com').put()
    second_submission_key = student_work.Submission(
        reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
    second_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=second_reviewee_key,
        reviewer_key=self.reviewer_key,
        submission_key=second_submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
    ).put()
    # A 0-minute window means every assigned step counts as "old".
    review_module.Manager.expire_old_reviews_for_unit(0, self.unit_id)
    first_step, second_step, summary = db.get(
        [first_step_key, second_step_key, summary_key])
    self.assertEqual(
        [domain.REVIEW_STATE_EXPIRED, domain.REVIEW_STATE_EXPIRED],
        [step.state for step in [first_step, second_step]])
    self.assertEqual(0, summary.assigned_count)
    self.assertEqual(2, summary.expired_count)
def test_expire_old_reviews_skips_errors_and_continues_processing(self):
    """Unprocessable steps are skipped; the rest still get expired."""
    # Create and bind a function that we can swap in to generate a query
    # that will pick up bad results so we can tell that we skip them.
    # (The injected query returns COMPLETED steps too, which expire_review
    # cannot process.)
    query_containing_unprocessable_entities = peer.ReviewStep.all(
        keys_only=True)
    query_fn = types.MethodType(
        lambda x, y, z: query_containing_unprocessable_entities,
        review_module.Manager(), review_module.Manager)
    self.swap(
        review_module.Manager, 'get_expiry_query', query_fn)
    summary_key = peer.ReviewSummary(
        assigned_count=1, completed_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id
    ).put()
    processable_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
    ).put()
    second_reviewee_key = models.Student(
        key_name='reviewee2@example.com').put()
    second_submission_key = student_work.Submission(
        reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
    error_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=second_reviewee_key,
        reviewer_key=self.reviewer_key,
        submission_key=second_submission_key,
        state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
    ).put()
    review_module.Manager.expire_old_reviews_for_unit(0, self.unit_id)
    processed_step, error_step, summary = db.get(
        [processable_step_key, error_step_key, summary_key])
    # The completed step is untouched; only the assigned one expired.
    self.assertEqual(domain.REVIEW_STATE_COMPLETED, error_step.state)
    self.assertEqual(domain.REVIEW_STATE_EXPIRED, processed_step.state)
    self.assertEqual(0, summary.assigned_count)
    self.assertEqual(1, summary.completed_count)
    self.assertEqual(1, summary.expired_count)
def test_get_assignment_candidates_query_filters_and_orders_correctly(self):
    """Candidates exclude other units and come back in priority order."""
    # Summary for a different unit: must not appear in the results.
    unused_wrong_unit_key = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=str(int(self.unit_id) + 1)
    ).put()
    second_reviewee_key = models.Student(
        key_name='reviewee2@example.com').put()
    second_submission_key = student_work.Submission(
        reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
    older_assigned_and_completed_key = peer.ReviewSummary(
        assigned_count=1, completed_count=1,
        reviewee_key=second_reviewee_key,
        submission_key=second_submission_key, unit_id=self.unit_id
    ).put()
    third_reviewee_key = models.Student(
        key_name='reviewee3@example.com').put()
    third_submission_key = student_work.Submission(
        reviewee_key=third_reviewee_key, unit_id=self.unit_id).put()
    younger_assigned_and_completed_key = peer.ReviewSummary(
        assigned_count=1, completed_count=1,
        reviewee_key=third_reviewee_key,
        submission_key=third_submission_key, unit_id=self.unit_id
    ).put()
    fourth_reviewee_key = models.Student(
        key_name='reviewee4@example.com').put()
    fourth_submission_key = student_work.Submission(
        reviewee_key=fourth_reviewee_key, unit_id=self.unit_id).put()
    completed_but_not_assigned_key = peer.ReviewSummary(
        assigned_count=0, completed_count=1,
        reviewee_key=fourth_reviewee_key,
        submission_key=fourth_submission_key, unit_id=self.unit_id
    ).put()
    fifth_reviewee_key = models.Student(
        key_name='reviewee5@example.com').put()
    fifth_submission_key = student_work.Submission(
        reviewee_key=fifth_reviewee_key, unit_id=self.unit_id).put()
    assigned_but_not_completed_key = peer.ReviewSummary(
        assigned_count=1, completed_count=0,
        reviewee_key=fifth_reviewee_key,
        submission_key=fifth_submission_key, unit_id=self.unit_id
    ).put()
    results = review_module.Manager.get_assignment_candidates_query(
        self.unit_id).fetch(5)
    # Expected order, per the assertion below: fewest completed first,
    # then fewest assigned, then older before younger.
    self.assertEqual([
        assigned_but_not_completed_key,
        completed_but_not_assigned_key,
        older_assigned_and_completed_key,
        younger_assigned_and_completed_key
    ], [r.key() for r in results])
def test_get_expiry_query_filters_and_orders_correctly(self):
    """Expiry query yields only live ASSIGNED steps of the unit, oldest
    first; completed, removed, and other-unit steps are excluded."""
    summary_key = peer.ReviewSummary(
        assigned_count=2, completed_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id
    ).put()
    # Excluded: already completed.
    unused_completed_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
    ).put()
    second_reviewee_key = models.Student(
        key_name='reviewee2@example.com').put()
    second_submission_key = student_work.Submission(
        reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
    # Excluded: soft-deleted.
    unused_removed_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=second_reviewee_key,
        reviewer_key=self.reviewer_key,
        submission_key=second_submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
    ).put()
    third_reviewee_key = models.Student(
        key_name='reviewee3@example.com').put()
    third_submission_key = student_work.Submission(
        reviewee_key=third_reviewee_key, unit_id=self.unit_id).put()
    # Excluded: belongs to a different unit.
    unused_other_unit_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=third_reviewee_key,
        reviewer_key=self.reviewer_key,
        submission_key=third_submission_key,
        state=domain.REVIEW_STATE_ASSIGNED,
        unit_id=str(int(self.unit_id) + 1)
    ).put()
    fourth_reviewee_key = models.Student(
        key_name='reviewee4@example.com').put()
    fourth_submission_key = student_work.Submission(
        reviewee_key=fourth_reviewee_key, unit_id=self.unit_id).put()
    first_assigned_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=fourth_reviewee_key,
        reviewer_key=self.reviewer_key,
        submission_key=fourth_submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
    ).put()
    fifth_reviewee_key = models.Student(
        key_name='reviewee5@example.com').put()
    fifth_submission_key = student_work.Submission(
        reviewee_key=fifth_reviewee_key, unit_id=self.unit_id).put()
    second_assigned_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=fifth_reviewee_key,
        reviewer_key=self.reviewer_key,
        submission_key=fifth_submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
    ).put()
    zero_review_window_query = review_module.Manager.get_expiry_query(
        0, self.unit_id)
    future_review_window_query = review_module.Manager.get_expiry_query(
        1, self.unit_id)
    self.assertEqual(
        [first_assigned_step_key, second_assigned_step_key],
        zero_review_window_query.fetch(3))
    # No items are > 1 minute old, so we expect an empty result set.
    self.assertEqual(None, future_review_window_query.get())
def test_get_new_review_creates_step_and_updates_summary(self):
    """Auto-assignment creates an ASSIGNED step and bumps the summary."""
    summary_key = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=self.unit_id).put()
    # Before assignment the summary has no assigned reviews.
    self.assertEqual(0, db.get(summary_key).assigned_count)
    step_key = review_module.Manager.get_new_review(
        self.unit_id, self.reviewer_key)
    step, summary = db.get([step_key, summary_key])
    self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
    self.assertEqual(summary.key(), step.review_summary_key)
    self.assertEqual(self.reviewee_key, step.reviewee_key)
    self.assertEqual(self.reviewer_key, step.reviewer_key)
    self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
    self.assertEqual(self.submission_key, step.submission_key)
    self.assertEqual(self.unit_id, step.unit_id)
    self.assertEqual(1, summary.assigned_count)
def test_get_new_review_raises_key_error_when_summary_missing(self):
    """Summary vanishing between candidate pick and fetch is a KeyError."""
    summary_key = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=self.unit_id
    ).put()
    # Create and bind a function that we can swap in to pick the review
    # candidate but, as a side effect, delete the review summary, causing
    # the subsequent lookup by key to fail.
    def pick_and_remove(unused_cls, candidates):
        db.delete(summary_key)
        return candidates[0]
    fn = types.MethodType(
        pick_and_remove, review_module.Manager(), review_module.Manager)
    self.swap(
        review_module.Manager, '_choose_assignment_candidate', fn)
    self.assertRaises(
        KeyError, review_module.Manager.get_new_review, self.unit_id,
        self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_already_assigned(self):
    """A reviewer already assigned to the only candidate gets nothing."""
    summary_key = peer.ReviewSummary(
        assigned_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id)
    peer.ReviewStep(**step_fields).put()
    self.assertRaises(
        domain.NotAssignableError, review_module.Manager.get_new_review,
        self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_already_completed(self):
    """Neither a live nor a removed completed step may be reassigned."""
    # Bug fix: the summary was built with a nonexistent 'completed' kwarg
    # (the property, used everywhere else in this file, is
    # 'completed_count'); old db.Model silently accepted the stray kwarg.
    summary_key = peer.ReviewSummary(
        completed_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id
    ).put()
    already_completed_unremoved_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
    ).put()
    self.assertRaises(
        domain.NotAssignableError, review_module.Manager.get_new_review,
        self.unit_id, self.reviewer_key)
    # Swap the live completed step for a removed one; still not assignable.
    db.delete(already_completed_unremoved_step_key)
    unused_already_completed_removed_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
    ).put()
    self.assertRaises(
        domain.NotAssignableError, review_module.Manager.get_new_review,
        self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_review_is_for_self(self):
    """A reviewer is never handed their own submission."""
    peer.ReviewSummary(
        assigned_count=1, reviewee_key=self.reviewer_key,
        submission_key=self.submission_key, unit_id=self.unit_id).put()
    self.assertRaises(
        domain.NotAssignableError, review_module.Manager.get_new_review,
        self.unit_id,
        self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_no_candidates(self):
    """With no summaries stored at all there is nothing to assign."""
    self.assertRaises(
        domain.NotAssignableError, review_module.Manager.get_new_review,
        self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_retry_limit_hit(self):
    """With max_retries=0, one concurrent-update skip exhausts retries."""
    higher_priority_summary = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=self.unit_id)
    higher_priority_summary_key = higher_priority_summary.put()
    second_reviewee_key = models.Student(
        key_name='reviewee2@example.com').put()
    second_submission_key = student_work.Submission(
        reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
    lower_priority_summary_key = peer.ReviewSummary(
        completed_count=1, reviewee_key=second_reviewee_key,
        submission_key=second_submission_key, unit_id=self.unit_id
    ).put()
    self.assertEqual(  # Ensure we'll process higher priority first.
        [higher_priority_summary_key, lower_priority_summary_key],
        [c.key() for c in
         review_module.Manager.get_assignment_candidates_query(
             self.unit_id).fetch(2)])
    # Create and bind a function that we can swap in to pick the review
    # candidate but as a side-effect updates the highest priority candidate
    # so we'll skip it and retry.
    def pick_and_update(unused_cls, candidates):
        db.put(higher_priority_summary)
        return candidates[0]
    fn = types.MethodType(
        pick_and_update, review_module.Manager(), review_module.Manager)
    self.swap(
        review_module.Manager, '_choose_assignment_candidate', fn)
    self.assertRaises(
        domain.NotAssignableError, review_module.Manager.get_new_review,
        self.unit_id, self.reviewer_key, max_retries=0)
def test_get_new_review_raises_not_assignable_when_summary_updated(self):
    """A candidate modified mid-assignment is rejected."""
    summary = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=self.unit_id)
    summary.put()
    # Create and bind a function that we can swap in to pick the review
    # candidate but as a side-effect updates the summary so we'll reject it
    # as a candidate.
    def pick_and_update(unused_cls, candidates):
        db.put(summary)
        return candidates[0]
    fn = types.MethodType(
        pick_and_update, review_module.Manager(), review_module.Manager)
    self.swap(
        review_module.Manager, '_choose_assignment_candidate', fn)
    self.assertRaises(
        domain.NotAssignableError, review_module.Manager.get_new_review,
        self.unit_id, self.reviewer_key)
def test_get_new_review_reassigns_removed_assigned_step(self):
    """A soft-deleted ASSIGNED step is revived for the same reviewer."""
    summary_key = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id)
    peer.ReviewStep(**step_fields).put()
    step_key = review_module.Manager.get_new_review(
        self.unit_id, self.reviewer_key)
    step, summary = db.get([step_key, summary_key])
    # The revived step is now auto-assigned and live again.
    self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
    self.assertFalse(step.removed)
    self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
    self.assertEqual(1, summary.assigned_count)
def test_get_new_review_reassigns_removed_expired_step(self):
    """A soft-deleted EXPIRED step is revived and reassigned."""
    summary_key = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=self.unit_id).put()
    step_fields = dict(
        assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id)
    peer.ReviewStep(**step_fields).put()
    step_key = review_module.Manager.get_new_review(
        self.unit_id, self.reviewer_key)
    step, summary = db.get([step_key, summary_key])
    self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
    self.assertFalse(step.removed)
    self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
    self.assertEqual(1, summary.assigned_count)
    self.assertEqual(0, summary.expired_count)
def test_get_new_review_retries_successfully(self):
    """After skipping a concurrently-updated candidate, the next is used."""
    higher_priority_summary = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=self.unit_id)
    higher_priority_summary_key = higher_priority_summary.put()
    second_reviewee_key = models.Student(
        key_name='reviewee2@example.com').put()
    second_submission_key = student_work.Submission(
        reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
    lower_priority_summary_key = peer.ReviewSummary(
        completed_count=1, reviewee_key=second_reviewee_key,
        submission_key=second_submission_key, unit_id=self.unit_id
    ).put()
    self.assertEqual(  # Ensure we'll process higher priority first.
        [higher_priority_summary_key, lower_priority_summary_key],
        [c.key() for c in
         review_module.Manager.get_assignment_candidates_query(
             self.unit_id).fetch(2)])
    # Create and bind a function that we can swap in to pick the review
    # candidate but as a side-effect updates the highest priority candidate
    # so we'll skip it and retry.
    def pick_and_update(unused_cls, candidates):
        db.put(higher_priority_summary)
        return candidates[0]
    fn = types.MethodType(
        pick_and_update, review_module.Manager(), review_module.Manager)
    self.swap(
        review_module.Manager, '_choose_assignment_candidate', fn)
    step_key = review_module.Manager.get_new_review(
        self.unit_id, self.reviewer_key)
    step = db.get(step_key)
    # The retry fell through to the lower-priority candidate.
    self.assertEqual(lower_priority_summary_key, step.review_summary_key)
    def test_get_review_step_keys_by_returns_list_of_keys(self):
        """get_review_step_keys_by() returns only the given reviewer's steps.

        Creates one step for self.reviewer_key and one for a different
        reviewer in the same unit, and verifies only the former's key is
        returned.
        """
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        matching_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        # Same unit, different reviewer: must be filtered out of the result.
        non_matching_reviewer = models.Student(key_name='reviewer2@example.com')
        non_matching_reviewer_key = non_matching_reviewer.put()
        unused_non_matching_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=non_matching_reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED,
            unit_id=self.unit_id
        ).put()
        self.assertEqual(
            [matching_step_key],
            review_module.Manager.get_review_step_keys_by(
                self.unit_id, self.reviewer_key))
    def test_get_review_step_keys_by_returns_keys_in_sorted_order(self):
        """get_review_step_keys_by() returns keys in a stable sorted order.

        Creates two steps for the same reviewer over different submissions
        and asserts the result order matches creation order.  NOTE(review):
        the expected ordering presumably follows the query's sort property
        (e.g. creation time or key name) — confirm against the Manager
        implementation if this becomes flaky.
        """
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        first_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        second_reviewee_key = models.Student(
            key_name='reviewee2@example.com').put()
        second_submission_key = student_work.Submission(
            reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
        second_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=second_reviewee_key,
            reviewer_key=self.reviewer_key,
            submission_key=second_submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        self.assertEqual(
            [first_step_key, second_step_key],
            review_module.Manager.get_review_step_keys_by(
                self.unit_id, self.reviewer_key))
    def test_get_review_step_keys_by_returns_empty_list_when_no_matches(self):
        """get_review_step_keys_by() returns [] when nothing matches.

        Creates two near-miss steps — one with a different reviewer and one
        with a different unit id — and verifies neither is returned.
        """
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        # Near miss #1: same unit, different reviewer.
        non_matching_reviewer = models.Student(key_name='reviewer2@example.com')
        non_matching_reviewer_key = non_matching_reviewer.put()
        unused_non_matching_step_different_reviewer_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=non_matching_reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED,
            unit_id=self.unit_id,
        ).put()
        # Near miss #2: same reviewer, different unit id.
        unused_non_matching_step_different_unit_id_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED,
            unit_id=str(int(self.unit_id) + 1),
        ).put()
        self.assertEqual(
            [], review_module.Manager.get_review_step_keys_by(
                self.unit_id, self.reviewer_key))
    def test_get_review_steps_by_keys(self):
        """get_review_steps_by_keys() mirrors db.get(): order kept, None on miss.

        Fetches one existing step and one key with no backing entity, then
        checks the domain objects field-for-field against the raw datastore
        models and that the missing key maps to None in both results.
        """
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        # A syntactically valid key for a step that was never stored.
        second_reviewer_key = models.Student(
            key_name='reviewer2@example.com').put()
        missing_step_key = db.Key.from_path(
            peer.ReviewStep.kind(),
            peer.ReviewStep.key_name(
                self.submission_key, second_reviewer_key))
        model_objects = db.get([step_key, missing_step_key])
        domain_objects = review_module.Manager.get_review_steps_by_keys(
            [step_key, missing_step_key])
        model_step, model_miss = model_objects
        domain_step, domain_miss = domain_objects
        self.assertEqual(2, len(model_objects))
        self.assertEqual(2, len(domain_objects))
        self.assertIsNone(model_miss)
        self.assertIsNone(domain_miss)
        # Domain object must mirror every persisted model field; note that
        # model key() is a method while the domain key is an attribute.
        self.assertEqual(model_step.assigner_kind, domain_step.assigner_kind)
        self.assertEqual(model_step.change_date, domain_step.change_date)
        self.assertEqual(model_step.create_date, domain_step.create_date)
        self.assertEqual(model_step.key(), domain_step.key)
        self.assertEqual(model_step.removed, domain_step.removed)
        self.assertEqual(model_step.review_key, domain_step.review_key)
        self.assertEqual(
            model_step.review_summary_key, domain_step.review_summary_key)
        self.assertEqual(model_step.reviewee_key, domain_step.reviewee_key)
        self.assertEqual(model_step.reviewer_key, domain_step.reviewer_key)
        self.assertEqual(model_step.state, domain_step.state)
        self.assertEqual(model_step.submission_key, domain_step.submission_key)
        self.assertEqual(model_step.unit_id, domain_step.unit_id)
def test_get_reviews_by_keys(self):
review_key = student_work.Review(
contents='contents', reviewer_key=self.reviewer_key,
unit_id=self.unit_id
).put()
missing_review_key = db.Key.from_path(
student_work.Review.kind(),
student_work.Review.key_name(
str(int(self.unit_id) + 1), self.reviewer_key))
model_objects = db.get([review_key, missing_review_key])
domain_objects = review_module.Manager.get_reviews_by_keys(
[review_key, missing_review_key])
model_review, model_miss = model_objects
domain_review, domain_miss = domain_objects
self.assertEqual(2, len(model_objects))
self.assertEqual(2, len(domain_objects))
self.assertIsNone(model_miss)
self.assertIsNone(domain_miss)
self.assertEqual(model_review.contents, domain_review.contents)
self.assertEqual(model_review.key(), domain_review.key)
def test_get_submission_and_review_step_keys_no_steps(self):
student_work.Submission(
reviewee_key=self.reviewee_key, unit_id=self.unit_id).put()
peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id
).put()
self.assertEqual(
(self.submission_key, []),
review_module.Manager.get_submission_and_review_step_keys(
self.unit_id, self.reviewee_key))
    def test_get_submission_and_review_step_keys_with_steps(self):
        """Returns the submission key plus only its own review step keys.

        Creates a step against the reviewee's submission and another step
        against a different reviewee's submission, and verifies the result
        pairs the original submission key with only the matching step key.
        """
        student_work.Submission(
            reviewee_key=self.reviewee_key, unit_id=self.unit_id).put()
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        matching_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        # A step over a different reviewee's submission in the same unit;
        # it must not appear in the result for self.reviewee_key.
        non_matching_reviewee_key = models.Student(
            key_name='reviewee2@example.com').put()
        non_matching_submission_key = student_work.Submission(
            contents='contents2', reviewee_key=non_matching_reviewee_key,
            unit_id=self.unit_id).put()
        unused_non_matching_step_different_submission_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key,
            reviewee_key=non_matching_reviewee_key,
            reviewer_key=self.reviewer_key,
            submission_key=non_matching_submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        self.assertEqual(
            (self.submission_key, [matching_step_key]),
            review_module.Manager.get_submission_and_review_step_keys(
                self.unit_id, self.reviewee_key))
def test_get_submission_and_review_step_keys_returns_none_on_miss(self):
self.assertIsNone(
review_module.Manager.get_submission_and_review_step_keys(
self.unit_id, self.reviewee_key))
def test_get_submissions_by_keys(self):
submission_key = student_work.Submission(
contents='contents', reviewee_key=self.reviewee_key,
unit_id=self.unit_id).put()
missing_submission_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
str(int(self.unit_id) + 1), self.reviewee_key))
domain_models = db.get([submission_key, missing_submission_key])
domain_objects = review_module.Manager.get_submissions_by_keys(
[submission_key, missing_submission_key])
model_submission, model_miss = domain_models
domain_submission, domain_miss = domain_objects
self.assertEqual(2, len(domain_models))
self.assertEqual(2, len(domain_objects))
self.assertIsNone(model_miss)
self.assertIsNone(domain_miss)
self.assertEqual(model_submission.contents, domain_submission.contents)
self.assertEqual(model_submission.key(), domain_submission.key)
def test_start_review_process_for_succeeds(self):
key = review_module.Manager.start_review_process_for(
self.unit_id, self.submission_key, self.reviewee_key)
summary = db.get(key)
self.assertEqual(self.reviewee_key, summary.reviewee_key)
self.assertEqual(self.submission_key, summary.submission_key)
self.assertEqual(self.unit_id, summary.unit_id)
def test_start_review_process_for_throws_if_already_started(self):
collision = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id)
collision.put()
self.assertRaises(
domain.ReviewProcessAlreadyStartedError,
review_module.Manager.start_review_process_for,
self.unit_id, self.submission_key, self.reviewee_key)
    def test_write_review_raises_constraint_error_if_key_but_no_review(self):
        """write_review() raises ConstraintError for a dangling review_key.

        The step references a review key whose entity was never stored, so
        the write must fail rather than silently create or skip the review.
        """
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        # review_key points at a Review entity that does not exist.
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.ConstraintError, review_module.Manager.write_review,
            step_key, 'payload')
    def test_write_review_raises_constraint_error_if_no_summary(self):
        """write_review() raises ConstraintError for a dangling summary key.

        The step's review exists, but its review_summary_key points at a
        ReviewSummary entity that was never stored.
        """
        missing_summary_key = db.Key.from_path(
            peer.ReviewSummary.kind(),
            peer.ReviewSummary.key_name(self.submission_key))
        review_key = student_work.Review(
            contents='contents', reviewer_key=self.reviewer_key,
            unit_id=self.unit_id).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=review_key, review_summary_key=missing_summary_key,
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED,
            unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.ConstraintError, review_module.Manager.write_review,
            step_key, 'payload')
def test_write_review_raises_key_error_if_no_step(self):
bad_step_key = db.Key.from_path(peer.ReviewStep.kind(), 'missing')
self.assertRaises(
KeyError, review_module.Manager.write_review, bad_step_key,
'payload')
    def test_write_review_raises_removed_error_if_step_removed(self):
        """write_review() raises RemovedError when the step is removed.

        A removed step must reject writes regardless of its state.
        """
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.RemovedError, review_module.Manager.write_review, step_key,
            'payload')
    def test_write_review_raises_transition_error_if_step_completed(self):
        """write_review() raises TransitionError on a COMPLETED step.

        Once a step is completed, further writes are an invalid state
        transition.
        """
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.TransitionError, review_module.Manager.write_review,
            step_key, 'payload')
    def test_write_review_with_mark_completed_false(self):
        """write_review(mark_completed=False) saves a draft without completing.

        The review contents are overwritten, but the step stays ASSIGNED and
        the summary counters do not move.
        """
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        review_key = student_work.Review(
            contents='old_contents', reviewer_key=self.reviewer_key,
            unit_id=self.unit_id).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=review_key, review_summary_key=summary_key,
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        updated_step_key = review_module.Manager.write_review(
            step_key, 'new_contents', mark_completed=False)
        self.assertEqual(step_key, updated_step_key)
        step, summary = db.get([updated_step_key, summary_key])
        updated_review = db.get(step.review_key)
        # Draft write: counters and state untouched, contents replaced.
        self.assertEqual(1, summary.assigned_count)
        self.assertEqual(0, summary.completed_count)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        self.assertEqual('new_contents', updated_review.contents)
    def test_write_review_with_no_review_mark_completed_false(self):
        """Draft write creates the Review entity when the step has none yet.

        The step starts with no review_key; write_review() must create the
        review, link it to the step, and leave the state/counters alone.
        """
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        # Deliberately no review_key: the review does not exist yet.
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        self.assertIsNone(db.get(step_key).review_key)
        updated_step_key = review_module.Manager.write_review(
            step_key, 'contents', mark_completed=False)
        self.assertEqual(step_key, updated_step_key)
        step, summary = db.get([updated_step_key, summary_key])
        updated_review = db.get(step.review_key)
        self.assertEqual(1, summary.assigned_count)
        self.assertEqual(0, summary.completed_count)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        # The freshly created review is now linked from the step.
        self.assertEqual(step.review_key, updated_review.key())
        self.assertEqual('contents', updated_review.contents)
    def test_write_review_with_no_review_mark_completed_true(self):
        """Completing a step with no prior review creates and completes it.

        The review is created and linked, the step transitions
        ASSIGNED -> COMPLETED, and the summary shifts one unit from
        assigned_count to completed_count.
        """
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        # Deliberately no review_key: the review does not exist yet.
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        self.assertIsNone(db.get(step_key).review_key)
        updated_step_key = review_module.Manager.write_review(
            step_key, 'contents')
        self.assertEqual(step_key, updated_step_key)
        step, summary = db.get([updated_step_key, summary_key])
        updated_review = db.get(step.review_key)
        self.assertEqual(0, summary.assigned_count)
        self.assertEqual(1, summary.completed_count)
        self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
        self.assertEqual(step.review_key, updated_review.key())
        self.assertEqual('contents', updated_review.contents)
    def test_write_review_with_state_assigned_and_mark_completed_true(self):
        """Completing an ASSIGNED step updates contents, state and counters.

        mark_completed defaults to True: contents are replaced, the step
        transitions to COMPLETED, and the summary moves one unit from
        assigned_count to completed_count.
        """
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        review_key = student_work.Review(
            contents='old_contents', reviewer_key=self.reviewer_key,
            unit_id=self.unit_id).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=review_key, review_summary_key=summary_key,
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        updated_step_key = review_module.Manager.write_review(
            step_key, 'new_contents')
        self.assertEqual(step_key, updated_step_key)
        step, summary = db.get([updated_step_key, summary_key])
        updated_review = db.get(step.review_key)
        self.assertEqual(0, summary.assigned_count)
        self.assertEqual(1, summary.completed_count)
        self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
        self.assertEqual('new_contents', updated_review.contents)
    def test_write_review_with_state_expired_and_mark_completed_true(self):
        """Completing an EXPIRED step decrements expired, increments completed.

        Unlike the ASSIGNED case, the summary rebalances from expired_count
        rather than assigned_count.
        """
        summary_key = peer.ReviewSummary(
            expired_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        review_key = student_work.Review(
            contents='old_contents', reviewer_key=self.reviewer_key,
            unit_id=self.unit_id).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=review_key, review_summary_key=summary_key,
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        updated_step_key = review_module.Manager.write_review(
            step_key, 'new_contents')
        self.assertEqual(step_key, updated_step_key)
        step, summary = db.get([updated_step_key, summary_key])
        updated_review = db.get(step.review_key)
        self.assertEqual(1, summary.completed_count)
        self.assertEqual(0, summary.expired_count)
        self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
        self.assertEqual('new_contents', updated_review.contents)
| Python |
# coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for controllers pertaining to peer review assessments."""
__author__ = 'Sean Lip'
from models import transforms
import actions
from actions import assert_contains
from actions import assert_does_not_contain
from actions import assert_equals
# The unit id for the peer review assignment in the default course.
LEGACY_REVIEW_UNIT_ID = 'ReviewAssessmentExample'
def get_review_step_key(response):
    """Returns the review step key from the request's query string.

    Args:
        response: webtest response; the originating request's environ must
            carry the step key as a 'key=...' query parameter.

    Returns:
        str. The value of the 'key' parameter, or '' if it is absent.

    The previous implementation sliced from find('key=') + 4 to the end of
    the string, which returned garbage when 'key=' was missing (find() is -1)
    and leaked any following '&param=value' text into the key.  Parsing
    parameter-by-parameter fixes both while preserving the normal case.
    """
    request_query_string = response.request.environ['QUERY_STRING']
    for param in request_query_string.split('&'):
        if param.startswith('key='):
            return param[len('key='):]
    return ''
def get_review_payload(identifier, is_draft=False):
    """Builds a sample review form payload.

    Args:
        identifier: str. A marker embedded in the regex answer so tests can
            recognize which review this payload produced.
        is_draft: bool. Whether the review is saved as a draft rather than
            submitted.

    Returns:
        dict. The 'answers'/'is_draft' form fields for a review submission.
    """
    answers = transforms.dumps([
        {'index': 0, 'type': 'choices', 'value': '0', 'correct': False},
        {'index': 1, 'type': 'regex', 'value': identifier, 'correct': True}
    ])
    draft_flag = 'true' if is_draft else 'false'
    return {'answers': answers, 'is_draft': draft_flag}
class PeerReviewControllerTest(actions.TestBase):
"""Test peer review from the Student perspective."""
    def test_submit_assignment(self):
        """Test submission of peer-reviewed assignments.

        End-to-end flow: preview visibility, registration, first submission,
        read-only display of the submitted work, rejection of a second
        submission, progress-circle update, and unlocking of the review
        dashboard.
        """
        email = 'test_peer_reviewed_assignment_submission@google.com'
        name = 'Test Peer Reviewed Assignment Submission'
        submission = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'First answer to Q1',
             'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'First answer to Q3',
             'correct': True},
        ])
        # A second, distinct answer set used to prove re-submission is
        # rejected and does not overwrite the first submission.
        second_submission = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'Second answer to Q1',
             'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'Second answer to Q3',
             'correct': True},
        ])
        # Check that the sample peer-review assignment shows up in the preview
        # page.
        response = actions.view_preview(self)
        assert_contains('Sample peer review assignment', response.body)
        assert_does_not_contain('Review peer assignments', response.body)
        actions.login(email)
        actions.register(self, name)
        # Check that the sample peer-review assignment shows up in the course
        # page and that it can be visited.
        response = actions.view_course(self)
        assert_contains('Sample peer review assignment', response.body)
        assert_contains('Review peer assignments', response.body)
        assert_contains(
            '<a href="assessment?name=%s">' % LEGACY_REVIEW_UNIT_ID,
            response.body)
        # Before submission the review link is plain text, not a hyperlink.
        assert_contains('<span> Review peer assignments </span>', response.body,
                        collapse_whitespace=True)
        assert_does_not_contain('<a href="reviewdashboard', response.body,
                                collapse_whitespace=True)
        # Check that the progress circle for this assignment is unfilled.
        assert_contains(
            'progress-notstarted-%s' % LEGACY_REVIEW_UNIT_ID, response.body)
        assert_does_not_contain(
            'progress-completed-%s' % LEGACY_REVIEW_UNIT_ID, response.body)
        # Try to access an invalid assignment.
        response = self.get(
            'assessment?name=FakeAssessment', expect_errors=True)
        assert_equals(response.status_int, 404)
        # The student should not be able to see others' reviews because he/she
        # has not submitted an assignment yet.
        response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_does_not_contain('Submitted assignment', response.body)
        assert_contains('Due date for this assignment', response.body)
        assert_does_not_contain('Reviews received', response.body)
        # The student should not be able to access the review dashboard because
        # he/she has not submitted the assignment yet.
        response = self.get(
            'reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID,
            expect_errors=True)
        assert_contains('You must submit the assignment for', response.body)
        # The student submits the assignment.
        response = actions.submit_assessment(
            self,
            LEGACY_REVIEW_UNIT_ID,
            {'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        )
        assert_contains(
            'Thank you for completing this assignment', response.body)
        assert_contains('Review peer assignments', response.body)
        # The student views the submitted assignment, which has become readonly.
        response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_contains('First answer to Q1', response.body)
        assert_contains('Submitted assignment', response.body)
        # The student tries to re-submit the same assignment. This should fail.
        response = actions.submit_assessment(
            self,
            LEGACY_REVIEW_UNIT_ID,
            {'answers': second_submission,
             'assessment_type': LEGACY_REVIEW_UNIT_ID},
            presubmit_checks=False
        )
        assert_contains(
            'You have already submitted this assignment.', response.body)
        assert_contains('Review peer assignments', response.body)
        # The student views the submitted assignment. The new answers have not
        # been saved.
        response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_contains('First answer to Q1', response.body)
        assert_does_not_contain('Second answer to Q1', response.body)
        # The student checks the course page and sees that the progress
        # circle for this assignment has been filled, and that the 'Review
        # peer assignments' link is now available.
        response = actions.view_course(self)
        assert_contains(
            'progress-completed-%s' % LEGACY_REVIEW_UNIT_ID, response.body)
        assert_does_not_contain(
            '<span> Review peer assignments </span>', response.body,
            collapse_whitespace=True)
        assert_contains(
            '<a href="reviewdashboard?unit=%s">' % LEGACY_REVIEW_UNIT_ID,
            response.body, collapse_whitespace=True)
        # The student should also be able to now view the review dashboard.
        response = self.get('reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_contains('Assignments for your review', response.body)
        assert_contains('Review a new assignment', response.body)
        actions.logout()
    def test_handling_of_fake_review_step_key(self):
        """Test that bad keys result in the appropriate responses.

        After submitting the assignment, requesting the review page with a
        made-up review step key must 404 rather than leak another student's
        work or crash.
        """
        email = 'student1@google.com'
        name = 'Student 1'
        submission = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
        ])
        payload = {
            'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        actions.login(email)
        actions.register(self, name)
        actions.submit_assessment(self, LEGACY_REVIEW_UNIT_ID, payload)
        # 'Fake key' is not a valid datastore key; expect a 404.
        actions.view_review(
            self, LEGACY_REVIEW_UNIT_ID, 'Fake key',
            expected_status_code=404)
        actions.logout()
    def test_not_enough_assignments_to_allocate(self):
        """Test for the case when there are too few assignments in the pool.

        The only submission in the pool belongs to the requesting student,
        so the review dashboard must report that no new submissions are
        available and disable the request button, rather than assigning the
        student their own work.
        """
        email = 'student1@google.com'
        name = 'Student 1'
        submission = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
        ])
        payload = {
            'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        actions.login(email)
        actions.register(self, name)
        response = actions.submit_assessment(
            self, LEGACY_REVIEW_UNIT_ID, payload)
        # The student goes to the review dashboard and requests an assignment
        # to review -- but there is nothing to review.
        response = actions.request_new_review(
            self, LEGACY_REVIEW_UNIT_ID, expected_status_code=200)
        assert_does_not_contain('Assignment to review', response.body)
        assert_contains('Sorry, there are no new submissions ', response.body)
        # The 'request a review' control is rendered disabled.
        assert_contains('disabled="true"', response.body)
        actions.logout()
    def test_reviewer_cannot_impersonate_another_reviewer(self):
        """Test that one reviewer cannot use another's review step key.

        Student 2 is legitimately assigned Student 1's submission; Student 3
        then tries to open that submission using Student 2's review step key
        and must get a 404.
        """
        email1 = 'student1@google.com'
        name1 = 'Student 1'
        submission1 = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
        ])
        payload1 = {
            'answers': submission1, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        email2 = 'student2@google.com'
        name2 = 'Student 2'
        submission2 = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'S2-1', 'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
        ])
        payload2 = {
            'answers': submission2, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        email3 = 'student3@google.com'
        name3 = 'Student 3'
        submission3 = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'S3-1', 'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
        ])
        payload3 = {
            'answers': submission3, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        # Student 1 submits the assignment.
        actions.login(email1)
        actions.register(self, name1)
        response = actions.submit_assessment(
            self, LEGACY_REVIEW_UNIT_ID, payload1)
        actions.logout()
        # Student 2 logs in and submits the assignment.
        actions.login(email2)
        actions.register(self, name2)
        response = actions.submit_assessment(
            self, LEGACY_REVIEW_UNIT_ID, payload2)
        # Student 2 requests a review, and is given Student 1's assignment.
        response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
        review_step_key_2_for_1 = get_review_step_key(response)
        assert_contains('S1-1', response.body)
        actions.logout()
        # Student 3 logs in, and submits the assignment.
        actions.login(email3)
        actions.register(self, name3)
        response = actions.submit_assessment(
            self, LEGACY_REVIEW_UNIT_ID, payload3)
        # Student 3 tries to view Student 1's assignment using Student 2's
        # review step key, but is not allowed to.
        response = actions.view_review(
            self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
            expected_status_code=404)
        # Student 3 logs out.
        actions.logout()
    def test_student_cannot_see_reviews_prematurely(self):
        """Test that students cannot see others' reviews prematurely.

        A student who has submitted but not yet completed the required
        number of peer reviews sees a gating message instead of the reviews
        of their own work.
        """
        email = 'student1@google.com'
        name = 'Student 1'
        submission = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
        ])
        payload = {
            'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        actions.login(email)
        actions.register(self, name)
        response = actions.submit_assessment(
            self, LEGACY_REVIEW_UNIT_ID, payload)
        # Student 1 cannot see the reviews for his assignment yet, because he
        # has not submitted the two required reviews.
        response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_equals(response.status_int, 200)
        assert_contains('Due date for this assignment', response.body)
        assert_contains(
            'After you have completed the required number of peer reviews',
            response.body)
        actions.logout()
    def test_draft_review_behaviour(self):
        """Test correctness of draft review visibility."""
        # Three students: Student 1's answers are tagged 'S1-1'/'is-S1';
        # Students 2 and 3 are tagged 'not-S1' so later assertions can tell
        # whose submission is being shown on a review page.
        email1 = 'student1@google.com'
        name1 = 'Student 1'
        submission1 = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
        ])
        payload1 = {
            'answers': submission1, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        email2 = 'student2@google.com'
        name2 = 'Student 2'
        submission2 = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'S2-1', 'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
        ])
        payload2 = {
            'answers': submission2, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        email3 = 'student3@google.com'
        name3 = 'Student 3'
        submission3 = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'S3-1', 'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
        ])
        payload3 = {
            'answers': submission3, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        # Student 1 submits the assignment.
        actions.login(email1)
        actions.register(self, name1)
        response = actions.submit_assessment(
            self, LEGACY_REVIEW_UNIT_ID, payload1)
        actions.logout()
        # Student 2 logs in and submits the assignment.
        actions.login(email2)
        actions.register(self, name2)
        response = actions.submit_assessment(
            self, LEGACY_REVIEW_UNIT_ID, payload2)
        # Student 2 requests a review, and is given Student 1's assignment.
        response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
        review_step_key_2_for_1 = get_review_step_key(response)
        assert_contains('S1-1', response.body)
        # Student 2 saves her review as a draft.
        review_2_for_1_payload = get_review_payload(
            'R2for1', is_draft=True)
        response = actions.submit_review(
            self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
            review_2_for_1_payload)
        assert_contains('Your review has been saved.', response.body)
        response = self.get('reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_equals(response.status_int, 200)
        assert_contains('(Draft)', response.body)
        # Student 2's draft is still changeable.
        response = actions.view_review(
            self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1)
        assert_contains('Submit Review', response.body)
        response = actions.submit_review(
            self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
            review_2_for_1_payload)
        assert_contains('Your review has been saved.', response.body)
        # Student 2 logs out.
        actions.logout()
        # Student 3 submits the assignment.
        actions.login(email3)
        actions.register(self, name3)
        response = actions.submit_assessment(
            self, LEGACY_REVIEW_UNIT_ID, payload3)
        actions.logout()
        # Student 1 logs in and requests two assignments to review.
        actions.login(email1)
        response = self.get('/reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
        response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
        assert_contains('Assignment to review', response.body)
        assert_contains('not-S1', response.body)
        review_step_key_1_for_someone = get_review_step_key(response)
        response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
        assert_contains('Assignment to review', response.body)
        assert_contains('not-S1', response.body)
        review_step_key_1_for_someone_else = get_review_step_key(response)
        # The dashboard's request button is disabled once two reviews are out.
        response = self.get('reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_equals(response.status_int, 200)
        assert_contains('disabled="true"', response.body)
        # Student 1 submits both reviews, fulfilling his quota.
        review_1_for_other_payload = get_review_payload('R1for')
        response = actions.submit_review(
            self, LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone,
            review_1_for_other_payload)
        assert_contains(
            'Your review has been submitted successfully', response.body)
        response = actions.submit_review(
            self, LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone_else,
            review_1_for_other_payload)
        assert_contains(
            'Your review has been submitted successfully', response.body)
        response = self.get('/reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_contains('(Completed)', response.body)
        assert_does_not_contain('(Draft)', response.body)
        # Although Student 1 has submitted 2 reviews, he cannot view Student
        # 2's review because it is still in Draft status.
        response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_equals(response.status_int, 200)
        assert_contains(
            'You have not received any peer reviews yet.', response.body)
        assert_does_not_contain('R2for1', response.body)
        # Student 1 logs out.
        actions.logout()
        # Student 2 submits her review for Student 1's assignment.
        actions.login(email2)
        response = self.get('review?unit=%s&key=%s' % (
            LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1))
        assert_does_not_contain('Submitted review', response.body)
        response = actions.submit_review(
            self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
            get_review_payload('R2for1'))
        assert_contains(
            'Your review has been submitted successfully', response.body)
        # Her review is now read-only.
        response = self.get('review?unit=%s&key=%s' % (
            LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1))
        assert_contains('Submitted review', response.body)
        assert_contains('R2for1', response.body)
        # Student 2 logs out.
        actions.logout()
        # Now Student 1 can see the review he has received from Student 2.
        actions.login(email1)
        response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_equals(response.status_int, 200)
        assert_contains('R2for1', response.body)
class PeerReviewDashboardTest(actions.TestBase):
    """Test peer review from the Admin perspective."""

    def test_add_reviewer(self):
        """Test that admin can add a reviewer, and cannot re-add reviewers."""
        email = 'test_add_reviewer@google.com'
        name = 'Test Add Reviewer'
        submission = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'First answer to Q1',
             'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'First answer to Q3',
             'correct': True},
        ])
        payload = {
            'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        actions.login(email)
        actions.register(self, name)
        response = actions.submit_assessment(
            self, LEGACY_REVIEW_UNIT_ID, payload)
        # There is nothing to review on the review dashboard.
        response = actions.request_new_review(
            self, LEGACY_REVIEW_UNIT_ID, expected_status_code=200)
        assert_does_not_contain('Assignment to review', response.body)
        assert_contains('Sorry, there are no new submissions ', response.body)
        actions.logout()
        # The admin assigns the student to review his own work.
        actions.login(email, is_admin=True)
        response = actions.add_reviewer(
            self, LEGACY_REVIEW_UNIT_ID, email, email)
        # add_reviewer redirects; follow response.location to see the result.
        assert_equals(response.status_int, 302)
        response = self.get(response.location)
        assert_does_not_contain(
            'Error 412: The reviewer is already assigned', response.body)
        assert_contains('First answer to Q1', response.body)
        assert_contains(
            'Review 1 from test_add_reviewer@google.com', response.body)
        # The admin repeats the 'add reviewer' action. This should fail.
        response = actions.add_reviewer(
            self, LEGACY_REVIEW_UNIT_ID, email, email)
        assert_equals(response.status_int, 302)
        response = self.get(response.location)
        assert_contains(
            'Error 412: The reviewer is already assigned', response.body)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models/utils.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import counters
from models import utils
from tests.functional import actions
from google.appengine.ext import db
class Model(db.Model):
    """Minimal datastore entity used as a fixture by the QueryMapper tests."""

    # auto_now keeps create_date current on every put; tests sort on it.
    create_date = db.DateTimeProperty(auto_now=True, indexed=True)
    number = db.IntegerProperty(indexed=True)
    string = db.StringProperty()
def process(model, number, string=None):
    """Mapper callback: updates the entity's fields and persists it."""
    model.number, model.string = number, string
    db.put(model)
def stop_mapping_at_5(model):
    """Mapper callback that aborts iteration at the entity numbered 5."""
    if model.number != 5:
        return
    raise utils.StopMapping
class QueryMapperTest(actions.TestBase):
    """Tests for utils.QueryMapper."""

    def test_raising_stop_mapping_stops_execution(self):
        # Entities are visited in 'number' order; the callback raises
        # StopMapping at number == 5, so only 5 entities are processed.
        db.put([Model(number=x) for x in xrange(11)])
        num_processed = utils.QueryMapper(
            Model.all().order('number')).run(stop_mapping_at_5)
        self.assertEqual(5, num_processed)

    def test_run_processes_empty_result_set(self):
        # Running over an empty query must be a no-op returning 0.
        self.assertEqual(
            0, utils.QueryMapper(Model.all()).run(process, 1, string='foo'))

    def test_run_processes_one_entity(self):
        """Tests that we can process < batch_size results."""
        Model().put()
        num_processed = utils.QueryMapper(
            Model.all()).run(process, 1, string='foo')
        model = Model.all().get()
        self.assertEqual(1, num_processed)
        self.assertEqual(1, model.number)
        self.assertEqual('foo', model.string)

    def test_run_process_more_than_1000_entities(self):
        """Tests we can process more entities than the old limit of 1k."""
        counter = counters.PerfCounter(
            'test-run-process-more-than-1000-entities-counter',
            'counter for testing increment by QueryMapper')
        db.put([Model() for _ in xrange(1001)])
        # Also pass custom args to QueryMapper ctor.
        num_processed = utils.QueryMapper(
            Model.all(), batch_size=50, counter=counter, report_every=0
        ).run(process, 1, string='foo')
        last_written = Model.all().order('-create_date').get()
        self.assertEqual(1001, counter.value)
        self.assertEqual(1001, num_processed)
        self.assertEqual(1, last_written.number)
        self.assertEqual('foo', last_written.string)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models/student_work.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import entities
from models import models
from models import student_work
from tests.functional import actions
from google.appengine.ext import db
class ReferencedModel(entities.BaseEntity):
    """Target entity kind that the KeyProperty tests point at."""
    pass
class UnvalidatedReference(entities.BaseEntity):
    """Holds a KeyProperty with no kind, so any key is accepted."""

    referenced_model_key = student_work.KeyProperty()
class ValidatedReference(entities.BaseEntity):
    """Holds a KeyProperty restricted to keys of kind ReferencedModel."""

    referenced_model_key = student_work.KeyProperty(kind=ReferencedModel.kind())
class KeyPropertyTest(actions.TestBase):
    """Tests KeyProperty."""

    def setUp(self):  # From superclass. pylint: disable-msg=g-bad-name
        super(KeyPropertyTest, self).setUp()
        # Every test starts with one stored ReferencedModel to point at.
        self.referenced_model_key = ReferencedModel().put()

    def test_validation_and_datastore_round_trip_of_keys_succeeds(self):
        """Tests happy path for both validation and (de)serialization."""
        model_with_reference = ValidatedReference(
            referenced_model_key=self.referenced_model_key)
        model_with_reference_key = model_with_reference.put()
        model_with_reference_from_datastore = db.get(model_with_reference_key)
        self.assertEqual(
            self.referenced_model_key,
            model_with_reference_from_datastore.referenced_model_key)
        custom_model_from_datastore = db.get(
            model_with_reference_from_datastore.referenced_model_key)
        self.assertEqual(
            self.referenced_model_key, custom_model_from_datastore.key())
        # The round-tripped value must come back as a real db.Key, not a str.
        self.assertTrue(isinstance(
            model_with_reference_from_datastore.referenced_model_key,
            db.Key))

    def test_type_not_validated_if_kind_not_passed(self):
        # Without a kind, a key of any entity kind is accepted.
        model_key = db.Model().put()
        unvalidated = UnvalidatedReference(referenced_model_key=model_key)
        self.assertEqual(model_key, unvalidated.referenced_model_key)

    def test_validation_fails(self):
        # Both a non-key value and a key of the wrong kind must be rejected.
        model_key = db.Model().put()
        self.assertRaises(
            db.BadValueError, ValidatedReference,
            referenced_model_key='not_a_key')
        self.assertRaises(
            db.BadValueError, ValidatedReference,
            referenced_model_key=model_key)
class ReviewTest(actions.TestBase):
    """Tests for the student_work.Review entity."""

    def test_constructor_sets_key_name(self):
        """Tests construction of key_name, put of entity with key_name set."""
        unit_id = 'unit_id'
        reviewer_key = models.Student(key_name='reviewer@example.com').put()
        review_key = student_work.Review(
            reviewer_key=reviewer_key, unit_id=unit_id).put()
        # The stored key name must match Review's canonical key_name scheme.
        self.assertEqual(
            student_work.Review.key_name(unit_id, reviewer_key),
            review_key.name())
class SubmissionTest(actions.TestBase):
    """Tests for the student_work.Submission entity."""

    def test_constructor_sets_key_name(self):
        """Tests construction of key_name, put of entity with key_name set."""
        unit_id = 'unit_id'
        reviewee_key = models.Student(key_name='reviewee@example.com').put()
        review_key = student_work.Submission(
            reviewee_key=reviewee_key, unit_id=unit_id).put()
        # The stored key name must match Submission's canonical scheme.
        self.assertEqual(
            student_work.Submission.key_name(unit_id, reviewee_key),
            review_key.name())
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""A collection of actions for testing Course Builder pages."""
import cgi
import logging
import os
import re
import urllib
import appengine_config
from controllers import sites
from controllers import utils
import main
from models import config
from tests import suite
from google.appengine.api import namespace_manager
# All URLs referred to from all the pages.
UNIQUE_URLS_FOUND = {}

# Template hook-point markers: the view_* helpers below assert that rendered
# pages contain (or omit) these HTML comments.
BASE_HOOK_POINTS = [
    '<!-- base.before_head_tag_ends -->',
    '<!-- base.after_body_tag_begins -->',
    '<!-- base.after_navbar_begins -->',
    '<!-- base.before_navbar_ends -->',
    '<!-- base.after_top_content_ends -->',
    '<!-- base.after_main_content_ends -->',
    '<!-- base.before_body_tag_ends -->']

# Markers expected only on unit pages.
UNIT_HOOK_POINTS = [
    '<!-- unit.after_leftnav_begins -->',
    '<!-- unit.before_leftnav_ends -->',
    '<!-- unit.after_content_begins -->',
    '<!-- unit.before_content_ends -->']

# Markers expected only on the course preview page.
PREVIEW_HOOK_POINTS = [
    '<!-- preview.after_top_content_ends -->',
    '<!-- preview.after_main_content_ends -->']
class ShouldHaveFailedByNow(Exception):
    """Special exception raised when a prior method did not raise.

    Used by assert_all_fail() to distinguish "the callback unexpectedly
    succeeded" from a genuine failure raised by the callback itself.
    """
    # Fix: dropped the redundant `pass` — the docstring already forms the body.
class TestBase(suite.AppEngineTestBase):
    """Contains methods common to all functional tests."""

    def getApp(self):  # pylint: disable-msg=g-bad-name
        """Returns the WSGI app under test with debug mode forced on."""
        main.debug = True
        sites.ApplicationRequestHandler.bind(main.namespaced_routes)
        return main.app

    def assert_default_namespace(self):
        """Raises unless the current datastore namespace is the default."""
        ns = namespace_manager.get_namespace()
        if ns != appengine_config.DEFAULT_NAMESPACE_NAME:
            raise Exception('Expected default namespace, found: %s' % ns)

    def setUp(self):  # pylint: disable-msg=g-bad-name
        super(TestBase, self).setUp()
        self.supports_editing = False
        self.assert_default_namespace()
        self.namespace = ''
        self.base = '/'
        # Reload all properties now to flush the values modified in other tests.
        config.Registry.get_overrides(True)

    def tearDown(self):  # pylint: disable-msg=g-bad-name
        # Tests must leave the namespace as they found it.
        self.assert_default_namespace()
        super(TestBase, self).tearDown()

    def canonicalize(self, href, response=None):
        """Create absolute URL using <base> if defined, '/' otherwise."""
        if href.startswith('/') or utils.ApplicationHandler.is_absolute(href):
            pass
        else:
            # Relative URL: resolve against the page's <base href> when the
            # response declares one; otherwise against '/'.
            base = '/'
            if response:
                match = re.search(
                    r'<base href=[\'"]?([^\'" >]+)', response.body)
                # NOTE(review): the `not href.startswith('/')` test is always
                # true on this branch (checked above) — looks redundant.
                if match and not href.startswith('/'):
                    base = match.groups()[0]
            href = '%s%s' % (base, href)
        self.audit_url(href)
        return href

    def audit_url(self, url):
        """Record for audit purposes the URL we encountered."""
        UNIQUE_URLS_FOUND[url] = True

    def hook_response(self, response):
        """Modify response.goto() to compute URL using <base>, if defined."""
        if response.status_int == 200:
            self.check_response_hrefs(response)

        # Wrap goto() so relative targets are canonicalized like everything
        # else; keep a reference to the original to avoid recursion.
        gotox = response.goto

        def new_goto(href, method='get', **args):
            return gotox(self.canonicalize(href), method, **args)

        response.goto = new_goto
        return response

    def check_response_hrefs(self, response):
        """Check response page URLs are properly formatted/canonicalized."""
        hrefs = re.findall(r'href=[\'"]?([^\'" >]+)', response.body)
        srcs = re.findall(r'src=[\'"]?([^\'" >]+)', response.body)
        for url in hrefs + srcs:
            # We expect all internal URLs to be relative: 'asset/css/main.css',
            # and use <base> tag. All others URLs must be whitelisted below.
            if url.startswith('/'):
                absolute = url.startswith('//')
                root = url == '/'
                canonical = url.startswith(self.base)
                allowed = url.startswith('/admin') or url.startswith('/_ah/')
                if not (absolute or root or canonical or allowed):
                    raise Exception('Invalid reference \'%s\' in:\n%s' % (
                        url, response.body))
            self.audit_url(self.canonicalize(url, response=response))

    def get(self, url, **kwargs):
        """HTTP GET with URL canonicalization and response hooking."""
        url = self.canonicalize(url)
        logging.info('HTTP Get: %s', url)
        response = self.testapp.get(url, **kwargs)
        return self.hook_response(response)

    def post(self, url, params, expect_errors=False):
        """HTTP POST with URL canonicalization and response hooking."""
        url = self.canonicalize(url)
        logging.info('HTTP Post: %s', url)
        response = self.testapp.post(url, params, expect_errors=expect_errors)
        return self.hook_response(response)

    def put(self, url, params):
        """HTTP PUT with URL canonicalization and response hooking."""
        url = self.canonicalize(url)
        logging.info('HTTP Put: %s', url)
        response = self.testapp.put(url, params)
        return self.hook_response(response)

    def click(self, response, name):
        """Follows the named link on the page; hooks the new response."""
        logging.info('Link click: %s', name)
        response = response.click(name)
        return self.hook_response(response)

    def submit(self, form):
        """Submits a webtest form; hooks the resulting response."""
        logging.info('Form submit: %s', form)
        response = form.submit()
        return self.hook_response(response)
def assert_equals(actual, expected):
    """Raises an Exception when *expected* and *actual* differ."""
    if actual == expected:
        return
    raise Exception('Expected \'%s\', does not match actual \'%s\'.' %
                    (expected, actual))
def to_unicode(text):
    """Returns *text* as unicode, decoding it as UTF-8 when it is not."""
    if isinstance(text, unicode):
        return text
    return unicode(text, 'utf-8')
def assert_contains(needle, haystack, collapse_whitespace=False):
    """Raises unless *needle* occurs within *haystack*."""
    needle = to_unicode(needle)
    haystack = to_unicode(haystack)
    if collapse_whitespace:
        # Fold newlines and runs of spaces into single spaces first.
        haystack = ' '.join(haystack.replace('\n', ' ').split())
    if needle in haystack:
        return
    raise Exception('Can\'t find \'%s\' in \'%s\'.' % (needle, haystack))
def assert_contains_all_of(needles, haystack):
    """Raises unless every one of *needles* occurs within *haystack*."""
    haystack = to_unicode(haystack)
    for raw_needle in needles:
        needle = to_unicode(raw_needle)
        if needle not in haystack:
            raise Exception(
                'Can\'t find \'%s\' in \'%s\'.' % (needle, haystack))
def assert_does_not_contain(needle, haystack, collapse_whitespace=False):
    """Raises when *needle* occurs within *haystack*."""
    needle = to_unicode(needle)
    haystack = to_unicode(haystack)
    if collapse_whitespace:
        # Fold newlines and runs of spaces into single spaces first.
        haystack = ' '.join(haystack.replace('\n', ' ').split())
    if needle not in haystack:
        return
    raise Exception('Found \'%s\' in \'%s\'.' % (needle, haystack))
def assert_contains_none_of(needles, haystack):
    """Raises when any one of *needles* occurs within *haystack*."""
    haystack = to_unicode(haystack)
    for raw_needle in needles:
        needle = to_unicode(raw_needle)
        if needle in haystack:
            raise Exception('Found \'%s\' in \'%s\'.' % (needle, haystack))
def assert_none_fail(browser, callbacks):
    """Invokes all callbacks and expects each one not to fail."""
    for invoke in callbacks:
        invoke(browser)
def assert_all_fail(browser, callbacks):
    """Invokes all callbacks and expects each one to fail."""
    for callback in callbacks:
        try:
            callback(browser)
        except ShouldHaveFailedByNow:
            raise
        except Exception:
            # Expected: the callback failed; move on to the next one.
            continue
        else:
            # The callback completed without raising — that is the failure.
            raise ShouldHaveFailedByNow(
                'Expected to fail: %s().' % callback.__name__)
def login(email, is_admin=False):
    """Simulates a login by populating the App Engine user env variables."""
    os.environ['USER_EMAIL'] = email
    os.environ['USER_ID'] = email
    os.environ['USER_IS_ADMIN'] = '1' if is_admin else '0'
def get_current_user_email():
    """Returns the simulated current user's email.

    Raises:
        Exception: if no user is currently logged in.
    """
    email = os.environ['USER_EMAIL']
    if email:
        return email
    raise Exception('No current user.')
def logout():
    """Clears the simulated login state installed by login()."""
    for key in ('USER_EMAIL', 'USER_ID', 'USER_IS_ADMIN'):
        del os.environ[key]
def register(browser, name):
    """Registers a new student with the given name."""
    response = browser.get('/')
    # An unregistered user hitting the course root is redirected.
    assert_equals(response.status_int, 302)
    response = view_registration(browser)
    # 'form01' is the name field of the registration form.
    response.form.set('form01', name)
    response = browser.submit(response.form)
    assert_contains('Thank you for registering for', response.body)
    check_profile(browser, name)
def check_profile(browser, name):
    """Verifies the profile page shows *name* and the current user's email."""
    response = view_my_profile(browser)
    assert_contains('Email', response.body)
    # The name is HTML-escaped by the page, so escape before matching.
    assert_contains(cgi.escape(name), response.body)
    assert_contains(get_current_user_email(), response.body)
    return response
def view_registration(browser):
    """Loads the registration page and sanity-checks its contents."""
    response = browser.get('register')
    assert_contains('What is your name?', response.body)
    # The extension hook for extra registration fields must be present.
    assert_contains_all_of([
        '<!-- reg_form.additional_registration_fields -->'], response.body)
    return response
def register_with_additional_fields(browser, name, data2, data3):
    """Registers a new student with customized registration form."""
    response = browser.get('/')
    # An unregistered user hitting the course root is redirected.
    assert_equals(response.status_int, 302)
    response = view_registration(browser)
    # form01 is the name; form02/form03 are the customized extra fields.
    response.form.set('form01', name)
    response.form.set('form02', data2)
    response.form.set('form03', data3)
    response = browser.submit(response.form)
    assert_contains('Thank you for registering for', response.body)
    check_profile(browser, name)
def view_preview(browser):
    """Views /preview page."""
    response = browser.get('preview')
    assert_contains(' the stakes are high.', response.body)
    assert_contains(
        '<li><p class="gcb-top-content">Pre-course assessment</p></li>',
        response.body)
    # Preview pages carry the preview hooks but never the unit-page hooks.
    assert_contains_none_of(UNIT_HOOK_POINTS, response.body)
    assert_contains_all_of(PREVIEW_HOOK_POINTS, response.body)
    return response
def view_course(browser):
    """Views /course page."""
    response = browser.get('course')
    assert_contains(' the stakes are high.', response.body)
    assert_contains('<a href="assessment?name=Pre">Pre-course assessment</a>',
                    response.body)
    assert_contains(get_current_user_email(), response.body)
    # The course page uses the base template only: base hooks present,
    # unit and preview hooks absent.
    assert_contains_all_of(BASE_HOOK_POINTS, response.body)
    assert_contains_none_of(UNIT_HOOK_POINTS, response.body)
    assert_contains_none_of(PREVIEW_HOOK_POINTS, response.body)
    return response
def view_unit(browser):
    """Views /unit page."""
    response = browser.get('unit?unit=1&lesson=1')
    assert_contains('Unit 1 - Introduction', response.body)
    assert_contains('1.3 How search works', response.body)
    assert_contains('1.6 Finding text on a web page', response.body)
    assert_contains('https://www.youtube.com/embed/1ppwmxidyIE', response.body)
    assert_contains(get_current_user_email(), response.body)
    # Unit pages carry base and unit hooks, but not preview hooks.
    assert_contains_all_of(BASE_HOOK_POINTS, response.body)
    assert_contains_all_of(UNIT_HOOK_POINTS, response.body)
    assert_contains_none_of(PREVIEW_HOOK_POINTS, response.body)
    return response
def view_activity(browser):
    """Views the activity page for unit 1, lesson 2."""
    response = browser.get('activity?unit=1&lesson=2')
    assert_contains('<script src="assets/js/activity-1.2.js"></script>',
                    response.body)
    assert_contains(get_current_user_email(), response.body)
    return response
def view_announcements(browser):
    """Views the /announcements page as the current user."""
    response = browser.get('announcements')
    assert_equals(response.status_int, 200)
    assert_contains(get_current_user_email(), response.body)
    return response
def view_my_profile(browser):
    """Views the student profile page (student/home)."""
    response = browser.get('student/home')
    assert_contains('Date enrolled', response.body)
    assert_contains(get_current_user_email(), response.body)
    return response
def view_forum(browser):
    """Views the /forum page and checks the embedded forum iframe wiring."""
    response = browser.get('forum')
    assert_contains('document.getElementById("forum_embed").src =',
                    response.body)
    assert_contains(get_current_user_email(), response.body)
    return response
def view_assessments(browser):
    """Loads each standard assessment page and sanity-checks it."""
    for name in ('Pre', 'Mid', 'Fin'):
        response = browser.get('assessment?name=%s' % name)
        # Each assessment page must reference its own JS bundle.
        assert 'assets/js/assessment-%s.js' % name in response.body
        assert_equals(response.status_int, 200)
        assert_contains(get_current_user_email(), response.body)
def submit_assessment(browser, unit_id, args, base='', presubmit_checks=True):
    """Submits an assessment.

    Args:
        browser: the TestBase instance used to issue HTTP requests.
        unit_id: string. Name of the assessment being submitted.
        args: dict. Form values to POST; the XSRF token is added in place.
        base: string. Optional URL prefix for the course.
        presubmit_checks: bool. If True, also verify the assessment JS loads.

    Returns:
        The response to the POST of the answers.
    """
    response = browser.get('%s/assessment?name=%s' % (base, unit_id))
    if presubmit_checks:
        assert_contains(
            '<script src="assets/js/assessment-%s.js"></script>' % unit_id,
            response.body)
        js_response = browser.get(
            '%s/assets/js/assessment-%s.js' % (base, unit_id))
        assert_equals(js_response.status_int, 200)
    # Extract XSRF token from the page.
    match = re.search(r'assessmentXsrfToken = [\']([^\']+)', response.body)
    assert match
    xsrf_token = match.group(1)
    args['xsrf_token'] = xsrf_token
    response = browser.post('%s/answer' % base, args)
    assert_equals(response.status_int, 200)
    return response
def request_new_review(browser, unit_id, base='', expected_status_code=302):
    """Requests a new assignment to review.

    Args:
        browser: the TestBase instance used to issue HTTP requests.
        unit_id: string. Id of the peer-review unit.
        base: string. Optional URL prefix for the course.
        expected_status_code: int. Status expected from the POST; 302 means a
            review was assigned and the redirect to it is followed.

    Returns:
        The final response: the review page on success, the POST response
        otherwise.
    """
    response = browser.get('%s/reviewdashboard?unit=%s' % (base, unit_id))
    assert_contains('Assignments for your review', response.body)
    # Extract XSRF token from the page.
    match = re.search(
        r'<input type="hidden" name="xsrf_token"\s* value="([^"]*)">',
        response.body)
    assert match
    xsrf_token = match.group(1)
    args = {'xsrf_token': xsrf_token}
    expect_errors = (expected_status_code not in [200, 302])
    response = browser.post(
        '%s/reviewdashboard?unit=%s' % (base, unit_id), args,
        expect_errors=expect_errors)
    assert_equals(response.status_int, expected_status_code)
    if expected_status_code == 302:
        # Fix: removed a duplicated status assertion here; the status was
        # already checked just above. Follow the redirect to the new review.
        assert_contains(
            'review?unit=%s' % unit_id, response.location)
        response = browser.get(response.location)
        assert_contains('Assignment to review', response.body)
    return response
def view_review(
    browser, unit_id, review_step_key, base='', expected_status_code=200):
    """View a review page."""
    expect_errors = expected_status_code != 200
    review_url = '%s/review?unit=%s&key=%s' % (base, unit_id, review_step_key)
    response = browser.get(review_url, expect_errors=expect_errors)
    assert_equals(response.status_int, expected_status_code)
    if expected_status_code == 200:
        assert_contains('Assignment to review', response.body)
    return response
def submit_review(
    browser, unit_id, review_step_key, args, base='', presubmit_checks=True):
    """Submits a review.

    Args:
        browser: the TestBase instance used to issue HTTP requests.
        unit_id: string. Id of the peer-review unit.
        review_step_key: string. Key of the review step being answered.
        args: dict. Review form values; xsrf_token, key and unit_id are
            added in place before posting.
        base: string. Optional URL prefix for the course.
        presubmit_checks: bool. If True, also verify the review JS loads.

    Returns:
        The response to the POST of the review.
    """
    response = browser.get(
        '%s/review?unit=%s&key=%s' % (base, unit_id, review_step_key))
    if presubmit_checks:
        assert_contains(
            '<script src="assets/js/review-%s.js"></script>' % unit_id,
            response.body)
        js_response = browser.get(
            '%s/assets/js/review-%s.js' % (base, unit_id))
        assert_equals(js_response.status_int, 200)
    # Extract XSRF token from the page.
    match = re.search(r'assessmentXsrfToken = [\']([^\']+)', response.body)
    assert match
    xsrf_token = match.group(1)
    args['xsrf_token'] = xsrf_token
    args['key'] = review_step_key
    args['unit_id'] = unit_id
    response = browser.post('%s/review' % base, args)
    assert_equals(response.status_int, 200)
    return response
def add_reviewer(browser, unit_id, reviewee_email, reviewer_email):
    """Adds a reviewer to a submission."""
    url_params = {
        'action': 'edit_assignment',
        'reviewee_id': reviewee_email,
        'unit_id': unit_id,
    }
    # The edit_assignment dashboard page carries the XSRF token needed below.
    response = browser.get('/dashboard?%s' % urllib.urlencode(url_params))
    # Extract XSRF token from the page.
    match = re.search(
        r'<input type="hidden" name="xsrf_token"\s* value="([^"]*)">',
        response.body)
    assert match
    xsrf_token = match.group(1)
    args = {
        'xsrf_token': xsrf_token,
        'reviewer_id': reviewer_email,
        'reviewee_id': reviewee_email,
        'unit_id': unit_id,
    }
    response = browser.post('/dashboard?action=add_reviewer', args)
    return response
def change_name(browser, new_name):
    """Changes the enrolled student's display name via the profile page."""
    response = browser.get('student/home')
    response.form.set('name', new_name)
    response = browser.submit(response.form)
    # A successful rename responds with a redirect.
    assert_equals(response.status_int, 302)
    check_profile(browser, new_name)
def unregister(browser):
    """Unenrolls the current student via the profile page's Unenroll link."""
    response = browser.get('student/home')
    response = browser.click(response, 'Unenroll')
    assert_contains('to unenroll from', response.body)
    browser.submit(response.form)
class Permissions(object):
    """Defines who can see what."""

    @classmethod
    def get_logged_out_allowed_pages(cls):
        """Returns all pages that a logged-out user can see."""
        return [view_preview]

    @classmethod
    def get_logged_out_denied_pages(cls):
        """Returns all pages that a logged-out user can't see."""
        return [view_announcements, view_forum, view_course, view_assessments,
                view_unit, view_activity, view_my_profile, view_registration]

    @classmethod
    def get_enrolled_student_allowed_pages(cls):
        """Returns all pages that a logged-in, enrolled student can see."""
        return [view_announcements, view_forum, view_course,
                view_assessments, view_unit, view_activity, view_my_profile]

    @classmethod
    def get_enrolled_student_denied_pages(cls):
        """Returns all pages that a logged-in, enrolled student can't see."""
        return [view_registration, view_preview]

    @classmethod
    def get_unenrolled_student_allowed_pages(cls):
        """Returns all pages that a logged-in, unenrolled student can see."""
        return [view_registration, view_preview]

    @classmethod
    def get_unenrolled_student_denied_pages(cls):
        """Returns all pages that a logged-in, unenrolled student can't see."""
        # Everything an enrolled student may see, minus the unenrolled set.
        allowed = cls.get_unenrolled_student_allowed_pages()
        return [page for page in cls.get_enrolled_student_allowed_pages()
                if page not in allowed]

    @classmethod
    def assert_logged_out(cls, browser):
        """Check that only pages for a logged-out user are visible."""
        assert_none_fail(browser, cls.get_logged_out_allowed_pages())
        assert_all_fail(browser, cls.get_logged_out_denied_pages())

    @classmethod
    def assert_enrolled(cls, browser):
        """Check that only pages for an enrolled student are visible."""
        assert_none_fail(browser, cls.get_enrolled_student_allowed_pages())
        assert_all_fail(browser, cls.get_enrolled_student_denied_pages())

    @classmethod
    def assert_unenrolled(cls, browser):
        """Check that only pages for an unenrolled student are visible."""
        assert_none_fail(browser, cls.get_unenrolled_student_allowed_pages())
        assert_all_fail(browser, cls.get_unenrolled_student_denied_pages())
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course Builder test suite.
This script runs all functional and unit tests in the Course Builder project.
Here is how to use the script:
- download WebTest Python package from a URL below and put
the files in a folder of your choice, for example: tmp/webtest:
http://pypi.python.org/packages/source/W/WebTest/WebTest-1.4.2.zip
- update your Python path:
PYTHONPATH=$PYTHONPATH:/tmp/webtest
- invoke this test suite from the command line:
# Automatically find and run all Python tests in tests/*.
python tests/suite.py
# Run only tests matching shell glob *_functional_test.py in tests/*.
python tests/suite.py --pattern *_functional_test.py
# Run test method baz in unittest.TestCase Bar found in tests/foo.py.
python tests/suite.py --test_class_name tests.foo.Bar.baz
- review the output to make sure there are no errors or warnings
Good luck!
"""
__author__ = 'Sean Lip'
import argparse
import base64
import os
import shutil
import signal
import subprocess
import sys
import time
import unittest
# The following import is needed in order to add third-party libraries.
import appengine_config # pylint: disable-msg=unused-import
import webtest
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import deferred
from google.appengine.ext import testbed
# Command-line interface for this suite runner; parsed in main().
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
    '--pattern', default='*.py',
    help='shell pattern for discovering files containing tests', type=str)
_PARSER.add_argument(
    '--test_class_name',
    help='optional dotted module name of the test(s) to run', type=str)
_PARSER.add_argument(
    '--integration_server_start_cmd',
    help='script to start an external CB server', type=str)

# Base filesystem location for test data.
TEST_DATA_BASE = '/tmp/experimental/coursebuilder/test-data/'
def empty_environ():
    """Seeds os.environ with the minimal CGI variables tests rely on."""
    defaults = {
        'AUTH_DOMAIN': 'example.com',
        'SERVER_NAME': 'localhost',
        'HTTP_HOST': 'localhost',
        'SERVER_PORT': '8080',
        'USER_EMAIL': '',
        'USER_ID': '',
    }
    for name, value in defaults.items():
        os.environ[name] = value
def iterate_tests(test_suite_or_case):
    """Iterate through all of the test cases in 'test_suite_or_case'."""
    try:
        members = iter(test_suite_or_case)
    except TypeError:
        # Not iterable: this is a single test case, the recursion leaf.
        yield test_suite_or_case
        return
    for member in members:
        for leaf in iterate_tests(member):
            yield leaf
class TestBase(unittest.TestCase):
    """Base class for all Course Builder tests."""

    # Tag a test adds to its TAGS set to request an external CB server.
    REQUIRES_INTEGRATION_SERVER = 1
    INTEGRATION_SERVER_BASE_URL = 'http://localhost:8081'

    def setUp(self):
        super(TestBase, self).setUp()
        # Map of object -> {symbol_string: original_value}
        self._originals = {}

    def tearDown(self):
        self._unswap_all()
        super(TestBase, self).tearDown()

    def swap(self, source, symbol, new):
        """Swaps out source.symbol for a new value.

        Allows swapping of members and methods:

            myobject.foo = 'original_foo'
            self.swap(myobject, 'foo', 'bar')
            self.assertEqual('bar', myobject.foo)
            self.swap(myobject, 'baz', lambda: 'quux')
            self.assertEqual('quux', myobject.baz())

        Swaps are automatically undone in tearDown().

        Args:
            source: object. The source object to swap from.
            symbol: string. The name of the symbol to swap.
            new: object. The new value to swap in.
        """
        if source not in self._originals:
            self._originals[source] = {}
        # Record the original value only on the FIRST swap of this symbol.
        # Membership must be tested with 'in' rather than truthiness of the
        # stored value: a falsy original (None, 0, '', False) would otherwise
        # be overwritten by the intermediate value on a second swap, so
        # _unswap_all() could not restore the true original.
        if symbol not in self._originals[source]:
            self._originals[source][symbol] = getattr(source, symbol)
        setattr(source, symbol, new)

    # Allow protected method names. pylint: disable-msg=g-bad-name
    def _unswap_all(self):
        """Restores every symbol recorded by swap() to its original value."""
        for source, symbol_to_value in self._originals.items():
            for symbol, value in symbol_to_value.items():
                setattr(source, symbol, value)

    def shortDescription(self):
        """Additional information logged during unittest invocation."""
        # Suppress default logging of docstrings. Instead log name/status only.
        return None
class FunctionalTestBase(TestBase):
    """Base class for functional tests."""

    def setUp(self):
        super(FunctionalTestBase, self).setUp()
        # Per-test scratch directory, e.g.
        # TEST_DATA_BASE/tests/functional/tests/MyTestCase.
        module_path = self.__class__.__module__.replace('.', os.sep)
        self.test_tempdir = os.path.join(
            TEST_DATA_BASE, module_path, self.__class__.__name__)
        self.reset_filesystem()

    def tearDown(self):
        self.reset_filesystem(remove_only=True)
        super(FunctionalTestBase, self).tearDown()

    def reset_filesystem(self, remove_only=False):
        """Deletes the scratch directory; recreates it unless remove_only."""
        if os.path.exists(self.test_tempdir):
            shutil.rmtree(self.test_tempdir)
        if remove_only:
            return
        os.makedirs(self.test_tempdir)
class AppEngineTestBase(FunctionalTestBase):
    """Base class for tests that require App Engine services."""

    def getApp(self): # pylint: disable-msg=g-bad-name
        """Returns the main application to be tested."""
        raise Exception('Not implemented.')

    def setUp(self): # pylint: disable-msg=g-bad-name
        """Builds the WSGI app under test and activates App Engine stubs."""
        super(AppEngineTestBase, self).setUp()
        empty_environ()
        # setup an app to be tested
        self.testapp = webtest.TestApp(self.getApp())
        self.testbed = testbed.Testbed()
        # The testbed must be activated before any stub is initialized.
        self.testbed.activate()
        # configure datastore policy to emulate instantaneously and globally
        # consistent HRD; we also patch dev_appserver in main.py to run under
        # the same policy
        policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
            probability=1)
        # declare any relevant App Engine service stubs here
        self.testbed.init_user_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_datastore_v3_stub(consistency_policy=policy)
        self.testbed.init_taskqueue_stub()
        # Keep a handle on the taskqueue stub so tests can drain deferred
        # work via execute_all_deferred_tasks().
        self.taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)

    def tearDown(self): # pylint: disable-msg=g-bad-name
        """Deactivates all stubs before the filesystem cleanup runs."""
        self.testbed.deactivate()
        super(AppEngineTestBase, self).tearDown()

    def execute_all_deferred_tasks(self, queue_name='default'):
        """Executes all pending deferred tasks."""
        # Each queued task body is the base64-encoded payload the deferred
        # library knows how to run.
        for task in self.taskq.GetTasks(queue_name):
            deferred.run(base64.b64decode(task['body']))
def create_test_suite(parsed_args):
    """Loads all requested test suites.

    By default, loads all unittest.TestCases found under the project root's
    tests/ directory.

    Args:
        parsed_args: argparse.Namespace. Processed command-line arguments.

    Returns:
        unittest.TestSuite. The test suite populated with all tests to run.
    """
    loader = unittest.TestLoader()
    if not parsed_args.test_class_name:
        # No explicit test requested: discover everything matching the glob.
        return loader.discover(
            os.path.dirname(__file__), pattern=parsed_args.pattern)
    return loader.loadTestsFromName(parsed_args.test_class_name)
def start_integration_server(integration_server_start_cmd):
print 'Starting external server: %s' % integration_server_start_cmd
server = subprocess.Popen(integration_server_start_cmd)
time.sleep(3) # Wait for server to start up
return server
def stop_integration_server(server):
    """Kills dev_appserver.py and its orphaned runtime child process."""
    server.kill()  # dev_appserver.py itself.
    # The new dev appserver starts a _python_runtime.py process that isn't
    # captured by start_integration_server and so doesn't get killed. Until
    # it's done, our tests will never complete so we kill it manually.
    pgrep = subprocess.Popen(
        ['pgrep', '-f', '_python_runtime.py'], stdout=subprocess.PIPE)
    runtime_pid = int(pgrep.communicate()[0][:-1])
    os.kill(runtime_pid, signal.SIGKILL)
def fix_sys_path():
    """Fix the sys.path to include GAE extra paths."""
    import dev_appserver  # pylint: disable=C6204

    # dev_appserver.fix_sys_path() prepends GAE paths to sys.path and hides
    # our classes like 'tests' behind other modules that have 'tests'.
    # Here, unlike dev_appserver, we append the path instead of prepending
    # it, so that our classes come first.
    sys.path.extend(dev_appserver.EXTRA_PATHS)
def main():
    """Starts in-process server and runs all test cases in this module."""
    fix_sys_path()
    args = _PARSER.parse_args()
    suite = create_test_suite(args)

    # Collect the union of TAGS declared by every test in the suite.
    tags = set()
    for case in iterate_tests(suite):
        tags.update(getattr(case, 'TAGS', ()))

    # Only pay the cost of an external server when some test asked for it.
    server = None
    if TestBase.REQUIRES_INTEGRATION_SERVER in tags:
        server = start_integration_server(args.integration_server_start_cmd)

    result = unittest.TextTestRunner(verbosity=2).run(suite)

    if server:
        stop_integration_server(server)

    if result.errors or result.failures:
        raise Exception(
            'Test suite failed: %s errors, %s failures of '
            ' %s tests run.' % (
                len(result.errors), len(result.failures), result.testsRun))

    import tests.functional.actions as actions  # pylint: disable-msg=g-import-not-at-top

    url_count = len(actions.UNIQUE_URLS_FOUND.keys())
    result.stream.writeln('INFO: Unique URLs found: %s' % url_count)
    result.stream.writeln('INFO: All %s tests PASSED!' % result.testsRun)
if __name__ == '__main__':
    # Presumably forces the process-wide default string encoding to ascii so
    # tests behave identically everywhere -- see appengine_config for details.
    appengine_config.gcb_force_default_encoding('ascii')
    main()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of converters between db models, Python and JSON dictionaries, etc."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import base64
import datetime
import json
from google.appengine.ext import db
# Wire format used for 'date'-typed JSON fields.
JSON_DATE_FORMAT = '%Y/%m/%d'
# Schema attribute types json_to_dict knows how to convert.
JSON_TYPES = ['string', 'date', 'text', 'html', 'boolean', 'integer', 'array']
# Prefix to add to all JSON responses to guard against XSSI. Must be kept in
# sync with modules/oeditor/oeditor.html.
_JSON_XSSI_PREFIX = ")]}'\n"
# Python types passed through to JSON untouched.
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
# Non-simple types given bespoke conversions by the entity/dict converters.
SUPPORTED_TYPES = (db.GeoPt, datetime.date)
def dict_to_json(source_dict, unused_schema):
    """Converts Python dictionary into JSON dictionary using schema."""
    output = {}
    for key, value in source_dict.items():
        if value is None or isinstance(value, SIMPLE_TYPES):
            converted = value
        elif isinstance(value, datetime.date):
            converted = value.strftime(JSON_DATE_FORMAT)
        elif isinstance(value, db.GeoPt):
            converted = {'lat': value.lat, 'lon': value.lon}
        else:
            raise ValueError(
                'Failed to encode key \'%s\' with value \'%s\'.' % (key, value))
        output[key] = converted
    return output
def dumps(*args, **kwargs):
    """Wrapper around json.dumps.

    Adds no behavior of its own; it exists so this module can serve as a
    drop-in replacement for json.dumps|loads. Clients should never use
    json.dumps|loads directly. See usage docs at
    http://docs.python.org/2/library/json.html.

    Args:
        *args: positional arguments delegated to json.dumps.
        **kwargs: keyword arguments delegated to json.dumps.

    Returns:
        string. The converted JSON.
    """
    return json.dumps(*args, **kwargs)
def loads(s, prefix=_JSON_XSSI_PREFIX, **kwargs):
    """Wrapper around json.loads that handles XSSI-protected responses.

    To prevent XSSI we insert a prefix before our JSON responses during server-
    side rendering. This loads() removes the prefix and should always be used in
    place of json.loads. See usage docs at
    http://docs.python.org/2/library/json.html.

    Args:
        s: str or unicode. JSON contents to convert.
        prefix: string. The XSSI prefix we remove before conversion.
        **kwargs: keyword arguments delegated to json.loads.

    Returns:
        object. Python object reconstituted from the given JSON string.
    """
    if s.startswith(prefix):
        # Remove exactly the prefix. The previous s.lstrip(prefix) was wrong:
        # lstrip strips any leading run of the prefix's *characters* (here
        # ')', ']', '}', "'", '\n'), so it could also eat leading whitespace
        # belonging to the JSON payload itself.
        s = s[len(prefix):]
    return json.loads(s, **kwargs)
def json_to_dict(source_dict, schema):
    """Converts JSON dictionary into Python dictionary using schema."""
    output = {}
    for key, attr in schema['properties'].items():
        # Skip schema elements that don't exist in source.
        if key not in source_dict:
            continue
        attr_type = attr['type']
        if attr_type not in JSON_TYPES:
            raise ValueError('Unsupported JSON type: %s' % attr_type)
        value = source_dict[key]
        if attr_type == 'date':
            output[key] = datetime.datetime.strptime(
                value, JSON_DATE_FORMAT).date()
        elif attr_type == 'array':
            # Convert each element recursively against the item subschema.
            subschema = attr['items']
            output[key] = [json_to_dict(item, subschema) for item in value]
        else:
            output[key] = value
    return output
def entity_to_dict(entity, force_utf_8_encoding=False):
    """Puts model object attributes into a Python dictionary.

    Args:
        entity: db.Model. The datastore entity to convert.
        force_utf_8_encoding: bool. When True, string values are re-encoded
            as utf-8; values that cannot be decoded as text are wrapped in a
            {'type': 'binary', 'encoding': 'base64', ...} descriptor instead.

    Returns:
        dict. Property name -> value, plus the entity key under 'key'.

    Raises:
        ValueError: if a property value is of an unsupported type.
    """
    output = {}
    for key, prop in entity.properties().iteritems():
        value = getattr(entity, key)
        if value is None or isinstance(value, SIMPLE_TYPES) or isinstance(
            value, SUPPORTED_TYPES):
            output[key] = value

            # some values are raw bytes; force utf-8 or base64 encoding
            if force_utf_8_encoding and isinstance(value, basestring):
                try:
                    # NOTE(review): under Python 2, encode() on a byte str
                    # first implicitly decodes it as ascii; the except below
                    # is what catches non-ascii binary content.
                    output[key] = value.encode('utf-8')
                except UnicodeDecodeError:
                    output[key] = {
                        'type': 'binary',
                        'encoding': 'base64',
                        'content': base64.urlsafe_b64encode(value)}
        else:
            raise ValueError('Failed to encode: %s' % prop)

    # explicitly add entity key as a 'string' attribute
    output['key'] = str(entity.key())
    return output
def dict_to_entity(entity, source_dict):
    """Sets model object attributes from a Python dictionary."""
    for key, value in source_dict.items():
        supported = value is None or isinstance(
            value, SIMPLE_TYPES) or isinstance(value, SUPPORTED_TYPES)
        if not supported:
            raise ValueError('Failed to encode: %s' % value)
        setattr(entity, key, value)
    return entity
def string_to_value(string, value_type):
    """Converts string representation to a value."""
    if value_type == str:
        # Falsy inputs (None, '') normalize to the empty string.
        return string if string else ''
    if value_type == bool:
        # Only these spellings count as True; everything else is False.
        return string in ('1', 'True', 1)
    if value_type == int or value_type == long:
        # Falsy inputs (None, '', 0) normalize to zero.
        return long(string) if string else 0
    raise ValueError('Unknown type: %s' % value_type)
def value_to_string(value, value_type):
    """Converts value to a string representation."""
    if value_type == str:
        return value
    if value_type == bool:
        return 'True' if value else 'False'
    if value_type == int or value_type == long:
        return str(value)
    raise ValueError('Unknown type: %s' % value_type)
def dict_to_instance(adict, instance, defaults=None):
    """Populates instance attributes using data dictionary.

    Only non-underscore attributes already present on the instance are set;
    missing keys fall back to defaults, and otherwise KeyError is raised.
    """
    for key in instance.__dict__:
        if key.startswith('_'):
            continue  # private attributes are left untouched
        if key in adict:
            setattr(instance, key, adict[key])
        elif defaults and key in defaults:
            setattr(instance, key, defaults[key])
        else:
            raise KeyError(key)
def instance_to_dict(instance):
    """Populates data dictionary from instance attrs."""
    public_keys = [
        key for key in instance.__dict__ if not key.startswith('_')]
    return dict((key, getattr(instance, key)) for key in public_keys)
def send_json_response(
    handler, status_code, message, payload_dict=None, xsrf_token=None):
    """Formats and sends out a JSON REST response envelope and body."""
    headers = handler.response.headers
    headers['Content-Type'] = 'application/javascript; charset=utf-8'
    headers['X-Content-Type-Options'] = 'nosniff'
    headers['Content-Disposition'] = 'attachment'
    envelope = {'status': status_code, 'message': message}
    if payload_dict:
        envelope['payload'] = dumps(payload_dict)
    if xsrf_token:
        envelope['xsrf_token'] = xsrf_token
    handler.response.write(_JSON_XSSI_PREFIX + dumps(envelope))
def send_json_file_upload_response(handler, status_code, message):
    """Formats and sends out a JSON REST response envelope and body.

    NOTE: This method has lowered protections against XSSI (compared to
    send_json_response) and so it MUST NOT be used with dynamic data. Use ONLY
    constant data originating entirely on the server as arguments.

    Args:
        handler: the request handler.
        status_code: the HTTP status code for the response.
        message: the text of the message - must not be dynamic data.
    """
    # The correct MIME type for JSON is application/json but there are issues
    # with our AJAX file uploader in MSIE which require text/plain instead.
    # Default the header to '' so a request without a user-agent does not
    # raise TypeError from the 'in' containment test.
    if 'MSIE' in handler.request.headers.get('user-agent', ''):
        content_type = 'text/plain; charset=utf-8'
    else:
        content_type = 'application/javascript; charset=utf-8'
    handler.response.headers['Content-Type'] = content_type
    handler.response.headers['X-Content-Type-Options'] = 'nosniff'
    response = {}
    response['status'] = status_code
    response['message'] = message
    handler.response.write(_JSON_XSSI_PREFIX + dumps(response))
class JsonFile(object):
    """A streaming file-ish interface for JSON content.

    Usage:

        writer = JsonFile('path')
        writer.open('w')
        writer.write(json_serializable_python_object)  # We serialize for you.
        writer.write(another_json_serializable_python_object)
        writer.close()  # Must close before read.
        reader = JsonFile('path')
        reader.open('r')  # Only 'r' and 'w' are supported.
        for entity in reader:
            do_something_with(entity)  # We deserialize back to Python for you.
        self.reader.reset()  # Reset read pointer to head.
        contents = self.reader.read()  # Returns {'rows': [...]}.
        for entity in contents['rows']:
            do_something_with(entity)  # Again, deserialized back to Python.
        reader.close()

    with syntax is not supported. Cannot be used inside the App Engine
    container where the filesystem is read-only.

    Internally, each call to write will take a Python object, serialize it, and
    write the contents as one line to the json file. On __iter__ we deserialize
    one line at a time, generator-style, to avoid OOM unless serialization/de-
    serialization of one object exhausts memory.
    """

    # When writing to files use \n instead of os.linesep; see
    # http://docs.python.org/2/library/os.html.
    _LINE_TEMPLATE = ',\n %s'
    _MODE_READ = 'r'
    _MODE_WRITE = 'w'
    _MODES = frozenset([_MODE_READ, _MODE_WRITE])
    # Header/footer that make the whole file one parseable JSON document.
    _PREFIX = '{"rows": ['
    _SUFFIX = ']}'

    def __init__(self, path):
        # True until the first write(); controls leading-comma placement.
        self._first = True
        self._file = None
        self._path = path

    def __iter__(self):
        assert self._file
        return self

    def close(self):
        """Closes the file; must close before read."""
        assert self._file
        if not self._file.closed:  # Like file, allow multiple close calls.
            if self.mode == self._MODE_WRITE:
                # Terminate the JSON document so the file parses as a whole.
                self._file.write('\n' + self._SUFFIX)
            self._file.close()

    @property
    def mode(self):
        """Returns the mode the file was opened in."""
        assert self._file
        return self._file.mode

    @property
    def name(self):
        """Returns string name of the file."""
        assert self._file
        return self._file.name

    def next(self):
        """Retrieves the next line and deserializes it into a Python object."""
        assert self._file
        line = self._file.readline()
        if line.startswith(self._PREFIX):
            # Skip the document header written by open('w').
            line = self._file.readline()
        if line.endswith(self._SUFFIX):
            # Reached the document footer: iteration is complete.
            raise StopIteration()
        # Drop the trailing newline and the inter-row comma, leaving one
        # self-contained JSON value per line.
        line = line.strip()
        if line.endswith(','):
            line = line[:-1]
        return loads(line)

    def open(self, mode):
        """Opens the file in the given mode string ('r, 'w' only)."""
        assert not self._file
        assert mode in self._MODES
        self._file = open(self._path, mode)
        if self.mode == self._MODE_WRITE:
            self._file.write(self._PREFIX)

    def read(self):
        """Reads the file into a single Python object; may exhaust memory.

        Returns:
            dict. Format: {'rows': [...]} where the value is a list of de-
            serialized objects passed to write.
        """
        assert self._file
        return loads(self._file.read())

    def reset(self):
        """Resets file's position to head."""
        assert self._file
        self._file.seek(0)

    def write(self, python_object):
        """Writes serialized JSON representation of python_object to file.

        Args:
            python_object: object. Contents to write. Must be JSON-serializable.

        Raises:
            ValueError: if python_object cannot be JSON-serialized.
        """
        assert self._file
        template = self._LINE_TEMPLATE
        if self._first:
            # First row: drop the template's leading comma.
            template = template[1:]
            self._first = False
        self._file.write(template % dumps(python_object))
def run_all_unit_tests():
    """Runs all unit tests."""
    # value_to_string: bool rendering.
    assert value_to_string(True, bool) == 'True'
    assert value_to_string(False, bool) == 'False'
    assert value_to_string(None, bool) == 'False'

    # string_to_value: accepted truthy bool spellings.
    for truthy in ('True', '1', 1):
        assert string_to_value(truthy, bool)
    # string_to_value: everything else maps to False.
    for falsy in ('False', '0', '5', 0, 5, None):
        assert not string_to_value(falsy, bool)

    # string_to_value: integer conversions.
    assert string_to_value('15', int) == 15
    assert string_to_value(15, int) == 15
    assert string_to_value(None, int) == 0

    # string_to_value: string conversions.
    assert string_to_value('foo', str) == 'foo'
    assert string_to_value(None, str) == ''


if __name__ == '__main__':
    run_all_unit_tests()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for managing long running jobs."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from datetime import datetime
import logging
import time
import traceback
import entities
import transforms
from google.appengine.api import namespace_manager
from google.appengine.ext import db
from google.appengine.ext import deferred
# A job can be in one of these states.
STATUS_CODE_NONE = 0  # created (or reset) but not yet started
STATUS_CODE_STARTED = 1  # currently running
STATUS_CODE_COMPLETED = 2  # finished successfully; output holds the result
STATUS_CODE_FAILED = 3  # finished with an error; output holds the traceback
class DurableJob(object):
    """A class that represents a deferred durable job at runtime."""

    def __init__(self, app_context):
        # Job state is stored per course namespace; the name makes the
        # backing entity unique per (job class, namespace) pair.
        self._namespace = app_context.get_namespace_name()
        self._job_name = 'job-%s-%s' % (
            self.__class__.__name__, self._namespace)

    def run(self):
        """Override this method to provide actual business logic."""

    def main(self):
        """Main method of the deferred task."""
        logging.info('Job started: %s', self._job_name)
        time_started = time.time()
        old_namespace = namespace_manager.get_namespace()
        try:
            # Run the job inside the course's namespace so all datastore
            # access is scoped correctly.
            namespace_manager.set_namespace(self._namespace)
            try:
                result = self.run()
                DurableJobEntity.complete_job(
                    self._job_name, transforms.dumps(result),
                    long(time.time() - time_started))
                logging.info('Job completed: %s', self._job_name)
            except Exception as e:
                logging.error(traceback.format_exc())
                logging.error('Job failed: %s\n%s', self._job_name, e)
                DurableJobEntity.fail_job(
                    self._job_name, traceback.format_exc(),
                    long(time.time() - time_started))
                # PermanentTaskFailure tells the deferred library not to
                # retry; the failure is already recorded on the entity above.
                raise deferred.PermanentTaskFailure(e)
        finally:
            # Always restore the caller's namespace, success or failure.
            namespace_manager.set_namespace(old_namespace)

    def submit(self):
        """Submits this job for deferred execution."""
        DurableJobEntity.create_job(self._job_name)
        deferred.defer(self.main)

    def load(self):
        """Loads the last known state of this job from the datastore."""
        return DurableJobEntity.get_by_name(self._job_name)
class DurableJobEntity(entities.BaseEntity):
    """A class that represents a persistent database entity of durable job."""

    updated_on = db.DateTimeProperty(indexed=True)
    execution_time_sec = db.IntegerProperty(indexed=False)
    status_code = db.IntegerProperty(indexed=False)
    output = db.TextProperty(indexed=False)

    @classmethod
    def get_by_name(cls, name):
        """Fetches the entity with the given key name, or None."""
        return DurableJobEntity.get_by_key_name(name)

    @classmethod
    def update(cls, name, status_code, output, execution_time_sec):
        """Updates job state in a datastore."""

        def txn():
            job = DurableJobEntity.get_by_name(name)
            if not job:
                logging.error('Job was not started or was deleted: %s', name)
                return
            job.updated_on = datetime.now()
            job.execution_time_sec = execution_time_sec
            job.status_code = status_code
            job.output = output
            job.put()

        db.run_in_transaction(txn)

    @classmethod
    def create_job(cls, name):
        """Creates a new job entity or resets the state of an existing one."""

        def txn():
            job = DurableJobEntity.get_by_name(name)
            if not job:
                job = DurableJobEntity(key_name=name)
            job.updated_on = datetime.now()
            job.execution_time_sec = 0
            job.status_code = STATUS_CODE_NONE
            job.output = None
            job.put()

        db.run_in_transaction(txn)

    @classmethod
    def start_job(cls, name):
        """Marks the job as started."""
        return cls.update(name, STATUS_CODE_STARTED, None, 0)

    @classmethod
    def complete_job(cls, name, output, execution_time_sec):
        """Marks the job as completed and stores its serialized result."""
        return cls.update(
            name, STATUS_CODE_COMPLETED, output, execution_time_sec)

    @classmethod
    def fail_job(cls, name, output, execution_time_sec):
        """Marks the job as failed and stores the error text as output."""
        return cls.update(name, STATUS_CODE_FAILED, output, execution_time_sec)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models and helper utilities for the review workflow."""
__author__ = [
'johncox@google.com (John Cox)',
'sll@google.com (Sean Lip)',
]
import entities
import models
import transforms
from google.appengine.ext import db
class KeyProperty(db.StringProperty):
    """A property that stores a datastore key.

    App Engine's db.ReferenceProperty is dangerous because accessing a
    ReferenceProperty on a model instance implicitly causes an RPC. We always
    want to know about and be in control of our RPCs, so we use this property
    instead, store a key, and manually make datastore calls when necessary.
    This is analogous to the approach ndb takes, and it also allows us to do
    validation against a key's kind (see __init__).

    Keys are stored as indexed strings internally. Usage:

        class Foo(db.Model):
            pass

        class Bar(db.Model):
            foo_key = KeyProperty(kind=Foo)  # Validates key is of kind 'Foo'.

        foo_key = Foo().put()
        bar = Bar(foo_key=foo_key)
        bar_key = bar.put()
        foo = db.get(bar.foo_key)
    """

    def __init__(self, *args, **kwargs):
        """Constructs a new KeyProperty.

        Args:
            *args: positional arguments passed to superclass.
            **kwargs: keyword arguments passed to superclass. Additionally may
                contain kind, which if passed will be a string used to validate
                key kind. If omitted, any kind is considered valid.
        """
        # 'kind' must be popped before delegating: the parent ctor does not
        # accept it.
        kind = kwargs.pop('kind', None)
        super(KeyProperty, self).__init__(*args, **kwargs)
        self._kind = kind

    def validate(self, value):
        """Validates passed db.Key value, validating kind passed to ctor."""
        # NOTE(review): the parent validator is given str(value), so a None
        # value arrives as the non-empty string 'None' -- a required=True
        # property would not reject None here. Confirm this is intentional
        # before relying on required semantics.
        super(KeyProperty, self).validate(str(value))
        if value is None:  # Nones are valid iff they pass the parent validator.
            return value
        if not isinstance(value, db.Key):
            raise db.BadValueError(
                'Value must be of type db.Key; got %s' % type(value))
        if self._kind and value.kind() != self._kind:
            raise db.BadValueError(
                'Key must be of kind %s; was %s' % (self._kind, value.kind()))
        return value
# For many classes we define both a _DomainObject subclass and a db.Model.
# When possible it is best to use the domain object, since db.Model carries with
# it the datastore API and allows clients to bypass business logic by making
# direct datastore calls.
class BaseEntity(entities.BaseEntity):
    """Abstract base for entities in the review workflow."""

    @classmethod
    def key_name(cls):
        """Builds the key_name string subclasses pass to their constructor."""
        raise NotImplementedError
class Review(BaseEntity):
    """Datastore model for a student review of a Submission."""

    # Contents of the student's review. Max size is 1MB.
    contents = db.TextProperty()
    # Key of the Student who wrote this review.
    reviewer_key = KeyProperty(kind=models.Student.kind())
    # Identifier of the unit this review is a part of.
    unit_id = db.StringProperty(required=True)

    def __init__(self, *args, **kwargs):
        """Constructs a new Review; the key_name is always derived."""
        assert not kwargs.get('key_name'), (
            'Setting key_name manually is not supported')
        reviewer_key = kwargs.get('reviewer_key')
        unit_id = kwargs.get('unit_id')
        assert reviewer_key, 'Missing required property: reviewer_key'
        assert unit_id, 'Missing required_property: unit_id'
        kwargs['key_name'] = self.key_name(unit_id, reviewer_key)
        super(Review, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, unit_id, reviewer_key):
        """Creates a key_name string for datastore operations.

        In order to work with the review subsystem, entities must have a key
        name populated from this method.

        Args:
            unit_id: string. The id of the unit this review belongs to.
            reviewer_key: db.Key of models.models.Student. The author of
                the review.

        Returns:
            String.
        """
        # NOTE(review): this interpolates the full stringified key, whereas
        # Submission.key_name uses reviewee_key.id_or_name(); confirm the
        # asymmetry is intentional.
        return '(review:%s:%s)' % (unit_id, reviewer_key)
class Submission(BaseEntity):
    """Datastore model for a student work submission."""

    # Contents of the student submission. Max size is 1MB.
    contents = db.TextProperty()
    # Key of the Student who wrote this submission.
    reviewee_key = KeyProperty(kind=models.Student.kind())
    # Identifier of the unit this review is a part of.
    unit_id = db.StringProperty(required=True)

    def __init__(self, *args, **kwargs):
        """Constructs a new Submission; the key_name is always derived."""
        assert not kwargs.get('key_name'), (
            'Setting key_name manually is not supported')
        reviewee_key = kwargs.get('reviewee_key')
        unit_id = kwargs.get('unit_id')
        assert reviewee_key, 'Missing required property: reviewee_key'
        assert unit_id, 'Missing required_property: unit_id'
        kwargs['key_name'] = self.key_name(unit_id, reviewee_key)
        super(Submission, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, unit_id, reviewee_key):
        """Creates a key_name string for datastore operations.

        In order to work with the review subsystem, entities must have a key
        name populated from this method.

        Args:
            unit_id: string. The id of the unit this submission belongs to.
            reviewee_key: db.Key of models.models.Student. The author of
                the submission.

        Returns:
            String.
        """
        # NOTE(review): uses reviewee_key.id_or_name() while Review.key_name
        # embeds the full stringified key; confirm the asymmetry is
        # intentional.
        return '(submission:%s:%s)' % (unit_id, reviewee_key.id_or_name())

    @classmethod
    def get_key(cls, unit_id, reviewee_key):
        """Returns a db.Key for a submission."""
        return db.Key.from_path(
            cls.kind(), cls.key_name(unit_id, reviewee_key))

    @classmethod
    def write(cls, unit_id, reviewee_key, contents):
        """Updates or creates a student submission, and returns the key.

        Args:
            unit_id: string. The id of the unit this submission belongs to.
            reviewee_key: db.Key of models.models.Student. The author of the
                submission.
            contents: object. The contents of the submission, as a Python
                object. This will be JSON-transformed before it is stored.

        Returns:
            db.Key of Submission.
        """
        return cls(
            unit_id=str(unit_id), reviewee_key=reviewee_key,
            contents=transforms.dumps(contents)
        ).put()

    @classmethod
    def get_contents(cls, unit_id, reviewee_key):
        """Returns the de-JSONified contents of a submission."""
        submission_key = cls.get_key(unit_id, reviewee_key)
        return cls.get_contents_by_key(submission_key)

    @classmethod
    def get_contents_by_key(cls, submission_key):
        """Returns the contents of a submission, given a db.Key."""
        # entities.get increments perf counters; returns None when missing.
        submission = entities.get(submission_key)
        return transforms.loads(submission.contents) if submission else None
class StudentWorkUtils(object):
    """A utility class for processing student work objects."""

    @classmethod
    def get_answer_list(cls, submission):
        """Compiles a list of the student's answers from a submission."""
        if not submission:
            return []
        answers = []
        for position, item in enumerate(submission):
            # Check that the indices within the submission are valid.
            assert item['index'] == position
            answers.append(item['value'])
        return answers
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for managing persistent entities."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from counters import PerfCounter
from google.appengine.ext import db
# datastore performance counters, incremented by the wrappers below and by
# the BaseEntity CRUD overrides
DB_QUERY = PerfCounter(
    'gcb-models-db-query',
    'A number of times a query()/all() was executed on a datastore.')
DB_GET = PerfCounter(
    'gcb-models-db-get',
    'A number of times an object was fetched from datastore.')
DB_PUT = PerfCounter(
    'gcb-models-db-put',
    'A number of times an object was put into datastore.')
DB_DELETE = PerfCounter(
    'gcb-models-db-delete',
    'A number of times an object was deleted from datastore.')
def delete(keys):
    """Wrapper around db.delete that counts entities we attempted to delete."""
    attempted = _count(keys)
    DB_DELETE.inc(increment=attempted)
    return db.delete(keys)
def get(keys):
    """Wrapper around db.get that counts entities we attempted to get."""
    attempted = _count(keys)
    DB_GET.inc(increment=attempted)
    return db.get(keys)
def put(keys):
    """Wrapper around db.put that counts entities we attempted to put."""
    attempted = _count(keys)
    DB_PUT.inc(increment=attempted)
    return db.put(keys)
def _count(keys):
# App engine accepts key or list of key; count entities found.
return len(keys) if isinstance(keys, (list, tuple)) else 1
class BaseEntity(db.Model):
    """A common class to all datastore entities.

    Each CRUD entry point increments the matching performance counter
    before delegating to db.Model.
    """

    @classmethod
    def all(cls, **kwds):
        DB_QUERY.inc()
        return super(BaseEntity, cls).all(**kwds)

    @classmethod
    def get(cls, keys):
        DB_GET.inc()
        return super(BaseEntity, cls).get(keys)

    @classmethod
    def get_by_key_name(cls, key_names):
        DB_GET.inc()
        return super(BaseEntity, cls).get_by_key_name(key_names)

    def put(self):
        DB_PUT.inc()
        return super(BaseEntity, self).put()

    def delete(self):
        DB_DELETE.inc()
        # NOTE: unlike put(), this does not return the superclass result;
        # db.Model.delete() has no meaningful return value.
        super(BaseEntity, self).delete()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages performance counters of an application and/or its modules."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
def incr_counter_global_value(unused_name, unused_delta):
    """Hook method for global aggregation; the default is a no-op."""
    return None
def get_counter_global_value(unused_name):
    """Hook method for global aggregation; the default knows no global value."""
    return None
class PerfCounter(object):
    """A generic, in-process integer counter."""

    def __init__(self, name, doc_string):
        self._name = name
        self._doc_string = doc_string
        self._value = 0
        # Every counter announces itself to the global registry on creation.
        Registry.registered[self.name] = self

    def inc(
        self, increment=1, context=None):  # pylint: disable-msg=unused-argument
        """Increments value by a given increment."""
        self._value = self._value + increment
        # Mirror the change into the (possibly installed) global aggregator.
        incr_counter_global_value(self._name, increment)

    @property
    def name(self):
        """Unique counter name; used as the registry key."""
        return self._name

    @property
    def doc_string(self):
        """Human-readable description of what is being counted."""
        return self._doc_string

    @property
    def value(self):
        """Value for this process only."""
        return self._value

    @property
    def global_value(self):
        """Value aggregated across all processes."""
        return get_counter_global_value(self._name)
class Registry(object):
    """Holds all registered counters."""

    # Maps counter name -> PerfCounter instance; populated by
    # PerfCounter.__init__ as counters are created.
    registered = {}
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core data model classes."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import logging
import appengine_config
from config import ConfigProperty
import counters
from counters import PerfCounter
from entities import BaseEntity
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
# We want to use memcache for both objects that exist and do not exist in the
# datastore. If object exists we cache its instance, if object does not exist
# we cache this object below.
# NOTE: NO_OBJECT is compared by equality ('NO_OBJECT == value'), not by
# identity, because memcache round-trips deserialize it into a new dict.
NO_OBJECT = {}
# The default amount of time to cache the items for in memcache (5 minutes).
DEFAULT_CACHE_TTL_SECS = 60 * 5
# Global memcache controls.
CAN_USE_MEMCACHE = ConfigProperty(
    'gcb_can_use_memcache', bool, (
        'Whether or not to cache various objects in memcache. For production '
        'this value should be on to enable maximum performance. For '
        'development this value should be off so you can see your changes to '
        'course content instantaneously.'),
    appengine_config.PRODUCTION_MODE)
# performance counters
CACHE_PUT = PerfCounter(
    'gcb-models-cache-put',
    'A number of times an object was put into memcache.')
CACHE_HIT = PerfCounter(
    'gcb-models-cache-hit',
    'A number of times an object was found in memcache.')
CACHE_MISS = PerfCounter(
    'gcb-models-cache-miss',
    'A number of times an object was not found in memcache.')
CACHE_DELETE = PerfCounter(
    'gcb-models-cache-delete',
    'A number of times an object was deleted from memcache.')
class MemcacheManager(object):
    """Class that consolidates all memcache operations.

    Every operation is a no-op (or returns None) when caching is disabled
    via the CAN_USE_MEMCACHE config property; the default namespace is
    applied whenever the caller does not supply one.
    """

    @classmethod
    def get(cls, key, namespace=None):
        """Gets an item from memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return None
        if not namespace:
            namespace = appengine_config.DEFAULT_NAMESPACE_NAME
        value = memcache.get(key, namespace=namespace)
        # We store some objects in memcache that don't evaluate to True, but are
        # real objects, '{}' for example. Count a cache miss only in a case when
        # an object is None.
        if value != None:  # pylint: disable-msg=g-equals-none
            CACHE_HIT.inc()
        else:
            # NOTE(review): str(Exception()) is an empty string, so the second
            # argument contributes nothing to this log line; it may have been
            # intended as exc_info/stack capture — confirm intent.
            logging.info('Cache miss, key: %s. %s', key, Exception())
            CACHE_MISS.inc(context=key)
        return value

    @classmethod
    def set(cls, key, value, ttl=DEFAULT_CACHE_TTL_SECS, namespace=None):
        """Sets an item in memcache if memcache is enabled."""
        if CAN_USE_MEMCACHE.value:
            CACHE_PUT.inc()
            if not namespace:
                namespace = appengine_config.DEFAULT_NAMESPACE_NAME
            memcache.set(key, value, ttl, namespace=namespace)

    @classmethod
    def incr(cls, key, delta, namespace=None):
        """Incr an item in memcache if memcache is enabled."""
        if CAN_USE_MEMCACHE.value:
            if not namespace:
                namespace = appengine_config.DEFAULT_NAMESPACE_NAME
            # initial_value=0 makes incr create the counter on first use
            # rather than failing on a missing key.
            memcache.incr(key, delta, namespace=namespace, initial_value=0)

    @classmethod
    def delete(cls, key, namespace=None):
        """Deletes an item from memcache if memcache is enabled."""
        if CAN_USE_MEMCACHE.value:
            CACHE_DELETE.inc()
            if not namespace:
                namespace = appengine_config.DEFAULT_NAMESPACE_NAME
            memcache.delete(key, namespace=namespace)
# Controls whether counter values are additionally aggregated across all
# frontend processes via memcache (see the two hook functions below).
CAN_AGGREGATE_COUNTERS = ConfigProperty(
    'gcb_can_aggregate_counters', bool,
    'Whether or not to aggregate and record counter values in memcache. '
    'This allows you to see counter values aggregated across all frontend '
    'application instances. Without recording, you only see counter values '
    'for one frontend instance you are connected to right now. Enabling '
    'aggregation improves quality of performance metrics, but adds a small '
    'amount of latency to all your requests.',
    default_value=False)
def incr_counter_global_value(name, delta):
    """Accumulates *delta* into the cross-process memcache counter, if enabled."""
    if not CAN_AGGREGATE_COUNTERS.value:
        return
    MemcacheManager.incr('counter:' + name, delta)
def get_counter_global_value(name):
    """Fetches the aggregated counter value from memcache; None when disabled."""
    if not CAN_AGGREGATE_COUNTERS.value:
        return None
    return MemcacheManager.get('counter:' + name)
# Install the memcache-backed aggregation hooks into the counters module,
# replacing its default no-op implementations.
counters.get_counter_global_value = get_counter_global_value
counters.incr_counter_global_value = incr_counter_global_value
class Student(BaseEntity):
    """Student profile.

    Instances are mirrored into memcache on put()/delete() so that
    per-request lookups by email avoid the datastore.
    """
    enrolled_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
    user_id = db.StringProperty(indexed=True)
    name = db.StringProperty(indexed=False)
    additional_fields = db.TextProperty(indexed=False)
    is_enrolled = db.BooleanProperty(indexed=False)
    # Each of the following is a string representation of a JSON dict.
    scores = db.TextProperty(indexed=False)

    @classmethod
    def _memcache_key(cls, key):
        """Makes a memcache key from primary key."""
        return 'entity:student:%s' % key

    def put(self):
        """Do the normal put() and also add the object to memcache."""
        result = super(Student, self).put()
        MemcacheManager.set(self._memcache_key(self.key().name()), self)
        return result

    def delete(self):
        """Do the normal delete() and also remove the object from memcache."""
        super(Student, self).delete()
        MemcacheManager.delete(self._memcache_key(self.key().name()))

    @classmethod
    def get_by_email(cls, email):
        # The student's email address serves as the datastore key name.
        return Student.get_by_key_name(email.encode('utf8'))

    @classmethod
    def get_enrolled_student_by_email(cls, email):
        """Returns enrolled student or None.

        Uses memcache with negative caching: the NO_OBJECT sentinel is
        stored when no such student exists, so repeated misses skip the
        datastore entirely.
        """
        student = MemcacheManager.get(cls._memcache_key(email))
        if NO_OBJECT == student:
            return None
        if not student:
            student = Student.get_by_email(email)
            if student:
                MemcacheManager.set(cls._memcache_key(email), student)
            else:
                MemcacheManager.set(cls._memcache_key(email), NO_OBJECT)
        if student and student.is_enrolled:
            return student
        else:
            return None

    @classmethod
    def rename_current(cls, new_name):
        """Gives student a new name.

        Raises:
            Exception: if there is no signed-in user or no matching Student.
        """
        user = users.get_current_user()
        if not user:
            raise Exception('No current user.')
        if new_name:
            student = Student.get_by_email(user.email())
            if not student:
                raise Exception('Student instance corresponding to user %s not '
                                'found.' % user.email())
            student.name = new_name
            student.put()

    @classmethod
    def set_enrollment_status_for_current(cls, is_enrolled):
        """Changes student enrollment status.

        Raises:
            Exception: if there is no signed-in user or no matching Student.
        """
        user = users.get_current_user()
        if not user:
            raise Exception('No current user.')
        student = Student.get_by_email(user.email())
        if not student:
            raise Exception('Student instance corresponding to user %s not '
                            'found.' % user.email())
        student.is_enrolled = is_enrolled
        student.put()

    def get_key(self):
        # NOTE(review): this derives a key from user_id, while get_by_email
        # looks entities up by an email key_name — two different key schemes
        # appear to coexist here; confirm which one actual entities use.
        if not self.user_id:
            raise Exception('Student instance has no user_id set.')
        return db.Key.from_path(Student.kind(), self.user_id)

    @classmethod
    def get_student_by_user_id(cls, user_id):
        """Returns the single student with this user_id, or None.

        Raises:
            Exception: if more than one student shares the user_id.
        """
        # fetch(limit=2) is just enough to detect a duplicate without
        # scanning further.
        students = cls.all().filter(cls.user_id.name, user_id).fetch(limit=2)
        if len(students) == 2:
            raise Exception(
                'There is more than one student with user_id %s' % user_id)
        return students[0] if students else None

    def has_same_key_as(self, key):
        """Checks if the key of the student and the given key are equal."""
        return key == self.get_key()
class EventEntity(BaseEntity):
    """Generic events.

    Each event has a 'source' that defines a place in a code where the event was
    recorded. Each event has a 'user_id' to represent an actor who triggered
    the event. The event 'data' is a JSON object, the format of which is defined
    elsewhere and depends on the type of the event.
    """
    recorded_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
    source = db.StringProperty(indexed=False)
    user_id = db.StringProperty(indexed=False)
    # Each of the following is a string representation of a JSON dict.
    data = db.TextProperty(indexed=False)

    @classmethod
    def record(cls, source, user, data):
        """Records new event into a datastore.

        Args:
            source: string identifying the code location emitting the event.
            user: user object; only its user_id() is persisted.
            data: string (JSON-encoded) payload for the event.
        """
        event = EventEntity()
        event.source = source
        event.user_id = user.user_id()
        event.data = data
        event.put()
class StudentAnswersEntity(BaseEntity):
    """Student answers to the assessments."""
    # NOTE(review): updated_on has no auto_now, so callers appear to be
    # responsible for setting it on each write — confirm against call sites.
    updated_on = db.DateTimeProperty(indexed=True)
    # Each of the following is a string representation of a JSON dict.
    data = db.TextProperty(indexed=False)
class StudentPropertyEntity(BaseEntity):
    """A property of a student, keyed by the string STUDENT_ID-PROPERTY_NAME.

    Reads and writes are mirrored into memcache; a NO_OBJECT sentinel is
    cached for properties that do not exist (negative caching).
    """
    updated_on = db.DateTimeProperty(indexed=True)
    name = db.StringProperty()
    # Each of the following is a string representation of a JSON dict.
    value = db.TextProperty()

    @classmethod
    def _memcache_key(cls, key):
        """Makes a memcache key from primary key."""
        return 'entity:student_property:%s' % key

    @classmethod
    def create_key(cls, student_id, property_name):
        """Builds the datastore key name for a (student, property) pair."""
        return '%s-%s' % (student_id, property_name)

    @classmethod
    def create(cls, student, property_name):
        """Creates a new, unsaved entity bound to a student and property name."""
        return StudentPropertyEntity(
            key_name=cls.create_key(student.user_id, property_name),
            name=property_name)

    def put(self):
        """Do the normal put() and also add the object to memcache."""
        result = super(StudentPropertyEntity, self).put()
        MemcacheManager.set(self._memcache_key(self.key().name()), self)
        return result

    def delete(self):
        """Do the normal delete() and also remove the object from memcache."""
        # Bug fix: the super() call previously named Student instead of
        # StudentPropertyEntity, which raises
        # "TypeError: super(type, obj): obj must be an instance or subtype
        # of type" whenever an instance of this class is deleted.
        super(StudentPropertyEntity, self).delete()
        MemcacheManager.delete(self._memcache_key(self.key().name()))

    @classmethod
    def get(cls, student, property_name):
        """Loads student property, consulting memcache (with negative cache)."""
        key = cls.create_key(student.user_id, property_name)
        value = MemcacheManager.get(cls._memcache_key(key))
        if NO_OBJECT == value:
            return None
        if not value:
            value = cls.get_by_key_name(key)
            if value:
                MemcacheManager.set(cls._memcache_key(key), value)
            else:
                MemcacheManager.set(cls._memcache_key(key), NO_OBJECT)
        return value
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for Sending notifications."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
from google.appengine.api import mail
from google.appengine.api import users
class EmailManager(object):
    """Notification Manager. Sends emails out."""

    def __init__(self, course):
        self._course = course
        self._user = users.get_current_user()

    # The misspelled parameter name 'reciever' is kept: it is part of the
    # public signature and may be passed by keyword.
    def send_mail(self, subject, body, reciever):
        """Sends an HTML email; the current user is sender and 'to',
        the recipient list goes on BCC.

        Returns:
            True (the send either succeeds or raises).
        """
        sender_address = self._user.email()
        message = mail.EmailMessage()
        message.sender = sender_address
        message.to = sender_address
        message.bcc = reciever
        message.subject = subject
        message.html = body
        message.send()
        return True

    def send_announcement(self, subject, body):
        """Send an announcement to course announcement list."""
        announce_email = self._course.get_course_announcement_list_email()
        if not announce_email:
            return False
        return self.send_mail(subject, body, announce_email)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages mapping of users to roles and roles to privileges."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import config
from google.appengine.api import users
GCB_ADMIN_LIST = config.ConfigProperty(
    'gcb_admin_user_emails', str, (
        'A list of email addresses for super-admin users. '
        'WARNING! Super-admin users have the highest level of access to your '
        'Google App Engine instance and to all data about all courses and '
        'students within that instance. Be very careful when modifying this '
        'property. Syntax: Surround each email address with [ and ]; for '
        'example, [test@example.com]. Separate the entries with either a new '
        'line or a space. Do not use regular expressions.'),
    '', multiline=True)
# Keys used to look up per-course admin settings in the course environment
# dictionary (see Roles.is_course_admin).
KEY_COURSE = 'course'
KEY_ADMIN_USER_EMAILS = 'admin_user_emails'
class Roles(object):
    """A class that provides information about user roles."""

    @classmethod
    def is_direct_super_admin(cls):
        """Checks if current user is a super admin, without delegation."""
        return users.get_current_user() and users.is_current_user_admin()

    @classmethod
    def is_super_admin(cls):
        """Checks if current user is a super admin, possibly via delegation."""
        if cls.is_direct_super_admin():
            return True
        user = users.get_current_user()
        if not user:
            return False
        # Delegated admins are listed as '[email]' tokens in the config.
        return '[%s]' % user.email() in GCB_ADMIN_LIST.value

    @classmethod
    def is_course_admin(cls, app_context):
        """Checks if a user is a course admin, possibly via delegation."""
        if cls.is_super_admin():
            return True
        environ = app_context.get_environ()
        if KEY_COURSE not in environ:
            return False
        course_settings = environ[KEY_COURSE]
        if KEY_ADMIN_USER_EMAILS not in course_settings:
            return False
        allowed = course_settings[KEY_ADMIN_USER_EMAILS]
        user = users.get_current_user()
        return bool(allowed and user and '[%s]' % user.email() in allowed)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for managing Courses."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import copy
from datetime import datetime
import logging
import os
import pickle
import sys
import appengine_config
from common.schema_fields import FieldRegistry
from common.schema_fields import SchemaField
import common.tags
from tools import verify
import yaml
from models import MemcacheManager
import progress
import review
import transforms
import vfs
# Supported versions of the course data model.
COURSE_MODEL_VERSION_1_2 = '1.2'
COURSE_MODEL_VERSION_1_3 = '1.3'
# Date format string for validating input in ISO 8601 format without a
# timezone. All such strings are assumed to refer to UTC datetimes.
# Example: '2013-03-21 13:00'
ISO_8601_DATE_FORMAT = '%Y-%m-%d %H:%M'
def deep_dict_merge(real_values_dict, default_values_dict):
    """Merges default and real value dictionaries recursively.

    Produces a new dict: a deep copy of real_values_dict in which every key
    missing at any nesting level is filled in from default_values_dict.
    Real values always win; only truthy dict-valued defaults are merged
    recursively. Neither input is mutated.
    """

    def _fill_in(real, defaults):
        """Recursively writes missing default entries into *real*."""
        for key, real_value in real.items():
            fallback = defaults.get(key)
            if (fallback and isinstance(real_value, dict)
                    and isinstance(fallback, dict)):
                _fill_in(real_value, fallback)
        for key, fallback in defaults.items():
            real.setdefault(key, fallback)

    merged = copy.deepcopy(real_values_dict) if real_values_dict else {}
    _fill_in(merged, default_values_dict)
    return merged
# Here are the defaults for a new course.
DEFAULT_COURSE_YAML_DICT = {
'course': {
'title': 'UNTITLED COURSE',
'locale': 'en_US',
'main_image': {},
'now_available': False},
'base': {
'show_gplus_button': True},
'institution': {
'logo': {},
'url': ''},
'preview': {},
'unit': {},
'reg_form': {
'can_register': True,
'additional_registration_fields': (
'<!-- reg_form.additional_registration_fields -->')}
}
# Here are the defaults for an existing course.
DEFAULT_EXISTING_COURSE_YAML_DICT = deep_dict_merge(
{'course': {
'now_available': True}},
DEFAULT_COURSE_YAML_DICT)
# Here is the default course.yaml for a new course.
EMPTY_COURSE_YAML = u"""# my new course.yaml
course:
title: 'New Course by %s'
now_available: False
"""
# Here are the default assessment weights corresponding to the sample course.
DEFAULT_LEGACY_ASSESSMENT_WEIGHTS = {'Pre': 0, 'Mid': 30, 'Fin': 70}
# Indicates that an assessment is graded automatically.
AUTO_GRADER = 'auto'
# Indicates that an assessment is graded by a human.
HUMAN_GRADER = 'human'
# Allowed graders.
ALLOWED_GRADERS = [AUTO_GRADER, HUMAN_GRADER]
# Keys in unit.workflow (when it is converted to a dict).
GRADER_KEY = 'grader'
MATCHER_KEY = 'matcher'
SUBMISSION_DUE_DATE_KEY = 'submission_due_date'
REVIEW_DUE_DATE_KEY = 'review_due_date'
REVIEW_MIN_COUNT_KEY = 'review_min_count'
REVIEW_WINDOW_MINS_KEY = 'review_window_mins'
DEFAULT_REVIEW_MIN_COUNT = 2
DEFAULT_REVIEW_WINDOW_MINS = 60
# Keys specific to human-graded assessments.
HUMAN_GRADED_ASSESSMENT_KEY_LIST = [
MATCHER_KEY, REVIEW_MIN_COUNT_KEY, REVIEW_WINDOW_MINS_KEY,
SUBMISSION_DUE_DATE_KEY, REVIEW_DUE_DATE_KEY
]
# The name for the peer review assessment used in the sample v1.2 CSV file.
# This is here so that a peer review assessment example is available when
# Course Builder loads with the sample course. However, in general, peer
# review assessments should only be specified in Course Builder v1.4 or
# later (via the web interface).
LEGACY_REVIEW_ASSESSMENT = 'ReviewAssessmentExample'
# This value is the default workflow for assessment grading,
DEFAULT_AUTO_GRADER_WORKFLOW = yaml.safe_dump({
GRADER_KEY: AUTO_GRADER
}, default_flow_style=False)
# This value is meant to be used only for the human-reviewed assessments in the
# sample v1.2 Power Searching course.
LEGACY_HUMAN_GRADER_WORKFLOW = yaml.safe_dump({
GRADER_KEY: HUMAN_GRADER,
MATCHER_KEY: review.PEER_MATCHER,
SUBMISSION_DUE_DATE_KEY: '2014-03-14 12:00',
REVIEW_DUE_DATE_KEY: '2014-03-21 12:00',
REVIEW_MIN_COUNT_KEY: DEFAULT_REVIEW_MIN_COUNT,
REVIEW_WINDOW_MINS_KEY: DEFAULT_REVIEW_WINDOW_MINS,
}, default_flow_style=False)
def is_editable_fs(app_context):
    """True iff the context uses the writable, datastore-backed file system."""
    return isinstance(app_context.fs.impl, vfs.DatastoreBackedFileSystem)
def copy_attributes(source, target, converter):
    """Copies source object attributes into a target using a converter.

    Args:
        source: object whose attributes are read.
        target: object whose attributes are assigned.
        converter: dict mapping a source attribute name to a pair of
            (target attribute name, target type); falsy entries are skipped.
    """
    for source_name, mapping in converter.items():
        if not mapping:
            continue
        target_name = mapping[0]
        target_type = mapping[1]
        converted = target_type(getattr(source, source_name))
        setattr(target, target_name, converted)
def load_csv_course(app_context):
    """Loads course data from the CSV files.

    Args:
        app_context: application context providing file system access and
            the course data home directory.

    Returns:
        A (units, lessons) pair of parsed object lists, or (None, None)
        when either CSV file is absent.
    """
    logging.info('Initializing datastore from CSV files.')
    unit_file = os.path.join(app_context.get_data_home(), 'unit.csv')
    lesson_file = os.path.join(app_context.get_data_home(), 'lesson.csv')
    # Check files exist.
    if (not app_context.fs.isfile(unit_file) or
            not app_context.fs.isfile(lesson_file)):
        return None, None
    unit_stream = app_context.fs.open(unit_file)
    lesson_stream = app_context.fs.open(lesson_file)
    # Verify CSV file integrity.
    units = verify.read_objects_from_csv_stream(
        unit_stream, verify.UNITS_HEADER, verify.Unit)
    lessons = verify.read_objects_from_csv_stream(
        lesson_stream, verify.LESSONS_HEADER, verify.Lesson)
    verifier = verify.Verifier()
    verifier.verify_unit_fields(units)
    verifier.verify_lesson_fields(lessons)
    verifier.verify_unit_lesson_relationships(units, lessons)
    assert verifier.errors == 0
    assert verifier.warnings == 0
    # Load data from CSV files into a datastore.
    # NOTE: the files are deliberately re-read with DB converters here; the
    # first pass above is validation-only and its results are discarded.
    units = verify.read_objects_from_csv_stream(
        app_context.fs.open(unit_file), verify.UNITS_HEADER, Unit12,
        converter=verify.UNIT_CSV_TO_DB_CONVERTER)
    lessons = verify.read_objects_from_csv_stream(
        app_context.fs.open(lesson_file), verify.LESSONS_HEADER, Lesson12,
        converter=verify.LESSON_CSV_TO_DB_CONVERTER)
    return units, lessons
def index_units_and_lessons(course):
    """Index all 'U' type units and their lessons. Indexes are 1-based.

    Writes the computed positions into the private _index attribute of each
    unit and lesson; non-'U' units are left untouched.
    """
    next_unit_index = 1
    for unit in course.get_units():
        if unit.type != verify.UNIT_TYPE_UNIT:
            continue
        unit._index = next_unit_index  # pylint: disable-msg=protected-access
        next_unit_index += 1
        for lesson_number, lesson in enumerate(
                course.get_lessons(unit.unit_id), start=1):
            lesson._index = (  # pylint: disable-msg=protected-access
                lesson_number)
def create_course_registry():
    """Create the registry for course properties.

    Returns:
        A FieldRegistry describing the editable course settings schema,
        organized into course-level, registration and homepage sections.
    """
    reg = FieldRegistry('Basic Course Settings', description='Course Settings')
    # Course level settings.
    course_opts = reg.add_sub_registry('course', 'Course Config')
    course_opts.add_property(
        SchemaField('course:title', 'Course Name', 'string'))
    course_opts.add_property(
        SchemaField(
            'course:admin_user_emails', 'Course Admin Emails', 'string',
            description='A space-separated list of email addresses of course '
            'administrators. Each email address must be placed between \'[\' '
            'and \']\'.'))
    course_opts.add_property(
        SchemaField(
            'course:forum_email', 'Forum Email', 'string', optional=True,
            description='Email for the forum, e.g. '
            '\'My-Course@googlegroups.com\'.'))
    course_opts.add_property(SchemaField(
        'course:announcement_list_email', 'Announcement List Email', 'string',
        optional=True, description='Email for the mailing list where students '
        'can register to receive course announcements, e.g. '
        '\'My-Course-Announce@googlegroups.com\''))
    course_opts.add_property(SchemaField('course:locale', 'Locale', 'string'))
    course_opts.add_property(SchemaField(
        'course:start_date', 'Course Start Date', 'string', optional=True))
    course_opts.add_property(SchemaField(
        'course:now_available', 'Make Course Available', 'boolean'))
    # Course registration settings.
    reg_opts = reg.add_sub_registry('reg_form', 'Student Registration Options')
    reg_opts.add_property(SchemaField(
        'reg_form:can_register', 'Enable Registrations', 'boolean',
        description='Checking this box allows new students to register for '
        'the course.'))
    # Course homepage settings.
    homepage_opts = reg.add_sub_registry('homepage', 'Homepage Settings')
    homepage_opts.add_property(SchemaField(
        'course:instructor_details', 'Instructor Details', 'html',
        optional=True))
    homepage_opts.add_property(SchemaField(
        'course:blurb', 'Course Abstract', 'html', optional=True,
        description='Text, shown on the course homepage, that explains what '
        'the course is about.',
        extra_schema_dict_values={
            'supportCustomTags': common.tags.CAN_USE_DYNAMIC_TAGS.value}))
    homepage_opts.add_property(SchemaField(
        'course:main_video:url', 'Course Video', 'url', optional=True,
        description='URL for the preview video shown on the course homepage.'))
    homepage_opts.add_property(SchemaField(
        'course:main_image:url', 'Course Image', 'string', optional=True,
        description='URL for the preview image shown on the course homepage. '
        'This will only be shown if no course video is specified.'))
    homepage_opts.add_property(SchemaField(
        'course:main_image:alt_text', 'Alternate Text', 'string',
        optional=True,
        description='Alt text for the preview image on the course homepage.'))
    return reg
class AbstractCachedObject(object):
    """Abstract serializable versioned object that can stored in memcache.

    Subclasses must define VERSION and implement the three memento hook
    classmethods; this base provides pickle-based (de)serialization plus
    memcache load/save/delete keyed by version and deployment.
    """

    @classmethod
    def _make_key(cls):
        # The course content files may change between deployment. To avoid
        # reading old cached values by the new version of the application we
        # add deployment version to the key. Now each version of the
        # application can put/get its own version of the course and the
        # deployment.
        return 'course:model:pickle:%s:%s' % (
            cls.VERSION, os.environ.get('CURRENT_VERSION_ID'))

    @classmethod
    def new_memento(cls):
        """Creates new empty memento instance; must be pickle serializable."""
        # Improvement: NotImplementedError is the idiomatic abstract-method
        # signal; it subclasses Exception, so existing broad handlers (e.g.
        # load() below) still catch it.
        raise NotImplementedError()

    @classmethod
    def instance_from_memento(cls, unused_app_context, unused_memento):
        """Creates instance from serializable memento."""
        raise NotImplementedError()

    @classmethod
    def memento_from_instance(cls, unused_instance):
        """Creates serializable memento from instance."""
        raise NotImplementedError()

    @classmethod
    def load(cls, app_context):
        """Loads instance from memcache; does not fail on errors."""
        try:
            binary_data = MemcacheManager.get(
                cls._make_key(),
                namespace=app_context.get_namespace_name())
            if binary_data:
                memento = cls.new_memento()
                memento.deserialize(binary_data)
                return cls.instance_from_memento(app_context, memento)
        except Exception as e:  # pylint: disable-msg=broad-except
            logging.error(
                'Failed to load object \'%s\' from memcache. %s',
                cls._make_key(), e)
        return None

    @classmethod
    def save(cls, app_context, instance):
        """Saves instance to memcache."""
        MemcacheManager.set(
            cls._make_key(),
            cls.memento_from_instance(instance).serialize(),
            namespace=app_context.get_namespace_name())

    @classmethod
    def delete(cls, app_context):
        """Deletes instance from memcache."""
        MemcacheManager.delete(
            cls._make_key(),
            namespace=app_context.get_namespace_name())

    def serialize(self):
        """Saves instance to a pickle representation.

        Note: the pickled data is only ever read back by this same
        application via memcache, so untrusted-pickle concerns do not
        apply here.
        """
        return pickle.dumps(self.__dict__)

    def deserialize(self, binary_data):
        """Loads instance from a pickle representation.

        Raises:
            Exception: if the pickled 'version' does not match self.version.
        """
        adict = pickle.loads(binary_data)
        if self.version != adict.get('version'):
            raise Exception('Expected version %s, found %s.' % (
                self.version, adict.get('version')))
        self.__dict__.update(adict)
class Unit12(object):
    """An object to represent a Unit, Assessment or Link (version 1.2)."""

    def __init__(self):
        # Primary key.
        self.unit_id = ''
        self.type = ''
        self.title = ''
        self.release_date = ''
        self.now_available = False
        # 1-based position among 'U'-type units; filled in automatically by
        # index_units_and_lessons().
        self._index = None

    @property
    def href(self):
        """Link target; valid only for link-type units."""
        assert verify.UNIT_TYPE_LINK == self.type
        return self.unit_id

    @property
    def index(self):
        """1-based unit index; valid only for 'U'-type units."""
        assert verify.UNIT_TYPE_UNIT == self.type
        return self._index

    @property
    def workflow_yaml(self):
        """Returns the workflow as a YAML text string."""
        assert verify.UNIT_TYPE_ASSESSMENT == self.type
        # Only the sample course's legacy review assessment is human-graded.
        return (LEGACY_HUMAN_GRADER_WORKFLOW
                if self.unit_id == LEGACY_REVIEW_ASSESSMENT
                else DEFAULT_AUTO_GRADER_WORKFLOW)

    @property
    def workflow(self):
        """Returns the workflow as an object."""
        return Workflow(self.workflow_yaml)
class Lesson12(object):
    """An object to represent a Lesson (version 1.2)."""

    def __init__(self):
        # Primary key, and the unit_id of the parent unit.
        self.lesson_id = 0
        self.unit_id = 0
        self.title = ''
        self.objectives = ''
        self.video = ''
        self.notes = ''
        self.duration = ''
        self.activity = ''
        self.activity_title = ''
        self.activity_listed = True
        # 1-based position inside the owning unit; filled in automatically
        # by index_units_and_lessons().
        self._index = None

    @property
    def now_available(self):
        """Version 1.2 lessons are always available."""
        return True

    @property
    def index(self):
        """1-based index of the lesson within its unit."""
        return self._index
class CachedCourse12(AbstractCachedObject):
    """A representation of a Course12 optimized for storing in memcache."""

    VERSION = COURSE_MODEL_VERSION_1_2

    def __init__(self, units=None, lessons=None, unit_id_to_lessons=None):
        # 'version' participates in the serialized dict and is checked by
        # AbstractCachedObject.deserialize().
        self.version = self.VERSION
        self.units = units
        self.lessons = lessons
        self.unit_id_to_lessons = unit_id_to_lessons

    @classmethod
    def new_memento(cls):
        # An empty memento; deserialize() fills in its attributes.
        return CachedCourse12()

    @classmethod
    def instance_from_memento(cls, app_context, memento):
        return CourseModel12(
            app_context, units=memento.units, lessons=memento.lessons,
            unit_id_to_lessons=memento.unit_id_to_lessons)

    @classmethod
    def memento_from_instance(cls, course):
        return CachedCourse12(
            units=course.units, lessons=course.lessons,
            unit_id_to_lessons=course.unit_id_to_lessons)
class CourseModel12(object):
"""A course defined in terms of CSV files (version 1.2)."""
VERSION = COURSE_MODEL_VERSION_1_2
@classmethod
def load(cls, app_context):
"""Loads course data into a model."""
course = CachedCourse12.load(app_context)
if not course:
units, lessons = load_csv_course(app_context)
if units and lessons:
course = CourseModel12(app_context, units, lessons)
if course:
CachedCourse12.save(app_context, course)
return course
@classmethod
def _make_unit_id_to_lessons_lookup_dict(cls, lessons):
"""Creates an index of unit.unit_id to unit.lessons."""
unit_id_to_lessons = {}
for lesson in lessons:
key = str(lesson.unit_id)
if key not in unit_id_to_lessons:
unit_id_to_lessons[key] = []
unit_id_to_lessons[key].append(lesson)
return unit_id_to_lessons
def __init__(
self, app_context,
units=None, lessons=None, unit_id_to_lessons=None):
self._app_context = app_context
self._units = []
self._lessons = []
self._unit_id_to_lessons = {}
if units:
self._units = units
if lessons:
self._lessons = lessons
if unit_id_to_lessons:
self._unit_id_to_lessons = unit_id_to_lessons
else:
self._unit_id_to_lessons = (
self._make_unit_id_to_lessons_lookup_dict(self._lessons))
index_units_and_lessons(self)
@property
def app_context(self):
return self._app_context
@property
def units(self):
return self._units
@property
def lessons(self):
return self._lessons
@property
def unit_id_to_lessons(self):
return self._unit_id_to_lessons
def get_units(self):
return self._units[:]
def get_lessons(self, unit_id):
return self._unit_id_to_lessons.get(str(unit_id), [])
def find_unit_by_id(self, unit_id):
"""Finds a unit given its id."""
for unit in self._units:
if str(unit.unit_id) == str(unit_id):
return unit
return None
def get_review_form_filename(self, unit_id):
"""Returns the corresponding review form filename."""
return 'assets/js/review-%s.js' % unit_id
def get_assessment_filename(self, unit_id):
"""Returns assessment base filename."""
unit = self.find_unit_by_id(unit_id)
assert unit and verify.UNIT_TYPE_ASSESSMENT == unit.type
return 'assets/js/assessment-%s.js' % unit.unit_id
def _get_assessment_as_dict(self, filename):
"""Returns the Python dict representation of an assessment file."""
root_name = 'assessment'
context = self._app_context
assessment_content = context.fs.impl.get(os.path.join(
context.get_home(), filename)).read()
content, noverify_text = verify.convert_javascript_to_python(
assessment_content, root_name)
assessment = verify.evaluate_python_expression_from_text(
content, root_name, verify.Assessment().scope, noverify_text)
return assessment
def get_assessment_content(self, unit):
"""Returns the schema for an assessment as a Python dict."""
return self._get_assessment_as_dict(
self.get_assessment_filename(unit.unit_id))
def get_review_form_content(self, unit):
"""Returns the schema for a review form as a Python dict."""
return self._get_assessment_as_dict(
self.get_review_form_filename(unit.unit_id))
def get_activity_filename(self, unit_id, lesson_id):
"""Returns activity base filename."""
return 'assets/js/activity-%s.%s.js' % (unit_id, lesson_id)
def find_lesson_by_id(self, unit, lesson_id):
"""Finds a lesson given its id (or 1-based index in this model)."""
index = int(lesson_id) - 1
return self.get_lessons(unit.unit_id)[index]
    def to_json(self):
        """Creates JSON representation of this instance."""
        # Deep-copy first so the app context can be removed from the copy
        # without mutating the live course object; the context is not part
        # of the serializable course data.
        adict = copy.deepcopy(self)
        del adict._app_context
        return transforms.dumps(
            adict,
            indent=4, sort_keys=True,
            default=lambda o: o.__dict__)
class Unit13(object):
    """An object to represent a Unit, Assessment or Link (version 1.3)."""

    def __init__(self):
        self.unit_id = 0  # primary key
        self.type = ''
        self.title = ''
        self.release_date = ''
        self.now_available = False
        # Units of 'U' types have 1-based index. An index is automatically
        # computed.
        self._index = None
        # Only valid for the unit.type == verify.UNIT_TYPE_LINK.
        self.href = None
        # Only valid for the unit.type == verify.UNIT_TYPE_ASSESSMENT.
        self.weight = 0
        # Only valid for the unit.type == verify.UNIT_TYPE_ASSESSMENT.
        self.workflow_yaml = DEFAULT_AUTO_GRADER_WORKFLOW

    @property
    def index(self):
        """1-based position of this unit; meaningful only for 'U' units."""
        assert self.type == verify.UNIT_TYPE_UNIT
        return self._index

    @property
    def workflow(self):
        """Returns the workflow as an object."""
        assert self.type == verify.UNIT_TYPE_ASSESSMENT
        return Workflow(self.workflow_yaml)
class Lesson13(object):
    """An object to represent a Lesson (version 1.3)."""

    def __init__(self):
        self.lesson_id = 0  # primary key
        self.unit_id = 0  # unit.unit_id of parent
        self.title = ''
        self.objectives = ''
        self.video = ''
        self.notes = ''
        self.duration = ''
        self.now_available = False
        self.has_activity = False
        self.activity_title = ''
        self.activity_listed = True
        # Lessons have 1-based index inside the unit they belong to. An index
        # is automatically computed.
        self._index = None

    @property
    def index(self):
        """1-based position of this lesson within its unit (or None)."""
        return self._index

    @property
    def activity(self):
        """A symbolic name to old attribute."""
        return self.has_activity
class PersistentCourse13(object):
    """A representation of a Course13 optimized for persistence."""

    COURSES_FILENAME = 'data/course.json'

    def __init__(self, next_id=None, units=None, lessons=None):
        self.version = CourseModel13.VERSION
        self.next_id = next_id
        self.units = units
        self.lessons = lessons

    def to_dict(self):
        """Saves object attributes into a dict."""
        return {
            'version': str(self.version),
            'next_id': int(self.next_id),
            'units': [
                transforms.instance_to_dict(unit) for unit in self.units],
            'lessons': [
                transforms.instance_to_dict(lesson)
                for lesson in self.lessons],
        }

    def _from_dict(self, adict):
        """Loads instance attributes from the dict."""
        self.next_id = int(adict.get('next_id'))
        self.units = []
        for unit_dict in adict.get('units') or []:
            unit = Unit13()
            transforms.dict_to_instance(
                unit_dict, unit,
                defaults={'workflow_yaml': DEFAULT_AUTO_GRADER_WORKFLOW})
            self.units.append(unit)
        self.lessons = []
        for lesson_dict in adict.get('lessons') or []:
            lesson = Lesson13()
            transforms.dict_to_instance(
                lesson_dict, lesson, defaults={'activity_listed': True})
            self.lessons.append(lesson)

    @classmethod
    def save(cls, app_context, course):
        """Saves course to datastore."""
        persistent = PersistentCourse13(
            next_id=course.next_id,
            units=course.units, lessons=course.lessons)
        fs = app_context.fs.impl
        filename = fs.physical_to_logical(cls.COURSES_FILENAME)
        app_context.fs.put(filename, vfs.FileStreamWrapped(
            None, persistent.serialize()))

    @classmethod
    def load(cls, app_context):
        """Loads course from datastore; returns None if not present."""
        fs = app_context.fs.impl
        filename = fs.physical_to_logical(cls.COURSES_FILENAME)
        if not app_context.fs.isfile(filename):
            return None
        persistent = PersistentCourse13()
        persistent.deserialize(app_context.fs.get(filename))
        return CourseModel13(
            app_context, next_id=persistent.next_id,
            units=persistent.units, lessons=persistent.lessons)

    def serialize(self):
        """Saves instance to a JSON representation."""
        return transforms.dumps(self.to_dict()).encode('utf-8')

    def deserialize(self, binary_data):
        """Loads instance from a JSON representation."""
        adict = transforms.loads(binary_data.decode('utf-8'))
        if self.version != adict.get('version'):
            raise Exception('Expected version %s, found %s.' % (
                self.version, adict.get('version')))
        self._from_dict(adict)
class CachedCourse13(AbstractCachedObject):
    """A representation of a Course13 optimized for storing in memcache."""

    VERSION = COURSE_MODEL_VERSION_1_3

    def __init__(
        self, next_id=None, units=None, lessons=None,
        unit_id_to_lesson_ids=None):
        self.version = self.VERSION
        self.next_id = next_id
        self.units = units
        self.lessons = lessons
        # Unlike PersistentCourse13, the cached memento also carries the
        # derived unit -> lesson-ids index. It does not need to be durably
        # persisted, but it is cheap and useful to keep in memcache.
        self.unit_id_to_lesson_ids = unit_id_to_lesson_ids

    @classmethod
    def new_memento(cls):
        """Creates an empty memento to be filled in during deserialization."""
        return CachedCourse13()

    @classmethod
    def instance_from_memento(cls, app_context, memento):
        """Rebuilds a live CourseModel13 from a cached memento."""
        return CourseModel13(
            app_context, next_id=memento.next_id,
            units=memento.units, lessons=memento.lessons,
            unit_id_to_lesson_ids=memento.unit_id_to_lesson_ids)

    @classmethod
    def memento_from_instance(cls, course):
        """Captures a live CourseModel13 into a cacheable memento."""
        return CachedCourse13(
            next_id=course.next_id,
            units=course.units, lessons=course.lessons,
            unit_id_to_lesson_ids=course.unit_id_to_lesson_ids)
class CourseModel13(object):
    """A course defined in terms of objects (version 1.3).

    Unlike the 1.2 model, this model is editable: units and lessons can be
    added, updated, reordered and deleted. Mutations accumulate in the
    _dirty_*/_deleted_* lists and are flushed to storage by save().
    """

    VERSION = COURSE_MODEL_VERSION_1_3

    @classmethod
    def load(cls, app_context):
        """Loads course from memcache or persistence."""
        course = CachedCourse13.load(app_context)
        if not course:
            course = PersistentCourse13.load(app_context)
            if course:
                # Warm the cache so the next load skips the datastore.
                CachedCourse13.save(app_context, course)
        return course

    @classmethod
    def _make_unit_id_to_lessons_lookup_dict(cls, lessons):
        """Creates an index of unit.unit_id to unit.lessons."""
        unit_id_to_lesson_ids = {}
        for lesson in lessons:
            key = str(lesson.unit_id)
            if key not in unit_id_to_lesson_ids:
                unit_id_to_lesson_ids[key] = []
            unit_id_to_lesson_ids[key].append(str(lesson.lesson_id))
        return unit_id_to_lesson_ids

    def __init__(
        self, app_context, next_id=None, units=None, lessons=None,
        unit_id_to_lesson_ids=None):
        """Creates a course model; builds the lesson index when not given."""
        # Init default values.
        self._app_context = app_context
        self._next_id = 1  # a counter for creating sequential entity ids
        self._units = []
        self._lessons = []
        self._unit_id_to_lesson_ids = {}

        # These array keep dirty object in current transaction.
        self._dirty_units = []
        self._dirty_lessons = []
        self._deleted_units = []
        self._deleted_lessons = []

        # Set provided values.
        if next_id:
            self._next_id = next_id
        if units:
            self._units = units
        if lessons:
            self._lessons = lessons
        if unit_id_to_lesson_ids:
            self._unit_id_to_lesson_ids = unit_id_to_lesson_ids
        else:
            self._index()

    @property
    def app_context(self):
        """The application context this course model is bound to."""
        return self._app_context

    @property
    def next_id(self):
        """Next sequential entity id to be allocated."""
        return self._next_id

    @property
    def units(self):
        """List of all unit objects in course order."""
        return self._units

    @property
    def lessons(self):
        """Flat list of all lesson objects."""
        return self._lessons

    @property
    def unit_id_to_lesson_ids(self):
        """Dict mapping str(unit_id) to a list of str(lesson_id)."""
        return self._unit_id_to_lesson_ids

    def _get_next_id(self):
        """Allocates next id in sequence."""
        next_id = self._next_id
        self._next_id += 1
        return next_id

    def _index(self):
        """Indexes units and lessons."""
        self._unit_id_to_lesson_ids = self._make_unit_id_to_lessons_lookup_dict(
            self._lessons)
        index_units_and_lessons(self)

    def is_dirty(self):
        """Checks if course object has been modified and needs to be saved.

        Returns a truthy value (a possibly-empty list, not a strict bool).
        """
        return self._dirty_units or self._dirty_lessons

    def _flush_deleted_objects(self):
        """Delete files owned by deleted objects."""

        # TODO(psimakov): handle similarly add_unit() and set_assessment()

        # To delete an activity/assessment one must look up its filename. This
        # requires a valid unit/lesson. If unit was deleted it's no longer
        # found in _units, same for lesson. So we temporarily install deleted
        # unit/lesson array instead of actual. We also temporarily empty
        # so _unit_id_to_lesson_ids is not accidentally used. This is a hack,
        # and we will improve it as object model gets more complex, but for
        # now it works fine.
        units = self._units
        lessons = self._lessons
        unit_id_to_lesson_ids = self._unit_id_to_lesson_ids
        try:
            self._units = self._deleted_units
            self._lessons = self._deleted_lessons
            self._unit_id_to_lesson_ids = None

            # Delete owned assessments.
            for unit in self._deleted_units:
                if verify.UNIT_TYPE_ASSESSMENT == unit.type:
                    self._delete_assessment(unit)

            # Delete owned activities.
            for lesson in self._deleted_lessons:
                if lesson.has_activity:
                    self._delete_activity(lesson)
        finally:
            # Always restore the real unit/lesson state, even on error.
            self._units = units
            self._lessons = lessons
            self._unit_id_to_lesson_ids = unit_id_to_lesson_ids

    def _update_dirty_objects(self):
        """Update files owned by course."""
        fs = self.app_context.fs

        # Update state of owned assessments.
        for unit in self._dirty_units:
            unit = self.find_unit_by_id(unit.unit_id)
            if not unit or verify.UNIT_TYPE_ASSESSMENT != unit.type:
                continue
            path = fs.impl.physical_to_logical(
                self.get_assessment_filename(unit.unit_id))
            if fs.isfile(path):
                # Only the draft flag is updated; content is untouched.
                fs.put(
                    path, None, metadata_only=True,
                    is_draft=not unit.now_available)

        # Update state of owned activities.
        for lesson in self._dirty_lessons:
            lesson = self.find_lesson_by_id(None, lesson.lesson_id)
            if not lesson or not lesson.has_activity:
                continue
            path = fs.impl.physical_to_logical(
                self.get_activity_filename(None, lesson.lesson_id))
            if fs.isfile(path):
                fs.put(
                    path, None, metadata_only=True,
                    is_draft=not lesson.now_available)

    def save(self):
        """Saves course to datastore and memcache."""
        self._flush_deleted_objects()
        self._update_dirty_objects()

        # Reset the transaction state before persisting.
        self._dirty_units = []
        self._dirty_lessons = []
        self._deleted_units = []
        self._deleted_lessons = []

        self._index()
        PersistentCourse13.save(self._app_context, self)
        # Invalidate (rather than update) the cache; the next load refills it.
        CachedCourse13.delete(self._app_context)

    def get_units(self):
        """Returns a defensive copy of the unit list."""
        return self._units[:]

    def get_lessons(self, unit_id):
        """Returns the lessons of the given unit, in indexed order."""
        lesson_ids = self._unit_id_to_lesson_ids.get(str(unit_id))
        lessons = []
        if lesson_ids:
            for lesson_id in lesson_ids:
                lessons.append(self.find_lesson_by_id(None, lesson_id))
        return lessons

    def get_assessment_filename(self, unit_id):
        """Returns assessment base filename."""
        unit = self.find_unit_by_id(unit_id)
        assert unit
        assert verify.UNIT_TYPE_ASSESSMENT == unit.type
        return 'assets/js/assessment-%s.js' % unit.unit_id

    def get_review_form_filename(self, unit_id):
        """Returns review form filename."""
        unit = self.find_unit_by_id(unit_id)
        assert unit
        assert verify.UNIT_TYPE_ASSESSMENT == unit.type
        return 'assets/js/review-%s.js' % unit.unit_id

    def get_activity_filename(self, unused_unit_id, lesson_id):
        """Returns activity base filename, or None if lesson has none."""
        lesson = self.find_lesson_by_id(None, lesson_id)
        assert lesson
        if lesson.has_activity:
            # Unlike the 1.2 model, the filename keys on lesson id only.
            return 'assets/js/activity-%s.js' % lesson_id
        return None

    def find_unit_by_id(self, unit_id):
        """Finds a unit given its id."""
        for unit in self._units:
            if str(unit.unit_id) == str(unit_id):
                return unit
        return None

    def find_lesson_by_id(self, unused_unit, lesson_id):
        """Finds a lesson given its id."""
        for lesson in self._lessons:
            if str(lesson.lesson_id) == str(lesson_id):
                return lesson
        return None

    def add_unit(self, unit_type, title):
        """Adds a brand new unit."""
        assert unit_type in verify.UNIT_TYPES

        unit = Unit13()
        unit.type = unit_type
        unit.unit_id = self._get_next_id()
        unit.title = title
        unit.now_available = False

        self._units.append(unit)
        self._index()

        self._dirty_units.append(unit)

        return unit

    def add_lesson(self, unit, title):
        """Adds brand new lesson to a unit."""
        unit = self.find_unit_by_id(unit.unit_id)
        assert unit

        lesson = Lesson13()
        lesson.lesson_id = self._get_next_id()
        lesson.unit_id = unit.unit_id
        lesson.title = title
        lesson.now_available = False

        self._lessons.append(lesson)
        self._index()

        self._dirty_lessons.append(lesson)

        return lesson

    def move_lesson_to(self, lesson, unit):
        """Moves a lesson to another unit."""
        unit = self.find_unit_by_id(unit.unit_id)
        assert unit
        assert verify.UNIT_TYPE_UNIT == unit.type

        lesson = self.find_lesson_by_id(None, lesson.lesson_id)
        assert lesson
        lesson.unit_id = unit.unit_id

        self._index()

        return lesson

    def _delete_activity(self, lesson):
        """Deletes activity. Returns True if a file was actually removed."""
        filename = self._app_context.fs.impl.physical_to_logical(
            self.get_activity_filename(None, lesson.lesson_id))
        if self.app_context.fs.isfile(filename):
            self.app_context.fs.delete(filename)
            return True
        return False

    def _delete_assessment(self, unit):
        """Deletes assessment. Returns True if any file was removed."""
        files_deleted_count = 0
        # Both the assessment and its review form are owned by the unit.
        filenames = [
            self._app_context.fs.impl.physical_to_logical(
                self.get_assessment_filename(unit.unit_id)),
            self._app_context.fs.impl.physical_to_logical(
                self.get_review_form_filename(unit.unit_id))]
        for filename in filenames:
            if self.app_context.fs.isfile(filename):
                self.app_context.fs.delete(filename)
                files_deleted_count += 1
        return bool(files_deleted_count)

    def delete_all(self):
        """Deletes all course files."""
        for entity in self._app_context.fs.impl.list(
                appengine_config.BUNDLE_ROOT):
            self._app_context.fs.impl.delete(entity)
        assert not self._app_context.fs.impl.list(appengine_config.BUNDLE_ROOT)
        CachedCourse13.delete(self._app_context)

    def delete_lesson(self, lesson):
        """Delete a lesson. Returns False when the lesson is unknown."""
        lesson = self.find_lesson_by_id(None, lesson.lesson_id)
        if not lesson:
            return False
        self._lessons.remove(lesson)
        self._index()
        self._deleted_lessons.append(lesson)
        self._dirty_lessons.append(lesson)
        return True

    def delete_unit(self, unit):
        """Deletes a unit and all of its lessons."""
        unit = self.find_unit_by_id(unit.unit_id)
        if not unit:
            return False
        for lesson in self.get_lessons(unit.unit_id):
            self.delete_lesson(lesson)
        self._units.remove(unit)
        self._index()
        self._deleted_units.append(unit)
        self._dirty_units.append(unit)
        return True

    def update_unit(self, unit):
        """Updates an existing unit."""
        existing_unit = self.find_unit_by_id(unit.unit_id)
        if not existing_unit:
            return False
        existing_unit.title = unit.title
        existing_unit.release_date = unit.release_date
        existing_unit.now_available = unit.now_available

        # Type-specific attributes are copied only where they apply.
        if verify.UNIT_TYPE_LINK == existing_unit.type:
            existing_unit.href = unit.href

        if verify.UNIT_TYPE_ASSESSMENT == existing_unit.type:
            existing_unit.weight = unit.weight
            existing_unit.workflow_yaml = unit.workflow_yaml

        self._dirty_units.append(existing_unit)
        return existing_unit

    def update_lesson(self, lesson):
        """Updates an existing lesson."""
        existing_lesson = self.find_lesson_by_id(
            lesson.unit_id, lesson.lesson_id)
        if not existing_lesson:
            return False
        existing_lesson.title = lesson.title
        existing_lesson.unit_id = lesson.unit_id
        existing_lesson.objectives = lesson.objectives
        existing_lesson.video = lesson.video
        existing_lesson.notes = lesson.notes
        existing_lesson.activity_title = lesson.activity_title

        self._index()

        self._dirty_lessons.append(existing_lesson)
        return existing_lesson

    def reorder_units(self, order_data):
        """Reorder the units and lessons based on the order data given.

        Args:
            order_data: list of dict in the following format:
                [
                    {'id': 0, 'lessons': [{'id': 0}, {'id': 1}, {'id': 2}]},
                    {'id': 1},
                    {'id': 2, 'lessons': [{'id': 0}, {'id': 1}]}
                    ...
                ]
        """
        # First pass: rebuild the unit list; every existing unit must appear
        # exactly once in order_data.
        reordered_units = []
        unit_ids = set()
        for unit_data in order_data:
            unit_id = unit_data['id']
            unit = self.find_unit_by_id(unit_id)
            assert unit
            reordered_units.append(self.find_unit_by_id(unit_id))
            unit_ids.add(unit_id)
        assert len(unit_ids) == len(self._units)
        self._units = reordered_units

        # Second pass: rebuild the flat lesson list from the per-unit lesson
        # ordering; only 'U' units carry lessons.
        reordered_lessons = []
        lesson_ids = set()
        for unit_data in order_data:
            unit_id = unit_data['id']
            unit = self.find_unit_by_id(unit_id)
            assert unit
            if verify.UNIT_TYPE_UNIT != unit.type:
                continue
            for lesson_data in unit_data['lessons']:
                lesson_id = lesson_data['id']
                reordered_lessons.append(
                    self.find_lesson_by_id(None, lesson_id))
                lesson_ids.add((unit_id, lesson_id))
        assert len(lesson_ids) == len(self._lessons)
        self._lessons = reordered_lessons

        self._index()

    def _get_assessment_as_dict(self, filename):
        """Gets the content of an assessment file as a Python dict."""
        path = self._app_context.fs.impl.physical_to_logical(filename)
        root_name = 'assessment'
        # NOTE(review): unlike CourseModel12._get_assessment_as_dict, this
        # passes fs.get(path) directly without calling .read() — presumably
        # fs.get returns the content here; confirm against the vfs layer.
        assessment_content = self.app_context.fs.get(path)
        content, noverify_text = verify.convert_javascript_to_python(
            assessment_content, root_name)
        assessment = verify.evaluate_python_expression_from_text(
            content, root_name, verify.Assessment().scope, noverify_text)
        return assessment

    def get_assessment_content(self, unit):
        """Returns the schema for an assessment as a Python dict."""
        return self._get_assessment_as_dict(
            self.get_assessment_filename(unit.unit_id))

    def get_review_form_content(self, unit):
        """Returns the schema for a review form as a Python dict."""
        return self._get_assessment_as_dict(
            self.get_review_form_filename(unit.unit_id))

    def set_assessment_file_content(
        self, unit, assessment_content, dest_filename, errors=None):
        """Updates the content of an assessment file on the file system.

        Parse/validation failures are reported through the errors list
        rather than raised; on failure nothing is written.
        """
        if errors is None:
            errors = []

        path = self._app_context.fs.impl.physical_to_logical(dest_filename)
        root_name = 'assessment'

        try:
            content, noverify_text = verify.convert_javascript_to_python(
                assessment_content, root_name)
            assessment = verify.evaluate_python_expression_from_text(
                content, root_name, verify.Assessment().scope, noverify_text)
        except Exception:  # pylint: disable-msg=broad-except
            errors.append('Unable to parse %s:\n%s' % (
                root_name,
                str(sys.exc_info()[1])))
            return

        verifier = verify.Verifier()
        try:
            verifier.verify_assessment_instance(assessment, path)
        except verify.SchemaException:
            errors.append('Error validating %s\n' % root_name)
            return

        fs = self.app_context.fs
        fs.put(
            path, vfs.string_to_stream(assessment_content),
            is_draft=not unit.now_available)

    def set_assessment_content(self, unit, assessment_content, errors=None):
        """Updates the content of an assessment."""
        self.set_assessment_file_content(
            unit,
            assessment_content,
            self.get_assessment_filename(unit.unit_id),
            errors=errors
        )

    def set_review_form(self, unit, review_form, errors=None):
        """Sets the content of a review form."""
        self.set_assessment_file_content(
            unit,
            review_form,
            self.get_review_form_filename(unit.unit_id),
            errors=errors
        )

    def set_activity_content(self, lesson, activity_content, errors=None):
        """Updates the content of an activity."""
        if errors is None:
            errors = []

        path = self._app_context.fs.impl.physical_to_logical(
            self.get_activity_filename(lesson.unit_id, lesson.lesson_id))
        root_name = 'activity'

        try:
            content, noverify_text = verify.convert_javascript_to_python(
                activity_content, root_name)
            activity = verify.evaluate_python_expression_from_text(
                content, root_name, verify.Activity().scope, noverify_text)
        except Exception:  # pylint: disable-msg=broad-except
            errors.append('Unable to parse %s:\n%s' % (
                root_name,
                str(sys.exc_info()[1])))
            return

        verifier = verify.Verifier()
        try:
            verifier.verify_activity_instance(activity, path)
        except verify.SchemaException:
            errors.append('Error validating %s\n' % root_name)
            return

        fs = self.app_context.fs
        fs.put(
            path, vfs.string_to_stream(activity_content),
            is_draft=not lesson.now_available)

    def import_from(self, src_course, errors):
        """Imports a content of another course into this course.

        The target course must be on a writable file system and must be
        empty. Returns (src_course, self) on success, (None, None) on error.
        """

        def copy_unit12_into_unit13(src_unit, dst_unit):
            """Copies unit object attributes between versions."""
            assert dst_unit.type == src_unit.type

            dst_unit.title = src_unit.title
            dst_unit.release_date = src_unit.release_date
            dst_unit.now_available = src_unit.now_available

            if verify.UNIT_TYPE_LINK == dst_unit.type:
                dst_unit.href = src_unit.href

            # Copy over the assessment. Note that we copy files directly and
            # avoid all logical validations of their content. This is done for
            # a purpose - at this layer we don't care what is in those files.
            if verify.UNIT_TYPE_ASSESSMENT == dst_unit.type:
                if dst_unit.unit_id in DEFAULT_LEGACY_ASSESSMENT_WEIGHTS:
                    dst_unit.weight = (
                        DEFAULT_LEGACY_ASSESSMENT_WEIGHTS[dst_unit.unit_id])

                filepath_mappings = [{
                    'src': src_course.get_assessment_filename(src_unit.unit_id),
                    'dst': self.get_assessment_filename(dst_unit.unit_id)
                }, {
                    'src': src_course.get_review_form_filename(
                        src_unit.unit_id),
                    'dst': self.get_review_form_filename(dst_unit.unit_id)
                }]

                for mapping in filepath_mappings:
                    src_filename = os.path.join(
                        src_course.app_context.get_home(), mapping['src'])
                    if src_course.app_context.fs.isfile(src_filename):
                        astream = src_course.app_context.fs.open(src_filename)
                        if astream:
                            dst_filename = os.path.join(
                                self.app_context.get_home(), mapping['dst'])
                            self.app_context.fs.put(dst_filename, astream)

                dst_unit.workflow_yaml = src_unit.workflow_yaml

        def copy_lesson12_into_lesson13(
            src_unit, src_lesson, unused_dst_unit, dst_lesson):
            """Copies lessons object attributes between versions."""
            dst_lesson.objectives = src_lesson.objectives
            dst_lesson.video = src_lesson.video
            dst_lesson.notes = src_lesson.notes
            dst_lesson.duration = src_lesson.duration
            dst_lesson.has_activity = src_lesson.activity
            dst_lesson.activity_title = src_lesson.activity_title

            # Old model does not have this flag, but all lessons are available.
            dst_lesson.now_available = True

            # Copy over the activity. Note that we copy files directly and
            # avoid all logical validations of their content. This is done for a
            # purpose - at this layer we don't care what is in those files.
            if src_lesson.activity:
                src_filename = os.path.join(
                    src_course.app_context.get_home(),
                    src_course.get_activity_filename(
                        src_unit.unit_id, src_lesson.lesson_id))
                if src_course.app_context.fs.isfile(src_filename):
                    astream = src_course.app_context.fs.open(src_filename)
                    if astream:
                        dst_filename = os.path.join(
                            self.app_context.get_home(),
                            self.get_activity_filename(
                                None, dst_lesson.lesson_id))
                        self.app_context.fs.put(dst_filename, astream)

        if not is_editable_fs(self._app_context):
            errors.append(
                'Target course %s must be '
                'on read-write media.' % self.app_context.raw)
            return None, None

        if self.get_units():
            errors.append(
                'Target course %s must be '
                'empty.' % self.app_context.raw)
            return None, None

        # Iterate over course structure and assets and import each item.
        for unit in src_course.get_units():
            new_unit = self.add_unit(unit.type, unit.title)
            copy_unit12_into_unit13(unit, new_unit)
            for lesson in src_course.get_lessons(unit.unit_id):
                new_lesson = self.add_lesson(new_unit, lesson.title)
                copy_lesson12_into_lesson13(unit, lesson, new_unit, new_lesson)

        return src_course, self

    def to_json(self):
        """Creates JSON representation of this instance."""
        persistent = PersistentCourse13(
            next_id=self._next_id, units=self._units, lessons=self._lessons)
        return transforms.dumps(
            persistent.to_dict(),
            indent=4, sort_keys=True,
            default=lambda o: o.__dict__)
class Workflow(object):
    """Stores workflow specifications for assessments.

    A workflow is a small YAML document describing how an assessment is
    graded (auto vs. human) and, for human-graded assessments, the review
    matching parameters and due dates.
    """

    def __init__(self, yaml_str):
        """Sets yaml_str (the workflow spec), without doing any validation."""
        self._yaml_str = yaml_str

    def to_yaml(self):
        """Returns the raw YAML specification string."""
        return self._yaml_str

    def to_dict(self):
        """Parses the YAML spec into a dict; {} when the spec is empty.

        NOTE: the YAML is re-parsed on every call, and each get_* accessor
        below calls to_dict(); avoid calling them in tight loops.
        """
        if not self._yaml_str:
            return {}
        obj = yaml.safe_load(self._yaml_str)
        assert isinstance(obj, dict)
        return obj

    def _convert_date_string_to_datetime(self, date_str):
        """Returns a datetime object parsed per ISO_8601_DATE_FORMAT."""
        return datetime.strptime(date_str, ISO_8601_DATE_FORMAT)

    def get_grader(self):
        """Returns the associated grader."""
        return self.to_dict().get(GRADER_KEY)

    def get_matcher(self):
        """Returns the review matcher, or None when not specified."""
        return self.to_dict().get(MATCHER_KEY)

    def get_submission_due_date(self):
        """Returns the submission due date as a datetime, or None."""
        date_str = self.to_dict().get(SUBMISSION_DUE_DATE_KEY)
        if date_str is None:
            return None
        return self._convert_date_string_to_datetime(date_str)

    def get_review_due_date(self):
        """Returns the review due date as a datetime, or None."""
        date_str = self.to_dict().get(REVIEW_DUE_DATE_KEY)
        if date_str is None:
            return None
        return self._convert_date_string_to_datetime(date_str)

    def get_review_min_count(self):
        """Returns the minimum number of reviews required, or None."""
        return self.to_dict().get(REVIEW_MIN_COUNT_KEY)

    def get_review_window_mins(self):
        """Returns the review window length in minutes, or None."""
        return self.to_dict().get(REVIEW_WINDOW_MINS_KEY)

    def _ensure_value_is_nonnegative_int(self, workflow_dict, key, errors):
        """Checks that workflow_dict[key] is a non-negative integer."""
        value = workflow_dict[key]
        if not isinstance(value, int):
            errors.append('%s should be an integer' % key)
        elif value < 0:
            errors.append('%s should be a non-negative integer' % key)

    def validate(self, errors=None):
        """Tests whether the current Workflow object is valid.

        Returns True when valid; otherwise appends a message to errors and
        returns False. Internally uses assertions as control flow: any
        failed check raises and is converted into an error string below.
        """
        if errors is None:
            errors = []

        try:
            # Validate the workflow specification (in YAML format).
            assert self._yaml_str, 'missing key: %s.' % GRADER_KEY
            workflow_dict = yaml.safe_load(self._yaml_str)
            assert isinstance(workflow_dict, dict), (
                'expected the YAML representation of a dict')

            assert GRADER_KEY in workflow_dict, 'missing key: %s.' % GRADER_KEY
            assert workflow_dict[GRADER_KEY] in ALLOWED_GRADERS, (
                'invalid grader, should be one of: %s' %
                ', '.join(ALLOWED_GRADERS))

            # Human-graded assessments carry extra mandatory keys.
            if workflow_dict[GRADER_KEY] == HUMAN_GRADER:
                missing_keys = []
                for key in HUMAN_GRADED_ASSESSMENT_KEY_LIST:
                    if key not in workflow_dict:
                        missing_keys.append(key)
                assert not missing_keys, (
                    'missing key(s) for a human-reviewed assessment: %s.' %
                    ', '.join(missing_keys))

                workflow_errors = []
                if (workflow_dict[MATCHER_KEY] not in
                    review.ALLOWED_MATCHERS):
                    workflow_errors.append(
                        'invalid matcher, should be one of: %s' %
                        ', '.join(review.ALLOWED_MATCHERS))

                self._ensure_value_is_nonnegative_int(
                    workflow_dict, REVIEW_MIN_COUNT_KEY, workflow_errors)
                self._ensure_value_is_nonnegative_int(
                    workflow_dict, REVIEW_WINDOW_MINS_KEY, workflow_errors)

                try:
                    submission_due_date = self._convert_date_string_to_datetime(
                        workflow_dict[SUBMISSION_DUE_DATE_KEY])
                    review_due_date = self._convert_date_string_to_datetime(
                        workflow_dict[REVIEW_DUE_DATE_KEY])
                    if submission_due_date > review_due_date:
                        workflow_errors.append(
                            'submission due date should be earlier than '
                            'review due date')
                except Exception as e:  # pylint: disable-msg=broad-except
                    workflow_errors.append(
                        'dates should be formatted as YYYY-MM-DD hh:mm '
                        '(e.g. 1997-07-16 19:20) and be specified in the UTC '
                        'timezone')

                if workflow_errors:
                    raise Exception('%s.' % '; '.join(workflow_errors))

            return True
        except Exception as e:  # pylint: disable-msg=broad-except
            errors.append('Error validating workflow specification: %s' % e)
            return False
class Course(object):
"""Manages a course and all of its components."""
    @classmethod
    def get_environ(cls, app_context):
        """Returns currently defined course settings as a dictionary.

        Falls back to DEFAULT_COURSE_YAML_DICT when the course.yaml file is
        missing, unreadable, or empty; otherwise merges it over the default
        settings for an existing course.
        """
        course_yaml = None
        course_yaml_dict = None
        course_data_filename = app_context.get_config_filename()
        if app_context.fs.isfile(course_data_filename):
            course_yaml = app_context.fs.open(course_data_filename)
        if not course_yaml:
            return DEFAULT_COURSE_YAML_DICT
        try:
            course_yaml_dict = yaml.safe_load(
                course_yaml.read().decode('utf-8'))
        except Exception as e:  # pylint: disable-msg=broad-except
            # A broken course.yaml is logged but never fatal; defaults win.
            logging.info(
                'Error: course.yaml file at %s not accessible, '
                'loading defaults. %s', course_data_filename, e)
        if not course_yaml_dict:
            return DEFAULT_COURSE_YAML_DICT
        return deep_dict_merge(
            course_yaml_dict, DEFAULT_EXISTING_COURSE_YAML_DICT)
    @property
    def version(self):
        """The VERSION string of the underlying course model."""
        return self._model.VERSION

    @classmethod
    def create_new_default_course(cls, app_context):
        """Creates an empty editable (version 1.3) course model."""
        return CourseModel13(app_context)
@classmethod
def custom_new_default_course_for_test(cls, app_context):
# There is an expectation in our tests of automatic import
# of data/*.csv files. This method can be used in tests to achieve
# exactly that.
model = CourseModel12.load(app_context)
if model:
return model
return CourseModel13(app_context)
@classmethod
def _load(cls, app_context):
"""Loads course data from persistence storage into this instance."""
if not is_editable_fs(app_context):
model = CourseModel12.load(app_context)
if model:
return model
else:
model = CourseModel13.load(app_context)
if model:
return model
return cls.create_new_default_course(app_context)
def __init__(self, handler, app_context=None):
self._app_context = app_context if app_context else handler.app_context
self._namespace = self._app_context.get_namespace_name()
self._model = self._load(self._app_context)
self._tracker = None
self._reviews_processor = None
    @property
    def app_context(self):
        """The application context this course is bound to."""
        return self._app_context

    def to_json(self):
        """Returns a JSON representation of the underlying course model."""
        return self._model.to_json()
    def get_progress_tracker(self):
        """Lazily creates and caches the unit/lesson completion tracker."""
        if not self._tracker:
            self._tracker = progress.UnitLessonCompletionTracker(self)
        return self._tracker

    def get_reviews_processor(self):
        """Lazily creates and caches the reviews processor."""
        if not self._reviews_processor:
            self._reviews_processor = review.ReviewsProcessor(self)
        return self._reviews_processor
    def get_units(self):
        """Returns the list of units from the underlying model."""
        return self._model.get_units()

    def get_lessons(self, unit_id):
        """Returns the lessons of the given unit."""
        return self._model.get_lessons(unit_id)

    def save(self):
        """Persists any pending model changes."""
        return self._model.save()

    def find_unit_by_id(self, unit_id):
        """Finds a unit given its id."""
        return self._model.find_unit_by_id(unit_id)

    def find_lesson_by_id(self, unit, lesson_id):
        """Finds a lesson given its unit and id."""
        return self._model.find_lesson_by_id(unit, lesson_id)
def is_last_assessment(self, unit):
"""Checks whether the given unit is the last of all the assessments."""
for current_unit in reversed(self.get_units()):
if current_unit.type == verify.UNIT_TYPE_ASSESSMENT:
return current_unit.unit_id == unit.unit_id
return False
    def add_unit(self):
        """Adds new unit to a course."""
        return self._model.add_unit('U', 'New Unit')

    def add_link(self):
        """Adds new link (other) to a course."""
        return self._model.add_unit('O', 'New Link')

    def add_assessment(self):
        """Adds new assessment to a course."""
        return self._model.add_unit('A', 'New Assessment')

    def add_lesson(self, unit):
        """Adds a new lesson to the given unit."""
        return self._model.add_lesson(unit, 'New Lesson')
    def update_unit(self, unit):
        """Updates an existing unit from the given object."""
        return self._model.update_unit(unit)

    def update_lesson(self, lesson):
        """Updates an existing lesson from the given object."""
        return self._model.update_lesson(lesson)

    def move_lesson_to(self, lesson, unit):
        """Moves a lesson into another unit."""
        return self._model.move_lesson_to(lesson, unit)

    def delete_all(self):
        """Deletes all course files."""
        return self._model.delete_all()

    def delete_unit(self, unit):
        """Deletes a unit and its lessons."""
        return self._model.delete_unit(unit)

    def delete_lesson(self, lesson):
        """Deletes a lesson."""
        return self._model.delete_lesson(lesson)
def get_score(self, student, assessment_id):
"""Gets a student's score for a particular assessment."""
assert self.is_valid_assessment_id(assessment_id)
scores = transforms.loads(student.scores) if student.scores else {}
return scores.get(assessment_id) if scores else None
def get_overall_score(self, student):
"""Gets the overall course score for a student."""
score_list = self.get_all_scores(student)
overall_score = 0
total_weight = 0
for unit in score_list:
if not unit['human_graded']:
total_weight += unit['weight']
overall_score += unit['weight'] * unit['score']
if total_weight == 0:
return None
return int(float(overall_score) / total_weight)
def get_overall_result(self, student):
"""Gets the overall result based on a student's score profile."""
score = self.get_overall_score(student)
if score is None:
return None
# This can be replaced with a custom definition for an overall result
# string.
return 'pass' if self.get_overall_score(student) >= 70 else 'fail'
    def get_all_scores(self, student):
        """Gets all score data for a student.

        Args:
            student: the student whose scores should be retrieved.

        Returns:
            an array of dicts, each representing an assessment. Each dict has
            the keys 'id', 'title', 'weight' and 'score' (if available),
            representing the unit id, the assessment title, the weight
            contributed by the assessment to the final score, and the
            assessment score.
        """
        assessment_list = self.get_assessment_list()
        scores = transforms.loads(student.scores) if student.scores else {}
        unit_progress = self.get_progress_tracker().get_unit_progress(student)

        assessment_score_list = []
        for unit in assessment_list:
            # Compute the weight for this assessment: prefer the unit's own
            # weight attribute, falling back to the legacy defaults table.
            weight = 0
            if hasattr(unit, 'weight'):
                weight = unit.weight
            elif unit.unit_id in DEFAULT_LEGACY_ASSESSMENT_WEIGHTS:
                weight = DEFAULT_LEGACY_ASSESSMENT_WEIGHTS[unit.unit_id]

            completed = unit_progress[unit.unit_id]

            # If a human-reviewed assessment is completed, ensure that the
            # required reviews have also been completed.
            if completed and self.needs_human_grader(unit):
                reviews = self.get_reviews_processor().get_review_steps_by(
                    unit.unit_id, student.get_key())
                review_min_count = unit.workflow.get_review_min_count()
                if not review.ReviewUtils.has_completed_enough_reviews(
                        reviews, review_min_count):
                    completed = False

            assessment_score_list.append({
                'id': str(unit.unit_id),
                'title': unit.title,
                'weight': weight,
                'completed': completed,
                'human_graded': self.needs_human_grader(unit),
                # Missing scores default to 0 rather than None.
                'score': (scores[str(unit.unit_id)]
                          if str(unit.unit_id) in scores else 0),
            })

        return assessment_score_list
def get_assessment_list(self):
    """Returns a list of units that are assessments."""
    # TODO(psimakov): Streamline this so that it does not require a full
    # iteration on each request, probably by modifying the index() method.
    assessments = [
        unit for unit in self.get_units()
        if unit.type == verify.UNIT_TYPE_ASSESSMENT]
    # Deep-copy so callers cannot mutate the course's own unit objects.
    return copy.deepcopy(assessments)
def get_peer_reviewed_units(self):
    """Returns a list of units that are peer-reviewed assessments.

    Returns:
        A list of units that are peer-reviewed assessments. Each unit
        in the list has a unit_id of type string.
    """
    def _is_peer_reviewed(unit):
        # Peer review means a human grader combined with the peer matcher.
        return (unit.workflow.get_grader() == HUMAN_GRADER and
                unit.workflow.get_matcher() == review.PEER_MATCHER)

    peer_reviewed = copy.deepcopy(
        [unit for unit in self.get_assessment_list()
         if _is_peer_reviewed(unit)])
    # Normalize unit ids to strings for downstream consumers.
    for unit in peer_reviewed:
        unit.unit_id = str(unit.unit_id)
    return peer_reviewed
def get_assessment_filename(self, unit_id):
    """Delegates assessment-filename lookup to the underlying model."""
    model = self._model
    return model.get_assessment_filename(unit_id)
def get_review_form_filename(self, unit_id):
    """Delegates review-form-filename lookup to the underlying model."""
    model = self._model
    return model.get_review_form_filename(unit_id)
def get_activity_filename(self, unit_id, lesson_id):
    """Delegates activity-filename lookup to the underlying model."""
    model = self._model
    return model.get_activity_filename(unit_id, lesson_id)
def needs_human_grader(self, unit):
    """Whether the unit's workflow requires a human grader."""
    grader = unit.workflow.get_grader()
    return grader == HUMAN_GRADER
def reorder_units(self, order_data):
    """Delegates unit reordering to the underlying model."""
    model = self._model
    return model.reorder_units(order_data)
def get_assessment_content(self, unit):
    """Returns the schema for an assessment as a Python dict."""
    model = self._model
    return model.get_assessment_content(unit)
def get_review_form_content(self, unit):
    """Returns the schema for a review form as a Python dict."""
    model = self._model
    return model.get_review_form_content(unit)
def set_assessment_content(self, unit, assessment_content, errors=None):
    """Delegates storing of assessment content to the underlying model."""
    model = self._model
    return model.set_assessment_content(
        unit, assessment_content, errors=errors)
def set_review_form(self, unit, review_form, errors=None):
    """Delegates storing of a review form to the underlying model."""
    model = self._model
    return model.set_review_form(unit, review_form, errors=errors)
def set_activity_content(self, lesson, activity_content, errors=None):
    """Delegates storing of activity content to the underlying model."""
    model = self._model
    return model.set_activity_content(
        lesson, activity_content, errors=errors)
def is_valid_assessment_id(self, assessment_id):
    """Tests whether the given assessment id is valid."""
    # Compare ids as strings so int and string ids match each other.
    target = str(assessment_id)
    for unit in self.get_units():
        if (unit.type == verify.UNIT_TYPE_ASSESSMENT and
                str(unit.unit_id) == target):
            return True
    return False
def is_valid_unit_lesson_id(self, unit_id, lesson_id):
    """Tests whether the given unit id and lesson id are valid."""
    # Compare ids as strings so int and string ids match each other.
    unit_key = str(unit_id)
    lesson_key = str(lesson_id)
    for unit in self.get_units():
        if str(unit.unit_id) != unit_key:
            continue
        # Unit matched; now look for the lesson within it.
        if any(str(lesson.lesson_id) == lesson_key
               for lesson in self.get_lessons(unit_id)):
            return True
    return False
def import_from(self, app_context, errors=None):
    """Import course structure and assets from another course."""
    source = Course(None, app_context=app_context)
    if errors is None:
        errors = []

    # Supported migrations are 1.2 -> 1.3 and 1.3 -> 1.3; both are
    # performed by the destination (1.3) model.
    can_import = (
        self.version == CourseModel13.VERSION and
        source.version in (CourseModel12.VERSION, CourseModel13.VERSION))
    if can_import:
        return self._model.import_from(source, errors)

    errors.append(
        'Import of '
        'course %s (version %s) into '
        'course %s (version %s) '
        'is not supported.' % (
            app_context.raw, source.version,
            self.app_context.raw, self.version))
    return None, None
def get_course_announcement_list_email(self):
    """Get Announcement email address for the course."""
    env = self.get_environ(self._app_context)
    if not env or 'course' not in env:
        return None
    # A missing or empty address is treated the same as no setting.
    email = env['course'].get('announcement_list_email')
    return email if email else None
def init_new_course_settings(self, title, admin_email):
    """Initializes a new course.yaml file if it does not yet exist.

    Args:
        title: the course title to record in course.yaml.
        admin_email: the admin user email to record in course.yaml.

    Returns:
        False if a course.yaml already exists (nothing is written),
        True after a new course.yaml has been created.
    """
    fs = self.app_context.fs.impl
    course_yaml = fs.physical_to_logical('/course.yaml')
    # Never overwrite an existing settings file.
    if fs.isfile(course_yaml):
        return False
    # Double single quotes to escape them inside the single-quoted
    # YAML scalar below.
    title = title.replace('\'', '\'\'')
    # NOTE(review): the indentation inside this YAML literal was lost in
    # this copy of the file; restored assuming the keys nest under
    # 'course:' — confirm against the original source.
    course_yaml_text = u"""# my new course.yaml
course:
  title: '%s'
  admin_user_emails: '[%s]'
  now_available: False
""" % (title, admin_email)
    fs.put(course_yaml, vfs.string_to_stream(course_yaml_text))
    return True
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.