code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
# coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/dashboard/analytics."""
__author__ = 'Julia Oh(juliaoh@google.com)'
import os
import appengine_config
from controllers import sites
from controllers import utils
from models import config
from models import courses
from models import models
from models import transforms
from models.progress import ProgressStats
from models.progress import UnitLessonCompletionTracker
from modules.dashboard import analytics
import actions
from actions import assert_contains
from actions import assert_does_not_contain
from actions import assert_equals
class ProgressAnalyticsTest(actions.TestBase):
    """Tests the progress analytics page on the Course Author dashboard."""

    def enable_progress_tracking(self):
        # Override the config property so student activity events are
        # persisted; without this no progress is recorded for analytics.
        config.Registry.test_overrides[
            utils.CAN_PERSIST_ACTIVITY_EVENTS.name] = True

    def test_empty_student_progress_stats_analytics_displays_nothing(self):
        """Test analytics page on course dashboard when no progress stats."""
        # The admin looks at the analytics page on the board to check right
        # message when no progress has been recorded.
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        response = self.get('dashboard?action=analytics')
        assert_contains(
            'Google > Dashboard > Analytics', response.body)
        assert_contains('have not been calculated yet', response.body)

        # Kick off the stats computation job via the dashboard form.
        compute_form = response.forms['gcb-compute-student-stats']
        response = self.submit(compute_form)
        assert_equals(response.status_int, 302)
        # Starting the job enqueues deferred tasks on the default queue.
        assert len(self.taskq.GetTasks('default')) == 4

        response = self.get('dashboard?action=analytics')
        assert_contains('is running', response.body)

        self.execute_all_deferred_tasks()

        # With no student activity the page reports zero enrollment and an
        # explicit 'no progress' message.
        response = self.get('dashboard?action=analytics')
        assert_contains('were last updated at', response.body)
        assert_contains('currently enrolled: 0', response.body)
        assert_contains('total: 0', response.body)
        assert_contains('Student Progress Statistics', response.body)
        assert_contains(
            'No student progress has been recorded for this course.',
            response.body)
        actions.logout()

    def test_student_progress_stats_analytics_displays_on_dashboard(self):
        """Test analytics page on course dashboard."""
        self.enable_progress_tracking()

        student1 = 'student1@google.com'
        name1 = 'Test Student 1'
        student2 = 'student2@google.com'
        name2 = 'Test Student 2'

        # Student 1 completes a unit.
        actions.login(student1)
        actions.register(self, name1)
        actions.view_unit(self)
        actions.logout()

        # Student 2 completes a unit.
        actions.login(student2)
        actions.register(self, name2)
        actions.view_unit(self)
        actions.logout()

        # Admin logs back in and checks if progress exists.
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        response = self.get('dashboard?action=analytics')
        assert_contains(
            'Google > Dashboard > Analytics', response.body)
        assert_contains('have not been calculated yet', response.body)

        compute_form = response.forms['gcb-compute-student-stats']
        response = self.submit(compute_form)
        assert_equals(response.status_int, 302)
        assert len(self.taskq.GetTasks('default')) == 4

        response = self.get('dashboard?action=analytics')
        assert_contains('is running', response.body)

        self.execute_all_deferred_tasks()

        response = self.get('dashboard?action=analytics')
        assert_contains('were last updated at', response.body)
        assert_contains('currently enrolled: 2', response.body)
        assert_contains('total: 2', response.body)
        assert_contains('Student Progress Statistics', response.body)
        assert_does_not_contain(
            'No student progress has been recorded for this course.',
            response.body)
        # JSON code for the completion statistics.
        assert_contains(
            '\\"u.1.l.1\\": {\\"progress\\": 0, \\"completed\\": 2}',
            response.body)
        assert_contains(
            '\\"u.1\\": {\\"progress\\": 2, \\"completed\\": 0}',
            response.body)

    def test_get_entity_id_wrapper_in_progress_works(self):
        """Tests get_entity_id wrappers in progress.ProgressStats."""
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        progress_stats = ProgressStats(course)
        unit1 = course.add_unit()
        # pylint: disable-msg=protected-access
        assert_equals(
            progress_stats._get_unit_ids_of_type_unit(), [unit1.unit_id])
        assessment1 = course.add_assessment()
        assert_equals(
            progress_stats._get_assessment_ids(), [assessment1.unit_id])
        lesson11 = course.add_lesson(unit1)
        lesson12 = course.add_lesson(unit1)
        assert_equals(
            progress_stats._get_lesson_ids(unit1.unit_id),
            [lesson11.lesson_id, lesson12.lesson_id])
        # Only lesson11 gets an activity, so only it yields activity ids.
        lesson11.has_activity = True
        course.set_activity_content(lesson11, u'var activity=[]', [])
        assert_equals(
            progress_stats._get_activity_ids(unit1.unit_id, lesson11.lesson_id),
            [0])
        assert_equals(
            progress_stats._get_activity_ids(unit1.unit_id, lesson12.lesson_id),
            [])

    def test_get_entity_label_wrapper_in_progress_works(self):
        """Tests get_entity_label wrappers in progress.ProgressStats."""
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        progress_stats = ProgressStats(course)
        unit1 = course.add_unit()
        # pylint: disable-msg=protected-access
        assert_equals(
            progress_stats._get_unit_label(unit1.unit_id),
            'Unit %s' % unit1.index)
        assessment1 = course.add_assessment()
        assert_equals(
            progress_stats._get_assessment_label(assessment1.unit_id),
            assessment1.title)
        lesson11 = course.add_lesson(unit1)
        lesson12 = course.add_lesson(unit1)
        assert_equals(
            progress_stats._get_lesson_label(unit1.unit_id, lesson11.lesson_id),
            lesson11.index)
        lesson11.has_activity = True
        course.set_activity_content(lesson11, u'var activity=[]', [])
        # Activity labels take the form 'L<unit index>.<lesson index>'.
        assert_equals(
            progress_stats._get_activity_label(
                unit1.unit_id, lesson11.lesson_id, 0), 'L1.1')
        assert_equals(
            progress_stats._get_activity_label(
                unit1.unit_id, lesson12.lesson_id, 0), 'L1.2')
        # Only <question> and <question-group> instance ids count as
        # components; the youtube tag and free text are ignored.
        lesson12.objectives = """
<question quid="123" weight="1" instanceid=1></question>
random_text
<gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube>
more_random_text
<question-group qgid="456" instanceid=2></question-group>
yet_more_random_text
"""
        assert_equals(
            progress_stats._get_component_ids(
                unit1.unit_id, lesson12.lesson_id, 0), [u'1', u'2'])

    def test_compute_entity_dict_constructs_dict_correctly(self):
        """Tests the entity dict for a course with no units or assessments."""
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        progress_stats = ProgressStats(course)
        course_dict = progress_stats.compute_entity_dict('course', [])
        assert_equals(course_dict, {
            'label': 'UNTITLED COURSE', 'u': {}, 's': {}})

    def test_compute_entity_dict_constructs_dict_for_empty_course_correctly(
            self):
        """Tests correct entity_structure is built."""
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        unit1 = course.add_unit()
        assessment1 = course.add_assessment()
        progress_stats = ProgressStats(course)
        # pylint: disable-msg=g-inconsistent-quotes
        assert_equals(
            progress_stats.compute_entity_dict('course', []),
            {'label': 'UNTITLED COURSE', 'u': {unit1.unit_id: {
                'label': 'Unit %s' % unit1.index, 'l': {}}}, 's': {
                    assessment1.unit_id: {'label': assessment1.title}}})
        # Adding a lesson introduces an 'l' entry with empty activity ('a')
        # and html ('h') component maps.
        lesson11 = course.add_lesson(unit1)
        assert_equals(
            progress_stats.compute_entity_dict('course', []),
            {
                "s": {
                    assessment1.unit_id: {
                        "label": assessment1.title
                    }
                },
                "u": {
                    unit1.unit_id: {
                        "l": {
                            lesson11.lesson_id: {
                                "a": {},
                                "h": {
                                    0: {
                                        "c": {},
                                        "label": "L1.1"
                                    }
                                },
                                "label": lesson11.index
                            }
                        },
                        "label": "Unit %s" % unit1.index
                    }
                },
                'label': 'UNTITLED COURSE'
            })
        lesson11.objectives = """
<question quid="123" weight="1" instanceid="1"></question>
random_text
<gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube>
more_random_text
<question-group qgid="456" instanceid="2"></question-group>
yet_more_random_text
"""
        # Question components from the lesson body now appear under 'c',
        # labeled 'L<unit>.<lesson>.<component number>'.
        assert_equals(
            progress_stats.compute_entity_dict('course', []),
            {
                "s": {
                    assessment1.unit_id: {
                        "label": assessment1.title
                    }
                },
                "u": {
                    unit1.unit_id: {
                        "l": {
                            lesson11.lesson_id: {
                                "a": {},
                                "h": {
                                    0: {
                                        "c": {
                                            u'1': {
                                                "label": "L1.1.1"
                                            },
                                            u'2': {
                                                "label": "L1.1.2"
                                            }
                                        },
                                        "label": "L1.1"
                                    }
                                },
                                "label": lesson11.index
                            }
                        },
                        "label": "Unit %s" % unit1.index
                    }
                },
                "label": 'UNTITLED COURSE'
            })
class QuestionAnalyticsTest(actions.TestBase):
    """Tests the question analytics page from Course Author dashboard."""

    def _enable_activity_tracking(self):
        # Persist student activity events; otherwise no question statistics
        # accumulate.
        config.Registry.test_overrides[
            utils.CAN_PERSIST_ACTIVITY_EVENTS.name] = True

    def _get_sample_v15_course(self):
        """Creates a course with different types of questions and returns it."""
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        unit1 = course.add_unit()
        lesson1 = course.add_lesson(unit1)
        assessment_old = course.add_assessment()
        assessment_old.title = 'Old assessment'
        assessment_new = course.add_assessment()
        assessment_new.title = 'New assessment'
        assessment_peer = course.add_assessment()
        assessment_peer.title = 'Peer review assessment'

        # Create a multiple choice question.
        mcq_new_id = 1
        mcq_new_dict = {
            'description': 'mcq_new',
            'type': 0,  # Multiple choice question.
            'choices': [{
                'text': 'answer',
                'score': 1.0
            }],
            'version': '1.5'
        }
        mcq_new_dto = models.QuestionDTO(mcq_new_id, mcq_new_dict)

        # Create a short answer question.
        frq_new_id = 2
        frq_new_dict = {
            'defaultFeedback': '',
            'rows': 1,
            'description': 'short answer',
            'hint': '',
            'graders': [{
                'matcher': 'case_insensitive',
                'score': '1.0',
                'response': 'hi',
                'feedback': ''
            }],
            'question': 'short answer question',
            'version': '1.5',
            'type': 1,  # Short answer question.
            'columns': 100
        }
        frq_new_dto = models.QuestionDTO(frq_new_id, frq_new_dict)

        # Save these questions to datastore.
        models.QuestionDAO.save_all([mcq_new_dto, frq_new_dto])

        # Create a question group. Note the MC question appears twice
        # (items 0 and 2), which the analytics must report separately.
        question_group_id = 3
        question_group_dict = {
            'description': 'question_group',
            'items': [
                {'question': str(mcq_new_id)},
                {'question': str(frq_new_id)},
                {'question': str(mcq_new_id)}
            ],
            'version': '1.5',
            'introduction': ''
        }
        question_group_dto = models.QuestionGroupDTO(
            question_group_id, question_group_dict)

        # Save the question group to datastore.
        models.QuestionGroupDAO.save_all([question_group_dto])

        # Add a MC question and a question group to lesson1.
        lesson1.objectives = """
<question quid="1" weight="1" instanceid="QN"></question>
random_text
<gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube>
more_random_text
<question-group qgid="3" instanceid="QG"></question-group>
"""
        # Add a MC question, a short answer question, and a question group to
        # new style assessment.
        assessment_new.html_content = """
<question quid="1" weight="1" instanceid="QN2"></question>
<question quid="2" weight="1" instanceid="FRQ2"></question>
random_text
<gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube>
more_random_text
<question-group qgid="3" instanceid="QG2"></question-group>
"""
        return course

    def test_get_summarized_question_list_from_event(self):
        """Tests the transform functions per event type."""
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        question_aggregator = (
            analytics.ComputeQuestionStats.MultipleChoiceQuestionAggregator(
                course))
        # Golden event payloads and their expected transforms live in a
        # checked-in JSON fixture.
        event_payloads = open(os.path.join(
            appengine_config.BUNDLE_ROOT,
            'tests/unit/common/event_payloads.json')).read()
        event_payload_dict = transforms.loads(event_payloads)
        for event_info in event_payload_dict.values():
            # pylint: disable-msg=protected-access
            questions = question_aggregator._process_event(
                event_info['event_source'], event_info['event_data'])
            assert_equals(questions, event_info['transformed_dict_list'])

    def test_compute_question_stats_on_empty_course_returns_empty_dicts(self):
        sites.setup_courses('course:/test::ns_test, course:/:/')
        app_context = sites.get_all_courses()[0]
        question_stats_computer = analytics.ComputeQuestionStats(app_context)
        id_to_questions, id_to_assessments = question_stats_computer.run()
        assert_equals({}, id_to_questions)
        assert_equals({}, id_to_assessments)

    def test_id_to_question_dict_constructed_correctly(self):
        """Tests id_to_question dicts are constructed correctly."""
        course = self._get_sample_v15_course()
        tracker = UnitLessonCompletionTracker(course)
        # The duplicated MC question in the group shows up twice, keyed by
        # its item index ('i.0' and 'i.2'); the short answer question (i.1)
        # is excluded from the multiple-choice stats.
        assert_equals(
            tracker.get_id_to_questions_dict(),
            {
                'u.1.l.2.c.QN': {
                    'answer_counts': [0],
                    'label': 'Unit 1 Lesson 1, Question mcq_new',
                    'location': 'unit?unit=1&lesson=2',
                    'num_attempts': 0,
                    'score': 0
                },
                'u.1.l.2.c.QG.i.0': {
                    'answer_counts': [0],
                    'label': ('Unit 1 Lesson 1, Question Group question_group '
                              'Question mcq_new'),
                    'location': 'unit?unit=1&lesson=2',
                    'num_attempts': 0,
                    'score': 0
                },
                'u.1.l.2.c.QG.i.2': {
                    'answer_counts': [0],
                    'label': ('Unit 1 Lesson 1, Question Group question_group '
                              'Question mcq_new'),
                    'location': 'unit?unit=1&lesson=2',
                    'num_attempts': 0,
                    'score': 0
                }
            }
        )
        assert_equals(
            tracker.get_id_to_assessments_dict(),
            {
                's.4.c.QN2': {
                    'answer_counts': [0],
                    'label': 'New assessment, Question mcq_new',
                    'location': 'assessment?name=4',
                    'num_attempts': 0,
                    'score': 0
                },
                's.4.c.QG2.i.0': {
                    'answer_counts': [0],
                    'label': ('New assessment, Question Group question_group '
                              'Question mcq_new'),
                    'location': 'assessment?name=4',
                    'num_attempts': 0,
                    'score': 0
                },
                's.4.c.QG2.i.2': {
                    'answer_counts': [0],
                    'label': ('New assessment, Question Group question_group '
                              'Question mcq_new'),
                    'location': 'assessment?name=4',
                    'num_attempts': 0,
                    'score': 0
                }
            }
        )
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/search/."""
__author__ = 'Ellis Michael (emichael@google.com)'
import datetime
import logging
import re
from controllers import sites
from models import courses
from models import custom_modules
from modules.announcements import announcements
from modules.search import search
from tests.unit import modules_search as search_unit_test
import actions
from google.appengine.api import namespace_manager
class SearchTest(search_unit_test.SearchTestBase):
    """Tests the search module."""

    # Don't require documentation for self-describing test methods.
    # pylint: disable-msg=g-missing-docstring

    @classmethod
    def enable_module(cls):
        custom_modules.Registry.registered_modules[
            search.MODULE_NAME].enable()
        assert search.custom_module.enabled

    @classmethod
    def disable_module(cls):
        custom_modules.Registry.registered_modules[
            search.MODULE_NAME].disable()
        assert not search.custom_module.enabled

    @classmethod
    def get_xsrf_token(cls, body, form_name):
        # Scrape the hidden XSRF token value that follows the named form in
        # the dashboard HTML.
        match = re.search(form_name + r'.+[\n\r].+value="([^"]+)"', body)
        assert match
        return match.group(1)

    def index_test_course(self):
        # Triggers indexing of the /test course and drains the task queue so
        # subsequent searches see the finished index.
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        response = self.get('/test/dashboard?action=search')
        index_token = self.get_xsrf_token(response.body, 'gcb-index-course')
        response = self.post('/test/dashboard?action=index_course',
                             {'xsrf_token': index_token})
        self.execute_all_deferred_tasks()

    def setUp(self):  # Name set by parent. pylint: disable-msg=g-bad-name
        super(SearchTest, self).setUp()
        self.enable_module()

        # Capture logging.error output (via self.swap in tests) so tests can
        # assert on what was logged.
        self.logged_error = ''

        def error_report(string, *args, **unused_kwargs):
            self.logged_error = string % args
        self.error_report = error_report

    def test_module_disabled(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        self.disable_module()

        # The search endpoint 404s and the dashboard hides the index/clear
        # controls when the module is off.
        response = self.get('/search?query=lorem', expect_errors=True)
        self.assertEqual(response.status_code, 404)

        response = self.get('dashboard?action=search')
        self.assertIn('Google > Dashboard > Search', response.body)
        self.assertNotIn('Index Course', response.body)
        self.assertNotIn('Clear Index', response.body)

    def test_module_enabled(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)

        response = self.get('course')
        self.assertIn('gcb-search-box', response.body)

        response = self.get('/search?query=lorem')
        self.assertEqual(response.status_code, 200)

        response = self.get('dashboard?action=search')
        self.assertIn('Google > Dashboard > Search', response.body)
        self.assertIn('Index Course', response.body)
        self.assertIn('Clear Index', response.body)

    def test_indexing_and_clearing_buttons(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        response = self.get('dashboard?action=search')
        index_token = self.get_xsrf_token(response.body, 'gcb-index-course')
        clear_token = self.get_xsrf_token(response.body, 'gcb-clear-index')

        response = self.post('dashboard?action=index_course',
                             {'xsrf_token': index_token})
        self.assertEqual(response.status_int, 302)
        response = self.post('dashboard?action=clear_index',
                             {'xsrf_token': clear_token})
        self.assertEqual(response.status_int, 302)

        # Posting without an XSRF token must be rejected.
        response = self.post('dashboard?action=index_course', {},
                             expect_errors=True)
        assert response.status_int == 403
        response = self.post('dashboard?action=clear_index', {},
                             expect_errors=True)
        assert response.status_int == 403

    def test_index_search_clear(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        response = self.get('dashboard?action=search')
        index_token = self.get_xsrf_token(response.body, 'gcb-index-course')
        clear_token = self.get_xsrf_token(response.body, 'gcb-clear-index')
        response = self.post('dashboard?action=index_course',
                             {'xsrf_token': index_token})
        self.execute_all_deferred_tasks()

        # weather is a term found in the Power Searching Course and should not
        # be in the HTML returned by the patched urlfetch in SearchTestBase
        response = self.get('search?query=weather')
        self.assertNotIn('gcb-search-result', response.body)

        # This term should be present as it is in the dummy content.
        response = self.get('search?query=cogito%20ergo%20sum')
        self.assertIn('gcb-search-result', response.body)

        response = self.post('dashboard?action=clear_index',
                             {'xsrf_token': clear_token})
        self.execute_all_deferred_tasks()

        # After the index is cleared, it shouldn't match anything
        response = self.get('search?query=cogito%20ergo%20sum')
        self.assertNotIn('gcb-search-result', response.body)

    def test_bad_search(self):
        email = 'user@google.com'
        actions.login(email, is_admin=False)

        # %3A is a colon, and searching for only punctuation will cause App
        # Engine's search to throw an error that should be handled
        response = self.get('search?query=%3A')
        self.assertEqual(response.status_int, 200)
        self.assertIn('gcb-search-info', response.body)

    def test_errors_not_displayed_to_user(self):
        exception_code = '0xDEADBEEF'

        def bad_fetch(*unused_vargs, **unused_kwargs):
            raise Exception(exception_code)
        self.swap(search, 'fetch', bad_fetch)
        self.swap(logging, 'error', self.error_report)

        # The user sees a generic 'unavailable' message; the real exception
        # is only written to the error log.
        response = self.get('search?query=cogito')
        self.assertEqual(response.status_int, 200)
        self.assertIn('unavailable', response.body)
        self.assertNotIn('gcb-search-result', response.body)
        self.assertIn('gcb-search-info', response.body)
        self.assertIn(exception_code, self.logged_error)

    def test_unicode_pages(self):
        # TODO(emichael): Remove try, except, else when the unicode issue
        # is fixed in dev_appserver.
        try:
            sites.setup_courses('course:/test::ns_test, course:/:/')
            course = courses.Course(None,
                                    app_context=sites.get_all_courses()[0])
            unit = course.add_unit()
            unit.now_available = True
            lesson_a = course.add_lesson(unit)
            lesson_a.notes = search_unit_test.UNICODE_PAGE_URL
            lesson_a.now_available = True
            course.update_unit(unit)
            course.save()
            self.index_test_course()

            self.swap(logging, 'error', self.error_report)
            response = self.get('/test/search?query=paradox')
            self.assertEqual('', self.logged_error)
            self.assertNotIn('unavailable', response.body)
            self.assertIn('gcb-search-result', response.body)
        except AssertionError:
            # Failing due to known unicode issue
            pass
        else:
            raise AssertionError('Unicode search test should have failed. The '
                                 'issue might now be fixed in dev_appserver.')

    def test_external_links(self):
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        unit = course.add_unit()
        unit.now_available = True
        lesson_a = course.add_lesson(unit)
        lesson_a.notes = search_unit_test.VALID_PAGE_URL
        objectives_link = 'http://objectiveslink.null/'
        lesson_a.objectives = '<a href="%s"></a><a href="%s"></a>' % (
            search_unit_test.LINKED_PAGE_URL, objectives_link)
        lesson_a.now_available = True
        course.update_unit(unit)
        course.save()
        self.index_test_course()

        response = self.get('/test/search?query=What%20hath%20God%20wrought')
        self.assertIn('gcb-search-result', response.body)

        response = self.get('/test/search?query=Cogito')
        self.assertIn('gcb-search-result', response.body)
        self.assertIn(search_unit_test.VALID_PAGE_URL, response.body)
        self.assertIn(objectives_link, response.body)
        self.assertNotIn(search_unit_test.PDF_URL, response.body)

        # If this test fails, indexing will crawl the entire web
        response = self.get('/test/search?query=ABORT')
        self.assertNotIn('gcb-search-result', response.body)
        self.assertNotIn(search_unit_test.SECOND_LINK_PAGE_URL, response.body)

    def test_youtube(self):
        sites.setup_courses('course:/test::ns_test, course:/:/')
        default_namespace = namespace_manager.get_namespace()
        try:
            # Entity writes below must land in the test course's namespace;
            # restore the original namespace afterwards no matter what.
            namespace_manager.set_namespace('ns_test')
            course = courses.Course(None,
                                    app_context=sites.get_all_courses()[0])
            unit = course.add_unit()
            unit.now_available = True
            lesson_a = course.add_lesson(unit)
            lesson_a.video = 'portal'
            lesson_a.now_available = True
            lesson_b = course.add_lesson(unit)
            lesson_b.objectives = '<gcb-youtube videoid="glados">'
            lesson_b.now_available = True
            course.update_unit(unit)
            course.save()

            entity = announcements.AnnouncementEntity()
            entity.html = '<gcb-youtube videoid="aperature">'
            entity.title = 'Sample Announcement'
            entity.date = datetime.datetime.now().date()
            entity.is_draft = False
            entity.put()

            self.index_test_course()

            response = self.get('/test/search?query=apple')
            self.assertIn('gcb-search-result', response.body)
            self.assertIn('start=3.14', response.body)
            self.assertIn('v=portal', response.body)
            self.assertIn('v=glados', response.body)
            self.assertIn('v=aperature', response.body)
            self.assertIn('lemon', response.body)
            self.assertIn('Medicus Quis', response.body)
            self.assertIn('- YouTube', response.body)
            self.assertIn('http://thumbnail.null', response.body)

            # Test to make sure empty notes field doesn't cause a urlfetch
            response = self.get('/test/search?query=cogito')
            self.assertNotIn('gcb-search-result', response.body)
        finally:
            namespace_manager.set_namespace(default_namespace)

    def test_announcements(self):
        email = 'admin@google.com'
        actions.login(email, is_admin=True)
        self.get('announcements')
        response = self.get('dashboard?action=search')
        index_token = self.get_xsrf_token(response.body, 'gcb-index-course')
        response = self.post('dashboard?action=index_course',
                             {'xsrf_token': index_token})
        self.execute_all_deferred_tasks()

        # This matches an announcement in the Power Searching course
        response = self.get(
            'search?query=Certificates%20qualifying%20participants')
        self.assertIn('gcb-search-result', response.body)
        self.assertIn('announcements#', response.body)

        # The draft announcement in Power Searching should not be indexed
        response = self.get('search?query=Welcome%20to%20the%20final%20class')
        self.assertNotIn('gcb-search-result', response.body)
        self.assertNotIn('announcements#', response.body)

    def test_private_units_and_lessons(self):
        sites.setup_courses('course:/test::ns_test, course:/:/')
        course = courses.Course(None, app_context=sites.get_all_courses()[0])
        unit1 = course.add_unit()
        lesson11 = course.add_lesson(unit1)
        lesson11.notes = search_unit_test.VALID_PAGE_URL
        lesson11.objectives = search_unit_test.VALID_PAGE
        lesson11.video = 'portal'
        unit2 = course.add_unit()
        lesson21 = course.add_lesson(unit2)
        lesson21.notes = search_unit_test.VALID_PAGE_URL
        lesson21.objectives = search_unit_test.VALID_PAGE
        lesson21.video = 'portal'

        # Both combinations of a private parent: available unit with a
        # private lesson, and private unit with an available lesson. Neither
        # lesson's content may be indexed.
        unit1.now_available = True
        lesson11.now_available = False
        course.update_unit(unit1)
        unit2.now_available = False
        lesson21.now_available = True
        course.update_unit(unit2)
        course.save()
        self.index_test_course()

        response = self.get('/test/search?query=cogito%20ergo%20sum')
        self.assertNotIn('gcb-search-result', response.body)
        response = self.get('/test/search?query=apple')
        self.assertNotIn('gcb-search-result', response.body)
        self.assertNotIn('v=portal', response.body)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models.models."""
__author__ = [
'johncox@google.com (John Cox)',
]
import datetime
from models import models
from tests.functional import actions
# Disable complaints about docstrings for self-documenting tests.
# pylint: disable-msg=g-missing-docstring
class EventEntityTestCase(actions.ExportTestBase):
    """Checks EventEntity export scrubbing and safe-key transformation."""

    def test_for_export_transforms_correctly(self):
        entity = models.EventEntity(source='source', user_id='1')
        entity_key = entity.put()

        scrubbed = entity.for_export(self.transform)

        self.assert_blacklisted_properties_removed(entity, scrubbed)
        # The original entity keeps its source; the export copy gets a
        # transformed user id.
        self.assertEqual('source', entity.source)
        self.assertEqual('transformed_1', scrubbed.user_id)
        self.assertEqual(
            entity_key,
            models.EventEntity.safe_key(entity_key, self.transform))
class PersonalProfileTestCase(actions.ExportTestBase):
    """Checks PersonalProfile export scrubbing and safe-key naming."""

    def test_for_export_transforms_correctly_and_sets_safe_key(self):
        user_id = '1'
        profile = models.PersonalProfile(
            date_of_birth=datetime.date.today(),
            email='test@example.com',
            key_name=user_id,
            legal_name='legal_name',
            nick_name='nick_name')
        profile.put()

        scrubbed = profile.for_export(self.transform)

        self.assert_blacklisted_properties_removed(profile, scrubbed)
        # The export copy's safe key name is the transformed user id.
        self.assertEqual(self.transform(user_id), scrubbed.safe_key.name())
class QuestionDAOTestCase(actions.TestBase):
    """Functional tests for QuestionDAO."""

    # Name determined by parent. pylint: disable-msg=g-bad-name
    def setUp(self):
        """Sets up datastore contents."""
        super(QuestionDAOTestCase, self).setUp()

        # Three questions: referenced by two groups, by one group, and by
        # none, to cover each used_by() cardinality.
        self.used_twice_question_id = 1
        self.used_twice_question_dto = models.QuestionDTO(
            self.used_twice_question_id, {})
        self.used_once_question_id = 2
        self.used_once_question_dto = models.QuestionDTO(
            self.used_once_question_id, {})
        self.unused_question_id = 3
        self.unused_question_dto = models.QuestionDTO(
            self.unused_question_id, {})
        models.QuestionDAO.save_all([
            self.used_twice_question_dto, self.used_once_question_dto,
            self.unused_question_dto])

        # Handcoding the dicts. This is dangerous because they're handcoded
        # elsewhere, the implementations could fall out of sync, and these tests
        # may then pass erroneously.
        self.first_question_group_description = 'first_question_group'
        self.first_question_group_id = 4
        self.first_question_group_dto = models.QuestionGroupDTO(
            self.first_question_group_id,
            {'description': self.first_question_group_description,
             'items': [{'question': str(self.used_once_question_id)}]})
        self.second_question_group_description = 'second_question_group'
        self.second_question_group_id = 5
        self.second_question_group_dto = models.QuestionGroupDTO(
            self.second_question_group_id,
            {'description': self.second_question_group_description,
             'items': [{'question': str(self.used_twice_question_id)}]})
        self.third_question_group_description = 'third_question_group'
        self.third_question_group_id = 6
        self.third_question_group_dto = models.QuestionGroupDTO(
            self.third_question_group_id,
            {'description': self.third_question_group_description,
             'items': [{'question': str(self.used_twice_question_id)}]})
        models.QuestionGroupDAO.save_all([
            self.first_question_group_dto, self.second_question_group_dto,
            self.third_question_group_dto])

    def test_used_by_returns_description_of_single_question_group(self):
        self.assertEqual(
            [self.first_question_group_description],
            models.QuestionDAO.used_by(self.used_once_question_id))

    def test_used_by_returns_descriptions_of_multiple_question_groups(self):
        self.assertEqual(
            [self.second_question_group_description,
             self.third_question_group_description],
            models.QuestionDAO.used_by(self.used_twice_question_id))

    def test_used_by_returns_empty_list_for_unused_question(self):
        # Also holds for an id that doesn't exist at all.
        not_found_id = 7
        self.assertFalse(models.QuestionDAO.load(not_found_id))
        self.assertEqual([], models.QuestionDAO.used_by(not_found_id))
class StudentTestCase(actions.ExportTestBase):
    """Checks Student export scrubbing, key naming, and safe keys."""

    def test_for_export_transforms_correctly(self):
        user_id = '1'
        # Use the user_id local consistently instead of hard-coding '1' in
        # the constructor, so the expected values below stay in sync with
        # the entity actually created.
        student = models.Student(
            key_name='name', user_id=user_id, is_enrolled=True)
        key = student.put()

        exported = student.for_export(self.transform)

        self.assert_blacklisted_properties_removed(student, exported)
        self.assertTrue(exported.is_enrolled)
        self.assertEqual('transformed_1', exported.user_id)
        self.assertEqual(
            'transformed_' + user_id, exported.key_by_user_id.name())
        self.assertEqual(
            models.Student.safe_key(key, self.transform), exported.safe_key)

    def test_get_key_does_not_transform_by_default(self):
        user_id = 'user_id'
        student = models.Student(key_name='name', user_id=user_id)
        student.put()
        self.assertEqual(user_id, student.get_key().name())

    def test_safe_key_transforms_name(self):
        key = models.Student(key_name='name').put()
        self.assertEqual(
            'transformed_name',
            models.Student.safe_key(key, self.transform).name())
class StudentAnswersEntityTestCase(actions.ExportTestBase):
    """Checks StudentAnswersEntity.safe_key transforms the student name."""

    def test_safe_key_transforms_name(self):
        # The answers entity is keyed by the student's key name.
        student_name = models.Student(key_name='name').put().name()
        answers_key = models.StudentAnswersEntity(
            key_name=student_name).put()

        safe_name = models.StudentAnswersEntity.safe_key(
            answers_key, self.transform).name()
        self.assertEqual('transformed_name', safe_name)
class StudentPropertyEntityTestCase(actions.ExportTestBase):
    """Checks StudentPropertyEntity.safe_key transforms its user-id part."""

    def test_safe_key_transforms_user_id_component(self):
        user_id = 'user_id'
        property_name = 'property-name'

        student = models.Student(
            key_name='email@example.com', user_id=user_id)
        student.put()
        raw_key = models.StudentPropertyEntity.create(
            student, property_name).put()

        # Only the user-id component of the composite key name changes.
        expected_name = 'transformed_%s-%s' % (user_id, property_name)
        self.assertEqual(
            expected_name,
            models.StudentPropertyEntity.safe_key(
                raw_key, self.transform).name())
| Python |
# coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/review/stats.py."""
__author__ = 'Sean Lip'
import actions
from actions import assert_contains
from actions import assert_equals
from controllers_review import get_review_payload
from controllers_review import get_review_step_key
from controllers_review import LEGACY_REVIEW_UNIT_ID
class PeerReviewAnalyticsTest(actions.TestBase):
    """Tests the peer review analytics page on the Course Author dashboard."""

    def test_peer_review_analytics(self):
        """Test analytics page on course dashboard.

        Flow: two students submit the sample peer-review assessment, the
        admin runs the stats job and checks the counts, then one student
        completes a review and the admin re-runs the job to see the split.
        """
        student1 = 'student1@google.com'
        name1 = 'Test Student 1'
        student2 = 'student2@google.com'
        name2 = 'Test Student 2'
        peer = {'assessment_type': 'ReviewAssessmentExample'}
        # Student 1 submits a peer review assessment.
        actions.login(student1)
        actions.register(self, name1)
        actions.submit_assessment(self, 'ReviewAssessmentExample', peer)
        actions.logout()
        # Student 2 submits the same peer review assessment.
        actions.login(student2)
        actions.register(self, name2)
        actions.submit_assessment(self, 'ReviewAssessmentExample', peer)
        actions.logout()
        email = 'admin@google.com'
        # The admin looks at the analytics page on the dashboard.
        actions.login(email, is_admin=True)
        response = self.get('dashboard?action=analytics')
        assert_contains(
            'Google > Dashboard > Analytics', response.body)
        assert_contains('have not been calculated yet', response.body)
        # Kick off the deferred stats-computation job; it redirects back.
        compute_form = response.forms['gcb-compute-student-stats']
        response = self.submit(compute_form)
        assert_equals(response.status_int, 302)
        # NOTE(review): assumes the job fans out into exactly 4 queue tasks —
        # confirm against the analytics job implementation if this breaks.
        assert len(self.taskq.GetTasks('default')) == 4
        response = self.get('dashboard?action=analytics')
        assert_contains('is running', response.body)
        self.execute_all_deferred_tasks()
        # After the job completes, both enrolled students are counted.
        response = self.get('dashboard?action=analytics')
        assert_contains('were last updated at', response.body)
        assert_contains('currently enrolled: 2', response.body)
        assert_contains('total: 2', response.body)
        assert_contains('Peer Review Statistics', response.body)
        assert_contains('Sample peer review assignment', response.body)
        # JSON code for the completion statistics.
        assert_contains('"[{\\"stats\\": [2]', response.body)
        actions.logout()
        # Student2 requests a review.
        actions.login(student2)
        response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
        review_step_key_2_for_1 = get_review_step_key(response)
        assert_contains('Assignment to review', response.body)
        # Student2 submits the review.
        response = actions.submit_review(
            self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
            get_review_payload('R2for1'))
        assert_contains(
            'Your review has been submitted successfully', response.body)
        actions.logout()
        # The admin recomputes stats; the review split is now [1, 1].
        actions.login(email, is_admin=True)
        response = self.get('dashboard?action=analytics')
        assert_contains(
            'Google > Dashboard > Analytics', response.body)
        compute_form = response.forms['gcb-compute-student-stats']
        response = self.submit(compute_form)
        self.execute_all_deferred_tasks()
        response = self.get('dashboard?action=analytics')
        assert_contains('Peer Review Statistics', response.body)
        # JSON code for the completion statistics.
        assert_contains('"[{\\"stats\\": [1, 1]', response.body)
        actions.logout()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""A collection of actions for testing Course Builder pages."""
import cgi
import functools
import logging
import os
import re
import urllib
import appengine_config
from controllers import sites
from controllers import utils
import main
from models import config
from models import custom_modules
from models import transforms
from tests import suite
from google.appengine.api import namespace_manager
# All URLs referred to from all the pages.
# Maps url -> True; populated by TestBase.audit_url() as pages are fetched.
UNIQUE_URLS_FOUND = {}

# HTML comment markers that every page rendered from the base templates is
# expected to emit; asserted by the view_* helpers below.
BASE_HOOK_POINTS = [
    '<!-- base.before_head_tag_ends -->',
    '<!-- base.after_body_tag_begins -->',
    '<!-- base.after_navbar_begins -->',
    '<!-- base.before_navbar_ends -->',
    '<!-- base.after_top_content_ends -->',
    '<!-- base.after_main_content_ends -->',
    '<!-- base.before_body_tag_ends -->']

# Markers emitted only on unit pages.
UNIT_HOOK_POINTS = [
    '<!-- unit.after_leftnav_begins -->',
    '<!-- unit.before_leftnav_ends -->',
    '<!-- unit.after_content_begins -->',
    '<!-- unit.before_content_ends -->']

# Markers emitted only on the course preview page.
PREVIEW_HOOK_POINTS = [
    '<!-- preview.after_top_content_ends -->',
    '<!-- preview.after_main_content_ends -->']
class ShouldHaveFailedByNow(Exception):
    """Raised when an operation expected to fail completes successfully."""
class TestBase(suite.AppEngineTestBase):
    """Contains methods common to all functional tests."""

    # URL of the page most recently fetched; used by check_personalization()
    # to determine which course the response belongs to.
    last_request_url = None

    def getApp(self):  # pylint: disable-msg=g-bad-name
        """Returns the WSGI application under test, with debug enabled."""
        main.debug = True
        sites.ApplicationRequestHandler.bind(main.namespaced_routes)
        return main.app

    def assert_default_namespace(self):
        """Raises unless the current App Engine namespace is the default."""
        ns = namespace_manager.get_namespace()
        if ns != appengine_config.DEFAULT_NAMESPACE_NAME:
            raise Exception('Expected default namespace, found: %s' % ns)

    def setUp(self):  # pylint: disable-msg=g-bad-name
        """Resets namespace, base path and config overrides for each test."""
        super(TestBase, self).setUp()
        self.supports_editing = False
        self.assert_default_namespace()
        self.namespace = ''
        self.base = '/'
        # Reload all properties now to flush the values modified in other tests.
        config.Registry.get_overrides(True)

    def tearDown(self):  # pylint: disable-msg=g-bad-name
        """Verifies the test did not leak a non-default namespace."""
        self.assert_default_namespace()
        super(TestBase, self).tearDown()

    def canonicalize(self, href, response=None):
        """Create absolute URL using <base> if defined, self.base otherwise."""
        # Absolute and root-relative URLs pass through unchanged.
        if href.startswith('/') or utils.ApplicationHandler.is_absolute(href):
            pass
        else:
            base = self.base
            if response:
                # Honor an explicit <base href="..."> tag in the page, if any.
                match = re.search(
                    r'<base href=[\'"]?([^\'" >]+)', response.body)
                if match and not href.startswith('/'):
                    base = match.groups()[0]
            if not base.endswith('/'):
                base += '/'
            href = '%s%s' % (base, href)
        self.audit_url(href)
        return href

    def audit_url(self, url):
        """Record for audit purposes the URL we encountered."""
        UNIQUE_URLS_FOUND[url] = True

    def hook_response(self, response):
        """Modify response.goto() to compute URL using <base>, if defined."""
        if response.status_int == 200:
            self.check_response_hrefs(response)
        self.last_request_url = self.canonicalize(response.request.path)
        gotox = response.goto

        def new_goto(href, method='get', **args):
            # Resolve the href the same way a browser would before following.
            return gotox(self.canonicalize(href), method, **args)

        response.goto = new_goto
        return response

    def check_response_hrefs(self, response):
        """Check response page URLs are properly formatted/canonicalized."""
        hrefs = re.findall(r'href=[\'"]?([^\'" >]+)', response.body)
        srcs = re.findall(r'src=[\'"]?([^\'" >]+)', response.body)
        for url in hrefs + srcs:
            # We expect all internal URLs to be relative: 'asset/css/main.css',
            # and use <base> tag. All others URLs must be whitelisted below.
            if url.startswith('/'):
                absolute = url.startswith('//')
                root = url == '/'
                canonical = url.startswith(self.base)
                allowed = self.url_allowed(url)
                if not (absolute or root or canonical or allowed):
                    raise Exception('Invalid reference \'%s\' in:\n%s' % (
                        url, response.body))
            # Record every URL seen, whether whitelisted or canonical.
            self.audit_url(self.canonicalize(url, response=response))

    def url_allowed(self, url):
        """Check whether a URL should be allowed as a href in the response."""
        if url.startswith('/_ah/'):
            return True
        # Routes registered globally by any custom module are acceptable.
        global_routes = []
        for module in custom_modules.Registry.registered_modules.values():
            for route, unused_handler in module.global_routes:
                global_routes.append(route)
        if any(re.match(route, url) for route in global_routes):
            return True
        return False

    def get(self, url, **kwargs):
        """Performs an HTTP GET; returns a hooked webtest response."""
        url = self.canonicalize(url)
        logging.info('HTTP Get: %s', url)
        response = self.testapp.get(url, **kwargs)
        return self.hook_response(response)

    def post(self, url, params, expect_errors=False):
        """Performs an HTTP POST; returns a hooked webtest response."""
        url = self.canonicalize(url)
        logging.info('HTTP Post: %s', url)
        response = self.testapp.post(url, params, expect_errors=expect_errors)
        return self.hook_response(response)

    def put(self, url, params, expect_errors=False):
        """Performs an HTTP PUT; returns a hooked webtest response."""
        url = self.canonicalize(url)
        logging.info('HTTP Put: %s', url)
        response = self.testapp.put(url, params, expect_errors=expect_errors)
        return self.hook_response(response)

    def click(self, response, name):
        """Follows the named link on a page; returns a hooked response."""
        logging.info('Link click: %s', name)
        response = response.click(name)
        return self.hook_response(response)

    def submit(self, form):
        """Submits a form; returns a hooked response."""
        logging.info('Form submit: %s', form)
        response = form.submit()
        return self.hook_response(response)
class ExportTestBase(TestBase):
    """Base test class for classes that implement export functionality.

    If your entities.BaseEntity class implements a custom for_export or
    safe_key, you probably want to test them with this TestCase.
    """

    def assert_blacklisted_properties_removed(self, original_model, exported):
        # Treating as module-protected. pylint: disable-msg=protected-access
        blacklist = original_model._get_export_blacklist()
        for prop in blacklist:
            self.assertFalse(hasattr(exported, prop.name))

    def transform(self, value):
        """Prefix value so transformed key names are recognizable in asserts."""
        return 'transformed_' + value
def assert_equals(actual, expected):
    """Raise unless actual compares equal to expected."""
    if actual != expected:
        raise Exception('Expected \'%s\', does not match actual \'%s\'.' %
                        (expected, actual))
def to_unicode(text):
    """Return text as a unicode string, decoding UTF-8 bytes if needed."""
    if isinstance(text, unicode):
        return text
    return unicode(text, 'utf-8')
def assert_contains(needle, haystack, collapse_whitespace=False):
    """Raise unless needle occurs somewhere inside haystack."""
    needle = to_unicode(needle)
    haystack = to_unicode(haystack)
    if collapse_whitespace:
        # Normalize newlines and runs of spaces to single spaces.
        haystack = ' '.join(haystack.replace('\n', ' ').split())
    if needle not in haystack:
        raise Exception('Can\'t find \'%s\' in \'%s\'.' % (needle, haystack))
def assert_contains_all_of(needles, haystack):
    """Raise unless every needle occurs inside haystack."""
    haystack = to_unicode(haystack)
    for raw_needle in needles:
        needle = to_unicode(raw_needle)
        if needle not in haystack:
            raise Exception(
                'Can\'t find \'%s\' in \'%s\'.' % (needle, haystack))
def assert_does_not_contain(needle, haystack, collapse_whitespace=False):
    """Raise if needle occurs anywhere inside haystack."""
    needle = to_unicode(needle)
    haystack = to_unicode(haystack)
    if collapse_whitespace:
        # Normalize newlines and runs of spaces to single spaces.
        haystack = ' '.join(haystack.replace('\n', ' ').split())
    if needle in haystack:
        raise Exception('Found \'%s\' in \'%s\'.' % (needle, haystack))
def assert_contains_none_of(needles, haystack):
    """Raise if any needle occurs inside haystack."""
    haystack = to_unicode(haystack)
    for raw_needle in needles:
        needle = to_unicode(raw_needle)
        if needle in haystack:
            raise Exception('Found \'%s\' in \'%s\'.' % (needle, haystack))
def assert_none_fail(browser, callbacks):
    """Invokes all callbacks and expects each one not to fail."""
    for check in callbacks:
        check(browser)
def assert_at_least_one_succeeds(callbacks):
    """Invoke callbacks in order; return True on first success, else raise."""
    for check in callbacks:
        try:
            check()
        except Exception:  # pylint: disable-msg=broad-except
            continue
        return True
    raise Exception('All callbacks failed.')
def assert_all_fail(browser, callbacks):
    """Invokes all callbacks and expects each one to fail."""
    for check in callbacks:
        try:
            check(browser)
        except ShouldHaveFailedByNow:
            # A nested unexpected-success marker must propagate untouched.
            raise
        except Exception:  # pylint: disable-msg=broad-except
            continue
        # The callback returned normally: that is the failure we report.
        raise ShouldHaveFailedByNow(
            'Expected to fail: %s().' % check.__name__)
def get_form_by_action(response, action):
    """Gets a form give an action string or returns None."""
    matches = (
        form for form in response.forms.values() if form.action == action)
    return next(matches, None)
def login(email, is_admin=False):
    """Simulate a login by populating the GAE user environment variables."""
    os.environ['USER_EMAIL'] = email
    os.environ['USER_ID'] = email
    os.environ['USER_IS_ADMIN'] = '1' if is_admin else '0'
def get_current_user_email():
    """Return the email of the simulated logged-in user; raise if none."""
    email = os.environ['USER_EMAIL']
    if email:
        return email
    raise Exception('No current user.')
def logout():
    """Clear the simulated login state.

    Previously this used unconditional `del os.environ[...]`, which raised
    KeyError when called twice in a row or before any login() — e.g. in a
    test's cleanup path after a mid-test failure. pop() with a default makes
    logout idempotent while preserving the logged-in -> logged-out effect.
    """
    for name in ('USER_EMAIL', 'USER_ID', 'USER_IS_ADMIN'):
        os.environ.pop(name, None)
def register(browser, name):
    """Registers a new student with the given name."""
    reg_page = view_registration(browser)
    form = get_form_by_action(reg_page, 'register')
    form.set('form01', name)
    submitted = browser.submit(form)
    # Successful registration redirects to the confirmation anchor.
    assert_equals(submitted.status_int, 302)
    assert_contains(
        'course#registration_confirmation', submitted.headers['location'])
    check_profile(browser, name)
    return submitted
def check_profile(browser, name):
    """Assert the profile page shows the student's name and email."""
    profile = view_my_profile(browser)
    for expected in ('Email', cgi.escape(name), get_current_user_email()):
        assert_contains(expected, profile.body)
    return profile
def view_registration(browser):
    """Fetch the registration page and sanity-check its contents."""
    page = browser.get('register')
    check_personalization(browser, page)
    assert_contains('What is your name?', page.body)
    assert_contains_all_of(
        ['<!-- reg_form.additional_registration_fields -->'], page.body)
    return page
def register_with_additional_fields(browser, name, data2, data3):
    """Registers a new student with customized registration form."""
    root = browser.get('/')
    assert_equals(root.status_int, 302)
    reg_page = view_registration(browser)
    form = get_form_by_action(reg_page, 'register')
    for field, value in (
        ('form01', name), ('form02', data2), ('form03', data3)):
        form.set(field, value)
    submitted = browser.submit(form)
    # Successful registration redirects to the confirmation anchor.
    assert_equals(submitted.status_int, 302)
    assert_contains(
        'course#registration_confirmation', submitted.headers['location'])
    check_profile(browser, name)
def check_logout_link(response_body):
    """A logged-in page shows the current user's email by the logout link."""
    assert_contains(get_current_user_email(), response_body)
def check_login_link(response_body):
    """A logged-out page must offer a Login link."""
    assert_contains('Login', response_body)
def check_personalization(browser, response):
    """Checks that the login/logout text is correct."""
    sites.set_path_info(browser.last_request_url)
    app_context = sites.get_course_for_current_request()
    sites.unset_path_info()
    if not app_context.get_environ()['course']['browsable']:
        # A non-browsable course requires a logged-in user.
        check_logout_link(response.body)
        return
    # A browsable course may be viewed logged in or out; accept either.
    assert_at_least_one_succeeds([
        functools.partial(check_login_link, response.body),
        functools.partial(check_logout_link, response.body),
    ])
def view_preview(browser):
    """Views /preview page."""
    page = browser.get('preview')
    body = page.body
    assert_contains(' the stakes are high.', body)
    assert_contains(
        '<li><p class="gcb-top-content">Pre-course assessment</p></li>',
        body)
    # Preview pages carry preview hooks but never unit hooks.
    assert_contains_none_of(UNIT_HOOK_POINTS, body)
    assert_contains_all_of(PREVIEW_HOOK_POINTS, body)
    return page
def view_course(browser):
    """Views /course page."""
    page = browser.get('course')
    body = page.body
    assert_contains(' the stakes are high.', body)
    assert_contains('<a href="assessment?name=Pre">Pre-course assessment</a>',
                    body)
    check_personalization(browser, page)
    # Course pages carry base hooks only — no unit or preview hooks.
    assert_contains_all_of(BASE_HOOK_POINTS, body)
    assert_contains_none_of(UNIT_HOOK_POINTS, body)
    assert_contains_none_of(PREVIEW_HOOK_POINTS, body)
    return page
def view_unit(browser):
    """Views /unit page."""
    page = browser.get('unit?unit=1&lesson=1')
    body = page.body
    assert_contains('Unit 1 - Introduction', body)
    assert_contains('1.3 How search works', body)
    assert_contains('1.6 Finding text on a web page', body)
    assert_contains('https://www.youtube.com/embed/1ppwmxidyIE', body)
    check_personalization(browser, page)
    # Unit pages carry base and unit hooks, but no preview hooks.
    assert_contains_all_of(BASE_HOOK_POINTS, body)
    assert_contains_all_of(UNIT_HOOK_POINTS, body)
    assert_contains_none_of(PREVIEW_HOOK_POINTS, body)
    return page
def view_activity(browser):
    """Fetch an activity page and verify its script include is present."""
    page = browser.get('activity?unit=1&lesson=2')
    assert_contains('<script src="assets/js/activity-1.2.js"></script>',
                    page.body)
    check_personalization(browser, page)
    return page
def get_activity(browser, unit_id, lesson_id, args):
    """Retrieve the activity page for a given unit and lesson id."""
    page = browser.get('activity?unit=%s&lesson=%s' % (unit_id, lesson_id))
    assert_equals(page.status_int, 200)
    assert_contains(
        '<script src="assets/js/activity-%s.%s.js"></script>' %
        (unit_id, lesson_id), page.body)
    assert_contains('assets/lib/activity-generic-1.3.js', page.body)
    js_page = browser.get('assets/lib/activity-generic-1.3.js')
    assert_equals(js_page.status_int, 200)
    # Extract XSRF token from the page.
    token_match = re.search(r'eventXsrfToken = [\']([^\']+)', page.body)
    assert token_match
    args['xsrf_token'] = token_match.group(1)
    return page, args
def attempt_activity(browser, unit_id, lesson_id, index, answer, correct):
    """Attempts an activity in a given unit and lesson."""
    page, args = get_activity(browser, unit_id, lesson_id, {})
    # Prepare activity submission event; key order mirrors the original
    # payload so the serialized form is unchanged.
    args['source'] = 'attempt-activity'
    payload = {
        'index': index,
        'type': 'activity-choice',
        'value': answer,
        'correct': correct
    }
    payload['location'] = (
        'http://localhost:8080/activity?unit=%s&lesson=%s' %
        (unit_id, lesson_id))
    args['payload'] = transforms.dumps(payload)
    # Submit the request to the backend.
    reply = browser.post('rest/events?%s' % urllib.urlencode(
        {'request': transforms.dumps(args)}), {})
    assert_equals(reply.status_int, 200)
    assert not reply.body
def view_announcements(browser):
    """Fetch the announcements page and verify it loads."""
    page = browser.get('announcements')
    assert_equals(page.status_int, 200)
    return page
def view_my_profile(browser):
    """Fetch the student profile page and verify its contents."""
    page = browser.get('student/home')
    assert_contains('Date enrolled', page.body)
    check_personalization(browser, page)
    return page
def view_forum(browser):
    """Fetch the forum page and verify the embed bootstrap is present."""
    page = browser.get('forum')
    assert_contains('document.getElementById("forum_embed").src =',
                    page.body)
    check_personalization(browser, page)
    return page
def view_assessments(browser):
    """Fetch every assessment page and verify each loads for this user."""
    for name in ('Pre', 'Mid', 'Fin'):
        page = browser.get('assessment?name=%s' % name)
        assert 'assets/js/assessment-%s.js' % name in page.body
        assert_equals(page.status_int, 200)
        check_personalization(browser, page)
def submit_assessment(browser, unit_id, args, presubmit_checks=True):
    """Submits an assessment."""
    page = browser.get('assessment?name=%s' % unit_id)
    if presubmit_checks:
        assert_contains(
            '<script src="assets/js/assessment-%s.js"></script>' % unit_id,
            page.body)
        js_page = browser.get('assets/js/assessment-%s.js' % unit_id)
        assert_equals(js_page.status_int, 200)
    # Extract XSRF token from the page.
    token_match = re.search(
        r'assessmentXsrfToken = [\']([^\']+)', page.body)
    assert token_match
    args['xsrf_token'] = token_match.group(1)
    reply = browser.post('answer', args)
    assert_equals(reply.status_int, 200)
    return reply
def request_new_review(browser, unit_id, expected_status_code=302):
    """Requests a new assignment to review."""
    dashboard = browser.get('reviewdashboard?unit=%s' % unit_id)
    assert_contains('Assignments for your review', dashboard.body)
    # Extract XSRF token from the page.
    token_match = re.search(
        r'<input type="hidden" name="xsrf_token"\s* value="([^"]*)">',
        dashboard.body)
    assert token_match
    post_args = {'xsrf_token': token_match.group(1)}
    response = browser.post(
        'reviewdashboard?unit=%s' % unit_id, post_args,
        expect_errors=expected_status_code not in (200, 302))
    assert_equals(response.status_int, expected_status_code)
    if expected_status_code == 302:
        # Follow the redirect to the review page itself.
        assert_contains('review?unit=%s' % unit_id, response.location)
        response = browser.get(response.location)
        assert_contains('Assignment to review', response.body)
    return response
def view_review(browser, unit_id, review_step_key, expected_status_code=200):
    """View a review page."""
    page = browser.get(
        'review?unit=%s&key=%s' % (unit_id, review_step_key),
        expect_errors=expected_status_code != 200)
    assert_equals(page.status_int, expected_status_code)
    if expected_status_code == 200:
        assert_contains('Assignment to review', page.body)
    return page
def submit_review(
        browser, unit_id, review_step_key, args, presubmit_checks=True):
    """Submits a review."""
    page = browser.get(
        'review?unit=%s&key=%s' % (unit_id, review_step_key))
    if presubmit_checks:
        assert_contains(
            '<script src="assets/js/review-%s.js"></script>' % unit_id,
            page.body)
        js_page = browser.get('assets/js/review-%s.js' % unit_id)
        assert_equals(js_page.status_int, 200)
    # Extract XSRF token from the page.
    token_match = re.search(
        r'assessmentXsrfToken = [\']([^\']+)', page.body)
    assert token_match
    args['xsrf_token'] = token_match.group(1)
    args['key'] = review_step_key
    args['unit_id'] = unit_id
    reply = browser.post('review', args)
    assert_equals(reply.status_int, 200)
    return reply
def add_reviewer(browser, unit_id, reviewee_email, reviewer_email):
    """Adds a reviewer to a submission."""
    query = urllib.urlencode({
        'action': 'edit_assignment',
        'reviewee_id': reviewee_email,
        'unit_id': unit_id,
    })
    page = browser.get('/dashboard?%s' % query)
    # Extract XSRF token from the page.
    token_match = re.search(
        r'<input type="hidden" name="xsrf_token"\s* value="([^"]*)">',
        page.body)
    assert token_match
    post_args = {
        'xsrf_token': token_match.group(1),
        'reviewer_id': reviewer_email,
        'reviewee_id': reviewee_email,
        'unit_id': unit_id,
    }
    return browser.post('/dashboard?action=add_reviewer', post_args)
def change_name(browser, new_name):
    """Change the name of a student."""
    home = browser.get('student/home')
    form = get_form_by_action(home, 'student/editstudent')
    form.set('name', new_name)
    reply = browser.submit(form)
    assert_equals(reply.status_int, 302)
    check_profile(browser, new_name)
def unregister(browser):
    """Unregister a student."""
    home = browser.get('student/home')
    confirm = browser.click(home, 'Unenroll')
    assert_contains('to unenroll from', confirm.body)
    form = get_form_by_action(confirm, 'student/unenroll')
    browser.submit(form)
class Permissions(object):
    """Defines who can see what."""

    @classmethod
    def get_browsable_pages(cls):
        """Returns all pages that can be accessed by a logged-out user."""
        return [view_announcements, view_forum, view_course, view_unit,
                view_assessments, view_activity]

    @classmethod
    def get_nonbrowsable_pages(cls):
        """Returns all non-browsable pages."""
        return [view_preview, view_my_profile, view_registration]

    @classmethod
    def get_logged_out_allowed_pages(cls):
        """Returns all pages that a logged-out user can see."""
        return [view_announcements, view_preview]

    @classmethod
    def get_logged_out_denied_pages(cls):
        """Returns all pages that a logged-out user can't see."""
        return [view_forum, view_course, view_assessments,
                view_unit, view_activity, view_my_profile, view_registration]

    @classmethod
    def get_enrolled_student_allowed_pages(cls):
        """Returns all pages that a logged-in, enrolled student can see."""
        return [view_announcements, view_forum, view_course,
                view_assessments, view_unit, view_activity, view_my_profile]

    @classmethod
    def get_enrolled_student_denied_pages(cls):
        """Returns all pages that a logged-in, enrolled student can't see."""
        return [view_registration, view_preview]

    @classmethod
    def get_unenrolled_student_allowed_pages(cls):
        """Returns all pages that a logged-in, unenrolled student can see."""
        return [view_announcements, view_registration, view_preview]

    @classmethod
    def get_unenrolled_student_denied_pages(cls):
        """Returns all pages that a logged-in, unenrolled student can't see."""
        allowed = cls.get_unenrolled_student_allowed_pages()
        return [page for page in cls.get_enrolled_student_allowed_pages()
                if page not in allowed]

    @classmethod
    def assert_can_browse(cls, browser):
        """Check that pages for a browsing user are visible."""
        assert_none_fail(browser, cls.get_browsable_pages())
        assert_all_fail(browser, cls.get_nonbrowsable_pages())

    @classmethod
    def assert_logged_out(cls, browser):
        """Check that only pages for a logged-out user are visible."""
        assert_none_fail(browser, cls.get_logged_out_allowed_pages())
        assert_all_fail(browser, cls.get_logged_out_denied_pages())

    @classmethod
    def assert_enrolled(cls, browser):
        """Check that only pages for an enrolled student are visible."""
        assert_none_fail(browser, cls.get_enrolled_student_allowed_pages())
        assert_all_fail(browser, cls.get_enrolled_student_denied_pages())

    @classmethod
    def assert_unenrolled(cls, browser):
        """Check that only pages for an unenrolled student are visible."""
        assert_none_fail(browser, cls.get_unenrolled_student_allowed_pages())
        assert_all_fail(browser, cls.get_unenrolled_student_denied_pages())
| Python |
# coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for controllers pertaining to peer review assessments."""
__author__ = 'Sean Lip'
from controllers import sites
from models import transforms
import actions
from actions import assert_contains
from actions import assert_does_not_contain
from actions import assert_equals
# The unit id for the peer review assignment in the default course.
# Used both as the assessment name in URLs and as the assessment_type value
# in submission payloads throughout these tests.
LEGACY_REVIEW_UNIT_ID = 'ReviewAssessmentExample'
def get_review_step_key(response):
    """Return the review step key from the request's query string.

    Args:
        response: a webtest response whose request carries a WSGI environ
            with a 'QUERY_STRING' entry.

    Returns:
        Everything after the first 'key=' marker in the query string, or ''
        when the marker is absent. The previous implementation used
        find('key=') + 4, which silently produced an arbitrary slice
        (query[3:]) when 'key=' was missing; partition() returns the
        unambiguous empty string instead.
    """
    query = response.request.environ['QUERY_STRING']
    return query.partition('key=')[2]
def get_review_payload(identifier, is_draft=False):
    """Returns a sample review payload."""
    answers = transforms.dumps([
        {'index': 0, 'type': 'choices', 'value': '0', 'correct': False},
        {'index': 1, 'type': 'regex', 'value': identifier, 'correct': True}
    ])
    return {
        'answers': answers,
        'is_draft': 'true' if is_draft else 'false',
    }
class PeerReviewControllerTest(actions.TestBase):
"""Test peer review from the Student perspective."""
    def test_submit_assignment(self):
        """Test submission of peer-reviewed assignments.

        Covers: course/preview visibility, progress tracking, first
        submission, rejected re-submission, and review dashboard access.
        """
        # Override course.yaml settings by patching app_context.
        # NOTE(review): the patch is restored only at the end of the test
        # body, not in a try/finally — a failing assertion leaks the patched
        # get_environ into subsequent tests.
        get_environ_old = sites.ApplicationContext.get_environ
        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['browsable'] = False
            return environ
        sites.ApplicationContext.get_environ = get_environ_new
        email = 'test_peer_reviewed_assignment_submission@google.com'
        name = 'Test Peer Reviewed Assignment Submission'
        submission = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'First answer to Q1',
             'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'First answer to Q3',
             'correct': True},
        ])
        second_submission = transforms.dumps([
            {'index': 0, 'type': 'regex', 'value': 'Second answer to Q1',
             'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'Second answer to Q3',
             'correct': True},
        ])
        # Check that the sample peer-review assignment shows up in the preview
        # page.
        response = actions.view_preview(self)
        assert_contains('Sample peer review assignment', response.body)
        assert_does_not_contain('Review peer assignments', response.body)
        actions.login(email)
        actions.register(self, name)
        # Check that the sample peer-review assignment shows up in the course
        # page and that it can be visited.
        response = actions.view_course(self)
        assert_contains('Sample peer review assignment', response.body)
        assert_contains('Review peer assignments', response.body)
        assert_contains(
            '<a href="assessment?name=%s">' % LEGACY_REVIEW_UNIT_ID,
            response.body)
        # The review link is plain text (not clickable) before submission.
        assert_contains('<span> Review peer assignments </span>', response.body,
                        collapse_whitespace=True)
        assert_does_not_contain('<a href="reviewdashboard', response.body,
                                collapse_whitespace=True)
        # Check that the progress circle for this assignment is unfilled.
        assert_contains(
            'progress-notstarted-%s' % LEGACY_REVIEW_UNIT_ID, response.body)
        assert_does_not_contain(
            'progress-completed-%s' % LEGACY_REVIEW_UNIT_ID, response.body)
        # Try to access an invalid assignment.
        response = self.get(
            'assessment?name=FakeAssessment', expect_errors=True)
        assert_equals(response.status_int, 404)
        # The student should not be able to see others' reviews because he/she
        # has not submitted an assignment yet.
        response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_does_not_contain('Submitted assignment', response.body)
        assert_contains('Due date for this assignment', response.body)
        assert_does_not_contain('Reviews received', response.body)
        # The student should not be able to access the review dashboard because
        # he/she has not submitted the assignment yet.
        response = self.get(
            'reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID,
            expect_errors=True)
        assert_contains('You must submit the assignment for', response.body)
        # The student submits the assignment.
        response = actions.submit_assessment(
            self,
            LEGACY_REVIEW_UNIT_ID,
            {'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
        )
        assert_contains(
            'Thank you for completing this assignment', response.body)
        assert_contains('Review peer assignments', response.body)
        # The student views the submitted assignment, which has become readonly.
        response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_contains('First answer to Q1', response.body)
        assert_contains('Submitted assignment', response.body)
        # The student tries to re-submit the same assignment. This should fail.
        response = actions.submit_assessment(
            self,
            LEGACY_REVIEW_UNIT_ID,
            {'answers': second_submission,
             'assessment_type': LEGACY_REVIEW_UNIT_ID},
            presubmit_checks=False
        )
        assert_contains(
            'You have already submitted this assignment.', response.body)
        assert_contains('Review peer assignments', response.body)
        # The student views the submitted assignment. The new answers have not
        # been saved.
        response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_contains('First answer to Q1', response.body)
        assert_does_not_contain('Second answer to Q1', response.body)
        # The student checks the course page and sees that the progress
        # circle for this assignment has been filled, and that the 'Review
        # peer assignments' link is now available.
        response = actions.view_course(self)
        assert_contains(
            'progress-completed-%s' % LEGACY_REVIEW_UNIT_ID, response.body)
        assert_does_not_contain(
            '<span> Review peer assignments </span>', response.body,
            collapse_whitespace=True)
        assert_contains(
            '<a href="reviewdashboard?unit=%s">' % LEGACY_REVIEW_UNIT_ID,
            response.body, collapse_whitespace=True)
        # The student should also be able to now view the review dashboard.
        response = self.get('reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
        assert_contains('Assignments for your review', response.body)
        assert_contains('Review a new assignment', response.body)
        actions.logout()
        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
def test_handling_of_fake_review_step_key(self):
"""Test that bad keys result in the appropriate responses."""
email = 'student1@google.com'
name = 'Student 1'
submission = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload = {
'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
actions.login(email)
actions.register(self, name)
actions.submit_assessment(self, LEGACY_REVIEW_UNIT_ID, payload)
actions.view_review(
self, LEGACY_REVIEW_UNIT_ID, 'Fake key',
expected_status_code=404)
actions.logout()
def test_not_enough_assignments_to_allocate(self):
"""Test for the case when there are too few assignments in the pool."""
email = 'student1@google.com'
name = 'Student 1'
submission = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload = {
'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
actions.login(email)
actions.register(self, name)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload)
# The student goes to the review dashboard and requests an assignment
# to review -- but there is nothing to review.
response = actions.request_new_review(
self, LEGACY_REVIEW_UNIT_ID, expected_status_code=200)
assert_does_not_contain('Assignment to review', response.body)
assert_contains('Sorry, there are no new submissions ', response.body)
assert_contains('disabled="true"', response.body)
actions.logout()
def test_reviewer_cannot_impersonate_another_reviewer(self):
"""Test that one reviewer cannot use another's review step key."""
email1 = 'student1@google.com'
name1 = 'Student 1'
submission1 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload1 = {
'answers': submission1, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email2 = 'student2@google.com'
name2 = 'Student 2'
submission2 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S2-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload2 = {
'answers': submission2, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email3 = 'student3@google.com'
name3 = 'Student 3'
submission3 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S3-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload3 = {
'answers': submission3, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
# Student 1 submits the assignment.
actions.login(email1)
actions.register(self, name1)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload1)
actions.logout()
# Student 2 logs in and submits the assignment.
actions.login(email2)
actions.register(self, name2)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload2)
# Student 2 requests a review, and is given Student 1's assignment.
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
review_step_key_2_for_1 = get_review_step_key(response)
assert_contains('S1-1', response.body)
actions.logout()
# Student 3 logs in, and submits the assignment.
actions.login(email3)
actions.register(self, name3)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload3)
# Student 3 tries to view Student 1's assignment using Student 2's
# review step key, but is not allowed to.
response = actions.view_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
expected_status_code=404)
# Student 3 logs out.
actions.logout()
def test_student_cannot_see_reviews_prematurely(self):
"""Test that students cannot see others' reviews prematurely."""
email = 'student1@google.com'
name = 'Student 1'
submission = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload = {
'answers': submission, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
actions.login(email)
actions.register(self, name)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload)
# Student 1 cannot see the reviews for his assignment yet, because he
# has not submitted the two required reviews.
response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('Due date for this assignment', response.body)
assert_contains(
'After you have completed the required number of peer reviews',
response.body)
actions.logout()
def test_draft_review_behaviour(self):
"""Test correctness of draft review visibility."""
email1 = 'student1@google.com'
name1 = 'Student 1'
submission1 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload1 = {
'answers': submission1, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email2 = 'student2@google.com'
name2 = 'Student 2'
submission2 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S2-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload2 = {
'answers': submission2, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email3 = 'student3@google.com'
name3 = 'Student 3'
submission3 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S3-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload3 = {
'answers': submission3, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
# Student 1 submits the assignment.
actions.login(email1)
actions.register(self, name1)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload1)
actions.logout()
# Student 2 logs in and submits the assignment.
actions.login(email2)
actions.register(self, name2)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload2)
# Student 2 requests a review, and is given Student 1's assignment.
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
review_step_key_2_for_1 = get_review_step_key(response)
assert_contains('S1-1', response.body)
# Student 2 saves her review as a draft.
review_2_for_1_payload = get_review_payload(
'R2for1', is_draft=True)
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
review_2_for_1_payload)
assert_contains('Your review has been saved.', response.body)
response = self.get('reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('(Draft)', response.body)
# Student 2's draft is still changeable.
response = actions.view_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1)
assert_contains('Submit Review', response.body)
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
review_2_for_1_payload)
assert_contains('Your review has been saved.', response.body)
# Student 2 logs out.
actions.logout()
# Student 3 submits the assignment.
actions.login(email3)
actions.register(self, name3)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload3)
actions.logout()
# Student 1 logs in and requests two assignments to review.
actions.login(email1)
response = self.get('/reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
assert_contains('Assignment to review', response.body)
assert_contains('not-S1', response.body)
review_step_key_1_for_someone = get_review_step_key(response)
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
assert_contains('Assignment to review', response.body)
assert_contains('not-S1', response.body)
review_step_key_1_for_someone_else = get_review_step_key(response)
response = self.get('reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('disabled="true"', response.body)
# Student 1 submits both reviews, fulfilling his quota.
review_1_for_other_payload = get_review_payload('R1for')
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone,
review_1_for_other_payload)
assert_contains(
'Your review has been submitted successfully', response.body)
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone_else,
review_1_for_other_payload)
assert_contains(
'Your review has been submitted successfully', response.body)
response = self.get('/reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
assert_contains('(Completed)', response.body)
assert_does_not_contain('(Draft)', response.body)
# Although Student 1 has submitted 2 reviews, he cannot view Student
# 2's review because it is still in Draft status.
response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains(
'You have not received any peer reviews yet.', response.body)
assert_does_not_contain('R2for1', response.body)
# Student 1 logs out.
actions.logout()
# Student 2 submits her review for Student 1's assignment.
actions.login(email2)
response = self.get('review?unit=%s&key=%s' % (
LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1))
assert_does_not_contain('Submitted review', response.body)
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1,
get_review_payload('R2for1'))
assert_contains(
'Your review has been submitted successfully', response.body)
# Her review is now read-only.
response = self.get('review?unit=%s&key=%s' % (
LEGACY_REVIEW_UNIT_ID, review_step_key_2_for_1))
assert_contains('Submitted review', response.body)
assert_contains('R2for1', response.body)
# Student 2 logs out.
actions.logout()
# Now Student 1 can see the review he has received from Student 2.
actions.login(email1)
response = self.get('assessment?name=%s' % LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('R2for1', response.body)
def test_independence_of_draft_reviews(self):
"""Test that draft reviews do not interfere with each other."""
email1 = 'student1@google.com'
name1 = 'Student 1'
submission1 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S1-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'is-S1', 'correct': True},
])
payload1 = {
'answers': submission1, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email2 = 'student2@google.com'
name2 = 'Student 2'
submission2 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S2-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload2 = {
'answers': submission2, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
email3 = 'student3@google.com'
name3 = 'Student 3'
submission3 = transforms.dumps([
{'index': 0, 'type': 'regex', 'value': 'S3-1', 'correct': True},
{'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
{'index': 2, 'type': 'regex', 'value': 'not-S1', 'correct': True},
])
payload3 = {
'answers': submission3, 'assessment_type': LEGACY_REVIEW_UNIT_ID}
# Student 1 submits the assignment.
actions.login(email1)
actions.register(self, name1)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload1)
actions.logout()
# Student 2 logs in and submits the assignment.
actions.login(email2)
actions.register(self, name2)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload2)
actions.logout()
# Student 3 logs in and submits the assignment.
actions.login(email3)
actions.register(self, name3)
response = actions.submit_assessment(
self, LEGACY_REVIEW_UNIT_ID, payload3)
actions.logout()
# Student 1 logs in and requests two assignments to review.
actions.login(email1)
response = self.get('/reviewdashboard?unit=%s' % LEGACY_REVIEW_UNIT_ID)
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('Assignment to review', response.body)
assert_contains('not-S1', response.body)
review_step_key_1_for_someone = get_review_step_key(response)
response = actions.request_new_review(self, LEGACY_REVIEW_UNIT_ID)
assert_equals(response.status_int, 200)
assert_contains('Assignment to review', response.body)
assert_contains('not-S1', response.body)
review_step_key_1_for_someone_else = get_review_step_key(response)
self.assertNotEqual(
review_step_key_1_for_someone, review_step_key_1_for_someone_else)
# Student 1 submits two draft reviews.
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone,
get_review_payload('R1forFirst', is_draft=True))
assert_contains('Your review has been saved.', response.body)
response = actions.submit_review(
self, LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone_else,
get_review_payload('R1forSecond', is_draft=True))
assert_contains('Your review has been saved.', response.body)
# The two draft reviews should still be different when subsequently
# accessed.
response = self.get('review?unit=%s&key=%s' % (
LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone))
assert_contains('R1forFirst', response.body)
response = self.get('review?unit=%s&key=%s' % (
LEGACY_REVIEW_UNIT_ID, review_step_key_1_for_someone_else))
assert_contains('R1forSecond', response.body)
# Student 1 logs out.
actions.logout()
class PeerReviewDashboardTest(actions.TestBase):
    """Test peer review from the Admin perspective."""

    def test_add_reviewer(self):
        """Test that admin can add a reviewer, and cannot re-add reviewers."""
        email = 'test_add_reviewer@google.com'
        answers = [
            {'index': 0, 'type': 'regex', 'value': 'First answer to Q1',
             'correct': True},
            {'index': 1, 'type': 'choices', 'value': 3, 'correct': False},
            {'index': 2, 'type': 'regex', 'value': 'First answer to Q3',
             'correct': True},
        ]
        actions.login(email)
        actions.register(self, 'Test Add Reviewer')
        actions.submit_assessment(self, LEGACY_REVIEW_UNIT_ID, {
            'answers': transforms.dumps(answers),
            'assessment_type': LEGACY_REVIEW_UNIT_ID})
        # There is nothing to review on the review dashboard.
        response = actions.request_new_review(
            self, LEGACY_REVIEW_UNIT_ID, expected_status_code=200)
        assert_does_not_contain('Assignment to review', response.body)
        assert_contains('Sorry, there are no new submissions ', response.body)
        actions.logout()
        # The admin assigns the student to review his own work.
        actions.login(email, is_admin=True)
        response = actions.add_reviewer(
            self, LEGACY_REVIEW_UNIT_ID, email, email)
        assert_equals(response.status_int, 302)
        response = self.get(response.location)
        assert_does_not_contain(
            'Error 412: The reviewer is already assigned', response.body)
        assert_contains('First answer to Q1', response.body)
        assert_contains(
            'Review 1 from test_add_reviewer@google.com', response.body)
        # The admin repeats the 'add reviewer' action. This should fail.
        response = actions.add_reviewer(
            self, LEGACY_REVIEW_UNIT_ID, email, email)
        assert_equals(response.status_int, 302)
        response = self.get(response.location)
        assert_contains(
            'Error 412: The reviewer is already assigned', response.body)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models/utils.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import counters
from models import utils
from tests.functional import actions
from google.appengine.ext import db
class Model(db.Model):
    """Minimal datastore entity used as the mapping target in these tests."""

    # auto_now=True: refreshed on every put(), so ordering by '-create_date'
    # surfaces the most recently written entity.
    create_date = db.DateTimeProperty(auto_now=True, indexed=True)
    number = db.IntegerProperty(indexed=True)
    string = db.StringProperty()
def process(model, number, string=None):
    """Sets the entity's number and string fields and persists it."""
    model.string = string
    model.number = number
    db.put(model)
def stop_mapping_at_5(model):
    """Mapper callback that aborts the QueryMapper run at entity number 5."""
    if model.number != 5:
        return
    raise utils.StopMapping
class QueryMapperTest(actions.TestBase):
    """Tests for utils.QueryMapper."""

    def test_raising_stop_mapping_stops_execution(self):
        # Entities numbered 0..10; the callback raises on 5, so exactly the
        # five entities 0..4 complete processing first.
        db.put([Model(number=value) for value in xrange(11)])
        query = Model.all().order('number')
        processed_count = utils.QueryMapper(query).run(stop_mapping_at_5)
        self.assertEqual(5, processed_count)

    def test_run_processes_empty_result_set(self):
        # No entities exist, so run() reports zero processed.
        self.assertEqual(
            0, utils.QueryMapper(Model.all()).run(process, 1, string='foo'))

    def test_run_processes_one_entity(self):
        """Tests that we can process < batch_size results."""
        Model().put()
        processed_count = utils.QueryMapper(
            Model.all()).run(process, 1, string='foo')
        entity = Model.all().get()
        self.assertEqual(1, processed_count)
        self.assertEqual(1, entity.number)
        self.assertEqual('foo', entity.string)

    def test_run_process_more_than_1000_entities(self):
        """Tests we can process more entities than the old limit of 1k."""
        counter = counters.PerfCounter(
            'test-run-process-more-than-1000-entities-counter',
            'counter for testing increment by QueryMapper')
        db.put([Model() for _ in xrange(1001)])
        # Also pass custom args to QueryMapper ctor.
        mapper = utils.QueryMapper(
            Model.all(), batch_size=50, counter=counter, report_every=0)
        processed_count = mapper.run(process, 1, string='foo')
        # create_date has auto_now, so the last-written entity sorts first.
        last_written = Model.all().order('-create_date').get()
        self.assertEqual(1001, counter.value)
        self.assertEqual(1001, processed_count)
        self.assertEqual(1, last_written.number)
        self.assertEqual('foo', last_written.string)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for modules/upload/upload.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
import os
from controllers import utils
from models import models
from models import student_work
from modules.upload import upload
from tests.functional import actions
from google.appengine.ext import db
class TextFileUploadHandlerTestCase(actions.TestBase):
    """Tests for TextFileUploadHandler."""

    # Name inherited from parent.
    # pylint: disable-msg=g-bad-name
    # Treating code under test as module-protected.
    # pylint: disable-msg=protected-access
    # Don't write repetitive docstrings for well-named tests.
    # pylint: disable-msg=g-missing-docstring

    def setUp(self):
        super(TextFileUploadHandlerTestCase, self).setUp()
        self.contents = 'contents'
        self.email = 'user@example.com'
        self.headers = {'referer': 'http://localhost/path?query=value#fragment'}
        self.unit_id = '1'
        self.user_id = '2'
        self.student = models.Student(
            is_enrolled=True, key_name=self.email, user_id=self.user_id)
        self.student.put()
        # Token minted before any per-test user environ is configured; used
        # by the tests that exercise anonymous/invalid submissions.
        self.xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)

    def tearDown(self):
        upload.custom_module.disable()
        super(TextFileUploadHandlerTestCase, self).tearDown()

    def configure_environ_for_current_user(self):
        """Makes the users API see self.email/self.user_id as logged in."""
        os.environ['USER_EMAIL'] = self.email
        os.environ['USER_ID'] = self.user_id

    def get_submission(self, student_key, unit_id):
        """Returns the Submission for (unit_id, student_key), or None."""
        return db.get(student_work.Submission.get_key(unit_id, student_key))

    def test_bad_xsrf_token_returns_400(self):
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX,
            {'form_xsrf_token': 'bad'}, self.headers, expect_errors=True)
        self.assertEqual(400, response.status_int)

    def test_creates_new_submission(self):
        self.configure_environ_for_current_user()
        user_xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)
        params = {
            'contents': self.contents,
            'form_xsrf_token': user_xsrf_token,
            'unit_id': self.unit_id,
        }
        # Fixed: check the unit under test. The original passed self.user_id
        # as get_submission's unit_id argument, making this check vacuous.
        self.assertIsNone(self.get_submission(self.student.key(), self.unit_id))
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers)
        self.assertEqual(200, response.status_int)
        submissions = student_work.Submission.all().fetch(2)
        self.assertEqual(1, len(submissions))
        # Contents are stored JSON-encoded, hence the surrounding quotes.
        self.assertEqual(u'"%s"' % self.contents, submissions[0].contents)

    def test_empty_contents_returns_400(self):
        self.configure_environ_for_current_user()
        user_xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)
        params = {
            'contents': '',
            'form_xsrf_token': user_xsrf_token,
            'unit_id': self.unit_id,
        }
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers,
            expect_errors=True)
        self.assertEqual(400, response.status_int)

    def test_missing_contents_returns_400(self):
        self.configure_environ_for_current_user()
        user_xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)
        params = {
            'form_xsrf_token': user_xsrf_token,
            'unit_id': self.unit_id,
        }
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers,
            expect_errors=True)
        self.assertEqual(400, response.status_int)

    def test_missing_student_returns_403(self):
        # No user environ configured, so no Student matches the request.
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX,
            {'form_xsrf_token': self.xsrf_token}, self.headers,
            expect_errors=True)
        self.assertEqual(403, response.status_int)

    def test_missing_xsrf_token_returns_400(self):
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, {}, self.headers, expect_errors=True)
        self.assertEqual(400, response.status_int)

    def test_updates_existing_submission(self):
        self.configure_environ_for_current_user()
        user_xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)
        params = {
            'contents': 'old',
            'form_xsrf_token': user_xsrf_token,
            'unit_id': self.unit_id,
        }
        # Fixed: check the unit under test (see test_creates_new_submission).
        self.assertIsNone(self.get_submission(self.student.key(), self.unit_id))
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers)
        self.assertEqual(200, response.status_int)
        params['contents'] = self.contents
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers)
        self.assertEqual(200, response.status_int)
        # The second post overwrites the first; still only one entity.
        submissions = student_work.Submission.all().fetch(2)
        self.assertEqual(1, len(submissions))
        self.assertEqual(u'"%s"' % self.contents, submissions[0].contents)

    def test_unsavable_contents_returns_400(self):
        self.configure_environ_for_current_user()
        user_xsrf_token = utils.XsrfTokenManager.create_xsrf_token(
            upload._XSRF_TOKEN_NAME)
        params = {
            # Entity size = contents + other data, so 1MB here will overflow.
            'contents': 'a' * 1024 * 1024,
            'form_xsrf_token': user_xsrf_token,
            'unit_id': self.unit_id,
        }
        response = self.testapp.post(
            upload._POST_ACTION_SUFFIX, params, self.headers,
            expect_errors=True)
        self.assertEqual(400, response.status_int)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models/review.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import entities
from models import models
from models import student_work
from models import transforms
from tests.functional import actions
from google.appengine.ext import db
# Don't require docstrings on tests. pylint: disable-msg=g-missing-docstring
# setUp is a name chosen by parent. pylint: disable-msg=g-bad-name
class ReferencedModel(entities.BaseEntity):
    """Bare entity that the KeyProperty tests below point at."""
    pass
class UnvalidatedReference(entities.BaseEntity):
    """Entity with a KeyProperty that has no kind constraint."""
    referenced_model_key = student_work.KeyProperty()
class ValidatedReference(entities.BaseEntity):
    """Entity with a KeyProperty restricted to keys of kind ReferencedModel."""
    referenced_model_key = student_work.KeyProperty(kind=ReferencedModel.kind())
class KeyPropertyTest(actions.TestBase):
    """Tests KeyProperty."""

    def setUp(self):
        super(KeyPropertyTest, self).setUp()
        # A persisted ReferencedModel; its key satisfies the kind check on
        # ValidatedReference.referenced_model_key.
        self.referenced_model_key = ReferencedModel().put()

    def test_bidirectional_transforms_succeed(self):
        """Tests that transforms entity<->dict<->json round trips correctly."""
        referenced_model_key = ReferencedModel().put()
        entity = UnvalidatedReference(referenced_model_key=referenced_model_key)
        entity.put()
        # entity -> dict keeps the raw db.Key value.
        transformed = transforms.entity_to_dict(entity)
        self.assertEqual(referenced_model_key, entity.referenced_model_key)
        self.assertEqual(
            referenced_model_key, transformed['referenced_model_key'])
        # dict -> entity picks up a replacement key.
        new_key = ReferencedModel().put()
        transformed['referenced_model_key'] = new_key
        restored = transforms.dict_to_entity(entity, transformed)
        self.assertEqual(new_key, restored.referenced_model_key)
        # dict -> json stringifies the key; json -> dict keeps the string.
        json = transforms.dict_to_json(transformed, None)
        self.assertEqual(str(new_key), json['referenced_model_key'])
        from_json = transforms.json_to_dict(
            json, {'properties': {'referenced_model_key': {'type': 'string'}}})
        self.assertEqual({'referenced_model_key': str(new_key)}, from_json)

    def test_type_not_validated_if_kind_not_passed(self):
        # Any db.Key is accepted when the property was built without kind=.
        model_key = db.Model().put()
        unvalidated = UnvalidatedReference(referenced_model_key=model_key)
        self.assertEqual(model_key, unvalidated.referenced_model_key)

    def test_validation_and_datastore_round_trip_of_keys_succeeds(self):
        """Tests happy path for both validation and (de)serialization."""
        model_with_reference = ValidatedReference(
            referenced_model_key=self.referenced_model_key)
        model_with_reference_key = model_with_reference.put()
        model_with_reference_from_datastore = db.get(model_with_reference_key)
        self.assertEqual(
            self.referenced_model_key,
            model_with_reference_from_datastore.referenced_model_key)
        # Following the stored key reaches the original referenced entity.
        custom_model_from_datastore = db.get(
            model_with_reference_from_datastore.referenced_model_key)
        self.assertEqual(
            self.referenced_model_key, custom_model_from_datastore.key())
        # The property deserializes to a real db.Key, not a string.
        self.assertTrue(isinstance(
            model_with_reference_from_datastore.referenced_model_key,
            db.Key))

    def test_validation_fails(self):
        model_key = db.Model().put()
        # Rejects values that are not keys at all...
        self.assertRaises(
            db.BadValueError, ValidatedReference,
            referenced_model_key='not_a_key')
        # ...and keys of the wrong kind (db.Model, not ReferencedModel).
        self.assertRaises(
            db.BadValueError, ValidatedReference,
            referenced_model_key=model_key)
class ReviewTest(actions.ExportTestBase):
    """Tests export and key handling of student_work.Review."""

    def setUp(self):
        super(ReviewTest, self).setUp()
        # NOTE(review): 'exmaple.com' looks like a typo, but it is harmless --
        # the value is only used, consistently, as a datastore key name.
        self.reviewee_email = 'reviewee@exmaple.com'
        self.reviewer_email = 'reviewer@example.com'
        self.unit_id = 'unit_id'
        self.reviewee = models.Student(key_name=self.reviewee_email)
        self.reviewee_key = self.reviewee.put()
        self.reviewer = models.Student(key_name=self.reviewer_email)
        self.reviewer_key = self.reviewer.put()
        self.review = student_work.Review(
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            unit_id=self.unit_id)
        self.review_key = self.review.put()

    def test_constructor_sets_key_name(self):
        # Review derives its key name from (unit_id, reviewee, reviewer).
        self.assertEqual(
            student_work.Review.key_name(
                self.unit_id, self.reviewee_key, self.reviewer_key),
            self.review_key.name())

    def test_for_export_transforms_correctly(self):
        exported = self.review.for_export(self.transform)
        self.assert_blacklisted_properties_removed(self.review, exported)
        # self.transform prefixes key names with 'transformed_' (per the
        # expected values asserted here and below).
        self.assertEqual(
            'transformed_' + self.reviewer_key.name(),
            exported.reviewer_key.name())

    def test_safe_key_makes_key_names_safe(self):
        safe_review_key = student_work.Review.safe_key(
            self.review_key, self.transform)
        # Treat as module-protected. pylint: disable-msg=protected-access
        _, safe_unit_id, safe_reviewee_key_str, safe_reviewer_key_str = (
            student_work.Review._split_key(safe_review_key.name()))
        safe_reviewee_key = db.Key(encoded=safe_reviewee_key_str)
        safe_reviewer_key = db.Key(encoded=safe_reviewer_key_str)
        # Both embedded student keys are transformed; the unit id passes
        # through unchanged.
        self.assertEqual(
            'transformed_' + self.reviewee_email, safe_reviewee_key.name())
        self.assertEqual(
            'transformed_' + self.reviewer_email, safe_reviewer_key.name())
        self.assertEqual(self.unit_id, safe_unit_id)
class SubmissionTest(actions.ExportTestBase):
    """Tests export and key handling of student_work.Submission."""

    def setUp(self):
        super(SubmissionTest, self).setUp()
        self.unit_id = 'unit_id'
        self.reviewee_email = 'reviewee@example.com'
        self.reviewee = models.Student(key_name=self.reviewee_email)
        self.reviewee_key = self.reviewee.put()
        self.submission = student_work.Submission(
            reviewee_key=self.reviewee_key, unit_id=self.unit_id)
        self.submission_key = self.submission.put()

    def test_constructor_sets_key_name(self):
        expected_name = student_work.Submission.key_name(
            self.unit_id, self.reviewee_key)
        self.assertEqual(expected_name, self.submission_key.name())

    def test_for_export_transforms_correctly(self):
        exported = self.submission.for_export(self.transform)
        self.assert_blacklisted_properties_removed(self.submission, exported)
        expected_key_name = 'transformed_' + self.reviewee_key.name()
        self.assertEqual(expected_key_name, exported.reviewee_key.name())

    def test_safe_key_makes_reviewee_key_name_safe(self):
        safe_key = student_work.Submission.safe_key(
            self.submission_key, self.transform)
        # Treat as module-protected. pylint: disable-msg=protected-access
        _, safe_unit_id, safe_reviewee_key_name = (
            student_work.Submission._split_key(safe_key.name()))
        # The student key name is transformed; the unit id is untouched.
        self.assertEqual(
            'transformed_' + self.reviewee_email, safe_reviewee_key_name)
        self.assertEqual(self.unit_id, safe_unit_id)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for modules/review/review.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
import types
from models import models
from models import student_work
from modules.review import domain
from modules.review import peer
from modules.review import review as review_module
from tests.functional import actions
from google.appengine.ext import db
class ManagerTest(actions.TestBase):
"""Tests for review.Manager."""
# Don't require documentation for self-describing test methods.
# pylint: disable-msg=g-missing-docstring
    def setUp(self):  # Name set by parent. pylint: disable-msg=g-bad-name
        """Creates a reviewee, a reviewer and the submission key they share."""
        super(ManagerTest, self).setUp()
        self.reviewee = models.Student(key_name='reviewee@example.com')
        self.reviewee_key = self.reviewee.put()
        self.reviewer = models.Student(key_name='reviewer@example.com')
        self.reviewer_key = self.reviewer.put()
        self.unit_id = '1'
        # Only the submission's key is built here (the entity itself is never
        # stored); that is all the Manager APIs under test require.
        self.submission_key = db.Key.from_path(
            student_work.Submission.kind(),
            student_work.Submission.key_name(
                reviewee_key=self.reviewee_key, unit_id=self.unit_id))
def test_add_reviewer_adds_new_step_and_summary(self):
step_key = review_module.Manager.add_reviewer(
self.unit_id, self.submission_key, self.reviewee_key,
self.reviewer_key)
step = db.get(step_key)
summary = db.get(step.review_summary_key)
self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
self.assertEqual(self.reviewee_key, step.reviewee_key)
self.assertEqual(self.reviewer_key, step.reviewer_key)
self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
self.assertEqual(self.submission_key, step.submission_key)
self.assertEqual(self.unit_id, step.unit_id)
self.assertEqual(1, summary.assigned_count)
self.assertEqual(0, summary.completed_count)
self.assertEqual(0, summary.expired_count)
self.assertEqual(self.reviewee_key, summary.reviewee_key)
self.assertEqual(self.submission_key, summary.submission_key)
self.assertEqual(self.unit_id, summary.unit_id)
    def test_add_reviewer_existing_raises_assertion_when_summary_missing(self):
        """An existing step whose summary is missing is an internal error."""
        missing_key = db.Key.from_path(
            peer.ReviewSummary.kind(), 'no_summary_found_for_key')
        # Store a step that dangles: its review_summary_key resolves nowhere.
        peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=missing_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            AssertionError, review_module.Manager.add_reviewer, self.unit_id,
            self.submission_key, self.reviewee_key, self.reviewer_key)
    def test_add_reviewer_existing_raises_transition_error_when_assigned(self):
        """Re-adding a reviewer whose step is still ASSIGNED is rejected."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.TransitionError, review_module.Manager.add_reviewer,
            self.unit_id, self.submission_key, self.reviewee_key,
            self.reviewer_key)
    def test_add_reviewer_existing_raises_transition_error_when_completed(self):
        """Re-adding a reviewer whose step is already COMPLETED is rejected."""
        summary_key = peer.ReviewSummary(
            completed_count=1, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.TransitionError, review_module.Manager.add_reviewer,
            self.unit_id, self.submission_key, self.reviewee_key,
            self.reviewer_key)
    def test_add_reviewer_unremoved_existing_changes_expired_to_assigned(self):
        """Re-adding over an EXPIRED (not removed) step re-assigns it."""
        summary_key = peer.ReviewSummary(
            expired_count=1, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        review_module.Manager.add_reviewer(
            self.unit_id, self.submission_key, self.reviewee_key,
            self.reviewer_key)
        step, summary = db.get([step_key, summary_key])
        # Step flips to human-assigned/ASSIGNED; summary counters move the
        # unit of work from expired to assigned.
        self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        self.assertFalse(step.removed)
        self.assertEqual(1, summary.assigned_count)
        self.assertEqual(0, summary.expired_count)
    def test_add_reviewer_removed_unremoves_assigned_step(self):
        """Re-adding over a removed ASSIGNED step clears its removed flag."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        review_module.Manager.add_reviewer(
            self.unit_id, self.submission_key, self.reviewee_key,
            self.reviewer_key)
        step, summary = db.get([step_key, summary_key])
        self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        self.assertFalse(step.removed)
        self.assertEqual(1, summary.assigned_count)
    def test_add_reviewer_removed_unremoves_completed_step(self):
        """Re-adding over a removed COMPLETED step keeps it completed."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
        ).put()
        review_module.Manager.add_reviewer(
            self.unit_id, self.submission_key, self.reviewee_key,
            self.reviewer_key)
        step, summary = db.get([step_key, summary_key])
        # The completed state survives; only the removed flag is cleared.
        self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
        self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
        self.assertFalse(step.removed)
        self.assertEqual(1, summary.completed_count)
    def test_add_reviewer_removed_unremoves_and_assigns_expired_step(self):
        """Re-adding over a removed EXPIRED step unremoves and re-assigns."""
        summary_key = peer.ReviewSummary(
            expired_count=1, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        review_module.Manager.add_reviewer(
            self.unit_id, self.submission_key, self.reviewee_key,
            self.reviewer_key)
        step, summary = db.get([step_key, summary_key])
        self.assertEqual(domain.ASSIGNER_KIND_HUMAN, step.assigner_kind)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        self.assertFalse(step.removed)
        # Counter moves from expired to assigned.
        self.assertEqual(1, summary.assigned_count)
        self.assertEqual(0, summary.expired_count)
    def test_delete_reviewer_marks_step_removed_and_decrements_summary(self):
        """delete_reviewer soft-deletes the step and updates the summary."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        # Sanity-check the precondition before exercising the deletion.
        step, summary = db.get([step_key, summary_key])
        self.assertFalse(step.removed)
        self.assertEqual(1, summary.assigned_count)
        deleted_key = review_module.Manager.delete_reviewer(step_key)
        step, summary = db.get([step_key, summary_key])
        # Soft delete: the entity still exists but is flagged removed.
        self.assertEqual(step_key, deleted_key)
        self.assertTrue(step.removed)
        self.assertEqual(0, summary.assigned_count)
def test_delete_reviewer_raises_key_error_when_step_missing(self):
self.assertRaises(
KeyError, review_module.Manager.delete_reviewer,
db.Key.from_path(peer.ReviewStep.kind(), 'missing_step_key'))
    def test_delete_reviewer_raises_key_error_when_summary_missing(self):
        """Deleting a step whose summary entity is missing raises KeyError."""
        missing_key = db.Key.from_path(
            peer.ReviewSummary.kind(), 'missing_review_summary_key')
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=missing_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            KeyError, review_module.Manager.delete_reviewer, step_key)
    def test_delete_reviewer_raises_removed_error_if_already_removed(self):
        """Deleting an already-removed step raises RemovedError."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.RemovedError, review_module.Manager.delete_reviewer,
            step_key)
def test_expire_review_raises_key_error_when_step_missing(self):
self.assertRaises(
KeyError, review_module.Manager.expire_review,
db.Key.from_path(peer.ReviewStep.kind(), 'missing_step_key'))
    def test_expire_review_raises_key_error_when_summary_missing(self):
        """Expiring a step whose summary entity is missing raises KeyError."""
        missing_key = db.Key.from_path(
            peer.ReviewSummary.kind(), 'missing_review_summary_key')
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=missing_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            KeyError, review_module.Manager.expire_review, step_key)
def test_expire_review_raises_transition_error_when_state_completed(self):
summary_key = peer.ReviewSummary(
completed=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.TransitionError, review_module.Manager.expire_review,
step_key)
    def test_expire_review_raises_transition_error_when_state_expired(self):
        """Expiring an already-EXPIRED step is an invalid transition."""
        summary_key = peer.ReviewSummary(
            expired_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.TransitionError, review_module.Manager.expire_review,
            step_key)
    def test_expire_review_raises_removed_error_when_step_removed(self):
        """Expiring a removed step raises RemovedError."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.RemovedError, review_module.Manager.expire_review, step_key)
    def test_expire_review_transitions_state_and_updates_summary(self):
        """expire_review moves an ASSIGNED step (and its counters) to EXPIRED."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        # Precondition: one assigned, none expired.
        step, summary = db.get([step_key, summary_key])
        self.assertEqual(1, summary.assigned_count)
        self.assertEqual(0, summary.expired_count)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        expired_key = review_module.Manager.expire_review(step_key)
        step, summary = db.get([expired_key, summary_key])
        # Postcondition: the unit of work moved from assigned to expired.
        self.assertEqual(0, summary.assigned_count)
        self.assertEqual(1, summary.expired_count)
        self.assertEqual(domain.REVIEW_STATE_EXPIRED, step.state)
    def test_expire_old_reviews_for_unit_expires_found_reviews(self):
        """expire_old_reviews_for_unit expires every matching assigned step."""
        summary_key = peer.ReviewSummary(
            assigned_count=2, completed_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        first_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        # Second assigned step for a different reviewee/submission in the
        # same unit; both steps should be picked up and expired.
        second_reviewee_key = models.Student(
            key_name='reviewee2@example.com').put()
        second_submission_key = student_work.Submission(
            reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
        second_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=second_reviewee_key,
            reviewer_key=self.reviewer_key,
            submission_key=second_submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        # A zero-minute review window makes every assigned step "old".
        review_module.Manager.expire_old_reviews_for_unit(0, self.unit_id)
        first_step, second_step, summary = db.get(
            [first_step_key, second_step_key, summary_key])
        self.assertEqual(
            [domain.REVIEW_STATE_EXPIRED, domain.REVIEW_STATE_EXPIRED],
            [step.state for step in [first_step, second_step]])
        self.assertEqual(0, summary.assigned_count)
        self.assertEqual(2, summary.expired_count)
    def test_expire_old_reviews_skips_errors_and_continues_processing(self):
        """A step that cannot be expired is skipped, not fatal to the batch."""
        # Create and bind a function that we can swap in to generate a query
        # that will pick up bad results so we can tell that we skip them.
        # (The unfiltered query below also returns COMPLETED steps, which
        # expire_review rejects.)
        query_containing_unprocessable_entities = peer.ReviewStep.all(
            keys_only=True)
        query_fn = types.MethodType(
            lambda x, y, z: query_containing_unprocessable_entities,
            review_module.Manager(), review_module.Manager)
        self.swap(
            review_module.Manager, 'get_expiry_query', query_fn)
        summary_key = peer.ReviewSummary(
            assigned_count=1, completed_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        processable_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        second_reviewee_key = models.Student(
            key_name='reviewee2@example.com').put()
        second_submission_key = student_work.Submission(
            reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
        # COMPLETED steps cannot transition to EXPIRED, so this one errors.
        error_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=second_reviewee_key,
            reviewer_key=self.reviewer_key,
            submission_key=second_submission_key,
            state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
        ).put()
        review_module.Manager.expire_old_reviews_for_unit(0, self.unit_id)
        processed_step, error_step, summary = db.get(
            [processable_step_key, error_step_key, summary_key])
        # The bad step is untouched; the good one was expired anyway.
        self.assertEqual(domain.REVIEW_STATE_COMPLETED, error_step.state)
        self.assertEqual(domain.REVIEW_STATE_EXPIRED, processed_step.state)
        self.assertEqual(0, summary.assigned_count)
        self.assertEqual(1, summary.completed_count)
        self.assertEqual(1, summary.expired_count)
    def test_get_assignment_candidates_query_filters_and_orders_correctly(self):
        """Candidates exclude other units and come back in priority order."""
        # Wrong-unit summary: must be filtered out of the results entirely.
        unused_wrong_unit_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=str(int(self.unit_id) + 1)
        ).put()
        second_reviewee_key = models.Student(
            key_name='reviewee2@example.com').put()
        second_submission_key = student_work.Submission(
            reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
        # NOTE: creation order matters here — 'older' vs 'younger' refers to
        # entity creation time, which breaks ties in the query's ordering.
        older_assigned_and_completed_key = peer.ReviewSummary(
            assigned_count=1, completed_count=1,
            reviewee_key=second_reviewee_key,
            submission_key=second_submission_key, unit_id=self.unit_id
        ).put()
        third_reviewee_key = models.Student(
            key_name='reviewee3@example.com').put()
        third_submission_key = student_work.Submission(
            reviewee_key=third_reviewee_key, unit_id=self.unit_id).put()
        younger_assigned_and_completed_key = peer.ReviewSummary(
            assigned_count=1, completed_count=1,
            reviewee_key=third_reviewee_key,
            submission_key=third_submission_key, unit_id=self.unit_id
        ).put()
        fourth_reviewee_key = models.Student(
            key_name='reviewee4@example.com').put()
        fourth_submission_key = student_work.Submission(
            reviewee_key=fourth_reviewee_key, unit_id=self.unit_id).put()
        completed_but_not_assigned_key = peer.ReviewSummary(
            assigned_count=0, completed_count=1,
            reviewee_key=fourth_reviewee_key,
            submission_key=fourth_submission_key, unit_id=self.unit_id
        ).put()
        fifth_reviewee_key = models.Student(
            key_name='reviewee5@example.com').put()
        fifth_submission_key = student_work.Submission(
            reviewee_key=fifth_reviewee_key, unit_id=self.unit_id).put()
        assigned_but_not_completed_key = peer.ReviewSummary(
            assigned_count=1, completed_count=0,
            reviewee_key=fifth_reviewee_key,
            submission_key=fifth_submission_key, unit_id=self.unit_id
        ).put()
        results = review_module.Manager.get_assignment_candidates_query(
            self.unit_id).fetch(5)
        # Wrong-unit summary is absent; the rest appear in priority order.
        self.assertEqual([
            assigned_but_not_completed_key,
            completed_but_not_assigned_key,
            older_assigned_and_completed_key,
            younger_assigned_and_completed_key
        ], [r.key() for r in results])
    def test_get_expiry_query_filters_and_orders_correctly(self):
        """Expiry query returns only old, unremoved, ASSIGNED steps in unit."""
        summary_key = peer.ReviewSummary(
            assigned_count=2, completed_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        # Excluded: COMPLETED steps never expire.
        unused_completed_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
        ).put()
        second_reviewee_key = models.Student(
            key_name='reviewee2@example.com').put()
        second_submission_key = student_work.Submission(
            reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
        # Excluded: removed steps are ignored.
        unused_removed_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=second_reviewee_key,
            reviewer_key=self.reviewer_key,
            submission_key=second_submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        third_reviewee_key = models.Student(
            key_name='reviewee3@example.com').put()
        third_submission_key = student_work.Submission(
            reviewee_key=third_reviewee_key, unit_id=self.unit_id).put()
        # Excluded: step belongs to a different unit.
        unused_other_unit_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=third_reviewee_key,
            reviewer_key=self.reviewer_key,
            submission_key=third_submission_key,
            state=domain.REVIEW_STATE_ASSIGNED,
            unit_id=str(int(self.unit_id) + 1)
        ).put()
        fourth_reviewee_key = models.Student(
            key_name='reviewee4@example.com').put()
        fourth_submission_key = student_work.Submission(
            reviewee_key=fourth_reviewee_key, unit_id=self.unit_id).put()
        # Included: assigned, unremoved, right unit. Creation order of these
        # two steps determines their order in the query results.
        first_assigned_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=fourth_reviewee_key,
            reviewer_key=self.reviewer_key,
            submission_key=fourth_submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        fifth_reviewee_key = models.Student(
            key_name='reviewee5@example.com').put()
        fifth_submission_key = student_work.Submission(
            reviewee_key=fifth_reviewee_key, unit_id=self.unit_id).put()
        second_assigned_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=fifth_reviewee_key,
            reviewer_key=self.reviewer_key,
            submission_key=fifth_submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        zero_review_window_query = review_module.Manager.get_expiry_query(
            0, self.unit_id)
        future_review_window_query = review_module.Manager.get_expiry_query(
            1, self.unit_id)
        self.assertEqual(
            [first_assigned_step_key, second_assigned_step_key],
            zero_review_window_query.fetch(3))
        # No items are > 1 minute old, so we expect an empty result set.
        self.assertEqual(None, future_review_window_query.get())
    def test_get_new_review_creates_step_and_updates_summary(self):
        """get_new_review auto-assigns a step and bumps assigned_count."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        summary = db.get(summary_key)
        self.assertEqual(0, summary.assigned_count)
        step_key = review_module.Manager.get_new_review(
            self.unit_id, self.reviewer_key)
        step, summary = db.get([step_key, summary_key])
        # Unlike add_reviewer, this path is machine-assigned.
        self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
        self.assertEqual(summary.key(), step.review_summary_key)
        self.assertEqual(self.reviewee_key, step.reviewee_key)
        self.assertEqual(self.reviewer_key, step.reviewer_key)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        self.assertEqual(self.submission_key, step.submission_key)
        self.assertEqual(self.unit_id, step.unit_id)
        self.assertEqual(1, summary.assigned_count)
    def test_get_new_review_raises_key_error_when_summary_missing(self):
        """Summary vanishing between candidate pick and lookup raises."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        # Create and bind a function that we can swap in to pick the review
        # candidate but as a side effect delete the review summary, causing
        # the lookup by key to fail.
        def pick_and_remove(unused_cls, candidates):
            db.delete(summary_key)
            return candidates[0]
        fn = types.MethodType(
            pick_and_remove, review_module.Manager(), review_module.Manager)
        self.swap(
            review_module.Manager, '_choose_assignment_candidate', fn)
        self.assertRaises(
            KeyError, review_module.Manager.get_new_review, self.unit_id,
            self.reviewer_key)
    def test_get_new_review_raises_not_assignable_when_already_assigned(self):
        """A reviewer cannot be assigned the same submission twice."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        # The reviewer already has an active step for the only candidate.
        unused_already_assigned_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.NotAssignableError, review_module.Manager.get_new_review,
            self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_already_completed(self):
summary_key = peer.ReviewSummary(
completed=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
already_completed_unremoved_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
db.delete(already_completed_unremoved_step_key)
unused_already_completed_removed_step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
review_summary_key=summary_key, reviewee_key=self.reviewee_key,
reviewer_key=self.reviewer_key, submission_key=self.submission_key,
state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_review_is_for_self(self):
peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewer_key,
submission_key=self.submission_key, unit_id=self.unit_id
).put()
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id,
self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_no_candidates(self):
self.assertRaises(
domain.NotAssignableError, review_module.Manager.get_new_review,
self.unit_id, self.reviewer_key)
    def test_get_new_review_raises_not_assignable_when_retry_limit_hit(self):
        """With max_retries=0, a single contended candidate exhausts retries."""
        higher_priority_summary = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id)
        higher_priority_summary_key = higher_priority_summary.put()
        second_reviewee_key = models.Student(
            key_name='reviewee2@example.com').put()
        second_submission_key = student_work.Submission(
            reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
        lower_priority_summary_key = peer.ReviewSummary(
            completed_count=1, reviewee_key=second_reviewee_key,
            submission_key=second_submission_key, unit_id=self.unit_id
        ).put()
        self.assertEqual(  # Ensure we'll process higher priority first.
            [higher_priority_summary_key, lower_priority_summary_key],
            [c.key() for c in
             review_module.Manager.get_assignment_candidates_query(
                 self.unit_id).fetch(2)])
        # Create and bind a function that we can swap in to pick the review
        # candidate but as a side-effect updates the highest priority candidate
        # so we'll skip it and retry.
        def pick_and_update(unused_cls, candidates):
            db.put(higher_priority_summary)
            return candidates[0]
        fn = types.MethodType(
            pick_and_update, review_module.Manager(), review_module.Manager)
        self.swap(
            review_module.Manager, '_choose_assignment_candidate', fn)
        # max_retries=0 means the first skipped candidate ends the search.
        self.assertRaises(
            domain.NotAssignableError, review_module.Manager.get_new_review,
            self.unit_id, self.reviewer_key, max_retries=0)
    def test_get_new_review_raises_not_assignable_when_summary_updated(self):
        """A candidate modified mid-assignment is rejected (optimistic lock)."""
        summary = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id)
        summary.put()
        # Create and bind a function that we can swap in to pick the review
        # candidate but as a side-effect updates the summary so we'll reject it
        # as a candidate.
        def pick_and_update(unused_cls, candidates):
            db.put(summary)
            return candidates[0]
        fn = types.MethodType(
            pick_and_update, review_module.Manager(), review_module.Manager)
        self.swap(
            review_module.Manager, '_choose_assignment_candidate', fn)
        self.assertRaises(
            domain.NotAssignableError, review_module.Manager.get_new_review,
            self.unit_id, self.reviewer_key)
    def test_get_new_review_reassigns_removed_assigned_step(self):
        """A removed ASSIGNED step is recycled rather than duplicated."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        unused_already_assigned_removed_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        step_key = review_module.Manager.get_new_review(
            self.unit_id, self.reviewer_key)
        step, summary = db.get([step_key, summary_key])
        # Assigner kind flips from HUMAN to AUTO on reassignment.
        self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
        self.assertFalse(step.removed)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        self.assertEqual(1, summary.assigned_count)
    def test_get_new_review_reassigns_removed_expired_step(self):
        """A removed EXPIRED step is recycled back into the ASSIGNED state."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        unused_already_expired_removed_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        step_key = review_module.Manager.get_new_review(
            self.unit_id, self.reviewer_key)
        step, summary = db.get([step_key, summary_key])
        self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
        self.assertFalse(step.removed)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        self.assertEqual(1, summary.assigned_count)
        self.assertEqual(0, summary.expired_count)
    def test_get_new_review_retries_successfully(self):
        """After a contended first candidate, the next candidate is assigned."""
        higher_priority_summary = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id)
        higher_priority_summary_key = higher_priority_summary.put()
        second_reviewee_key = models.Student(
            key_name='reviewee2@example.com').put()
        second_submission_key = student_work.Submission(
            reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
        lower_priority_summary_key = peer.ReviewSummary(
            completed_count=1, reviewee_key=second_reviewee_key,
            submission_key=second_submission_key, unit_id=self.unit_id
        ).put()
        self.assertEqual(  # Ensure we'll process higher priority first.
            [higher_priority_summary_key, lower_priority_summary_key],
            [c.key() for c in
             review_module.Manager.get_assignment_candidates_query(
                 self.unit_id).fetch(2)])
        # Create and bind a function that we can swap in to pick the review
        # candidate but as a side-effect updates the highest priority candidate
        # so we'll skip it and retry.
        def pick_and_update(unused_cls, candidates):
            db.put(higher_priority_summary)
            return candidates[0]
        fn = types.MethodType(
            pick_and_update, review_module.Manager(), review_module.Manager)
        self.swap(
            review_module.Manager, '_choose_assignment_candidate', fn)
        step_key = review_module.Manager.get_new_review(
            self.unit_id, self.reviewer_key)
        # The retry landed on the lower-priority (uncontended) candidate.
        step = db.get(step_key)
        self.assertEqual(lower_priority_summary_key, step.review_summary_key)
    def test_get_review_step_keys_by_returns_list_of_keys(self):
        """Only steps belonging to the given reviewer/unit are returned."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        matching_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        # Same unit but a different reviewer: must not be returned.
        non_matching_reviewer = models.Student(key_name='reviewer2@example.com')
        non_matching_reviewer_key = non_matching_reviewer.put()
        unused_non_matching_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=non_matching_reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED,
            unit_id=self.unit_id
        ).put()
        self.assertEqual(
            [matching_step_key],
            review_module.Manager.get_review_step_keys_by(
                self.unit_id, self.reviewer_key))
    def test_get_review_step_keys_by_returns_keys_in_sorted_order(self):
        """Steps come back in the order they were created."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        # NOTE: the put() order of these two steps is what the expected
        # result below depends on.
        first_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        second_reviewee_key = models.Student(
            key_name='reviewee2@example.com').put()
        second_submission_key = student_work.Submission(
            reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
        second_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=second_reviewee_key,
            reviewer_key=self.reviewer_key,
            submission_key=second_submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        self.assertEqual(
            [first_step_key, second_step_key],
            review_module.Manager.get_review_step_keys_by(
                self.unit_id, self.reviewer_key))
    def test_get_review_step_keys_by_returns_empty_list_when_no_matches(self):
        """Steps with a different reviewer or unit are not matched."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        non_matching_reviewer = models.Student(key_name='reviewer2@example.com')
        non_matching_reviewer_key = non_matching_reviewer.put()
        # This step belongs to a different reviewer, so it must be excluded.
        unused_non_matching_step_different_reviewer_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=non_matching_reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED,
            unit_id=self.unit_id,
        ).put()
        # This step belongs to a different unit, so it must be excluded too.
        unused_non_matching_step_different_unit_id_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED,
            unit_id=str(int(self.unit_id) + 1),
        ).put()
        self.assertEqual(
            [], review_module.Manager.get_review_step_keys_by(
                self.unit_id, self.reviewer_key))
    def test_get_review_steps_by_keys(self):
        """Hits map to equivalent domain objects; misses map to None."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        # Build a well-formed key for a step that was never stored so the
        # fetch contains exactly one miss.
        second_reviewer_key = models.Student(
            key_name='reviewer2@example.com').put()
        missing_step_key = db.Key.from_path(
            peer.ReviewStep.kind(),
            peer.ReviewStep.key_name(
                self.submission_key, second_reviewer_key))
        model_objects = db.get([step_key, missing_step_key])
        domain_objects = review_module.Manager.get_review_steps_by_keys(
            [step_key, missing_step_key])
        model_step, model_miss = model_objects
        domain_step, domain_miss = domain_objects
        self.assertEqual(2, len(model_objects))
        self.assertEqual(2, len(domain_objects))
        self.assertIsNone(model_miss)
        self.assertIsNone(domain_miss)
        # Every property of the domain object mirrors the datastore model.
        self.assertEqual(model_step.assigner_kind, domain_step.assigner_kind)
        self.assertEqual(model_step.change_date, domain_step.change_date)
        self.assertEqual(model_step.create_date, domain_step.create_date)
        self.assertEqual(model_step.key(), domain_step.key)
        self.assertEqual(model_step.removed, domain_step.removed)
        self.assertEqual(model_step.review_key, domain_step.review_key)
        self.assertEqual(
            model_step.review_summary_key, domain_step.review_summary_key)
        self.assertEqual(model_step.reviewee_key, domain_step.reviewee_key)
        self.assertEqual(model_step.reviewer_key, domain_step.reviewer_key)
        self.assertEqual(model_step.state, domain_step.state)
        self.assertEqual(model_step.submission_key, domain_step.submission_key)
        self.assertEqual(model_step.unit_id, domain_step.unit_id)
    def test_get_reviews_by_keys(self):
        """Hits map to equivalent domain reviews; misses map to None."""
        review_key = student_work.Review(
            contents='contents', reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, unit_id=self.unit_id
        ).put()
        # Key for a review that was never stored (different unit id).
        missing_review_key = db.Key.from_path(
            student_work.Review.kind(),
            student_work.Review.key_name(
                str(int(self.unit_id) + 1), self.reviewee_key,
                self.reviewer_key))
        model_objects = db.get([review_key, missing_review_key])
        domain_objects = review_module.Manager.get_reviews_by_keys(
            [review_key, missing_review_key])
        model_review, model_miss = model_objects
        domain_review, domain_miss = domain_objects
        self.assertEqual(2, len(model_objects))
        self.assertEqual(2, len(domain_objects))
        self.assertIsNone(model_miss)
        self.assertIsNone(domain_miss)
        self.assertEqual(model_review.contents, domain_review.contents)
        self.assertEqual(model_review.key(), domain_review.key)
    def test_get_submission_and_review_step_keys_no_steps(self):
        """With a submission but no steps, returns (submission_key, [])."""
        student_work.Submission(
            reviewee_key=self.reviewee_key, unit_id=self.unit_id).put()
        peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        self.assertEqual(
            (self.submission_key, []),
            review_module.Manager.get_submission_and_review_step_keys(
                self.unit_id, self.reviewee_key))
    def test_get_submission_and_review_step_keys_with_steps(self):
        """Only steps bound to the reviewee's own submission are returned."""
        student_work.Submission(
            reviewee_key=self.reviewee_key, unit_id=self.unit_id).put()
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id
        ).put()
        matching_step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        # A step attached to a different reviewee's submission must not
        # appear in the result.
        non_matching_reviewee_key = models.Student(
            key_name='reviewee2@example.com').put()
        non_matching_submission_key = student_work.Submission(
            contents='contents2', reviewee_key=non_matching_reviewee_key,
            unit_id=self.unit_id).put()
        unused_non_matching_step_different_submission_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key,
            reviewee_key=non_matching_reviewee_key,
            reviewer_key=self.reviewer_key,
            submission_key=non_matching_submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        self.assertEqual(
            (self.submission_key, [matching_step_key]),
            review_module.Manager.get_submission_and_review_step_keys(
                self.unit_id, self.reviewee_key))
def test_get_submission_and_review_step_keys_returns_none_on_miss(self):
self.assertIsNone(
review_module.Manager.get_submission_and_review_step_keys(
self.unit_id, self.reviewee_key))
def test_get_submissions_by_keys(self):
submission_key = student_work.Submission(
contents='contents', reviewee_key=self.reviewee_key,
unit_id=self.unit_id).put()
missing_submission_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
str(int(self.unit_id) + 1), self.reviewee_key))
domain_models = db.get([submission_key, missing_submission_key])
domain_objects = review_module.Manager.get_submissions_by_keys(
[submission_key, missing_submission_key])
model_submission, model_miss = domain_models
domain_submission, domain_miss = domain_objects
self.assertEqual(2, len(domain_models))
self.assertEqual(2, len(domain_objects))
self.assertIsNone(model_miss)
self.assertIsNone(domain_miss)
self.assertEqual(model_submission.contents, domain_submission.contents)
self.assertEqual(model_submission.key(), domain_submission.key)
def test_start_review_process_for_succeeds(self):
key = review_module.Manager.start_review_process_for(
self.unit_id, self.submission_key, self.reviewee_key)
summary = db.get(key)
self.assertEqual(self.reviewee_key, summary.reviewee_key)
self.assertEqual(self.submission_key, summary.submission_key)
self.assertEqual(self.unit_id, summary.unit_id)
def test_start_review_process_for_throws_if_already_started(self):
collision = peer.ReviewSummary(
reviewee_key=self.reviewee_key, submission_key=self.submission_key,
unit_id=self.unit_id)
collision.put()
self.assertRaises(
domain.ReviewProcessAlreadyStartedError,
review_module.Manager.start_review_process_for,
self.unit_id, self.submission_key, self.reviewee_key)
    def test_write_review_raises_constraint_error_if_key_but_no_review(self):
        """A step whose review_key points at no stored review is rejected."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        # review_key is a valid key shape but no Review entity exists for it.
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.ConstraintError, review_module.Manager.write_review,
            step_key, 'payload')
    def test_write_review_raises_constraint_error_if_no_summary(self):
        """A step whose summary key points at no stored summary is rejected."""
        missing_summary_key = db.Key.from_path(
            peer.ReviewSummary.kind(),
            peer.ReviewSummary.key_name(self.submission_key))
        review_key = student_work.Review(
            contents='contents', reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key,
            unit_id=self.unit_id).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=review_key, review_summary_key=missing_summary_key,
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED,
            unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.ConstraintError, review_module.Manager.write_review,
            step_key, 'payload')
def test_write_review_raises_key_error_if_no_step(self):
bad_step_key = db.Key.from_path(peer.ReviewStep.kind(), 'missing')
self.assertRaises(
KeyError, review_module.Manager.write_review, bad_step_key,
'payload')
    def test_write_review_raises_removed_error_if_step_removed(self):
        """Writing to a step marked removed=True raises RemovedError."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN, removed=True,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.RemovedError, review_module.Manager.write_review, step_key,
            'payload')
    def test_write_review_raises_transition_error_if_step_completed(self):
        """Writing to an already-completed step raises TransitionError."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
        ).put()
        self.assertRaises(
            domain.TransitionError, review_module.Manager.write_review,
            step_key, 'payload')
    def test_write_review_with_mark_completed_false(self):
        """Draft write updates contents but leaves state and counts alone."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        review_key = student_work.Review(
            contents='old_contents', reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, unit_id=self.unit_id).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=review_key, review_summary_key=summary_key,
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        updated_step_key = review_module.Manager.write_review(
            step_key, 'new_contents', mark_completed=False)
        self.assertEqual(step_key, updated_step_key)
        step, summary = db.get([updated_step_key, summary_key])
        updated_review = db.get(step.review_key)
        # Still assigned: counts unchanged, only the contents replaced.
        self.assertEqual(1, summary.assigned_count)
        self.assertEqual(0, summary.completed_count)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        self.assertEqual('new_contents', updated_review.contents)
    def test_write_review_with_no_review_mark_completed_false(self):
        """Draft write creates the review when the step has none yet."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        # Precondition: the step starts with no review attached.
        self.assertIsNone(db.get(step_key).review_key)
        updated_step_key = review_module.Manager.write_review(
            step_key, 'contents', mark_completed=False)
        self.assertEqual(step_key, updated_step_key)
        step, summary = db.get([updated_step_key, summary_key])
        updated_review = db.get(step.review_key)
        self.assertEqual(1, summary.assigned_count)
        self.assertEqual(0, summary.completed_count)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
        # The step now points at the newly created review.
        self.assertEqual(step.review_key, updated_review.key())
        self.assertEqual('contents', updated_review.contents)
    def test_write_review_with_no_review_mark_completed_true(self):
        """Completing write creates the review and moves counts to completed."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_summary_key=summary_key, reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        # Precondition: the step starts with no review attached.
        self.assertIsNone(db.get(step_key).review_key)
        updated_step_key = review_module.Manager.write_review(
            step_key, 'contents')
        self.assertEqual(step_key, updated_step_key)
        step, summary = db.get([updated_step_key, summary_key])
        updated_review = db.get(step.review_key)
        # mark_completed defaults to True: assigned -> completed.
        self.assertEqual(0, summary.assigned_count)
        self.assertEqual(1, summary.completed_count)
        self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
        self.assertEqual(step.review_key, updated_review.key())
        self.assertEqual('contents', updated_review.contents)
    def test_write_review_with_state_assigned_and_mark_completed_true(self):
        """Completing an assigned step updates contents and summary counts."""
        summary_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        review_key = student_work.Review(
            contents='old_contents', reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, unit_id=self.unit_id).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=review_key, review_summary_key=summary_key,
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        updated_step_key = review_module.Manager.write_review(
            step_key, 'new_contents')
        self.assertEqual(step_key, updated_step_key)
        step, summary = db.get([updated_step_key, summary_key])
        updated_review = db.get(step.review_key)
        # assigned -> completed transition reflected in the summary counts.
        self.assertEqual(0, summary.assigned_count)
        self.assertEqual(1, summary.completed_count)
        self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
        self.assertEqual('new_contents', updated_review.contents)
    def test_write_review_with_state_expired_and_mark_completed_true(self):
        """Completing an expired step moves the expired count to completed."""
        summary_key = peer.ReviewSummary(
            expired_count=1, reviewee_key=self.reviewee_key,
            submission_key=self.submission_key, unit_id=self.unit_id
        ).put()
        review_key = student_work.Review(
            contents='old_contents', reviewee_key=self.reviewee_key,
            reviewer_key=self.reviewer_key, unit_id=self.unit_id).put()
        step_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_key=review_key, review_summary_key=summary_key,
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            submission_key=self.submission_key,
            state=domain.REVIEW_STATE_EXPIRED, unit_id=self.unit_id
        ).put()
        updated_step_key = review_module.Manager.write_review(
            step_key, 'new_contents')
        self.assertEqual(step_key, updated_step_key)
        step, summary = db.get([updated_step_key, summary_key])
        updated_review = db.get(step.review_key)
        # expired -> completed transition reflected in the summary counts.
        self.assertEqual(1, summary.completed_count)
        self.assertEqual(0, summary.expired_count)
        self.assertEqual(domain.REVIEW_STATE_COMPLETED, step.state)
        self.assertEqual('new_contents', updated_review.contents)
    def test_write_review_with_two_students_creates_different_reviews(self):
        """Writes for two reviewees create two distinct Review entities."""
        reviewee1 = models.Student(key_name='reviewee1@example.com')
        reviewee1_key = reviewee1.put()
        reviewee2 = models.Student(key_name='reviewee2@example.com')
        reviewee2_key = reviewee2.put()
        submission1_key = db.Key.from_path(
            student_work.Submission.kind(),
            student_work.Submission.key_name(
                reviewee_key=reviewee1_key, unit_id=self.unit_id))
        submission2_key = db.Key.from_path(
            student_work.Submission.kind(),
            student_work.Submission.key_name(
                reviewee_key=reviewee2_key, unit_id=self.unit_id))
        summary1_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=reviewee1_key,
            submission_key=submission1_key, unit_id=self.unit_id
        ).put()
        step1_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_summary_key=summary1_key, reviewee_key=reviewee1_key,
            reviewer_key=self.reviewer_key, submission_key=submission1_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        # First write creates a review for reviewee1's submission.
        self.assertIsNone(db.get(step1_key).review_key)
        updated_step1_key = review_module.Manager.write_review(
            step1_key, 'contents1', mark_completed=False)
        self.assertEqual(step1_key, updated_step1_key)
        summary2_key = peer.ReviewSummary(
            assigned_count=1, reviewee_key=reviewee2_key,
            submission_key=submission2_key, unit_id=self.unit_id
        ).put()
        step2_key = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_HUMAN,
            review_summary_key=summary2_key, reviewee_key=reviewee2_key,
            reviewer_key=self.reviewer_key, submission_key=submission2_key,
            state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
        ).put()
        # Second write must create its own review, not reuse reviewee1's.
        self.assertIsNone(db.get(step2_key).review_key)
        updated_step2_key = review_module.Manager.write_review(
            step2_key, 'contents2', mark_completed=False)
        self.assertEqual(step2_key, updated_step2_key)
        step1, summary1 = db.get([updated_step1_key, summary1_key])
        updated_review = db.get(step1.review_key)
        self.assertEqual(1, summary1.assigned_count)
        self.assertEqual(0, summary1.completed_count)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step1.state)
        self.assertEqual(step1.review_key, updated_review.key())
        self.assertEqual('contents1', updated_review.contents)
        step2, summary2 = db.get([updated_step2_key, summary2_key])
        updated_review = db.get(step2.review_key)
        self.assertEqual(1, summary2.assigned_count)
        self.assertEqual(0, summary2.completed_count)
        self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step2.state)
        self.assertEqual(step2.review_key, updated_review.key())
        self.assertEqual('contents2', updated_review.contents)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for modules/course_explorer/course_explorer.py."""
__author__ = 'rahulsingal@google.com (Rahul Singal)'
from controllers import sites
from models import config
from models import models
from models import transforms
from models.models import PersonalProfile
from modules.course_explorer import course_explorer
from modules.course_explorer import student
import actions
from actions import assert_contains
from actions import assert_does_not_contain
from actions import assert_equals
class BaseExplorerTest(actions.TestBase):
    """Base class for testing explorer pages.

    Enables the shared-student-profile and course-explorer config flags for
    the duration of each test and restores the overrides afterwards.
    """

    def setUp(self):  # pylint: disable-msg=g-bad-name
        super(BaseExplorerTest, self).setUp()
        config.Registry.test_overrides[
            models.CAN_SHARE_STUDENT_PROFILE.name] = True
        config.Registry.test_overrides[
            course_explorer.GCB_ENABLE_COURSE_EXPLORER_PAGE.name] = True

    def tearDown(self):  # pylint: disable-msg=g-bad-name
        # Reset the whole override dict so flags do not leak between tests.
        config.Registry.test_overrides = {}
        super(BaseExplorerTest, self).tearDown()
class CourseExplorerTest(BaseExplorerTest):
    """Tests course_explorer module."""

    def test_single_uncompleted_course(self):
        """Tests for a single available course."""
        # This call should redirect to explorer page.
        response = self.get('/')
        assert_contains('/explorer', response.location)
        name = 'Test student courses page'
        email = 'Student'
        actions.login(email)
        # Test the explorer page.
        response = self.get('/explorer')
        assert_equals(response.status_int, 200)
        assert_contains('Register', response.body)
        # Navbar should not contain profile tab.
        assert_does_not_contain(
            '<a href="/explorer/profile">Profile</a>', response.body)
        # Test 'my courses' page when a student is not enrolled in any course.
        response = self.get('/explorer/courses')
        assert_equals(response.status_int, 200)
        assert_contains('You are not currently enrolled in any course',
                        response.body)
        # Test 'my courses' page when a student is enrolled in all courses.
        actions.register(self, name)
        response = self.get('/explorer/courses')
        assert_equals(response.status_int, 200)
        assert_contains('Go to course', response.body)
        assert_does_not_contain('You are not currently enrolled in any course',
                                response.body)
        # After the student registers for a course,
        # profile tab should be visible in navbar.
        assert_contains(
            '<a href="/explorer/profile">Profile</a>', response.body)
        # Test profile page.
        response = self.get('/explorer/profile')
        assert_contains('<td>%s</td>' % email, response.body)
        assert_contains('<td>%s</td>' % name, response.body)
        assert_contains('Progress', response.body)
        assert_does_not_contain('View score', response.body)

    def test_single_completed_course(self):
        """Tests when a single completed course is present."""
        email = 'test_assessments@google.com'
        name = 'Test Assessments'
        # Register.
        actions.login(email)
        actions.register(self, name)
        response = self.get('/explorer')
        # Until the course is completed, the explorer page should not show
        # the 'View score' button.
        assert_does_not_contain('View score', response.body)
        # Assign a grade to the course enrolled to mark it complete.
        profile = PersonalProfile.get_by_key_name(email)
        info = {'final_grade': 'A'}
        # NOTE(review): the empty-string key appears to address the default
        # course namespace -- confirm against PersonalProfile.course_info.
        course_info_dict = {'': info}
        profile.course_info = transforms.dumps(course_info_dict)
        profile.put()
        # Check if 'View score' text is visible on profile page.
        response = self.get('/explorer/profile')
        assert_contains('View score', response.body)
        # Check if 'Go to course' button is not visible on explorer page.
        response = self.get('/explorer')
        assert_does_not_contain('Go to course', response.body)
        # Check if 'View score' button is visible on explorer page.
        response = self.get('/explorer')
        assert_contains('View score', response.body)

    def test_multiple_course(self):
        """Tests when multiple courses are available."""
        sites.setup_courses('course:/test::ns_test, course:/:/')
        name = 'Test completed course'
        email = 'Student'
        # Make the course available by monkey-patching the environment.
        get_environ_old = sites.ApplicationContext.get_environ

        def get_environ_new(self):
            environ = get_environ_old(self)
            environ['course']['now_available'] = True
            return environ

        sites.ApplicationContext.get_environ = get_environ_new
        actions.login(email)
        actions.register(self, name)
        response = self.get('/explorer/courses')
        # Assert if 'View course list' text is shown on my course page.
        assert_contains('View course list', response.body)
        # Clean up app_context.
        sites.ApplicationContext.get_environ = get_environ_old
        sites.reset_courses()
class CourseExplorerDisabledTest(actions.TestBase):
    """Tests when course explorer is disabled."""

    def test_course_explorer_disabled(self):
        """Tests for disabled course explorer page."""
        # This call should redirect to course page not explorer page.
        response = self.get('/')
        assert_contains('/course', response.location)
        name = 'Test explorer page off'
        email = 'student'
        # Register
        actions.login(email)
        actions.register(self, name)
        response = self.get('/course')
        # 'My courses' and 'Profile' tab should not be present in tab bar.
        assert_does_not_contain('My Courses', response.body)
        assert_does_not_contain('Profile', response.body)
class GlobalProfileTest(BaseExplorerTest):
    """Tests the global student profile page of the course explorer."""

    def test_change_of_name(self):
        """Tests changing the student name on the global profile page."""
        # This call should redirect to explorer page.
        response = self.get('/')
        assert_contains('/explorer', response.location)
        name = 'Test global profile page'
        email = 'student_global_profile@example.com'
        actions.login(email)
        # Test the explorer page.
        response = self.get('/explorer')
        assert_equals(response.status_int, 200)
        assert_contains('Register', response.body)
        # Test 'my courses' page when a student is enrolled in all courses.
        actions.register(self, name)
        # Test profile page.
        response = self.get('/explorer/profile')
        assert_contains('<td>%s</td>' % email, response.body)
        assert_contains('<td>%s</td>' % name, response.body)
        # Change the name now.
        new_name = 'New global name'
        response.form.set('name', new_name)
        response = self.submit(response.form)
        assert_equals(response.status_int, 302)
        response = self.get('/explorer/profile')
        assert_contains('<td>%s</td>' % email, response.body)
        assert_contains('<td>%s</td>' % new_name, response.body)
        # Change name with bad xsrf token should be forbidden.
        response = self.get('/explorer/profile')
        assert_equals(response.status_int, 200)
        new_name = 'New Bad global name'
        response.form.set('name', new_name)
        response.form.set('xsrf_token', 'asdfsdf')
        response = response.form.submit(expect_errors=True)
        assert_equals(response.status_int, 403)
        # Change name with empty name should fail.
        response = self.get('/explorer/profile')
        assert_equals(response.status_int, 200)
        new_name = ''
        response.form.set('name', new_name)
        response = response.form.submit(expect_errors=True)
        assert_equals(response.status_int, 400)
        # Change name with overlong name should fail for str.
        response = self.get('/explorer/profile')
        assert_equals(response.status_int, 200)
        # Constant is module-protected. pylint: disable-msg=protected-access
        new_name = 'a' * (student._STRING_PROPERTY_MAX_BYTES + 1)
        response.form.set('name', new_name)
        response = response.form.submit(expect_errors=True)
        assert_equals(response.status_int, 400)
        # Change name with overlong name should fail for unicode.
        response = self.get('/explorer/profile')
        assert_equals(response.status_int, 200)
        # \u03a3 == Sigma. len == 1 for unicode; 2 for utf-8 encoded str.
        new_name = u'\u03a3' + ('a' * (student._STRING_PROPERTY_MAX_BYTES - 1))
        response.form.set('name', new_name)
        response = response.form.submit(expect_errors=True)
        assert_equals(response.status_int, 400)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models.models."""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import entities
from tests.functional import actions
from google.appengine.ext import db
class FirstChild(entities.BaseEntity):
    """Test entity with one exported and one blacklisted property."""
    first_kept = db.Property()
    first_removed = db.Property()
    # first_removed must be stripped by for_export().
    _PROPERTY_EXPORT_BLACKLIST = [first_removed]
class SecondChild(FirstChild):
    """Subclass entity adding its own kept and blacklisted properties."""
    second_kept = db.Property()
    second_removed = db.Property()
    # Blacklists are expected to combine up the inheritance chain.
    _PROPERTY_EXPORT_BLACKLIST = [second_removed]
class BaseEntityTestCase(actions.TestBase):
    """Tests BaseEntity.for_export() blacklist and key handling."""

    # Disable complaints about docstrings for self-documenting tests.
    # pylint: disable-msg=g-missing-docstring

    def test_for_export_returns_populated_export_entity_with_key(self):
        first_kept = 'first_kept'
        second_kept = 'second_kept'
        second = SecondChild(first_kept=first_kept, second_kept=second_kept)
        db.put(second)
        # Identity transform: safe_key should equal the original key.
        second_export = second.for_export(lambda x: x)
        self.assertEqual(first_kept, second_export.first_kept)
        self.assertEqual(second_kept, second_export.second_kept)
        self.assertEqual(second.key(), second_export.safe_key)

    def test_for_export_removes_properties_up_inheritance_chain(self):
        first = FirstChild()
        second = SecondChild()
        db.put([first, second])
        first_export = first.for_export(lambda x: x)
        second_export = second.for_export(lambda x: x)
        self.assertTrue(hasattr(first_export, FirstChild.first_kept.name))
        self.assertFalse(hasattr(first_export, FirstChild.first_removed.name))
        # The subclass export must honor both its own blacklist and the
        # parent's.
        self.assertTrue(hasattr(second_export, SecondChild.first_kept.name))
        self.assertTrue(hasattr(second_export, SecondChild.second_kept.name))
        self.assertFalse(hasattr(second_export, SecondChild.first_removed.name))
        self.assertFalse(
            hasattr(second_export, SecondChild.second_removed.name))
class ExportEntityTestCase(actions.TestBase):
    """Tests the read-only ExportEntity wrapper."""

    # Name determined by parent. pylint: disable-msg=g-bad-name
    def setUp(self):
        super(ExportEntityTestCase, self).setUp()
        self.entity = entities.ExportEntity(safe_key='foo')

    def test_constructor_requires_safe_key(self):
        self.assertRaises(AssertionError, entities.ExportEntity)

    def test_put_raises_not_implemented_error(self):
        # Export entities are snapshots; persisting them is forbidden.
        self.assertRaises(NotImplementedError, self.entity.put)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for modules/review/peer.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import models
from models import student_work
from modules.review import domain
from modules.review import peer
from tests.functional import actions
from google.appengine.ext import db
# No docstrings on tests. pylint: disable-msg=g-missing-docstring
# Use mixedCase names from parent. pylint: disable-msg=g-bad-name
class ReviewStepTest(actions.ExportTestBase):
    """Tests peer.ReviewStep key naming and sanitized export."""

    def setUp(self):
        super(ReviewStepTest, self).setUp()
        self.reviewee_email = 'reviewee@example.com'
        self.reviewee_key = models.Student(key_name=self.reviewee_email).put()
        self.reviewer_email = 'reviewer@example.com'
        self.reviewer_key = models.Student(key_name=self.reviewer_email).put()
        self.unit_id = 'unit_id'
        self.submission_key = student_work.Submission(
            reviewee_key=self.reviewee_key, unit_id=self.unit_id).put()
        self.review_key = student_work.Review(
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            unit_id=self.unit_id).put()
        self.review_summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id).put()
        self.step = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO, review_key=self.review_key,
            review_summary_key=self.review_summary_key,
            reviewee_key=self.reviewee_key, reviewer_key=self.reviewer_key,
            state=domain.REVIEW_STATE_ASSIGNED,
            submission_key=self.submission_key, unit_id=self.unit_id)
        self.step_key = self.step.put()

    def test_constructor_sets_key_name(self):
        """Tests construction of key_name, put of entity with key_name set."""
        self.assertEqual(
            peer.ReviewStep.key_name(self.submission_key, self.reviewer_key),
            self.step_key.name())

    def test_for_export_transforms_correctly(self):
        """Every referenced key is replaced by its transformed safe_key."""
        exported = self.step.for_export(self.transform)
        self.assert_blacklisted_properties_removed(self.step, exported)
        self.assertEqual(
            student_work.Review.safe_key(self.review_key, self.transform),
            exported.review_key)
        self.assertEqual(
            peer.ReviewSummary.safe_key(
                self.review_summary_key, self.transform),
            exported.review_summary_key)
        self.assertEqual(
            models.Student.safe_key(self.reviewee_key, self.transform),
            exported.reviewee_key)
        self.assertEqual(
            models.Student.safe_key(self.reviewer_key, self.transform),
            exported.reviewer_key)
        self.assertEqual(
            student_work.Submission.safe_key(
                self.submission_key, self.transform),
            exported.submission_key)

    def test_safe_key_transforms_or_retains_sensitive_data(self):
        """safe_key rewrites the emails embedded in the step's key name."""
        original_key = peer.ReviewStep.safe_key(self.step_key, lambda x: x)
        transformed_key = peer.ReviewStep.safe_key(
            self.step_key, self.transform)
        # Parse the reviewee/reviewer components back out of the composite
        # key name string.
        get_reviewee_key_name = (
            lambda x: x.split('%s:' % self.unit_id)[-1].split(')')[0])
        get_reviewer_key_name = lambda x: x.rsplit(':')[-1].strip(')')
        self.assertEqual(
            self.reviewee_email, get_reviewee_key_name(original_key.name()))
        self.assertEqual(
            self.reviewer_email, get_reviewer_key_name(original_key.name()))
        self.assertEqual(
            'transformed_' + self.reviewee_email,
            get_reviewee_key_name(transformed_key.name()))
        self.assertEqual(
            'transformed_' + self.reviewer_email,
            get_reviewer_key_name(transformed_key.name()))
class ReviewSummaryTest(actions.ExportTestBase):
    """Tests peer.ReviewSummary counters, export, and key transforms."""

    def setUp(self):
        super(ReviewSummaryTest, self).setUp()
        self.unit_id = 'unit_id'
        self.reviewee_email = 'reviewee@example.com'
        # Reuse self.reviewee_email rather than repeating the literal, for
        # consistency with ReviewStepTest and so the two cannot drift apart.
        self.reviewee_key = models.Student(
            key_name=self.reviewee_email).put()
        self.submission_key = student_work.Submission(
            reviewee_key=self.reviewee_key, unit_id=self.unit_id).put()
        self.summary = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id)
        self.summary_key = self.summary.put()

    def test_constructor_sets_key_name(self):
        """Constructor must derive key_name from the submission key."""
        summary_key = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id).put()
        self.assertEqual(
            peer.ReviewSummary.key_name(self.submission_key),
            summary_key.name())

    def test_decrement_count(self):
        """Tests decrement_count."""
        summary = peer.ReviewSummary(
            assigned_count=1, completed_count=1, expired_count=1,
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id)
        # Each state decrements only its own counter.
        self.assertEqual(1, summary.assigned_count)
        summary.decrement_count(domain.REVIEW_STATE_ASSIGNED)
        self.assertEqual(0, summary.assigned_count)
        self.assertEqual(1, summary.completed_count)
        summary.decrement_count(domain.REVIEW_STATE_COMPLETED)
        self.assertEqual(0, summary.completed_count)
        self.assertEqual(1, summary.expired_count)
        summary.decrement_count(domain.REVIEW_STATE_EXPIRED)
        self.assertEqual(0, summary.expired_count)
        # Unknown states are rejected rather than silently ignored.
        self.assertRaises(ValueError, summary.decrement_count, 'bad_state')

    def test_increment_count(self):
        """Tests increment_count."""
        summary = peer.ReviewSummary(
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id)
        self.assertRaises(ValueError, summary.increment_count, 'bad_state')
        # Each state increments only its own counter.
        self.assertEqual(0, summary.assigned_count)
        summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
        self.assertEqual(1, summary.assigned_count)
        self.assertEqual(0, summary.completed_count)
        summary.increment_count(domain.REVIEW_STATE_COMPLETED)
        self.assertEqual(1, summary.completed_count)
        self.assertEqual(0, summary.expired_count)
        summary.increment_count(domain.REVIEW_STATE_EXPIRED)
        self.assertEqual(1, summary.expired_count)
        check_overflow = peer.ReviewSummary(
            assigned_count=domain.MAX_UNREMOVED_REVIEW_STEPS - 1,
            reviewee_key=self.reviewee_key, submission_key=self.submission_key,
            unit_id=self.unit_id)
        # Increment to the limit succeeds...
        check_overflow.increment_count(domain.REVIEW_STATE_ASSIGNED)
        # ...but not beyond.
        self.assertRaises(
            db.BadValueError,
            check_overflow.increment_count, domain.REVIEW_STATE_ASSIGNED)

    def test_for_export_transforms_correctly(self):
        """Exported summary must have all foreign keys transformed."""
        exported = self.summary.for_export(self.transform)
        self.assert_blacklisted_properties_removed(self.summary, exported)
        self.assertEqual(
            models.Student.safe_key(self.reviewee_key, self.transform),
            exported.reviewee_key)
        self.assertEqual(
            student_work.Submission.safe_key(
                self.submission_key, self.transform),
            exported.submission_key)

    def test_safe_key_transforms_or_retains_sensitive_data(self):
        """Identity transform retains the email; self.transform rewrites it."""
        original_key = peer.ReviewSummary.safe_key(
            self.summary_key, lambda x: x)
        transformed_key = peer.ReviewSummary.safe_key(
            self.summary_key, self.transform)
        # Reviewee email is the last ':'-delimited segment of the key name.
        get_reviewee_key_name = lambda x: x.rsplit(':', 1)[-1].strip(')')
        self.assertEqual(
            self.reviewee_email, get_reviewee_key_name(original_key.name()))
        self.assertEqual(
            'transformed_' + self.reviewee_email,
            get_reviewee_key_name(transformed_key.name()))
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Student progress trackers."""
__author__ = 'Sean Lip (sll@google.com)'
import datetime
import logging
import os
from tools import verify
import courses
from models import QuestionDAO
from models import QuestionGroupDAO
from models import StudentPropertyEntity
import transforms
class UnitLessonCompletionTracker(object):
    """Tracks student completion for a unit/lesson-based linear course."""

    # Name under which the per-student progress map is stored in
    # StudentPropertyEntity (value is a JSON-encoded dict of key -> int).
    PROPERTY_KEY = 'linear-course-completion'

    # Here are representative examples of the keys for the various entities
    # used in this class:
    # Unit 1: u.1
    # Unit 1, Lesson 1: u.1.l.1
    # Unit 1, Lesson 1, Activity 0: u.1.l.1.a.0
    # Unit 1, Lesson 1, Activity 0, Block 4: u.1.l.1.a.0.b.4
    # Assessment 'Pre': s.Pre
    # At the moment, we do not divide assessments into blocks.
    #
    # The following keys were added in v1.5:
    # Unit 1, Lesson 1, HTML: u.1.l.1.h.0
    # Unit 1, Lesson 1, HTML, Component with instanceid id: u.1.l.1.h.0.c.id
    #
    # The number after the 'h' and 'a' codes is always zero, since a lesson may
    # have at most one HTML body and one activity.
    #
    # IMPORTANT NOTE: The values of the keys mean different things depending on
    # whether the entity is a composite entity or not.
    # If it is a composite entity (unit, lesson, activity), then the value is
    #   - 0 if none of its sub-entities has been completed
    #   - 1 if some, but not all, of its sub-entities have been completed
    #   - 2 if all its sub-entities have been completed.
    # If it is not a composite entity (i.e. block, assessment, component), then
    # the value is just the number of times the event has been triggered.

    # Constants for recording the state of composite entities.
    # TODO(sll): Change these to enums.
    NOT_STARTED_STATE = 0
    IN_PROGRESS_STATE = 1
    COMPLETED_STATE = 2

    MULTIPLE_CHOICE = 'multiple choice'
    MULTIPLE_CHOICE_GROUP = 'multiple choice group'
    QUESTION_GROUP = 'question-group'
    QUESTION = 'question'

    # Maps entity names to the single-letter codes used in progress keys.
    EVENT_CODE_MAPPING = {
        'unit': 'u',
        'lesson': 'l',
        'activity': 'a',
        'html': 'h',
        'block': 'b',
        'assessment': 's',
        'component': 'c',
    }
    # Codes whose progress value is a 0/1/2 state rather than an event count.
    COMPOSITE_ENTITIES = [
        EVENT_CODE_MAPPING['unit'],
        EVENT_CODE_MAPPING['lesson'],
        EVENT_CODE_MAPPING['activity'],
        EVENT_CODE_MAPPING['html']
    ]

    # Names of component tags that are tracked for progress calculations.
    TRACKABLE_COMPONENTS = frozenset([
        'question',
        'question-group',
    ])

    def __init__(self, course):
        self._course = course

    def _get_course(self):
        # Accessor used throughout instead of touching self._course directly.
        return self._course

    def get_activity_as_python(self, unit_id, lesson_id):
        """Gets the corresponding activity as a Python object."""
        root_name = 'activity'
        course = self._get_course()
        # Activities are stored as JavaScript files; verify converts the
        # source to Python and evaluates it in a restricted scope.
        activity_text = course.app_context.fs.get(
            os.path.join(course.app_context.get_home(),
                         course.get_activity_filename(unit_id, lesson_id)))
        content, noverify_text = verify.convert_javascript_to_python(
            activity_text, root_name)
        activity = verify.evaluate_python_expression_from_text(
            content, root_name, verify.Activity().scope, noverify_text)
        return activity

    # The _get_*_key helpers build the dotted progress keys documented in the
    # comment block at the top of this class.
    def _get_unit_key(self, unit_id):
        return '%s.%s' % (self.EVENT_CODE_MAPPING['unit'], unit_id)

    def _get_lesson_key(self, unit_id, lesson_id):
        return '%s.%s.%s.%s' % (
            self.EVENT_CODE_MAPPING['unit'], unit_id,
            self.EVENT_CODE_MAPPING['lesson'], lesson_id
        )

    def _get_activity_key(self, unit_id, lesson_id):
        return '%s.%s.%s.%s.%s.%s' % (
            self.EVENT_CODE_MAPPING['unit'], unit_id,
            self.EVENT_CODE_MAPPING['lesson'], lesson_id,
            self.EVENT_CODE_MAPPING['activity'], 0
        )

    def _get_html_key(self, unit_id, lesson_id):
        return '%s.%s.%s.%s.%s.%s' % (
            self.EVENT_CODE_MAPPING['unit'], unit_id,
            self.EVENT_CODE_MAPPING['lesson'], lesson_id,
            self.EVENT_CODE_MAPPING['html'], 0
        )

    def _get_component_key(self, unit_id, lesson_id, component_id):
        return '%s.%s.%s.%s.%s.%s.%s.%s' % (
            self.EVENT_CODE_MAPPING['unit'], unit_id,
            self.EVENT_CODE_MAPPING['lesson'], lesson_id,
            self.EVENT_CODE_MAPPING['html'], 0,
            self.EVENT_CODE_MAPPING['component'], component_id
        )

    def _get_block_key(self, unit_id, lesson_id, block_id):
        return '%s.%s.%s.%s.%s.%s.%s.%s' % (
            self.EVENT_CODE_MAPPING['unit'], unit_id,
            self.EVENT_CODE_MAPPING['lesson'], lesson_id,
            self.EVENT_CODE_MAPPING['activity'], 0,
            self.EVENT_CODE_MAPPING['block'], block_id
        )

    def _get_assessment_key(self, assessment_id):
        return '%s.%s' % (self.EVENT_CODE_MAPPING['assessment'], assessment_id)

    def get_entity_type_from_key(self, progress_entity_key):
        # Keys alternate code/id pairs ('u.1.l.2.a.0'), so the second-to-last
        # dotted token is the entity code of the deepest entity.
        return progress_entity_key.split('.')[-2]

    def determine_if_composite_entity(self, progress_entity_key):
        # True if the key's value is a 0/1/2 state rather than a counter.
        return self.get_entity_type_from_key(
            progress_entity_key) in self.COMPOSITE_ENTITIES

    def get_valid_component_ids(self, unit_id, lesson_id):
        """Returns a list of component ids representing trackable components."""
        question_component_ids = [cpt['instanceid'] for cpt in (
            self._get_course().get_question_components(
                unit_id, lesson_id)) if cpt['instanceid']]
        question_group_component_ids = [cpt['instanceid'] for cpt in (
            self._get_course().get_question_group_components(
                unit_id, lesson_id)) if cpt['instanceid']]
        return question_component_ids + question_group_component_ids

    def get_valid_block_ids(self, unit_id, lesson_id):
        """Returns a list of block ids representing interactive activities."""
        valid_blocks_data = self._get_valid_blocks_data(unit_id, lesson_id)
        return [block[0] for block in valid_blocks_data]

    def get_valid_blocks(self, unit_id, lesson_id):
        """Returns a list of blocks representing interactive activities."""
        valid_blocks_data = self._get_valid_blocks_data(unit_id, lesson_id)
        return [block[1] for block in valid_blocks_data]

    def _get_valid_blocks_data(self, unit_id, lesson_id):
        """Returns a list of (b_id, block) representing trackable activities."""
        valid_blocks = []

        # Check if activity exists before calling get_activity_as_python.
        unit = self._get_course().find_unit_by_id(unit_id)
        lesson = self._get_course().find_lesson_by_id(unit, lesson_id)
        if unit and lesson and lesson.activity:
            # Get the activity corresponding to this unit/lesson combination.
            activity = self.get_activity_as_python(unit_id, lesson_id)
            for block_id in range(len(activity['activity'])):
                block = activity['activity'][block_id]
                # Only dict-typed entries are interactive blocks; string
                # entries are static text and are not tracked.
                if isinstance(block, dict):
                    valid_blocks.append((block_id, block))
        return valid_blocks

    def get_id_to_questions_dict(self):
        """Returns a dict that maps each question to a list of its answers.

        Returns:
            A dict that represents the questions in lessons. The keys of this
            dict are question ids, and the corresponding values are dicts, each
            containing the following five key-value pairs:
            - answer_counts: a list of 0's with length corresponding to number
                of choices a question has.
            - location: str. href value of the location of the question in the
                course.
            - num_attempts: int. Number of attempts for this question. This is
                used as the denominator when calculating the average score for a
                question. This value may differ from the sum of the elements in
                'answer_counts' because of event entities that record an answer
                but not a score.
            - score: int. Aggregated value of the scores.
            - label: str. Human readable identifier for this question.
        """
        id_to_questions = {}
        for unit in self._get_course().get_units_of_type(verify.UNIT_TYPE_UNIT):
            unit_id = unit.unit_id
            for lesson in self._get_course().get_lessons(unit_id):
                lesson_id = lesson.lesson_id
                # Add mapping dicts for questions in old-style activities.
                if lesson.activity:
                    blocks = self._get_valid_blocks_data(unit_id, lesson_id)
                    for block_index, (block_id, block) in enumerate(blocks):
                        if block['questionType'] == self.MULTIPLE_CHOICE:
                            # Old style question.
                            id_to_questions.update(
                                self._create_old_style_question_dict(
                                    block, block_id, block_index, unit, lesson))
                        elif (block['questionType'] ==
                              self.MULTIPLE_CHOICE_GROUP):
                            # Old style multiple choice group.
                            for ind, q in enumerate(block['questionsList']):
                                id_to_questions.update(
                                    self._create_old_style_question_dict(
                                        q, block_id, block_index, unit,
                                        lesson, index=ind))

                # Add mapping dicts for CBv1.5 style questions.
                if lesson.objectives:
                    for cpt in self._get_course().get_question_components(
                            unit_id, lesson_id):
                        # CB v1.5 style questions.
                        id_to_questions.update(
                            self._create_v15_lesson_question_dict(
                                cpt, unit, lesson))
                    for cpt in self._get_course().get_question_group_components(
                            unit_id, lesson_id):
                        # CB v1.5 style question groups.
                        id_to_questions.update(
                            self._create_v15_lesson_question_group_dict(
                                cpt, unit, lesson))
        return id_to_questions

    def get_id_to_assessments_dict(self):
        """Returns a dict that maps each question to a list of its answers.

        Returns:
            A dict that represents the questions in assessments. The keys of
            this dict are question ids, and the corresponding values are dicts,
            each containing the following five key-value pairs:
            - answer_counts: a list of 0's with length corresponding to number
                of choices a question has.
            - location: str. href value of the location of the question in the
                course.
            - num_attempts: int. Number of attempts for this question. This is
                used as the denominator when calculating the average score for a
                question. This value may differ from the sum of the elements in
                'answer_counts' because of event entities that record an answer
                but not a score.
            - score: int. Aggregated value of the scores.
            - label: str. Human readable identifier for this question.
        """
        id_to_assessments = {}
        for assessment in self._get_course().get_assessment_list():
            if not self._get_course().needs_human_grader(assessment):
                assessment_components = self._get_course(
                    ).get_assessment_components(assessment.unit_id)
                # CB v1.5 style assessments.
                for cpt in assessment_components:
                    if cpt['cpt_name'] == self.QUESTION_GROUP:
                        id_to_assessments.update(
                            self._create_v15_assessment_question_group_dict(
                                cpt, assessment))
                    elif cpt['cpt_name'] == self.QUESTION:
                        id_to_assessments.update(
                            self._create_v15_assessment_question_dict(
                                cpt, assessment))

                # Old style javascript assessments.
                try:
                    content = self._get_course().get_assessment_content(
                        assessment)
                    id_to_assessments.update(
                        self._create_old_style_assessment_dict(
                            content['assessment'], assessment))
                except AttributeError:
                    # Assessment file does not exist.
                    continue
        return id_to_assessments

    def _get_link_for_assessment(self, assessment_id):
        return 'assessment?name=%s' % (assessment_id)

    def _get_link_for_activity(self, unit_id, lesson_id):
        return 'activity?unit=%s&lesson=%s' % (unit_id, lesson_id)

    def _get_link_for_lesson(self, unit_id, lesson_id):
        return 'unit?unit=%s&lesson=%s' % (unit_id, lesson_id)

    def _create_v15_question_dict(self, q_id, label, link, num_choices):
        """Returns a dict that represents CB v1.5 style question."""
        return {
            q_id: {
                'answer_counts': [0] * num_choices,
                'label': label,
                'location': link,
                'score': 0,
                'num_attempts': 0
            }
        }

    def _create_v15_lesson_question_dict(self, cpt, unit, lesson):
        # Maps a single v1.5 lesson question component to its stats dict.
        # Only multiple-choice questions are tracked; others yield {}.
        try:
            question = QuestionDAO.load(cpt['quid'])
            if question.type == question.MULTIPLE_CHOICE:
                q_id = 'u.%s.l.%s.c.%s' % (
                    unit.unit_id, lesson.lesson_id, cpt['instanceid'])
                label = 'Unit %s Lesson %s, Question %s' % (
                    unit.index, lesson.index, question.description)
                link = self._get_link_for_lesson(unit.unit_id, lesson.lesson_id)
                num_choices = len(question.dict['choices'])
                return self._create_v15_question_dict(
                    q_id, label, link, num_choices)
            else:
                return {}
        except Exception as e:  # pylint: disable-msg=broad-except
            # Malformed component data should not break the whole report.
            logging.error(
                'Failed to process the question data. '
                'Error: %s, data: %s', e, cpt)
            return {}

    def _create_v15_lesson_question_group_dict(self, cpt, unit, lesson):
        # Expands a v1.5 lesson question-group component into one stats dict
        # entry per multiple-choice member question.
        try:
            question_group = QuestionGroupDAO.load(cpt['qgid'])
            questions = {}
            for ind, quid in enumerate(question_group.question_ids):
                question = QuestionDAO.load(quid)
                if question.type == question.MULTIPLE_CHOICE:
                    q_id = 'u.%s.l.%s.c.%s.i.%s' % (
                        unit.unit_id, lesson.lesson_id, cpt['instanceid'], ind)
                    label = ('Unit %s Lesson %s, Question Group %s Question %s'
                             % (unit.index, lesson.index,
                                question_group.description,
                                question.description))
                    link = self._get_link_for_lesson(
                        unit.unit_id, lesson.lesson_id)
                    num_choices = len(question.dict['choices'])
                    questions.update(self._create_v15_question_dict(
                        q_id, label, link, num_choices))
            return questions
        except Exception as e:  # pylint: disable-msg=broad-except
            # Malformed component data should not break the whole report.
            logging.error(
                'Failed to process the question data. '
                'Error: %s, data: %s', e, cpt)
            return {}

    def _create_v15_assessment_question_group_dict(self, cpt, assessment):
        # Same as the lesson question-group variant, but keyed by assessment.
        try:
            question_group = QuestionGroupDAO.load(cpt['qgid'])
            questions = {}
            for ind, quid in enumerate(question_group.question_ids):
                question = QuestionDAO.load(quid)
                if question.type == question.MULTIPLE_CHOICE:
                    q_id = 's.%s.c.%s.i.%s' % (
                        assessment.unit_id, cpt['instanceid'], ind)
                    label = '%s, Question Group %s Question %s' % (
                        assessment.title, question_group.description,
                        question.description)
                    link = self._get_link_for_assessment(assessment.unit_id)
                    num_choices = len(question.dict['choices'])
                    questions.update(
                        self._create_v15_question_dict(
                            q_id, label, link, num_choices))
            return questions
        except Exception as e:  # pylint: disable-msg=broad-except
            # Malformed component data should not break the whole report.
            logging.error(
                'Failed to process the question data. '
                'Error: %s, data: %s', e, cpt)
            return {}

    def _create_v15_assessment_question_dict(self, cpt, assessment):
        # Maps a single v1.5 assessment question component to its stats dict.
        try:
            question = QuestionDAO.load(cpt['quid'])
            if question.type == question.MULTIPLE_CHOICE:
                q_id = 's.%s.c.%s' % (assessment.unit_id, cpt['instanceid'])
                label = '%s, Question %s' % (
                    assessment.title, question.description)
                link = self._get_link_for_assessment(assessment.unit_id)
                num_choices = len(question.dict['choices'])
                return self._create_v15_question_dict(
                    q_id, label, link, num_choices)
            else:
                return {}
        except Exception as e:  # pylint: disable-msg=broad-except
            # Malformed component data should not break the whole report.
            logging.error(
                'Failed to process the question data. '
                'Error: %s, data: %s', e, cpt)
            return {}

    def _create_old_style_question_dict(self, block, block_id, block_index,
                                        unit, lesson, index=None):
        # Maps an old-style (JavaScript activity) question to its stats dict.
        # index is set when the question is part of a multiple choice group.
        try:
            if index is not None:
                # Question is in a multiple choice group.
                b_id = 'u.%s.l.%s.b.%s.i.%s' % (
                    unit.unit_id, lesson.lesson_id, block_id, index)
                label = 'Unit %s Lesson %s Activity, Item %s Part %s' % (
                    unit.index, lesson.index, block_index + 1, index + 1)
            else:
                b_id = 'u.%s.l.%s.b.%s' % (
                    unit.unit_id, lesson.lesson_id, block_id)
                label = 'Unit %s Lesson %s Activity, Item %s' % (
                    unit.index, lesson.index, block_index + 1)
            return {
                b_id: {
                    'answer_counts': [0] * len(block['choices']),
                    'label': label,
                    'location': self._get_link_for_activity(
                        unit.unit_id, lesson.lesson_id),
                    'score': 0,
                    'num_attempts': 0
                }
            }
        except Exception as e:  # pylint: disable-msg=broad-except
            # Malformed block data should not break the whole report.
            logging.error(
                'Failed to process the question data. '
                'Error: %s, data: %s', e, block)
            return {}

    def _create_old_style_assessment_dict(self, content, assessment):
        # Maps each multiple-choice question in an old-style assessment to its
        # stats dict; questions without 'choices' are skipped.
        try:
            questions = {}
            for ind, question in enumerate(content['questionsList']):
                if 'choices' in question:
                    questions.update(
                        {
                            's.%s.i.%s' % (assessment.unit_id, ind): {
                                'answer_counts': [0] * len(question['choices']),
                                'label': '%s, Question %s' % (
                                    assessment.title, ind + 1),
                                'location': self._get_link_for_assessment(
                                    assessment.unit_id),
                                'score': 0,
                                'num_attempts': 0
                            }
                        }
                    )
            return questions
        except Exception as e:  # pylint: disable-msg=broad-except
            # Malformed assessment data should not break the whole report.
            logging.error(
                'Failed to process the question data. '
                'Error: %s, data: %s', e, content)
            return {}

    def _update_unit(self, progress, event_key):
        """Updates a unit's progress if all its lessons have been completed."""
        split_event_key = event_key.split('.')
        assert len(split_event_key) == 2
        unit_id = split_event_key[1]

        # Completion is final: never downgrade a completed unit.
        if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
            return

        # Record that at least one lesson in this unit has been completed.
        self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)

        # Check if all lessons in this unit have been completed.
        lessons = self._get_course().get_lessons(unit_id)
        for lesson in lessons:
            if (self.get_lesson_status(
                    progress,
                    unit_id, lesson.lesson_id) != self.COMPLETED_STATE):
                return

        # Record that all lessons in this unit have been completed.
        self._set_entity_value(progress, event_key, self.COMPLETED_STATE)

    def _update_lesson(self, progress, event_key):
        """Updates a lesson's progress based on the progress of its children."""
        split_event_key = event_key.split('.')
        assert len(split_event_key) == 4
        unit_id = split_event_key[1]
        lesson_id = split_event_key[3]

        # Completion is final: never downgrade a completed lesson.
        if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
            return

        # Record that at least one part of this lesson has been completed.
        self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)

        lessons = self._get_course().get_lessons(unit_id)
        for lesson in lessons:
            if str(lesson.lesson_id) == lesson_id and lesson:
                # Is the activity completed?
                if (lesson.activity and self.get_activity_status(
                        progress, unit_id, lesson_id) != self.COMPLETED_STATE):
                    return

                # Are all components of the lesson completed?
                if (self.get_html_status(
                        progress, unit_id, lesson_id) != self.COMPLETED_STATE):
                    return

        # Record that all activities in this lesson have been completed.
        self._set_entity_value(progress, event_key, self.COMPLETED_STATE)

    def _update_activity(self, progress, event_key):
        """Updates activity's progress when all interactive blocks are done."""
        split_event_key = event_key.split('.')
        assert len(split_event_key) == 6
        unit_id = split_event_key[1]
        lesson_id = split_event_key[3]

        # Completion is final: never downgrade a completed activity.
        if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
            return

        # Record that at least one block in this activity has been completed.
        self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)

        valid_block_ids = self.get_valid_block_ids(unit_id, lesson_id)
        for block_id in valid_block_ids:
            if not self.is_block_completed(
                    progress, unit_id, lesson_id, block_id):
                return

        # Record that all blocks in this activity have been completed.
        self._set_entity_value(progress, event_key, self.COMPLETED_STATE)

    def _update_html(self, progress, event_key):
        """Updates html's progress when all interactive blocks are done."""
        split_event_key = event_key.split('.')
        assert len(split_event_key) == 6
        unit_id = split_event_key[1]
        lesson_id = split_event_key[3]

        # Completion is final: never downgrade a completed lesson body.
        if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
            return

        # Record that at least one block in this activity has been completed.
        self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)

        cpt_ids = self.get_valid_component_ids(unit_id, lesson_id)
        for cpt_id in cpt_ids:
            if not self.is_component_completed(
                    progress, unit_id, lesson_id, cpt_id):
                return

        # Record that all blocks in this activity have been completed.
        self._set_entity_value(progress, event_key, self.COMPLETED_STATE)

    # Maps a composite entity name to its updater. Values are plain functions
    # (not bound methods) because the class body has not finished executing
    # here; they are invoked explicitly as f(self, progress, event_key).
    UPDATER_MAPPING = {
        'activity': _update_activity,
        'html': _update_html,
        'lesson': _update_lesson,
        'unit': _update_unit
    }

    # Dependencies for recording derived events. The key is the current
    # event, and the value is a tuple, each element of which contains:
    # - the dependent entity to be updated
    # - the transformation to apply to the id of the current event to get the
    #   id for the derived parent event
    DERIVED_EVENTS = {
        'block': (
            {
                'entity': 'activity',
                'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2])),
            },
        ),
        'activity': (
            {
                'entity': 'lesson',
                'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2])),
            },
        ),
        'lesson': (
            {
                'entity': 'unit',
                'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2])),
            },
        ),
        'component': (
            {
                'entity': 'html',
                'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2])),
            },
        ),
        'html': (
            {
                'entity': 'lesson',
                'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2])),
            },
        ),
    }

    def put_activity_completed(self, student, unit_id, lesson_id):
        """Records that the given student has completed an activity."""
        if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
            return
        self._put_event(
            student, 'activity', self._get_activity_key(unit_id, lesson_id))

    def put_html_completed(self, student, unit_id, lesson_id):
        """Records that the given student has completed a lesson page."""
        if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
            return
        self._put_event(
            student, 'html', self._get_html_key(unit_id, lesson_id))

    def put_block_completed(self, student, unit_id, lesson_id, block_id):
        """Records that the given student has completed an activity block."""
        if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
            return
        if block_id not in self.get_valid_block_ids(unit_id, lesson_id):
            return
        self._put_event(
            student,
            'block',
            self._get_block_key(unit_id, lesson_id, block_id)
        )

    def put_component_completed(self, student, unit_id, lesson_id, cpt_id):
        """Records completion of a component in a lesson body."""
        if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
            return
        if cpt_id not in self.get_valid_component_ids(unit_id, lesson_id):
            return
        self._put_event(
            student,
            'component',
            self._get_component_key(unit_id, lesson_id, cpt_id)
        )

    def put_assessment_completed(self, student, assessment_id):
        """Records that the given student has completed the given assessment."""
        if not self._get_course().is_valid_assessment_id(assessment_id):
            return
        self._put_event(
            student, 'assessment', self._get_assessment_key(assessment_id))

    def put_activity_accessed(self, student, unit_id, lesson_id):
        """Records that the given student has accessed this activity."""
        # This method currently exists because we need to mark activities
        # without interactive blocks as 'completed' when they are accessed.
        if not self.get_valid_block_ids(unit_id, lesson_id):
            self.put_activity_completed(student, unit_id, lesson_id)

    def put_html_accessed(self, student, unit_id, lesson_id):
        """Records that the given student has accessed this lesson page."""
        # This method currently exists because we need to mark lesson bodies
        # without interactive blocks as 'completed' when they are accessed.
        if not self.get_valid_component_ids(unit_id, lesson_id):
            self.put_html_completed(student, unit_id, lesson_id)

    def _put_event(self, student, event_entity, event_key):
        """Starts a cascade of updates in response to an event taking place."""
        if student.is_transient or event_entity not in self.EVENT_CODE_MAPPING:
            return

        progress = self.get_or_create_progress(student)

        self._update_event(
            student, progress, event_entity, event_key, direct_update=True)

        progress.updated_on = datetime.datetime.now()
        progress.put()

    def _update_event(self, student, progress, event_entity, event_key,
                      direct_update=False):
        """Updates statistics for the given event, and for derived events.

        Args:
          student: the student
          progress: the StudentProgressEntity for the student
          event_entity: the name of the affected entity (unit, lesson, etc.)
          event_key: the key for the recorded event
          direct_update: True if this event is being updated explicitly; False
              if it is being auto-updated.
        """
        if direct_update or event_entity not in self.UPDATER_MAPPING:
            if event_entity in self.UPDATER_MAPPING:
                # This is a derived event, so directly mark it as completed.
                self._set_entity_value(
                    progress, event_key, self.COMPLETED_STATE)
            else:
                # This is not a derived event, so increment its counter by one.
                self._inc(progress, event_key)
        else:
            self.UPDATER_MAPPING[event_entity](self, progress, event_key)

        # Recursively propagate the change up to parent entities (e.g. a
        # completed block may complete its activity, lesson, then unit).
        if event_entity in self.DERIVED_EVENTS:
            for derived_event in self.DERIVED_EVENTS[event_entity]:
                self._update_event(
                    student=student,
                    progress=progress,
                    event_entity=derived_event['entity'],
                    event_key=derived_event['generate_parent_id'](event_key),
                )

    # The get_*_status accessors return the raw stored value for a progress
    # key: a 0/1/2 state for composite entities, a count otherwise, or None
    # if nothing has been recorded yet.
    def get_unit_status(self, progress, unit_id):
        return self._get_entity_value(progress, self._get_unit_key(unit_id))

    def get_lesson_status(self, progress, unit_id, lesson_id):
        return self._get_entity_value(
            progress, self._get_lesson_key(unit_id, lesson_id))

    def get_activity_status(self, progress, unit_id, lesson_id):
        return self._get_entity_value(
            progress, self._get_activity_key(unit_id, lesson_id))

    def get_html_status(self, progress, unit_id, lesson_id):
        return self._get_entity_value(
            progress, self._get_html_key(unit_id, lesson_id))

    def get_block_status(self, progress, unit_id, lesson_id, block_id):
        return self._get_entity_value(
            progress, self._get_block_key(unit_id, lesson_id, block_id))

    def get_assessment_status(self, progress, assessment_id):
        return self._get_entity_value(
            progress, self._get_assessment_key(assessment_id))

    # Non-composite entities store event counts; a positive count means the
    # entity has been completed at least once.
    def is_block_completed(self, progress, unit_id, lesson_id, block_id):
        value = self._get_entity_value(
            progress, self._get_block_key(unit_id, lesson_id, block_id))
        return value is not None and value > 0

    def is_component_completed(self, progress, unit_id, lesson_id, cpt_id):
        value = self._get_entity_value(
            progress, self._get_component_key(unit_id, lesson_id, cpt_id))
        return value is not None and value > 0

    def is_assessment_completed(self, progress, assessment_id):
        value = self._get_entity_value(
            progress, self._get_assessment_key(assessment_id))
        return value is not None and value > 0

    @classmethod
    def get_or_create_progress(cls, student):
        """Fetches the student's progress entity, creating it if absent."""
        progress = StudentPropertyEntity.get(student, cls.PROPERTY_KEY)
        if not progress:
            progress = StudentPropertyEntity.create(
                student=student, property_name=cls.PROPERTY_KEY)
            progress.put()
        return progress

    def get_unit_progress(self, student):
        """Returns a dict with the states of each unit."""
        if student.is_transient:
            return {}

        units = self._get_course().get_units()
        progress = self.get_or_create_progress(student)

        result = {}
        for unit in units:
            if unit.type == 'A':
                # Assessment: value is a completion boolean.
                result[unit.unit_id] = self.is_assessment_completed(
                    progress, unit.unit_id)
            elif unit.type == 'U':
                # Unit: value is the 0/1/2 state (None coerced to 0).
                value = self.get_unit_status(progress, unit.unit_id)
                result[unit.unit_id] = value or 0

        return result

    def get_lesson_progress(self, student, unit_id):
        """Returns a dict saying which lessons in this unit are completed."""
        if student.is_transient:
            return {}

        lessons = self._get_course().get_lessons(unit_id)
        progress = self.get_or_create_progress(student)

        result = {}
        for lesson in lessons:
            result[lesson.lesson_id] = {
                'html': self.get_html_status(
                    progress, unit_id, lesson.lesson_id) or 0,
                'activity': self.get_activity_status(
                    progress, unit_id, lesson.lesson_id) or 0,
            }
        return result

    def get_component_progress(self, student, unit_id, lesson_id, cpt_id):
        """Returns the progress status of the given component."""
        if student.is_transient:
            return 0

        progress = self.get_or_create_progress(student)
        return self.is_component_completed(
            progress, unit_id, lesson_id, cpt_id) or 0

    def _get_entity_value(self, progress, event_key):
        # progress.value is a JSON-encoded dict; returns None for unset keys.
        if not progress.value:
            return None
        return transforms.loads(progress.value).get(event_key)

    def _set_entity_value(self, student_property, key, value):
        """Sets the integer value of a student property.

        Note: this method does not commit the change. The calling method should
        call put() on the StudentPropertyEntity.

        Args:
          student_property: the StudentPropertyEntity
          key: the student property whose value should be set
          value: the value to set this property to
        """
        try:
            progress_dict = transforms.loads(student_property.value)
        except (AttributeError, TypeError):
            # No value stored yet (None or non-string); start fresh.
            progress_dict = {}

        progress_dict[key] = value
        student_property.value = transforms.dumps(progress_dict)

    def _inc(self, student_property, key, value=1):
        """Increments the integer value of a student property.

        Note: this method does not commit the change. The calling method should
        call put() on the StudentPropertyEntity.

        Args:
          student_property: the StudentPropertyEntity
          key: the student property whose value should be incremented
          value: the value to increment this property by
        """
        try:
            progress_dict = transforms.loads(student_property.value)
        except (AttributeError, TypeError):
            # No value stored yet (None or non-string); start fresh.
            progress_dict = {}

        if key not in progress_dict:
            progress_dict[key] = 0

        progress_dict[key] += value
        student_property.value = transforms.dumps(progress_dict)
class ProgressStats(object):
    """Defines the course structure definition for course progress tracking."""
    def __init__(self, course):
        # The course whose structure is described, plus a completion tracker
        # used to enumerate valid activity blocks and html components.
        self._course = course
        self._tracker = UnitLessonCompletionTracker(course)
    def compute_entity_dict(self, entity, parent_ids):
        """Computes the course structure dictionary.

        Args:
            entity: str. Represents for which level of entity the dict is being
                computed. Valid entity levels are defined as keys to the dict
                defined below, COURSE_STRUCTURE_DICT.
            parent_ids: list of ids necessary to get children of the current
                entity.

        Returns:
            A nested dictionary representing the structure of the course.
            Every other level of the dictionary consists of a key, the label of
            the entity level defined by EVENT_CODE_MAPPING in
            UnitLessonCompletionTracker, whose value is a dictionary
            INSTANCES_DICT. The keys of INSTANCES_DICT are instance_ids of the
            corresponding entities, and the values are the entity_dicts of the
            instance's children, in addition to a field called 'label'. Label
            represents the user-facing name of the entity rather than
            its intrinsic id. If one of these values is empty, this means
            that the corresponding entity has no children.
            Ex:
            A Course with the following outlined structure:
                Pre Assessment
                Unit 1
                    Lesson 1
                Unit 2
            will have the following dictionary representation:
                {
                    's': {
                        1: {
                            'label': 'Pre Assessment'
                        }
                    },
                    'u': {
                        2: {
                            'l': {
                                3: {
                                    'label': 1
                                }
                            },
                            'label': 1
                        },
                        4: {
                            'label': 2
                        }
                    }
                    'label': 'UNTITLED COURSE'
                }
        """
        entity_dict = {'label': self._get_label(entity, parent_ids)}
        # Walk each child entity type registered for this level in the course
        # structure table, keying children by instance id and recursing to
        # build their sub-structure.
        for child_entity, get_children_ids in self.COURSE_STRUCTURE_DICT[
            entity]['children']:
            child_entity_dict = {}
            for child_id in get_children_ids(self, *parent_ids):
                new_parent_ids = parent_ids + [child_id]
                child_entity_dict[child_id] = self.compute_entity_dict(
                    child_entity, new_parent_ids)
            entity_dict[UnitLessonCompletionTracker.EVENT_CODE_MAPPING[
                child_entity]] = child_entity_dict
        return entity_dict
    def _get_course(self):
        return self._course
    def _get_unit_ids_of_type_unit(self):
        # Only 'unit'-typed units; assessments are enumerated separately by
        # _get_assessment_ids.
        units = self._get_course().get_units_of_type(verify.UNIT_TYPE_UNIT)
        return [unit.unit_id for unit in units]
    def _get_assessment_ids(self):
        assessments = self._get_course().get_assessment_list()
        return [a.unit_id for a in assessments]
    def _get_lesson_ids(self, unit_id):
        lessons = self._get_course().get_lessons(unit_id)
        return [lesson.lesson_id for lesson in lessons]
    def _get_activity_ids(self, unit_id, lesson_id):
        # A lesson has at most one activity; it is represented by id 0.
        unit = self._get_course().find_unit_by_id(unit_id)
        if self._get_course().find_lesson_by_id(unit, lesson_id).activity:
            return [0]
        return []
    def _get_html_ids(self, unused_unit_id, unused_lesson_id):
        # Every lesson is treated as having exactly one html body, id 0.
        return [0]
    def _get_block_ids(self, unit_id, lesson_id, unused_activity_id):
        return self._tracker.get_valid_block_ids(unit_id, lesson_id)
    def _get_component_ids(self, unit_id, lesson_id, unused_html_id):
        return self._tracker.get_valid_component_ids(unit_id, lesson_id)
    def _get_label(self, entity, parent_ids):
        # Dispatches to the per-entity label function declared below.
        return self.ENTITY_TO_HUMAN_READABLE_NAME_DICT[entity](
            self, *parent_ids)
    def _get_course_label(self):
        # pylint: disable-msg=protected-access
        return courses.Course.get_environ(self._get_course().app_context)[
            'course']['title']
    def _get_unit_label(self, unit_id):
        unit = self._get_course().find_unit_by_id(unit_id)
        return 'Unit %s' % unit.index
    def _get_assessment_label(self, unit_id):
        assessment = self._get_course().find_unit_by_id(unit_id)
        return assessment.title
    def _get_lesson_label(self, unit_id, lesson_id):
        # Returns the numeric lesson index (not a string), matching the
        # docstring example in compute_entity_dict.
        unit = self._get_course().find_unit_by_id(unit_id)
        lesson = self._get_course().find_lesson_by_id(unit, lesson_id)
        return lesson.index
    def _get_activity_label(self, unit_id, lesson_id, unused_activity_id):
        return str('L%s.%s' % (
            self._get_course().find_unit_by_id(unit_id).index,
            self._get_lesson_label(unit_id, lesson_id)))
    def _get_html_label(self, unit_id, lesson_id, unused_html_id):
        return self._get_activity_label(unit_id, lesson_id, unused_html_id)
    def _get_block_label(self, unit_id, lesson_id, unused_activity_id,
                         block_id):
        return str('L%s.%s.%s' % (
            self._get_course().find_unit_by_id(unit_id).index,
            self._get_lesson_label(unit_id, lesson_id),
            block_id))
    def _get_component_label(self, unit_id, lesson_id, unused_html_id,
                             component_id):
        return self._get_block_label(
            unit_id, lesson_id, unused_html_id, component_id)
    # Outlines the structure of the course. The key is the entity level, and
    # its value is a dictionary with following keys and its values:
    # 'children': list of tuples. Each tuple consists of string representation
    #             of the child entity(ex: 'lesson') and a function to get the
    #             children elements. If the entity does not have children, the
    #             value will be an empty list.
    # 'id': instance_id of the entity. If the entity is represented by a class
    #       with an id attribute(ex: units), string representation of the
    #       attribute is stored here. If the entity is defined by a dictionary
    #       (ex: components), then the value is the string 'None'.
    #
    # NOTE: the functions are stored as plain (unbound) functions, which is
    # why compute_entity_dict calls them as get_children_ids(self, ...).
    COURSE_STRUCTURE_DICT = {
        'course': {
            'children': [('unit', _get_unit_ids_of_type_unit),
                         ('assessment', _get_assessment_ids)],
        },
        'unit': {
            'children': [('lesson', _get_lesson_ids)],
        },
        'assessment': {
            'children': [],
        },
        'lesson': {
            'children': [('activity', _get_activity_ids),
                         ('html', _get_html_ids)],
        },
        'activity': {
            'children': [('block', _get_block_ids)],
        },
        'html': {
            'children': [('component', _get_component_ids)],
        },
        'block': {
            'children': [],
        },
        'component': {
            'children': [],
        }
    }
    # Maps each entity level to the (unbound) function that produces its
    # user-facing label; called via _get_label.
    ENTITY_TO_HUMAN_READABLE_NAME_DICT = {
        'course': _get_course_label,
        'unit': _get_unit_label,
        'assessment': _get_assessment_label,
        'lesson': _get_lesson_label,
        'activity': _get_activity_label,
        'html': _get_html_label,
        'block': _get_block_label,
        'component': _get_component_label
    }
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core data model classes."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import logging
import appengine_config
from config import ConfigProperty
import counters
from counters import PerfCounter
from entities import BaseEntity
import transforms
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
from google.appengine.api import users
from google.appengine.ext import db
# We want to use memcache for both objects that exist and do not exist in the
# datastore. If object exists we cache its instance, if object does not exist
# we cache this object below.
# Callers compare with == (not 'is') because memcache round-trips the value.
NO_OBJECT = {}
# The default amount of time to cache the items for in memcache.
DEFAULT_CACHE_TTL_SECS = 60 * 5
# Global memcache controls.
CAN_USE_MEMCACHE = ConfigProperty(
    'gcb_can_use_memcache', bool, (
        'Whether or not to cache various objects in memcache. For production '
        'this value should be on to enable maximum performance. For '
        'development this value should be off so you can see your changes to '
        'course content instantaneously.'),
    appengine_config.PRODUCTION_MODE)
# performance counters
CACHE_PUT = PerfCounter(
    'gcb-models-cache-put',
    'A number of times an object was put into memcache.')
CACHE_HIT = PerfCounter(
    'gcb-models-cache-hit',
    'A number of times an object was found in memcache.')
CACHE_MISS = PerfCounter(
    'gcb-models-cache-miss',
    'A number of times an object was not found in memcache.')
CACHE_DELETE = PerfCounter(
    'gcb-models-cache-delete',
    'A number of times an object was deleted from memcache.')
class MemcacheManager(object):
    """Class that consolidates all memcache operations."""

    @classmethod
    def get_namespace(cls):
        """Look up namespace from namespace_manager or use default."""
        return (namespace_manager.get_namespace() or
                appengine_config.DEFAULT_NAMESPACE_NAME)

    @classmethod
    def _get_namespace(cls, namespace):
        """Resolves an explicit namespace, falling back to the current one."""
        if namespace is None:
            return cls.get_namespace()
        return namespace

    @classmethod
    def get(cls, key, namespace=None):
        """Gets an item from memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return None
        value = memcache.get(key, namespace=cls._get_namespace(namespace))
        # Some real cached objects are falsy ('{}' for example); only a
        # literal None counts as a cache miss.
        if value != None:  # pylint: disable-msg=g-equals-none
            CACHE_HIT.inc()
            return value
        logging.info('Cache miss, key: %s. %s', key, Exception())
        CACHE_MISS.inc(context=key)
        return value

    @classmethod
    def set(cls, key, value, ttl=DEFAULT_CACHE_TTL_SECS, namespace=None):
        """Sets an item in memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return
        CACHE_PUT.inc()
        memcache.set(key, value, ttl, namespace=cls._get_namespace(namespace))

    @classmethod
    def delete(cls, key, namespace=None):
        """Deletes an item from memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return
        CACHE_DELETE.inc()
        memcache.delete(key, namespace=cls._get_namespace(namespace))

    @classmethod
    def incr(cls, key, delta, namespace=None):
        """Incr an item in memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return
        memcache.incr(
            key, delta, namespace=cls._get_namespace(namespace),
            initial_value=0)
# When enabled, every counter increment also does a memcache incr (see
# incr_counter_global_value below), which is where the extra latency comes
# from.
CAN_AGGREGATE_COUNTERS = ConfigProperty(
    'gcb_can_aggregate_counters', bool,
    'Whether or not to aggregate and record counter values in memcache. '
    'This allows you to see counter values aggregated across all frontend '
    'application instances. Without recording, you only see counter values '
    'for one frontend instance you are connected to right now. Enabling '
    'aggregation improves quality of performance metrics, but adds a small '
    'amount of latency to all your requests.',
    default_value=False)
def incr_counter_global_value(name, delta):
    """Bumps the cross-instance memcache aggregate for a counter, if enabled."""
    if not CAN_AGGREGATE_COUNTERS.value:
        return
    MemcacheManager.incr(
        'counter:' + name, delta,
        namespace=appengine_config.DEFAULT_NAMESPACE_NAME)
def get_counter_global_value(name):
    """Reads the cross-instance memcache aggregate for a counter, or None."""
    if not CAN_AGGREGATE_COUNTERS.value:
        return None
    return MemcacheManager.get(
        'counter:' + name,
        namespace=appengine_config.DEFAULT_NAMESPACE_NAME)
# Patch the counters module so global (cross-instance) counter values are
# read from and written to memcache by the functions above.
counters.get_counter_global_value = get_counter_global_value
counters.incr_counter_global_value = incr_counter_global_value
# Whether to share one student profile across all courses in this deployment.
CAN_SHARE_STUDENT_PROFILE = ConfigProperty(
    'gcb_can_share_student_profile', bool, (
        'Whether or not to share student profile between different courses.'),
    False)
class PersonalProfile(BaseEntity):
    """Personal information not specific to any course instance.

    The datastore key name is the user_id of the owning user.
    """
    email = db.StringProperty(indexed=False)
    legal_name = db.StringProperty(indexed=False)
    nick_name = db.StringProperty(indexed=False)
    date_of_birth = db.DateProperty(indexed=False)
    # Each of the following is a string representation of a JSON dict.
    enrollment_info = db.TextProperty()
    course_info = db.TextProperty()
    # PII fields excluded from data export.
    _PROPERTY_EXPORT_BLACKLIST = [email, legal_name, nick_name, date_of_birth]
    @property
    def user_id(self):
        return self.key().name()
    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # Returns a copy of the key with the user_id run through transform_fn
        # (used to anonymize keys on export).
        return db.Key.from_path(cls.kind(), transform_fn(db_key.name()))
class PersonalProfileDTO(object):
    """DTO for PersonalProfile."""

    # Attributes copied verbatim from the backing PersonalProfile entity.
    _COPIED_ATTRS = (
        'user_id', 'email', 'legal_name', 'nick_name', 'date_of_birth',
        'enrollment_info', 'course_info')

    def __init__(self, personal_profile=None):
        # JSON-encoded dict defaults used when no profile entity is given.
        self.enrollment_info = '{}'
        self.course_info = '{}'
        if personal_profile:
            for attr in self._COPIED_ATTRS:
                setattr(self, attr, getattr(personal_profile, attr))
class StudentProfileDAO(object):
    """All access and mutation methods for PersonalProfile and Student."""
    # Profiles are shared across courses, so they live in the default
    # namespace rather than the per-course namespace.
    TARGET_NAMESPACE = appengine_config.DEFAULT_NAMESPACE_NAME
    @classmethod
    def _memcache_key(cls, key):
        """Makes a memcache key from primary key."""
        return 'entity:personal-profile:%s' % key
    @classmethod
    def _get_profile_by_user_id(cls, user_id):
        """Loads profile given a user_id and returns Entity object."""
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(cls.TARGET_NAMESPACE)
            profile = MemcacheManager.get(
                cls._memcache_key(user_id), namespace=cls.TARGET_NAMESPACE)
            # NO_OBJECT is a cached negative result; distinguish it from a
            # plain cache miss (None).
            if profile == NO_OBJECT:
                return None
            if profile:
                return profile
            profile = PersonalProfile.get_by_key_name(user_id)
            MemcacheManager.set(
                cls._memcache_key(user_id), profile if profile else NO_OBJECT,
                namespace=cls.TARGET_NAMESPACE)
            return profile
        finally:
            namespace_manager.set_namespace(old_namespace)
    @classmethod
    def _add_new_profile(cls, user_id, email):
        """Adds new profile for a user_id and returns Entity object."""
        # Returns None when profile sharing is disabled; callers must cope.
        if not CAN_SHARE_STUDENT_PROFILE.value:
            return None
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(cls.TARGET_NAMESPACE)
            profile = PersonalProfile(key_name=user_id)
            profile.email = email
            profile.enrollment_info = '{}'
            profile.put()
            return profile
        finally:
            namespace_manager.set_namespace(old_namespace)
    @classmethod
    def _update_global_profile_attributes(
        cls, profile,
        email=None, legal_name=None, nick_name=None,
        date_of_birth=None, is_enrolled=None, final_grade=None,
        course_info=None):
        """Modifies various attributes of Student's Global Profile."""
        # TODO(psimakov): update of email does not work for student
        if email is not None:
            profile.email = email
        if legal_name is not None:
            profile.legal_name = legal_name
        if nick_name is not None:
            profile.nick_name = nick_name
        if date_of_birth is not None:
            profile.date_of_birth = date_of_birth
        if not (is_enrolled is None and final_grade is None and
                course_info is None):
            # Defer to avoid circular import.
            # pylint: disable-msg=g-import-not-at-top
            from controllers import sites
            course = sites.get_course_for_current_request()
            course_namespace = course.get_namespace_name()
            # Per-course data is stored in JSON dicts keyed by the course
            # namespace.
            if is_enrolled is not None:
                enrollment_dict = transforms.loads(profile.enrollment_info)
                enrollment_dict[course_namespace] = is_enrolled
                profile.enrollment_info = transforms.dumps(enrollment_dict)
            if final_grade is not None or course_info is not None:
                course_info_dict = {}
                if profile.course_info:
                    course_info_dict = transforms.loads(profile.course_info)
                if course_namespace in course_info_dict.keys():
                    info = course_info_dict[course_namespace]
                else:
                    info = {}
                if final_grade:
                    info['final_grade'] = final_grade
                if course_info:
                    info['info'] = course_info
                course_info_dict[course_namespace] = info
                profile.course_info = transforms.dumps(course_info_dict)
    @classmethod
    def _update_course_profile_attributes(
        cls, student, nick_name=None, is_enrolled=None):
        """Modifies various attributes of Student's Course Profile."""
        if nick_name is not None:
            student.name = nick_name
        if is_enrolled is not None:
            student.is_enrolled = is_enrolled
    @classmethod
    def _update_attributes(
        cls, profile, student,
        email=None, legal_name=None, nick_name=None,
        date_of_birth=None, is_enrolled=None, final_grade=None,
        course_info=None):
        """Modifies various attributes of Student and Profile."""
        # Either argument may be None; each is updated only when present.
        if profile:
            cls._update_global_profile_attributes(
                profile, email=email, legal_name=legal_name,
                nick_name=nick_name, date_of_birth=date_of_birth,
                is_enrolled=is_enrolled, final_grade=final_grade,
                course_info=course_info)
        if student:
            cls._update_course_profile_attributes(
                student, nick_name=nick_name, is_enrolled=is_enrolled)
    @classmethod
    def _put_profile(cls, profile):
        """Does a put() on profile objects."""
        if not profile:
            return
        profile.put()
        # Invalidate the cached copy so the next read sees fresh data.
        MemcacheManager.delete(
            cls._memcache_key(profile.user_id),
            namespace=cls.TARGET_NAMESPACE)
    @classmethod
    def get_profile_by_user_id(cls, user_id):
        """Loads profile given a user_id and returns DTO object."""
        profile = cls._get_profile_by_user_id(user_id)
        if profile:
            return PersonalProfileDTO(personal_profile=profile)
        return None
    @classmethod
    def add_new_profile(cls, user_id, email):
        return cls._add_new_profile(user_id, email)
    @classmethod
    def add_new_student_for_current_user(cls, nick_name, additional_fields):
        user = users.get_current_user()
        # Guard against an existing Student record owned by a different user.
        student_by_uid = Student.get_student_by_user_id(user.user_id())
        is_valid_student = (student_by_uid is None or
                            student_by_uid.user_id == user.user_id())
        assert is_valid_student, (
            'Student\'s email and user id do not match.')
        cls._add_new_student_for_current_user(
            user.user_id(), user.email(), nick_name, additional_fields)
    @classmethod
    @db.transactional(xg=True)
    def _add_new_student_for_current_user(
        cls, user_id, email, nick_name, additional_fields):
        """Create new or re-enroll old student."""
        # create profile if does not exist
        profile = cls._get_profile_by_user_id(user_id)
        if not profile:
            profile = cls._add_new_profile(user_id, email)
        # create new student or re-enroll existing
        student = Student.get_by_email(email)
        if not student:
            # TODO(psimakov): we must move to user_id as a key
            student = Student(key_name=email)
        # update profile
        cls._update_attributes(
            profile, student, nick_name=nick_name, is_enrolled=True)
        # update student
        student.user_id = user_id
        student.additional_fields = additional_fields
        # put both
        cls._put_profile(profile)
        student.put()
    @classmethod
    def get_enrolled_student_by_email_for(cls, email, app_context):
        """Returns student for a specific course."""
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(app_context.get_namespace_name())
            return Student.get_enrolled_student_by_email(email)
        finally:
            namespace_manager.set_namespace(old_namespace)
    @classmethod
    @db.transactional(xg=True)
    def update(
        cls, user_id, email, legal_name=None, nick_name=None,
        date_of_birth=None, is_enrolled=None, final_grade=None,
        course_info=None, profile_only=False):
        """Updates a student and/or their global profile."""
        student = None
        if not profile_only:
            student = Student.get_by_email(email)
            if not student:
                raise Exception('Unable to find student for: %s' % user_id)
        profile = cls._get_profile_by_user_id(user_id)
        if not profile:
            # May still be None if profile sharing is disabled.
            profile = cls.add_new_profile(user_id, email)
        cls._update_attributes(
            profile, student, email=email, legal_name=legal_name,
            nick_name=nick_name, date_of_birth=date_of_birth,
            is_enrolled=is_enrolled, final_grade=final_grade,
            course_info=course_info)
        cls._put_profile(profile)
        if not profile_only:
            student.put()
class Student(BaseEntity):
    """Student data specific to a course instance.

    The datastore key name is the student's email address; user_id is stored
    as an indexed property.
    """
    enrolled_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
    user_id = db.StringProperty(indexed=True)
    name = db.StringProperty(indexed=False)
    additional_fields = db.TextProperty(indexed=False)
    is_enrolled = db.BooleanProperty(indexed=False)
    # Each of the following is a string representation of a JSON dict.
    scores = db.TextProperty(indexed=False)
    # PII fields excluded from data export.
    _PROPERTY_EXPORT_BLACKLIST = [additional_fields, name]
    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # Returns a copy of the key with the name anonymized via transform_fn.
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
    def for_export(self, transform_fn):
        """Creates an ExportEntity populated from this entity instance."""
        assert not hasattr(self, 'key_by_user_id')
        model = super(Student, self).for_export(transform_fn)
        model.user_id = transform_fn(self.user_id)
        # Add a version of the key that always uses the user_id for the name
        # component. This can be used to establish relationships between objects
        # where the student key used was created via get_key(). In general,
        # this means clients will join exports on this field, not the field made
        # from safe_key().
        model.key_by_user_id = self.get_key(transform_fn=transform_fn)
        return model
    @property
    def is_transient(self):
        return False
    @property
    def email(self):
        # The datastore key name is the email address.
        return self.key().name()
    @property
    def profile(self):
        return StudentProfileDAO.get_profile_by_user_id(self.user_id)
    @classmethod
    def _memcache_key(cls, key):
        """Makes a memcache key from primary key."""
        return 'entity:student:%s' % key
    def put(self):
        """Do the normal put() and also add the object to memcache."""
        result = super(Student, self).put()
        MemcacheManager.set(self._memcache_key(self.key().name()), self)
        return result
    def delete(self):
        """Do the normal delete() and also remove the object from memcache."""
        super(Student, self).delete()
        MemcacheManager.delete(self._memcache_key(self.key().name()))
    @classmethod
    def add_new_student_for_current_user(cls, nick_name, additional_fields):
        StudentProfileDAO.add_new_student_for_current_user(
            nick_name, additional_fields)
    @classmethod
    def get_by_email(cls, email):
        return Student.get_by_key_name(email.encode('utf8'))
    @classmethod
    def get_enrolled_student_by_email(cls, email):
        """Returns enrolled student or None."""
        # NO_OBJECT is a cached negative result; distinguish it from a plain
        # cache miss (None).
        student = MemcacheManager.get(cls._memcache_key(email))
        if NO_OBJECT == student:
            return None
        if not student:
            student = Student.get_by_email(email)
            if student:
                MemcacheManager.set(cls._memcache_key(email), student)
            else:
                MemcacheManager.set(cls._memcache_key(email), NO_OBJECT)
        if student and student.is_enrolled:
            return student
        else:
            return None
    @classmethod
    def _get_user_and_student(cls):
        """Loads user and student and asserts both are present."""
        user = users.get_current_user()
        if not user:
            raise Exception('No current user.')
        student = Student.get_by_email(user.email())
        if not student:
            raise Exception('Student instance corresponding to user %s not '
                            'found.' % user.email())
        return user, student
    @classmethod
    def rename_current(cls, new_name):
        """Gives student a new name."""
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, nick_name=new_name)
    @classmethod
    def set_enrollment_status_for_current(cls, is_enrolled):
        """Changes student enrollment status."""
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, is_enrolled=is_enrolled)
    def get_key(self, transform_fn=None):
        """Gets a version of the key that uses user_id for the key name."""
        if not self.user_id:
            raise Exception('Student instance has no user_id set.')
        user_id = transform_fn(self.user_id) if transform_fn else self.user_id
        return db.Key.from_path(Student.kind(), user_id)
    @classmethod
    def get_student_by_user_id(cls, user_id):
        """Returns the single student with this user_id, or None."""
        # Fetch up to 2 so we can detect (and refuse) duplicates cheaply.
        students = cls.all().filter(cls.user_id.name, user_id).fetch(limit=2)
        if len(students) == 2:
            raise Exception(
                'There is more than one student with user_id %s' % user_id)
        return students[0] if students else None
    def has_same_key_as(self, key):
        """Checks if the key of the student and the given key are equal."""
        return key == self.get_key()
class TransientStudent(object):
    """A transient student (i.e. a user who hasn't logged in or registered)."""

    @property
    def is_transient(self):
        """Transient students always report True (cf. Student.is_transient)."""
        return True
class EventEntity(BaseEntity):
    """Generic events.

    Each event has a 'source' that defines a place in a code where the event was
    recorded. Each event has a 'user_id' to represent an actor who triggered
    the event. The event 'data' is a JSON object, the format of which is defined
    elsewhere and depends on the type of the event.
    """
    recorded_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
    source = db.StringProperty(indexed=False)
    user_id = db.StringProperty(indexed=False)
    # Each of the following is a string representation of a JSON dict.
    data = db.TextProperty(indexed=False)
    @classmethod
    def record(cls, source, user, data):
        """Records new event into a datastore."""
        # 'user' must expose user_id() (e.g. a users.User).
        event = EventEntity()
        event.source = source
        event.user_id = user.user_id()
        event.data = data
        event.put()
    def for_export(self, transform_fn):
        # Anonymize the actor id on export.
        model = super(EventEntity, self).for_export(transform_fn)
        model.user_id = transform_fn(self.user_id)
        return model
class StudentAnswersEntity(BaseEntity):
    """Student answers to the assessments."""
    updated_on = db.DateTimeProperty(indexed=True)
    # Each of the following is a string representation of a JSON dict.
    data = db.TextProperty(indexed=False)
    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # Returns a copy of the key with the name anonymized via transform_fn.
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class StudentPropertyEntity(BaseEntity):
    """A property of a student, keyed by the string STUDENT_ID-PROPERTY_NAME."""
    updated_on = db.DateTimeProperty(indexed=True)
    name = db.StringProperty()
    # Each of the following is a string representation of a JSON dict.
    value = db.TextProperty()
    @classmethod
    def _memcache_key(cls, key):
        """Makes a memcache key from primary key."""
        return 'entity:student_property:%s' % key
    @classmethod
    def create_key(cls, student_id, property_name):
        """Builds the datastore key name: '<student_id>-<property_name>'."""
        return '%s-%s' % (student_id, property_name)
    @classmethod
    def create(cls, student, property_name):
        """Creates (but does not put) a property entity for a student."""
        return StudentPropertyEntity(
            key_name=cls.create_key(student.user_id, property_name),
            name=property_name)
    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Returns a copy of the key with the student id anonymized."""
        # Split on the first '-' only: property names may contain dashes.
        user_id, name = db_key.name().split('-', 1)
        return db.Key.from_path(
            cls.kind(), '%s-%s' % (transform_fn(user_id), name))
    def put(self):
        """Do the normal put() and also add the object to memcache."""
        result = super(StudentPropertyEntity, self).put()
        MemcacheManager.set(self._memcache_key(self.key().name()), self)
        return result
    def delete(self):
        """Do the normal delete() and also remove the object from memcache."""
        # Bug fix: this previously called super(Student, self).delete(),
        # which raises TypeError because self is a StudentPropertyEntity,
        # not a Student.
        super(StudentPropertyEntity, self).delete()
        MemcacheManager.delete(self._memcache_key(self.key().name()))
    @classmethod
    def get(cls, student, property_name):
        """Loads student property."""
        key = cls.create_key(student.user_id, property_name)
        # NO_OBJECT is a cached negative result; distinguish it from a plain
        # cache miss (None).
        value = MemcacheManager.get(cls._memcache_key(key))
        if NO_OBJECT == value:
            return None
        if not value:
            value = cls.get_by_key_name(key)
            if value:
                MemcacheManager.set(cls._memcache_key(key), value)
            else:
                MemcacheManager.set(cls._memcache_key(key), NO_OBJECT)
        return value
class BaseJsonDao(object):
    """Base DAO class for entities storing their data in a single JSON blob.

    Subclasses must define two class attributes:
        ENTITY: the db entity class holding the JSON in its 'data' property.
        DTO: a class constructed as DTO(id, dict) wrapping the decoded JSON.
    """
    @classmethod
    def _memcache_key(cls, obj_id):
        """Makes a memcache key from datastore id."""
        # Keeping case-sensitivity in kind() because Foo(object) != foo(object).
        return '(entity:%s:%s)' % (cls.ENTITY.kind(), obj_id)
    @classmethod
    def get_all(cls):
        # NOTE(review): fetch(1000) silently truncates larger result sets.
        entities = cls.ENTITY.all().fetch(1000)
        return [
            cls.DTO(e.key().id(), transforms.loads(e.data)) for e in entities]
    @classmethod
    def _load_entity(cls, obj_id):
        """Loads the raw entity by id, via memcache when possible."""
        if not obj_id:
            return None
        memcache_key = cls._memcache_key(obj_id)
        entity = MemcacheManager.get(memcache_key)
        # NO_OBJECT is a cached negative result; distinguish it from a plain
        # cache miss (None).
        if NO_OBJECT == entity:
            return None
        if not entity:
            entity = cls.ENTITY.get_by_id(int(obj_id))
            if entity:
                MemcacheManager.set(memcache_key, entity)
            else:
                MemcacheManager.set(memcache_key, NO_OBJECT)
        return entity
    @classmethod
    def load(cls, obj_id):
        """Loads a DTO by id, or None when not found."""
        entity = cls._load_entity(obj_id)
        if entity:
            return cls.DTO(obj_id, transforms.loads(entity.data))
        else:
            return None
    @classmethod
    def save(cls, dto):
        """Creates or updates the entity backing a DTO; returns its id."""
        entity = cls._load_entity(dto.id)
        if not entity:
            entity = cls.ENTITY()
        entity.data = transforms.dumps(dto.dict)
        entity.put()
        MemcacheManager.set(cls._memcache_key(entity.key().id()), entity)
        return entity.key().id()
    @classmethod
    def save_all(cls, dtos):
        """Performs a block persist of a list of DTO's."""
        entities = []
        for dto in dtos:
            entity = cls._load_entity(dto.id)
            if not entity:
                entity = cls.ENTITY()
            entity.data = transforms.dumps(dto.dict)
            entities.append(entity)
        # Single batched datastore put, then refresh the cache per entity.
        keys = db.put(entities)
        for key, entity in zip(keys, entities):
            MemcacheManager.set(cls._memcache_key(key.id()), entity)
        return [key.id() for key in keys]
    @classmethod
    def delete(cls, dto):
        entity = cls._load_entity(dto.id)
        entity.delete()
        MemcacheManager.delete(cls._memcache_key(entity.key().id()))
class QuestionEntity(BaseEntity):
    """An object representing a top-level question."""
    # JSON blob; decoded into a QuestionDTO by QuestionDAO.
    data = db.TextProperty(indexed=False)
class QuestionDTO(object):
    """DTO for question entities."""

    # Question type codes stored under the 'type' key of the payload.
    MULTIPLE_CHOICE = 0
    SHORT_ANSWER = 1

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def type(self):
        """The question type code, or None when unset."""
        return self.dict.get('type')

    @type.setter
    def type(self, value):
        self.dict['type'] = value

    @property
    def description(self):
        """Human-readable description; empty string when unset or falsy."""
        description = self.dict.get('description')
        return description if description else ''
class QuestionDAO(BaseJsonDao):
    """DAO for QuestionEntity / QuestionDTO pairs."""
    DTO = QuestionDTO
    ENTITY = QuestionEntity
    @classmethod
    def used_by(cls, question_dto_id):
        """Returns descriptions of the question groups using a question.

        Args:
            question_dto_id: int. Identifier of the question we're testing.

        Returns:
            List of unicode. The lexicographically-sorted list of the
            descriptions of all question groups that use the given question.
        """
        # O(num_question_groups), but deserialization of 1 large group takes
        # ~1ms so practically speaking latency is OK for the admin console.
        matches = []
        for group in QuestionGroupDAO.get_all():
            # Normalize both sides to long: ids may arrive as str or int.
            if long(question_dto_id) in [long(x) for x in group.question_ids]:
                matches.append(group.description)
        return sorted(matches)
class SaQuestionConstants(object):
    """Default input-field dimensions; 'Sa' presumably = short answer
    (cf. QuestionDTO.SHORT_ANSWER) -- verify against callers."""
    DEFAULT_WIDTH_COLUMNS = 100
    DEFAULT_HEIGHT_ROWS = 1
class QuestionGroupEntity(BaseEntity):
    """An object representing a question group in the datastore."""
    # JSON blob; decoded into a QuestionGroupDTO by QuestionGroupDAO.
    data = db.TextProperty(indexed=False)
class QuestionGroupDTO(object):
    """Data transfer object for question groups."""

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    def _text(self, key):
        # Normalizes a missing or falsy payload value to the empty string.
        value = self.dict.get(key)
        return value if value else ''

    @property
    def description(self):
        """Human-readable description; empty string when unset."""
        return self._text('description')

    @property
    def introduction(self):
        """Introductory text for the group; empty string when unset."""
        return self._text('introduction')

    @property
    def question_ids(self):
        """Ids of the questions in this group, in payload order."""
        ids = []
        for item in self.dict.get('items', []):
            ids.append(item['question'])
        return ids
class QuestionGroupDAO(BaseJsonDao):
    """DAO for QuestionGroupEntity / QuestionGroupDTO pairs."""
    DTO = QuestionGroupDTO
    ENTITY = QuestionGroupEntity
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to work with various models."""
__author__ = [
'johncox@google.com (John Cox)',
'sll@google.com (Sean Lip)',
]
import logging
import transforms
_LOG = logging.getLogger('models.utils')
# NOTE(review): configures the root logger at import time as a module-level
# side effect.
logging.basicConfig()
class Error(Exception):
    """Base error class for this module."""
class StopMapping(Error):
    """Raised by user's map function to stop execution of QueryMapper.run()."""
class QueryMapper(object):
    """Mapper that applies a function to each result of a db.query.

    QueryMapper works with result sets larger than 1000.
    Usage:
        def map_fn(model, named_arg, keyword_arg=None):
            [...]
        query = MyModel.all()
        # We manipulate query, so it cannot be reused after it's fed to
        # QueryMapper.
        mapper = QueryMapper(query)
        mapper.run(map_fn, 'foo', keyword_arg='bar')
    """
    def __init__(self, query, batch_size=20, counter=None, report_every=None):
        """Constructs a new QueryMapper.

        Args:
            query: db.Query. The query to run. Cannot be reused after the
                query mapper's run() method is invoked.
            batch_size: int. Number of results to fetch per batch.
            counter: entities.PerfCounter or None. If given, the counter to
                increment once for every entity retrieved by query.
            report_every: int or None. If specified, every report_every results
                we will log the number of results processed at level info. By
                default we will do this every 10 batches. Set to 0 to disable
                logging.
        """
        if report_every is None:
            report_every = 10 * batch_size
        self._batch_size = batch_size
        self._counter = counter
        self._query = query
        self._report_every = report_every
    def run(self, fn, *fn_args, **fn_kwargs):
        """Runs the query in batches, applying a function to each result.

        Args:
            fn: function. Takes a single query result (either a db.Key or
                db.Model) instance as its first arg, then any number of
                positional and keyword arguments. Called on each result returned
                by the query.
            *fn_args: positional args delegated to fn.
            **fn_kwargs: keyword args delegated to fn.

        Returns:
            Integer. Total number of results processed.
        """
        total_count = 0
        cursor = None
        while True:
            batch_count, cursor = self._handle_batch(
                cursor, fn, *fn_args, **fn_kwargs)
            total_count += batch_count
            # An empty batch, or a None cursor (StopMapping raised or query
            # exhausted), terminates the loop.
            if not (batch_count and cursor):
                return total_count
            if self._report_every != 0 and not total_count % self._report_every:
                _LOG.info(
                    'Models processed by %s.%s so far: %s',
                    fn.__module__, fn.func_name, total_count)
    def _handle_batch(self, cursor, fn, *fn_args, **fn_kwargs):
        # Fetches one batch at the given cursor and applies fn to each result.
        # Returns (count of results fn completed on, cursor for the next
        # batch or None when iteration should stop).
        if cursor:
            self._query.with_cursor(start_cursor=cursor)
        count = 0
        empty = True
        batch = self._query.fetch(limit=self._batch_size)
        if self._counter:
            self._counter.inc(increment=len(batch))
        for result in batch:
            try:
                fn(result, *fn_args, **fn_kwargs)
            except StopMapping:
                # The map function asked to stop; a None cursor ends run().
                return count, None
            count += 1
            empty = False
        cursor = None
        if not empty:
            cursor = self._query.cursor()
        return count, cursor
def set_answer(answers, assessment_name, answer):
    """Records a student's answer array for one assessment.

    The caller is responsible for committing via answers.put(). No
    type-checking is done on 'answer'; whatever is passed in is stored.

    Args:
        answers: the StudentAnswers entity in which the answer should be
            stored.
        assessment_name: the name of the assessment.
        answer: an array containing the student's answers.
    """
    stored = transforms.loads(answers.data) if answers.data else {}
    stored[assessment_name] = answer
    answers.data = transforms.dumps(stored)
def set_score(student, assessment_name, score):
    """Records a student's score for one assessment.

    The caller is responsible for committing via student.put(). No
    type-checking is done on 'score'; whatever is passed in is stored.

    Args:
        student: the student whose answer should be stored.
        assessment_name: the name of the assessment.
        score: the student's score.
    """
    stored = transforms.loads(student.scores) if student.scores else {}
    stored[assessment_name] = score
    student.scores = transforms.dumps(stored)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Review processor that is used for managing human-reviewed assessments."""
__author__ = [
'sll@google.com (Sean Lip)',
]
from modules.review import domain
import entities
import student_work
import transforms
# Indicates that a human-graded assessment is peer-graded. This is the name
# under which a matcher implementation is registered via
# ReviewsProcessor.set_peer_matcher().
PEER_MATCHER = 'peer'
# Allowed matchers.
ALLOWED_MATCHERS = [PEER_MATCHER]
class ReviewsProcessor(object):
    """A class that processes review arrangements."""

    # Maps a matcher name (e.g. PEER_MATCHER) to its implementation; the
    # implementation is installed at runtime via set_peer_matcher().
    TYPE_IMPL_MAPPING = {
        PEER_MATCHER: None,
    }

    @classmethod
    def set_peer_matcher(cls, matcher):
        """Installs the implementation used for peer-matched assessments."""
        cls.TYPE_IMPL_MAPPING[PEER_MATCHER] = matcher

    def __init__(self, course):
        self._course = course

    def _get_course(self):
        return self._course

    def _get_impl(self, unit_id):
        """Looks up the matcher implementation configured for this unit."""
        unit = self._get_course().find_unit_by_id(unit_id)
        matcher_name = unit.workflow.get_matcher()
        return self.TYPE_IMPL_MAPPING[matcher_name]

    def _get_review_step_keys_by(self, unit_id, reviewer_key):
        return self._get_impl(unit_id).get_review_step_keys_by(
            str(unit_id), reviewer_key)

    def _get_submission_and_review_step_keys(self, unit_id, reviewee_key):
        return self._get_impl(unit_id).get_submission_and_review_step_keys(
            str(unit_id), reviewee_key)

    def add_reviewer(self, unit_id, reviewee_key, reviewer_key):
        """Assigns reviewer_key to review reviewee_key's submission."""
        submission_key = student_work.Submission.get_key(unit_id, reviewee_key)
        return self._get_impl(unit_id).add_reviewer(
            str(unit_id), submission_key, reviewee_key, reviewer_key)

    def delete_reviewer(self, unit_id, review_step_key):
        return self._get_impl(unit_id).delete_reviewer(review_step_key)

    def get_new_review(self, unit_id, reviewer_key):
        return self._get_impl(unit_id).get_new_review(
            str(unit_id), reviewer_key)

    def get_review_steps_by(self, unit_id, reviewer_key):
        step_keys = self._get_review_step_keys_by(unit_id, reviewer_key)
        return self.get_review_steps_by_keys(unit_id, step_keys)

    def get_reviews_by_keys(
        self, unit_id, review_keys, handle_empty_keys=False):
        """Gets a list of reviews, given their review keys.

        If handle_empty_keys is True, supplied keys that are None do not
        raise; the corresponding elements of the result are simply None.
        This usually arises when this method is called immediately after
        get_review_steps_by_keys().

        Args:
            unit_id: string. Id of the unit to get the reviews for.
            review_keys: [db.Key of peer.ReviewStep]. May include None, if
                handle_empty_keys is True.
            handle_empty_keys: if True, the return value contains None for
                keys that are None. If False, the method throws if empty
                keys are supplied.

        Returns:
            List with the same number of elements as review_keys. Each
            element is the JSON-decoded contents of the matching review, or
            None when no review was submitted for that key (or when the key
            was None and handle_empty_keys is True).
        """
        impl = self._get_impl(unit_id)
        if not handle_empty_keys:
            fetched = impl.get_reviews_by_keys(review_keys)
        else:
            # Query only the non-None keys, then scatter the results back
            # into their original positions.
            present = [
                (idx, key) for idx, key in enumerate(review_keys)
                if key is not None]
            found = impl.get_reviews_by_keys([key for _, key in present])
            fetched = [None] * len(review_keys)
            for (idx, _), review in zip(present, found):
                fetched[idx] = review
        return [
            (transforms.loads(review.contents) if review else None)
            for review in fetched]

    def get_review_steps_by_keys(self, unit_id, review_step_keys):
        return self._get_impl(unit_id).get_review_steps_by_keys(
            review_step_keys)

    def get_submission_and_review_steps(self, unit_id, reviewee_key):
        """Gets the submission and a list of review steps for a unit/reviewee.

        Note that review steps marked removed are included in the result set.

        Args:
            unit_id: string. Id of the unit to get the data for.
            reviewee_key: db.Key of models.models.Student. The student to get
                the data for.

        Returns:
            None if no submission was found for the given unit_id,
            reviewee_key pair; otherwise a two-element list holding the
            de-JSONified submission contents and the submission's review
            steps sorted by creation date.
        """
        keys = self._get_submission_and_review_step_keys(
            unit_id, reviewee_key)
        if keys is None:
            return None
        contents = student_work.Submission.get_contents_by_key(keys[0])
        steps = sorted(
            self.get_review_steps_by_keys(unit_id, keys[1]),
            key=lambda step: step.create_date)
        return [contents, steps]

    def does_submission_exist(self, unit_id, reviewee_key):
        submission_key = student_work.Submission.get_key(unit_id, reviewee_key)
        return bool(entities.get(submission_key))

    def start_review_process_for(self, unit_id, submission_key, reviewee_key):
        return self._get_impl(unit_id).start_review_process_for(
            str(unit_id), submission_key, reviewee_key)

    def write_review(
        self, unit_id, review_step_key, review_payload, mark_completed):
        return self._get_impl(unit_id).write_review(
            review_step_key, transforms.dumps(review_payload),
            mark_completed=mark_completed)
class ReviewUtils(object):
    """A utility class for processing data relating to assessment reviews."""

    @classmethod
    def count_completed_reviews(cls, review_steps):
        """Returns how many of the given review steps are completed."""
        return sum(
            1 for step in review_steps
            if step.state == domain.REVIEW_STATE_COMPLETED)

    @classmethod
    def has_completed_all_assigned_reviews(cls, review_steps):
        """Returns whether every assigned review step is completed."""
        return all(
            step.state == domain.REVIEW_STATE_COMPLETED
            for step in review_steps)

    @classmethod
    def has_completed_enough_reviews(cls, reviews, review_min_count):
        """Checks whether the review count is at least the minimum required."""
        return cls.count_completed_reviews(reviews) >= review_min_count

    @classmethod
    def get_review_progress(
        cls, review_steps, review_min_count, progress_tracker):
        """Gets the progress value based on the number of reviews done.

        Args:
            review_steps: a list of ReviewStep objects.
            review_min_count: the minimum number of reviews that the student
                is required to complete for this assessment.
            progress_tracker: the course progress tracker.

        Returns:
            the corresponding progress value: 0 (not started), 1 (in
            progress) or 2 (completed).
        """
        done = cls.count_completed_reviews(review_steps)
        if done >= review_min_count:
            return progress_tracker.COMPLETED_STATE
        if done > 0:
            return progress_tracker.IN_PROGRESS_STATE
        return progress_tracker.NOT_STARTED_STATE
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Properties and its collections."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
import collections
class Property(object):
    """Describes a single named, typed field of a schema."""

    def __init__(
        self, name, label, property_type, select_data=None, description=None,
        optional=False, extra_schema_dict_values=None):
        # Each constructor argument is stashed on a private attribute of the
        # same name; only 'name' is exposed through a read-only property.
        self._name = name
        self._label = label
        self._property_type = property_type
        self._select_data = select_data
        self._description = description
        self._optional = optional
        self._extra_schema_dict_values = extra_schema_dict_values

    @property
    def name(self):
        """Read-only accessor for the field's name."""
        return self._name
class Registry(object):
    """An ordered collection of Property objects and nested Registries."""

    def __init__(self, title, description=None, extra_schema_dict_values=None):
        self._title = title
        # Build the JSON-schema style dict first, then store it.
        schema = {'id': title, 'type': 'object'}
        if description:
            schema['description'] = description
        self._registry = schema
        self._description = description
        self._extra_schema_dict_values = extra_schema_dict_values
        self._properties = []
        self._sub_registories = collections.OrderedDict()

    @property
    def title(self):
        return self._title

    def add_property(self, schema_field):
        """Appends a Property to this Registry."""
        self._properties.append(schema_field)

    def add_sub_registry(
        self, name, title=None, description=None, registry=None):
        """Registers a child Registry under name, creating one if needed."""
        if not registry:
            registry = Registry(title, description)
        self._sub_registories[name] = registry
        return registry

    def has_subregistries(self):
        """Returns True when at least one sub registry has been added."""
        return bool(self._sub_registories)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Virtual file system for managing files locally or in the cloud."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
import os
from common import jinja_utils
import jinja2
from entities import BaseEntity
from models import MemcacheManager
from google.appengine.api import namespace_manager
from google.appengine.ext import db
# We want to use memcache for both objects that exist and do not exist in the
# datastore. If object exists we cache its instance, if object does not exist
# we cache this object below.
NO_OBJECT = {}
class AbstractFileSystem(object):
    """A generic file system interface that forwards to an implementation."""

    def __init__(self, impl):
        self._impl = impl

    @property
    def impl(self):
        """The concrete file system this facade delegates to."""
        return self._impl

    @classmethod
    def normpath(cls, path):
        """Make Windows and Linux filenames to have the same separator '/'."""
        # Falsy paths (None, '') pass through unchanged; otherwise backslashes
        # become forward slashes and the result is forced to Unicode.
        if not path:
            return path
        return u'' + path.replace('\\', '/')

    def isfile(self, filename):
        """Checks if file exists, similar to os.path.isfile(...)."""
        return self._impl.isfile(filename)

    def open(self, filename):
        """Returns a stream with the file content, similar to open(...)."""
        return self._impl.get(filename)

    def get(self, filename):
        """Returns bytes with the file content, but no metadata."""
        return self._impl.get(filename).read()

    def put(self, filename, stream, **kwargs):
        """Replaces the contents of the file with the bytes in the stream."""
        self._impl.put(filename, stream, **kwargs)

    def delete(self, filename):
        """Deletes a file and metadata associated with it."""
        self._impl.delete(filename)

    def list(self, dir_name):
        """Lists all files in a directory."""
        return self._impl.list(dir_name)

    def get_jinja_environ(self, dir_names):
        """Configures jinja environment loaders for this file system."""
        return self._impl.get_jinja_environ(dir_names)

    def is_read_write(self):
        return self._impl.is_read_write()

    def is_draft(self, stream):
        """Returns True only when the stream carries draft-marked metadata."""
        metadata = getattr(stream, 'metadata', None)
        if not metadata:
            return False
        return metadata.is_draft
class LocalReadOnlyFileSystem(object):
    """A read-only file system serving only local files."""

    def __init__(self, logical_home_folder=None, physical_home_folder=None):
        """Creates a new instance of the disk-backed read-only file system.

        Args:
            logical_home_folder: A logical home dir of all files (/a/b/c/...).
            physical_home_folder: A physical location on the file system
                (/x/y).

        Returns:
            A new instance of the object.
        """
        self._logical_home_folder = AbstractFileSystem.normpath(
            logical_home_folder)
        self._physical_home_folder = AbstractFileSystem.normpath(
            physical_home_folder)

    def _rebase(self, filename, src_root, dst_root):
        # Rewrites filename from one root folder to the other; a no-op when
        # either root is unset.
        filename = AbstractFileSystem.normpath(filename)
        if not (self._logical_home_folder and self._physical_home_folder):
            return filename
        return AbstractFileSystem.normpath(os.path.join(
            dst_root, os.path.relpath(filename, src_root)))

    def _logical_to_physical(self, filename):
        return self._rebase(
            filename, self._logical_home_folder, self._physical_home_folder)

    def _physical_to_logical(self, filename):
        return self._rebase(
            filename, self._physical_home_folder, self._logical_home_folder)

    def isfile(self, filename):
        return os.path.isfile(self._logical_to_physical(filename))

    def get(self, filename):
        return open(self._logical_to_physical(filename), 'rb')

    def put(self, unused_filename, unused_stream):
        raise Exception('Not implemented.')

    def delete(self, unused_filename):
        raise Exception('Not implemented.')

    def list(self, root_dir):
        """Recursively lists all files under root_dir as logical names."""
        found = []
        for dir_path, _, file_names in os.walk(
                self._logical_to_physical(root_dir)):
            for name in file_names:
                found.append(
                    self._physical_to_logical(os.path.join(dir_path, name)))
        return sorted(found)

    def get_jinja_environ(self, dir_names):
        """Configure the environment for Jinja templates."""
        return jinja_utils.create_jinja_environment(
            loader=jinja2.FileSystemLoader(
                [self._logical_to_physical(name) for name in dir_names]))

    def is_read_write(self):
        return False
class FileMetadataEntity(BaseEntity):
    """An entity to represent a file metadata; absolute file name is a key."""
    # TODO(psimakov): do we need 'version' to support concurrent updates
    # TODO(psimakov): can we put 'data' here and still have fast isfile/list?
    # Timestamp set automatically when the row is first created.
    created_on = db.DateTimeProperty(auto_now_add=True, indexed=False)
    # Set explicitly on every put by the file system; indexed so it can be
    # queried.
    updated_on = db.DateTimeProperty(indexed=True)
    # Draft file is just as any other file. It's up to the consumer of the file
    # to decide whether to treat draft differently (not to serve it to the
    # public, for example). This class does not care and just stores the bit.
    is_draft = db.BooleanProperty(indexed=False)
    # Size in bytes of the companion FileDataEntity payload.
    size = db.IntegerProperty(indexed=False)
class FileDataEntity(BaseEntity):
    """An entity to represent file content; absolute file name is a key."""
    # Raw file bytes; any text encoding is the consumer's concern.
    data = db.BlobProperty()
class FileStreamWrapped(object):
    """Pairs an in-memory payload with its metadata, exposing read()."""

    def __init__(self, metadata, data):
        self._metadata = metadata
        self._data = data

    def read(self):
        """Emulates stream.read(): hands back the whole payload once.

        Subsequent calls return '' to emulate EOF.
        """
        payload, self._data = self._data, ''
        return payload

    @property
    def metadata(self):
        return self._metadata
class StringStream(object):
    """A wrapper to pose a text string as a UTF-8 byte stream."""

    def __init__(self, text):
        """Encodes text to UTF-8 bytes up front.

        Calling the bound method text.encode(...) instead of the unbound
        unicode.encode(text, ...) is equivalent for unicode input, but also
        works for any string type and on Python 3, where the 'unicode'
        builtin no longer exists.

        Args:
            text: text string to expose as a byte stream.
        """
        self._data = text.encode('utf-8')

    def read(self):
        """Emulates stream.read(). Returns all bytes and emulates EOF."""
        data = self._data
        self._data = ''
        return data
def string_to_stream(text):
    """Wraps a text string as a read-once UTF-8 byte stream."""
    return StringStream(text)
def stream_to_string(stream):
    """Reads the whole stream and decodes the bytes as UTF-8 text."""
    raw_bytes = stream.read()
    return raw_bytes.decode('utf-8')
class VirtualFileSystemTemplateLoader(jinja2.BaseLoader):
    """Loads jinja2 templates out of a virtual file system."""

    def __init__(self, fs, logical_home_folder, dir_names):
        self._fs = fs
        self._logical_home_folder = AbstractFileSystem.normpath(
            logical_home_folder)
        # Normalize every search directory up front; a falsy dir_names means
        # no search path at all.
        self._dir_names = [
            AbstractFileSystem.normpath(dir_name)
            for dir_name in (dir_names or [])]

    def get_source(self, unused_environment, template):
        """Returns (source, filename, uptodate) for the first match found."""
        for dir_name in self._dir_names:
            candidate = AbstractFileSystem.normpath(
                os.path.join(dir_name, template))
            if self._fs.isfile(candidate):
                source = self._fs.get(candidate).read().decode('utf-8')
                return source, candidate, True
        raise jinja2.TemplateNotFound(template)

    def list_templates(self):
        """Returns every file reachable from the configured directories."""
        templates = []
        for dir_name in self._dir_names:
            templates.extend(self._fs.list(dir_name))
        return templates
class DatastoreBackedFileSystem(object):
    """A read-write file system backed by a datastore."""
    @classmethod
    def make_key(cls, filename):
        # Memcache key under which a file's cached stream (or the NO_OBJECT
        # negative-result marker) is stored.
        return 'vfs:dsbfs:%s' % filename
    def __init__(
        self, ns, logical_home_folder,
        inherits_from=None, inheritable_folders=None):
        """Creates a new instance of the datastore-backed file system.

        Args:
            ns: A datastore namespace to use for storing all data and metadata.
            logical_home_folder: A logical home dir of all files (/a/b/c/...).
            inherits_from: A file system to use for the inheritance.
            inheritable_folders: A list of folders that support inheritance.

        Returns:
            A new instance of the object.

        Raises:
            Exception: if invalid inherits_from is given.
        """
        # We cache files loaded via inherited fs; make sure they don't change.
        if inherits_from and not isinstance(
            inherits_from, LocalReadOnlyFileSystem):
            raise Exception('Can only inherit from LocalReadOnlyFileSystem.')
        self._ns = ns
        self._logical_home_folder = AbstractFileSystem.normpath(
            logical_home_folder)
        self._inherits_from = inherits_from
        self._inheritable_folders = []
        if inheritable_folders:
            for folder in inheritable_folders:
                self._inheritable_folders.append(AbstractFileSystem.normpath(
                    folder))
    def __getattribute__(self, name):
        # Wraps every public method so that it runs inside this file system's
        # datastore namespace, restoring the caller's namespace afterwards.
        attr = object.__getattribute__(self, name)
        # Don't intercept access to private methods and attributes.
        if name.startswith('_'):
            return attr
        # Do intercept all methods.
        if hasattr(attr, '__call__'):
            def newfunc(*args, **kwargs):
                """Set proper namespace for each method call."""
                old_namespace = namespace_manager.get_namespace()
                try:
                    namespace_manager.set_namespace(self._ns)
                    return attr(*args, **kwargs)
                finally:
                    namespace_manager.set_namespace(old_namespace)
            return newfunc
        # Don't intercept access to non-method attributes.
        return attr
    def _logical_to_physical(self, filename):
        # Strips the logical home prefix, yielding a '/'-rooted internal path.
        filename = AbstractFileSystem.normpath(filename)
        # For now we only support '/' as a physical folder name.
        if self._logical_home_folder == '/':
            return filename
        if not filename.startswith(self._logical_home_folder):
            raise Exception(
                'Expected path \'%s\' to start with a prefix \'%s\'.' % (
                    filename, self._logical_home_folder))
        rel_path = filename[len(self._logical_home_folder):]
        if not rel_path.startswith('/'):
            rel_path = '/%s' % rel_path
        return rel_path
    def physical_to_logical(self, filename):
        """Converts an internal filename to and external filename."""
        # This class receives and stores absolute file names. The logical
        # filename is the external file name. The physical filename is an
        # internal filename. This function does the convertions.
        # Let's say you want to store a file named '/assets/img/foo.png'.
        # This would be a physical filename in the VFS. But the put() operation
        # expects an absolute filename from the root of the app installation,
        # i.e. something like '/dev/apps/coursebuilder/assets/img/foo.png',
        # which is called a logical filename. This is a legacy expectation from
        # the days the course was defined as files on the file system.
        #
        # This function will do the conversion you need.
        return self._physical_to_logical(filename)
    def _physical_to_logical(self, filename):
        # Re-attaches the logical home prefix to a '/'-rooted internal path.
        filename = AbstractFileSystem.normpath(filename)
        # For now we only support '/' as a physical folder name.
        if filename and not filename.startswith('/'):
            filename = '/' + filename
        if self._logical_home_folder == '/':
            return filename
        return '%s%s' % (self._logical_home_folder, filename)
    def _can_inherit(self, filename):
        """Checks if a file can be inherited from a parent file system."""
        for prefix in self._inheritable_folders:
            if filename.startswith(prefix):
                return True
        return False
    def get(self, afilename):
        """Gets a file from a datastore. Raw bytes stream, no encodings."""
        filename = self._logical_to_physical(afilename)
        # Load from cache; a cached NO_OBJECT marker means "known absent".
        result = MemcacheManager.get(
            self.make_key(filename), namespace=self._ns)
        if result:
            return result
        if NO_OBJECT == result:
            return None
        # Load from a datastore.
        metadata = FileMetadataEntity.get_by_key_name(filename)
        if metadata:
            data = FileDataEntity.get_by_key_name(filename)
            if data:
                result = FileStreamWrapped(metadata, data.data)
                MemcacheManager.set(
                    self.make_key(filename), result, namespace=self._ns)
                return result
        result = None
        metadata = None
        # Load from parent fs.
        if self._inherits_from and self._can_inherit(filename):
            result = self._inherits_from.get(afilename)
        # Cache result. Inherited files are cached without metadata; misses
        # are cached as NO_OBJECT to avoid repeated datastore lookups.
        if result:
            result = FileStreamWrapped(metadata, result.read())
            MemcacheManager.set(
                self.make_key(filename), result, namespace=self._ns)
        else:
            MemcacheManager.set(
                self.make_key(filename), NO_OBJECT, namespace=self._ns)
        return result
    @db.transactional(xg=True)
    def put(self, filename, stream, is_draft=False, metadata_only=False):
        """Puts a file stream to a database. Raw bytes stream, no encodings."""
        self.non_transactional_put(
            filename, stream, is_draft=is_draft, metadata_only=metadata_only)
    def non_transactional_put(
        self, filename, stream, is_draft=False, metadata_only=False):
        """Non-transactional put; use only when transactions are impossible."""
        filename = self._logical_to_physical(filename)
        metadata = FileMetadataEntity.get_by_key_name(filename)
        if not metadata:
            metadata = FileMetadataEntity(key_name=filename)
        metadata.updated_on = datetime.datetime.now()
        metadata.is_draft = is_draft
        if not metadata_only:
            # We operate with raw bytes. The consumer must deal with encoding.
            raw_bytes = stream.read()
            metadata.size = len(raw_bytes)
            data = FileDataEntity(key_name=filename)
            data.data = raw_bytes
            data.put()
        metadata.put()
        # Invalidate the cache entry; the next get() repopulates it.
        MemcacheManager.delete(self.make_key(filename), namespace=self._ns)
    @db.transactional(xg=True)
    def delete(self, filename):
        # Removes metadata, data and the cache entry for a file.
        filename = self._logical_to_physical(filename)
        metadata = FileMetadataEntity.get_by_key_name(filename)
        if metadata:
            metadata.delete()
        # NOTE(review): this constructs a fresh entity rather than calling
        # FileDataEntity.get_by_key_name(filename), so 'if data:' is always
        # true and delete() is issued by key regardless of whether a row
        # exists — confirm the asymmetry with the metadata handling above
        # is intended.
        data = FileDataEntity(key_name=filename)
        if data:
            data.delete()
        MemcacheManager.delete(self.make_key(filename), namespace=self._ns)
    def isfile(self, afilename):
        """Checks file existence by looking up the datastore row."""
        filename = self._logical_to_physical(afilename)
        # Check cache.
        result = MemcacheManager.get(
            self.make_key(filename), namespace=self._ns)
        if result:
            return True
        if NO_OBJECT == result:
            return False
        # Check datastore.
        metadata = FileMetadataEntity.get_by_key_name(filename)
        if metadata:
            return True
        result = False
        # Check with parent fs.
        if self._inherits_from and self._can_inherit(filename):
            result = self._inherits_from.isfile(afilename)
        # Put NO_OBJECT marker into memcache to avoid repeated lookups.
        if not result:
            MemcacheManager.set(
                self.make_key(filename), NO_OBJECT, namespace=self._ns)
        return result
    def list(self, dir_name, include_inherited=False):
        """Lists all files in a directory by using datastore query.

        Args:
            dir_name: string. Directory to list contents of.
            include_inherited: boolean. If True, includes all inheritable files
                from the parent filesystem.

        Returns:
            List of string. Lexicographically-sorted unique filenames
            recursively found in dir_name.
        """
        dir_name = self._logical_to_physical(dir_name)
        result = set()
        keys = FileMetadataEntity.all(keys_only=True)
        # NOTE(review): fetch(1000) silently truncates listings when more
        # than 1000 metadata rows exist in this namespace.
        for key in keys.fetch(1000):
            filename = key.name()
            if filename.startswith(dir_name):
                result.add(self._physical_to_logical(filename))
        if include_inherited and self._inherits_from:
            for inheritable_folder in self._inheritable_folders:
                result.update(set(self._inherits_from.list(
                    self._physical_to_logical(inheritable_folder))))
        return sorted(list(result))
    def get_jinja_environ(self, dir_names):
        # Templates resolve through this VFS, including inherited files.
        return jinja_utils.create_jinja_environment(
            loader=VirtualFileSystemTemplateLoader(
                self, self._logical_home_folder, dir_names))
    def is_read_write(self):
        return True
def run_all_unit_tests():
    """Runs all unit tests in the project."""
    # NOTE(review): intentionally an empty placeholder; no tests are wired
    # up here yet, so running this module directly is a no-op.
if __name__ == '__main__':
    run_all_unit_tests()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for managing Courses."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import copy
from datetime import datetime
import logging
import os
import pickle
import sys
import appengine_config
from common.schema_fields import FieldRegistry
from common.schema_fields import SchemaField
import common.tags
from tools import verify
import yaml
import models
from models import MemcacheManager
import progress
import review
import transforms
import vfs
# Course data-model versions. 1.2 is the legacy CSV-backed format; 1.3 is
# the datastore-backed format.
COURSE_MODEL_VERSION_1_2 = '1.2'
COURSE_MODEL_VERSION_1_3 = '1.3'
# 1.4 assessments are JavaScript files
ASSESSMENT_MODEL_VERSION_1_4 = '1.4'
# 1.5 assessments are HTML text, with embedded question tags
ASSESSMENT_MODEL_VERSION_1_5 = '1.5'
SUPPORTED_ASSESSMENT_MODEL_VERSIONS = frozenset(
    [ASSESSMENT_MODEL_VERSION_1_4, ASSESSMENT_MODEL_VERSION_1_5])
# Date format string for validating input in ISO 8601 format without a
# timezone. All such strings are assumed to refer to UTC datetimes.
# Example: '2013-03-21 13:00'
ISO_8601_DATE_FORMAT = '%Y-%m-%d %H:%M'
def deep_dict_merge(real_values_dict, default_values_dict):
    """Merges default and real value dictionaries recursively.

    Returns a new dict; neither input is mutated. Entries present in
    real_values_dict win; nested dicts are merged key by key, and defaults
    fill in any missing keys.

    Args:
        real_values_dict: dict or None. The authoritative values.
        default_values_dict: dict. The fallback values.

    Returns:
        A new dict combining both inputs.
    """
    def _fill_defaults(target, defaults):
        """Recursively backfills target with entries from defaults."""
        # Descend into dict values that exist on both sides.
        for key, target_value in target.items():
            fallback = defaults.get(key)
            if (fallback and isinstance(target_value, dict)
                    and isinstance(fallback, dict)):
                _fill_defaults(target_value, fallback)
        # Copy over defaults for keys the target lacks entirely.
        for key, fallback in defaults.items():
            if key not in target:
                target[key] = fallback

    merged = copy.deepcopy(real_values_dict) if real_values_dict else {}
    _fill_defaults(merged, default_values_dict)
    return merged
# The template dict for all courses
# NOTE: this file read happens once, at module import time; if
# ../course_template.yaml is missing or malformed, importing this module
# fails.
course_template_yaml = open(os.path.join(os.path.dirname(
    __file__), '../course_template.yaml'), 'r')
COURSE_TEMPLATE_DICT = yaml.safe_load(
    course_template_yaml.read().decode('utf-8'))
# Here are the defaults for a new course.
DEFAULT_COURSE_YAML_DICT = {
'course': {
'title': 'UNTITLED COURSE',
'locale': 'en_US',
'main_image': {},
'browsable': True,
'now_available': False},
'preview': {},
'unit': {},
'reg_form': {
'can_register': True,
'additional_registration_fields': ''
}
}
# Here are the defaults for an existing course.
DEFAULT_EXISTING_COURSE_YAML_DICT = deep_dict_merge(
{'course': {
'now_available': True}},
DEFAULT_COURSE_YAML_DICT)
# Here is the default course.yaml for a new course.
EMPTY_COURSE_YAML = u"""# my new course.yaml
course:
title: 'New Course by %s'
now_available: False
"""
# Here are the default assessment weights corresponding to the sample course.
DEFAULT_LEGACY_ASSESSMENT_WEIGHTS = {'Pre': 0, 'Mid': 30, 'Fin': 70}
# Indicates that an assessment is graded automatically.
AUTO_GRADER = 'auto'
# Indicates that an assessment is graded by a human.
HUMAN_GRADER = 'human'
# Allowed graders.
ALLOWED_GRADERS = [AUTO_GRADER, HUMAN_GRADER]
# Keys in unit.workflow (when it is converted to a dict).
GRADER_KEY = 'grader'
MATCHER_KEY = 'matcher'
SUBMISSION_DUE_DATE_KEY = 'submission_due_date'
REVIEW_DUE_DATE_KEY = 'review_due_date'
REVIEW_MIN_COUNT_KEY = 'review_min_count'
REVIEW_WINDOW_MINS_KEY = 'review_window_mins'
DEFAULT_REVIEW_MIN_COUNT = 2
DEFAULT_REVIEW_WINDOW_MINS = 60
# Keys specific to human-graded assessments.
HUMAN_GRADED_ASSESSMENT_KEY_LIST = [
MATCHER_KEY, REVIEW_MIN_COUNT_KEY, REVIEW_WINDOW_MINS_KEY,
SUBMISSION_DUE_DATE_KEY, REVIEW_DUE_DATE_KEY
]
# The name for the peer review assessment used in the sample v1.2 CSV file.
# This is here so that a peer review assessment example is available when
# Course Builder loads with the sample course. However, in general, peer
# review assessments should only be specified in Course Builder v1.4 or
# later (via the web interface).
LEGACY_REVIEW_ASSESSMENT = 'ReviewAssessmentExample'
# This value is the default workflow for assessment grading,
DEFAULT_AUTO_GRADER_WORKFLOW = yaml.safe_dump({
GRADER_KEY: AUTO_GRADER
}, default_flow_style=False)
# This value is meant to be used only for the human-reviewed assessments in the
# sample v1.2 Power Searching course.
LEGACY_HUMAN_GRADER_WORKFLOW = yaml.safe_dump({
GRADER_KEY: HUMAN_GRADER,
MATCHER_KEY: review.PEER_MATCHER,
SUBMISSION_DUE_DATE_KEY: '2014-03-14 12:00',
REVIEW_DUE_DATE_KEY: '2014-03-21 12:00',
REVIEW_MIN_COUNT_KEY: DEFAULT_REVIEW_MIN_COUNT,
REVIEW_WINDOW_MINS_KEY: DEFAULT_REVIEW_WINDOW_MINS,
}, default_flow_style=False)
def is_editable_fs(app_context):
    """Returns True iff course files live in the writable datastore VFS."""
    return isinstance(app_context.fs.impl, vfs.DatastoreBackedFileSystem)
def copy_attributes(source, target, converter):
    """Copies source object attributes into a target using a converter.

    Args:
        source: object to read attributes from.
        target: object to set attributes on.
        converter: dict mapping a source attribute name to a
            (target_name, target_type) pair, or to a falsy value to skip
            that attribute.
    """
    for source_name, mapping in converter.items():
        if not mapping:
            continue
        target_name = mapping[0]
        target_type = mapping[1]
        setattr(
            target, target_name, target_type(getattr(source, source_name)))
def load_csv_course(app_context):
    """Loads course data from the CSV files.

    Reads 'unit.csv' and 'lesson.csv' from the context's data home,
    verifies their integrity, then converts the rows into Unit12/Lesson12
    objects.

    Args:
        app_context: application context whose file system holds the files.

    Returns:
        A (units, lessons) pair of object lists, or (None, None) when
        either CSV file is missing.

    Raises:
        AssertionError: if verification reports any error or warning.
    """
    logging.info('Initializing datastore from CSV files.')

    unit_file = os.path.join(app_context.get_data_home(), 'unit.csv')
    lesson_file = os.path.join(app_context.get_data_home(), 'lesson.csv')

    # Check files exist.
    if (not app_context.fs.isfile(unit_file) or
        not app_context.fs.isfile(lesson_file)):
        return None, None

    unit_stream = app_context.fs.open(unit_file)
    lesson_stream = app_context.fs.open(lesson_file)

    # Verify CSV file integrity.
    units = verify.read_objects_from_csv_stream(
        unit_stream, verify.UNITS_HEADER, verify.Unit)
    lessons = verify.read_objects_from_csv_stream(
        lesson_stream, verify.LESSONS_HEADER, verify.Lesson)
    verifier = verify.Verifier()
    verifier.verify_unit_fields(units)
    verifier.verify_lesson_fields(lessons)
    verifier.verify_unit_lesson_relationships(units, lessons)
    assert verifier.errors == 0
    assert verifier.warnings == 0

    # Load data from CSV files into a datastore. The files are opened and
    # read a second time because the verification pass above consumed the
    # first streams.
    units = verify.read_objects_from_csv_stream(
        app_context.fs.open(unit_file), verify.UNITS_HEADER, Unit12,
        converter=verify.UNIT_CSV_TO_DB_CONVERTER)
    lessons = verify.read_objects_from_csv_stream(
        app_context.fs.open(lesson_file), verify.LESSONS_HEADER, Lesson12,
        converter=verify.LESSON_CSV_TO_DB_CONVERTER)
    return units, lessons
def index_units_and_lessons(course):
    """Index all 'U' type units and their lessons. Indexes are 1-based."""
    next_unit_index = 1
    for unit in course.get_units():
        if unit.type != verify.UNIT_TYPE_UNIT:
            continue
        # pylint: disable-msg=protected-access
        unit._index = next_unit_index
        next_unit_index += 1
        for lesson_position, lesson in enumerate(
                course.get_lessons(unit.unit_id), 1):
            lesson._index = lesson_position
        # pylint: enable-msg=protected-access
def create_course_registry():
    """Create the registry for course properties.

    Returns:
        A FieldRegistry describing all editable course settings, grouped
        into course-level, student registration, and homepage
        sub-registries.
    """
    reg = FieldRegistry('Basic Course Settings', description='Course Settings')

    # Course level settings.
    course_opts = reg.add_sub_registry('course', 'Course Config')
    course_opts.add_property(
        SchemaField('course:title', 'Course Name', 'string'))
    course_opts.add_property(
        SchemaField(
            'course:admin_user_emails', 'Course Admin Emails', 'string',
            description='A space-separated list of email addresses of course '
            'administrators. Each email address must be placed between \'[\' '
            'and \']\'.'))
    course_opts.add_property(
        SchemaField(
            'course:forum_email', 'Forum Email', 'string', optional=True,
            description='Email for the forum, e.g. '
            '\'My-Course@googlegroups.com\'.'))
    course_opts.add_property(SchemaField(
        'course:announcement_list_email', 'Announcement List Email', 'string',
        optional=True, description='Email for the mailing list where students '
        'can register to receive course announcements, e.g. '
        '\'My-Course-Announce@googlegroups.com\''))
    course_opts.add_property(SchemaField('course:locale', 'Locale', 'string'))
    course_opts.add_property(SchemaField(
        'course:start_date', 'Course Start Date', 'string', optional=True))
    course_opts.add_property(SchemaField(
        'course:now_available', 'Make Course Available', 'boolean'))
    course_opts.add_property(SchemaField(
        'course:browsable', 'Make Course Browsable', 'boolean',
        description='Allow non-registered users to view course content.'))

    # Course registration settings.
    reg_opts = reg.add_sub_registry('reg_form', 'Student Registration Options')
    reg_opts.add_property(SchemaField(
        'reg_form:can_register', 'Enable Registrations', 'boolean',
        description='Checking this box allows new students to register for '
        'the course.'))
    reg_opts.add_property(SchemaField(
        'reg_form:additional_registration_fields', 'Additional Fields', 'html',
        description='Additional registration text or questions.'))

    # Course homepage settings.
    homepage_opts = reg.add_sub_registry('homepage', 'Homepage Settings')
    homepage_opts.add_property(SchemaField(
        'course:instructor_details', 'Instructor Details', 'html',
        optional=True))
    homepage_opts.add_property(SchemaField(
        'course:blurb', 'Course Abstract', 'html', optional=True,
        description='Text, shown on the course homepage, that explains what '
        'the course is about.',
        extra_schema_dict_values={
            'supportCustomTags': common.tags.CAN_USE_DYNAMIC_TAGS.value,
            'excludedCustomTags':
            common.tags.EditorBlacklists.COURSE_SCOPE}))
    homepage_opts.add_property(SchemaField(
        'course:main_video:url', 'Course Video', 'url', optional=True,
        description='URL for the preview video shown on the course homepage '
        '(e.g. http://www.youtube.com/embed/Kdg2drcUjYI ).'))
    homepage_opts.add_property(SchemaField(
        'course:main_image:url', 'Course Image', 'string', optional=True,
        description='URL for the preview image shown on the course homepage. '
        'This will only be shown if no course video is specified.'))
    homepage_opts.add_property(SchemaField(
        'course:main_image:alt_text', 'Alternate Text', 'string',
        optional=True,
        description='Alt text for the preview image on the course homepage.'))
    return reg
class AbstractCachedObject(object):
    """Abstract serializable versioned object that can be stored in memcache."""

    @classmethod
    def _make_key(cls):
        # The course content files may change between deployment. To avoid
        # reading old cached values by the new version of the application we
        # add deployment version to the key. Now each version of the
        # application can put/get its own version of the course and the
        # deployment.
        return 'course:model:pickle:%s:%s' % (
            cls.VERSION, os.environ.get('CURRENT_VERSION_ID'))

    @classmethod
    def new_memento(cls):
        """Creates new empty memento instance; must be pickle serializable."""
        # NotImplementedError (rather than a bare Exception) marks these
        # three methods as abstract hooks subclasses must override.
        raise NotImplementedError()

    @classmethod
    def instance_from_memento(cls, unused_app_context, unused_memento):
        """Creates instance from serializable memento."""
        raise NotImplementedError()

    @classmethod
    def memento_from_instance(cls, unused_instance):
        """Creates serializable memento from instance."""
        raise NotImplementedError()

    @classmethod
    def load(cls, app_context):
        """Loads instance from memcache; does not fail on errors."""
        try:
            binary_data = MemcacheManager.get(
                cls._make_key(),
                namespace=app_context.get_namespace_name())
            if binary_data:
                memento = cls.new_memento()
                memento.deserialize(binary_data)
                return cls.instance_from_memento(app_context, memento)
        except Exception as e:  # pylint: disable-msg=broad-except
            # Cache failures must never break a request; log and fall
            # through so the caller rebuilds from durable storage.
            logging.error(
                'Failed to load object \'%s\' from memcache. %s',
                cls._make_key(), e)
        return None

    @classmethod
    def save(cls, app_context, instance):
        """Saves instance to memcache."""
        MemcacheManager.set(
            cls._make_key(),
            cls.memento_from_instance(instance).serialize(),
            namespace=app_context.get_namespace_name())

    @classmethod
    def delete(cls, app_context):
        """Deletes instance from memcache."""
        MemcacheManager.delete(
            cls._make_key(),
            namespace=app_context.get_namespace_name())

    def serialize(self):
        """Saves instance to a pickle representation."""
        return pickle.dumps(self.__dict__)

    def deserialize(self, binary_data):
        """Loads instance from a pickle representation.

        Raises:
            Exception: if the pickled 'version' does not match self.version.
        """
        # NOTE(review): pickle.loads is only safe here because this data is
        # written by the application itself; never feed it external input.
        adict = pickle.loads(binary_data)
        if self.version != adict.get('version'):
            raise Exception('Expected version %s, found %s.' % (
                self.version, adict.get('version')))
        self.__dict__.update(adict)
class Unit12(object):
    """A Unit, Assessment or Link in the version 1.2 (CSV) course model."""

    def __init__(self):
        # Primary key.
        self.unit_id = ''
        self.type = ''
        self.title = ''
        self.release_date = ''
        self.now_available = False
        # 1-based position among 'U' type units, computed automatically by
        # index_units_and_lessons().
        self._index = None

    @property
    def href(self):
        assert verify.UNIT_TYPE_LINK == self.type
        return self.unit_id

    @property
    def index(self):
        assert verify.UNIT_TYPE_UNIT == self.type
        return self._index

    @property
    def workflow_yaml(self):
        """Returns the workflow as a YAML text string."""
        assert verify.UNIT_TYPE_ASSESSMENT == self.type
        if self.unit_id == LEGACY_REVIEW_ASSESSMENT:
            return LEGACY_HUMAN_GRADER_WORKFLOW
        return DEFAULT_AUTO_GRADER_WORKFLOW

    @property
    def workflow(self):
        """Returns the workflow as an object."""
        return Workflow(self.workflow_yaml)
class Lesson12(object):
    """A Lesson in the version 1.2 (CSV) course model."""

    def __init__(self):
        # Primary key, and the id of the parent unit.
        self.lesson_id = 0
        self.unit_id = 0
        self.title = ''
        self.scored = False
        self.objectives = ''
        self.video = ''
        self.notes = ''
        self.duration = ''
        self.activity = ''
        self.activity_title = ''
        self.activity_listed = True
        # 1-based position inside the parent unit, computed automatically by
        # index_units_and_lessons().
        self._index = None

    @property
    def now_available(self):
        # Version 1.2 lessons carry no draft state; they are always visible.
        return True

    @property
    def index(self):
        return self._index
class CachedCourse12(AbstractCachedObject):
    """A representation of a Course12 optimized for storing in memcache."""

    # Version stamp; mixed into the memcache key by AbstractCachedObject.
    VERSION = COURSE_MODEL_VERSION_1_2

    def __init__(self, units=None, lessons=None, unit_id_to_lessons=None):
        self.version = self.VERSION
        self.units = units
        self.lessons = lessons
        # Lookup index: str(unit_id) -> list of lessons of that unit.
        self.unit_id_to_lessons = unit_id_to_lessons

    @classmethod
    def new_memento(cls):
        """Returns an empty memento to deserialize into."""
        return CachedCourse12()

    @classmethod
    def instance_from_memento(cls, app_context, memento):
        """Rehydrates a CourseModel12 from a cached memento."""
        return CourseModel12(
            app_context, units=memento.units, lessons=memento.lessons,
            unit_id_to_lessons=memento.unit_id_to_lessons)

    @classmethod
    def memento_from_instance(cls, course):
        """Captures the cacheable state of a CourseModel12."""
        return CachedCourse12(
            units=course.units, lessons=course.lessons,
            unit_id_to_lessons=course.unit_id_to_lessons)
class CourseModel12(object):
    """A course defined in terms of CSV files (version 1.2)."""

    VERSION = COURSE_MODEL_VERSION_1_2

    @classmethod
    def load(cls, app_context):
        """Loads course data into a model."""
        # Try memcache first; on a miss, parse the CSV files and warm the
        # cache for the next request.
        course = CachedCourse12.load(app_context)
        if not course:
            units, lessons = load_csv_course(app_context)
            if units and lessons:
                course = CourseModel12(app_context, units, lessons)
            if course:
                CachedCourse12.save(app_context, course)
        return course

    @classmethod
    def _make_unit_id_to_lessons_lookup_dict(cls, lessons):
        """Creates an index of unit.unit_id to unit.lessons."""
        unit_id_to_lessons = {}
        for lesson in lessons:
            key = str(lesson.unit_id)
            if key not in unit_id_to_lessons:
                unit_id_to_lessons[key] = []
            unit_id_to_lessons[key].append(lesson)
        return unit_id_to_lessons

    def __init__(
        self, app_context,
        units=None, lessons=None, unit_id_to_lessons=None):
        self._app_context = app_context
        self._units = []
        self._lessons = []
        self._unit_id_to_lessons = {}

        if units:
            self._units = units
        if lessons:
            self._lessons = lessons
        if unit_id_to_lessons:
            self._unit_id_to_lessons = unit_id_to_lessons
        else:
            # Index was not supplied (fresh CSV load): rebuild it and
            # recompute unit/lesson positions.
            self._unit_id_to_lessons = (
                self._make_unit_id_to_lessons_lookup_dict(self._lessons))
            index_units_and_lessons(self)

    @property
    def app_context(self):
        return self._app_context

    @property
    def units(self):
        return self._units

    @property
    def lessons(self):
        return self._lessons

    @property
    def unit_id_to_lessons(self):
        return self._unit_id_to_lessons

    def get_units(self):
        # Returns a copy so callers cannot mutate internal state.
        return self._units[:]

    def get_lessons(self, unit_id):
        return self._unit_id_to_lessons.get(str(unit_id), [])

    def find_unit_by_id(self, unit_id):
        """Finds a unit given its id."""
        for unit in self._units:
            if str(unit.unit_id) == str(unit_id):
                return unit
        return None

    def get_review_form_filename(self, unit_id):
        """Returns the corresponding review form filename."""
        return 'assets/js/review-%s.js' % unit_id

    def get_assessment_filename(self, unit_id):
        """Returns assessment base filename."""
        unit = self.find_unit_by_id(unit_id)
        assert unit and verify.UNIT_TYPE_ASSESSMENT == unit.type
        return 'assets/js/assessment-%s.js' % unit.unit_id

    def _get_assessment_as_dict(self, filename):
        """Returns the Python dict representation of an assessment file."""
        root_name = 'assessment'
        context = self._app_context
        assessment_content = context.fs.impl.get(os.path.join(
            context.get_home(), filename)).read()
        # The file is JavaScript; convert and evaluate it to a Python dict.
        content, noverify_text = verify.convert_javascript_to_python(
            assessment_content, root_name)
        assessment = verify.evaluate_python_expression_from_text(
            content, root_name, verify.Assessment().scope, noverify_text)
        return assessment

    def get_assessment_content(self, unit):
        """Returns the schema for an assessment as a Python dict."""
        return self._get_assessment_as_dict(
            self.get_assessment_filename(unit.unit_id))

    def get_assessment_model_version(self, unused_unit):
        # Version 1.2 courses always keep assessments in standalone files.
        return ASSESSMENT_MODEL_VERSION_1_4

    def get_review_form_content(self, unit):
        """Returns the schema for a review form as a Python dict."""
        return self._get_assessment_as_dict(
            self.get_review_form_filename(unit.unit_id))

    def get_activity_filename(self, unit_id, lesson_id):
        """Returns activity base filename."""
        return 'assets/js/activity-%s.%s.js' % (unit_id, lesson_id)

    def find_lesson_by_id(self, unit, lesson_id):
        """Finds a lesson given its id (or 1-based index in this model)."""
        index = int(lesson_id) - 1
        return self.get_lessons(unit.unit_id)[index]

    def to_json(self):
        """Creates JSON representation of this instance."""
        adict = copy.deepcopy(self)
        # The application context is runtime state, not course data.
        del adict._app_context
        return transforms.dumps(
            adict,
            indent=4, sort_keys=True,
            default=lambda o: o.__dict__)
class Unit13(object):
    """A Unit, Assessment or Link in the version 1.3 course model."""

    def __init__(self):
        # Primary key.
        self.unit_id = 0
        self.type = ''
        self.title = ''
        self.release_date = ''
        self.now_available = False
        # 1-based position among 'U' type units; computed automatically.
        self._index = None
        # Only valid when type == verify.UNIT_TYPE_LINK.
        self.href = None
        # The attributes below are only valid when
        # type == verify.UNIT_TYPE_ASSESSMENT.
        self.weight = 1
        self.html_content = None
        self.html_check_answers = False
        self.html_review_form = None
        self.workflow_yaml = DEFAULT_AUTO_GRADER_WORKFLOW

    @property
    def index(self):
        assert verify.UNIT_TYPE_UNIT == self.type
        return self._index

    @property
    def workflow(self):
        """Returns the workflow as an object."""
        assert verify.UNIT_TYPE_ASSESSMENT == self.type
        return Workflow(self.workflow_yaml)
class Lesson13(object):
    """A Lesson in the version 1.3 course model."""

    def __init__(self):
        # Primary key, and the id of the parent unit.
        self.lesson_id = 0
        self.unit_id = 0
        self.title = ''
        self.scored = False
        self.objectives = ''
        self.video = ''
        self.notes = ''
        self.duration = ''
        self.now_available = False
        self.has_activity = False
        self.activity_title = ''
        self.activity_listed = True
        # 1-based position inside the parent unit; computed automatically.
        self._index = None

    @property
    def index(self):
        return self._index

    @property
    def activity(self):
        """Backwards-compatible alias for the old 'activity' attribute."""
        return self.has_activity
class PersistentCourse13(object):
    """A representation of a Course13 optimized for persistence."""

    # Datastore-backed file that holds the serialized course.
    COURSES_FILENAME = 'data/course.json'

    def __init__(self, next_id=None, units=None, lessons=None):
        self.version = CourseModel13.VERSION
        self.next_id = next_id
        self.units = units
        self.lessons = lessons

    def to_dict(self):
        """Saves object attributes into a dict."""
        result = {}
        result['version'] = str(self.version)
        result['next_id'] = int(self.next_id)

        units = []
        for unit in self.units:
            units.append(transforms.instance_to_dict(unit))
        result['units'] = units

        lessons = []
        for lesson in self.lessons:
            lessons.append(transforms.instance_to_dict(lesson))
        result['lessons'] = lessons

        return result

    def _from_dict(self, adict):
        """Loads instance attributes from the dict."""
        self.next_id = int(adict.get('next_id'))

        self.units = []
        unit_dicts = adict.get('units')
        if unit_dicts:
            for unit_dict in unit_dicts:
                unit = Unit13()
                # Defaults keep courses serialized before these attributes
                # existed loadable.
                defaults = {
                    'workflow_yaml': DEFAULT_AUTO_GRADER_WORKFLOW,
                    'html_content': '',
                    'html_check_answers': False,
                    'html_review_form': ''}
                transforms.dict_to_instance(unit_dict, unit, defaults=defaults)
                self.units.append(unit)

        self.lessons = []
        lesson_dicts = adict.get('lessons')
        if lesson_dicts:
            for lesson_dict in lesson_dicts:
                lesson = Lesson13()
                defaults = {
                    'activity_listed': True,
                    'scored': False}
                transforms.dict_to_instance(
                    lesson_dict, lesson, defaults=defaults)
                self.lessons.append(lesson)

    @classmethod
    def save(cls, app_context, course):
        """Saves course to datastore."""
        persistent = PersistentCourse13(
            next_id=course.next_id,
            units=course.units, lessons=course.lessons)

        fs = app_context.fs.impl
        filename = fs.physical_to_logical(cls.COURSES_FILENAME)
        app_context.fs.put(filename, vfs.FileStreamWrapped(
            None, persistent.serialize()))

    @classmethod
    def load(cls, app_context):
        """Loads course from datastore; None if the file does not exist."""
        fs = app_context.fs.impl
        filename = fs.physical_to_logical(cls.COURSES_FILENAME)
        if app_context.fs.isfile(filename):
            persistent = PersistentCourse13()
            persistent.deserialize(app_context.fs.get(filename))
            return CourseModel13(
                app_context, next_id=persistent.next_id,
                units=persistent.units, lessons=persistent.lessons)
        return None

    def serialize(self):
        """Saves instance to a JSON representation."""
        adict = self.to_dict()
        json_text = transforms.dumps(adict)
        return json_text.encode('utf-8')

    def deserialize(self, binary_data):
        """Loads instance from a JSON representation.

        Raises:
            Exception: if the serialized 'version' does not match
                self.version.
        """
        json_text = binary_data.decode('utf-8')
        adict = transforms.loads(json_text)
        if self.version != adict.get('version'):
            raise Exception('Expected version %s, found %s.' % (
                self.version, adict.get('version')))
        self._from_dict(adict)
class CachedCourse13(AbstractCachedObject):
    """A representation of a Course13 optimized for storing in memcache."""

    # Version stamp; mixed into the memcache key by AbstractCachedObject.
    VERSION = COURSE_MODEL_VERSION_1_3

    def __init__(
        self, next_id=None, units=None, lessons=None,
        unit_id_to_lesson_ids=None):
        self.version = self.VERSION
        self.next_id = next_id
        self.units = units
        self.lessons = lessons

        # This is almost the same as PersistentCourse13 above, but it also
        # stores additional indexes used for performance optimizations. There
        # is no need to persist these indexes in durable storage, but it is
        # nice to have them in memcache.
        self.unit_id_to_lesson_ids = unit_id_to_lesson_ids

    @classmethod
    def new_memento(cls):
        """Returns an empty memento to deserialize into."""
        return CachedCourse13()

    @classmethod
    def instance_from_memento(cls, app_context, memento):
        """Rehydrates a CourseModel13 from a cached memento."""
        return CourseModel13(
            app_context, next_id=memento.next_id,
            units=memento.units, lessons=memento.lessons,
            unit_id_to_lesson_ids=memento.unit_id_to_lesson_ids)

    @classmethod
    def memento_from_instance(cls, course):
        """Captures the cacheable state of a CourseModel13."""
        return CachedCourse13(
            next_id=course.next_id,
            units=course.units, lessons=course.lessons,
            unit_id_to_lesson_ids=course.unit_id_to_lesson_ids)
class CourseModel13(object):
"""A course defined in terms of objects (version 1.3)."""
VERSION = COURSE_MODEL_VERSION_1_3
    @classmethod
    def load(cls, app_context):
        """Loads course from memcache or persistence."""
        course = CachedCourse13.load(app_context)
        if not course:
            course = PersistentCourse13.load(app_context)
            if course:
                # Cache miss: warm memcache so the next load is fast.
                CachedCourse13.save(app_context, course)
        return course
@classmethod
def _make_unit_id_to_lessons_lookup_dict(cls, lessons):
"""Creates an index of unit.unit_id to unit.lessons."""
unit_id_to_lesson_ids = {}
for lesson in lessons:
key = str(lesson.unit_id)
if key not in unit_id_to_lesson_ids:
unit_id_to_lesson_ids[key] = []
unit_id_to_lesson_ids[key].append(str(lesson.lesson_id))
return unit_id_to_lesson_ids
    def __init__(
        self, app_context, next_id=None, units=None, lessons=None,
        unit_id_to_lesson_ids=None):
        """Instantiates the model; rebuilds indexes unless they are given."""
        # Init default values.
        self._app_context = app_context
        self._next_id = 1  # a counter for creating sequential entity ids
        self._units = []
        self._lessons = []
        # Lookup index: str(unit_id) -> list of str(lesson_id).
        self._unit_id_to_lesson_ids = {}

        # These arrays keep the dirty objects of the current transaction.
        self._dirty_units = []
        self._dirty_lessons = []
        self._deleted_units = []
        self._deleted_lessons = []

        # Set provided values.
        if next_id:
            self._next_id = next_id
        if units:
            self._units = units
        if lessons:
            self._lessons = lessons
        if unit_id_to_lesson_ids:
            self._unit_id_to_lesson_ids = unit_id_to_lesson_ids
        else:
            self._index()
    @property
    def app_context(self):
        """The application context this course model is bound to."""
        return self._app_context
    @property
    def next_id(self):
        """The next sequential entity id to be allocated."""
        return self._next_id
    @property
    def units(self):
        """The internal list of all units; not a copy."""
        return self._units
    @property
    def lessons(self):
        """The internal list of all lessons; not a copy."""
        return self._lessons
    @property
    def unit_id_to_lesson_ids(self):
        """Lookup index: str(unit_id) -> list of str(lesson_id)."""
        return self._unit_id_to_lesson_ids
def _get_next_id(self):
"""Allocates next id in sequence."""
next_id = self._next_id
self._next_id += 1
return next_id
    def _index(self):
        """Indexes units and lessons."""
        # Rebuild the unit_id -> lesson_ids lookup, then recompute the
        # 1-based _index attributes on units and lessons.
        self._unit_id_to_lesson_ids = self._make_unit_id_to_lessons_lookup_dict(
            self._lessons)
        index_units_and_lessons(self)
def is_dirty(self):
"""Checks if course object has been modified and needs to be saved."""
return self._dirty_units or self._dirty_lessons
    def _flush_deleted_objects(self):
        """Delete files owned by deleted objects."""

        # TODO(psimakov): handle similarly add_unit() and set_assessment()

        # To delete an activity/assessment one must look up its filename. This
        # requires a valid unit/lesson. If unit was deleted it's no longer
        # found in _units, same for lesson. So we temporarily install deleted
        # unit/lesson array instead of actual. We also temporarily empty
        # so _unit_id_to_lesson_ids is not accidentally used. This is a hack,
        # and we will improve it as object model gets more complex, but for
        # now it works fine.
        units = self._units
        lessons = self._lessons
        unit_id_to_lesson_ids = self._unit_id_to_lesson_ids
        try:
            self._units = self._deleted_units
            self._lessons = self._deleted_lessons
            self._unit_id_to_lesson_ids = None

            # Delete owned assessments.
            for unit in self._deleted_units:
                if verify.UNIT_TYPE_ASSESSMENT == unit.type:
                    self._delete_assessment(unit)

            # Delete owned activities.
            for lesson in self._deleted_lessons:
                if lesson.has_activity:
                    self._delete_activity(lesson)
        finally:
            # Always restore the real arrays, even if a deletion failed.
            self._units = units
            self._lessons = lessons
            self._unit_id_to_lesson_ids = unit_id_to_lesson_ids
    def _update_dirty_objects(self):
        """Update files owned by course."""
        fs = self.app_context.fs

        # Update state of owned assessments.
        for unit in self._dirty_units:
            # Re-find by id: skip units deleted after being marked dirty,
            # and units that own no files.
            unit = self.find_unit_by_id(unit.unit_id)
            if not unit or verify.UNIT_TYPE_ASSESSMENT != unit.type:
                continue
            path = fs.impl.physical_to_logical(
                self.get_assessment_filename(unit.unit_id))
            if fs.isfile(path):
                # metadata_only: only the draft flag changes, not content.
                fs.put(
                    path, None, metadata_only=True,
                    is_draft=not unit.now_available)

        # Update state of owned activities.
        for lesson in self._dirty_lessons:
            lesson = self.find_lesson_by_id(None, lesson.lesson_id)
            if not lesson or not lesson.has_activity:
                continue
            path = fs.impl.physical_to_logical(
                self.get_activity_filename(None, lesson.lesson_id))
            if fs.isfile(path):
                fs.put(
                    path, None, metadata_only=True,
                    is_draft=not lesson.now_available)
    def save(self):
        """Saves course to datastore and memcache."""
        # Propagate deletions and draft-state changes to owned files first,
        # then clear the per-transaction tracking lists.
        self._flush_deleted_objects()
        self._update_dirty_objects()

        self._dirty_units = []
        self._dirty_lessons = []
        self._deleted_units = []
        self._deleted_lessons = []

        self._index()
        PersistentCourse13.save(self._app_context, self)
        # Invalidate rather than update the cache; load() repopulates it.
        CachedCourse13.delete(self._app_context)
def get_units(self):
return self._units[:]
def get_lessons(self, unit_id):
lesson_ids = self._unit_id_to_lesson_ids.get(str(unit_id))
lessons = []
if lesson_ids:
for lesson_id in lesson_ids:
lessons.append(self.find_lesson_by_id(None, lesson_id))
return lessons
def get_assessment_filename(self, unit_id):
"""Returns assessment base filename."""
unit = self.find_unit_by_id(unit_id)
assert unit
assert verify.UNIT_TYPE_ASSESSMENT == unit.type
return 'assets/js/assessment-%s.js' % unit.unit_id
def get_review_form_filename(self, unit_id):
"""Returns review form filename."""
unit = self.find_unit_by_id(unit_id)
assert unit
assert verify.UNIT_TYPE_ASSESSMENT == unit.type
return 'assets/js/review-%s.js' % unit.unit_id
def get_activity_filename(self, unused_unit_id, lesson_id):
"""Returns activity base filename."""
lesson = self.find_lesson_by_id(None, lesson_id)
assert lesson
if lesson.has_activity:
return 'assets/js/activity-%s.js' % lesson_id
return None
def find_unit_by_id(self, unit_id):
"""Finds a unit given its id."""
for unit in self._units:
if str(unit.unit_id) == str(unit_id):
return unit
return None
def find_lesson_by_id(self, unused_unit, lesson_id):
"""Finds a lesson given its id."""
for lesson in self._lessons:
if str(lesson.lesson_id) == str(lesson_id):
return lesson
return None
def add_unit(self, unit_type, title):
"""Adds a brand new unit."""
assert unit_type in verify.UNIT_TYPES
unit = Unit13()
unit.type = unit_type
unit.unit_id = self._get_next_id()
unit.title = title
unit.now_available = False
self._units.append(unit)
self._index()
self._dirty_units.append(unit)
return unit
def add_lesson(self, unit, title):
"""Adds brand new lesson to a unit."""
unit = self.find_unit_by_id(unit.unit_id)
assert unit
lesson = Lesson13()
lesson.lesson_id = self._get_next_id()
lesson.unit_id = unit.unit_id
lesson.title = title
lesson.now_available = False
self._lessons.append(lesson)
self._index()
self._dirty_lessons.append(lesson)
return lesson
def move_lesson_to(self, lesson, unit):
"""Moves a lesson to another unit."""
unit = self.find_unit_by_id(unit.unit_id)
assert unit
assert verify.UNIT_TYPE_UNIT == unit.type
lesson = self.find_lesson_by_id(None, lesson.lesson_id)
assert lesson
lesson.unit_id = unit.unit_id
self._index()
return lesson
    def _delete_activity(self, lesson):
        """Deletes the lesson's activity file; True if one was deleted."""
        filename = self._app_context.fs.impl.physical_to_logical(
            self.get_activity_filename(None, lesson.lesson_id))
        if self.app_context.fs.isfile(filename):
            self.app_context.fs.delete(filename)
            return True
        return False
    def _delete_assessment(self, unit):
        """Deletes the unit's assessment and review form files.

        Returns:
            bool. Whether at least one file was deleted.
        """
        files_deleted_count = 0
        filenames = [
            self._app_context.fs.impl.physical_to_logical(
                self.get_assessment_filename(unit.unit_id)),
            self._app_context.fs.impl.physical_to_logical(
                self.get_review_form_filename(unit.unit_id))]
        for filename in filenames:
            if self.app_context.fs.isfile(filename):
                self.app_context.fs.delete(filename)
                files_deleted_count += 1
        return bool(files_deleted_count)
    def delete_all(self):
        """Deletes all course files."""
        for entity in self._app_context.fs.impl.list(
            appengine_config.BUNDLE_ROOT):
            self._app_context.fs.impl.delete(entity)
        # Nothing may survive under the bundle root.
        assert not self._app_context.fs.impl.list(appengine_config.BUNDLE_ROOT)
        # Drop the cached course too, so stale data cannot be served.
        CachedCourse13.delete(self._app_context)
def delete_lesson(self, lesson):
"""Delete a lesson."""
lesson = self.find_lesson_by_id(None, lesson.lesson_id)
if not lesson:
return False
self._lessons.remove(lesson)
self._index()
self._deleted_lessons.append(lesson)
self._dirty_lessons.append(lesson)
return True
def delete_unit(self, unit):
"""Deletes a unit."""
unit = self.find_unit_by_id(unit.unit_id)
if not unit:
return False
for lesson in self.get_lessons(unit.unit_id):
self.delete_lesson(lesson)
self._units.remove(unit)
self._index()
self._deleted_units.append(unit)
self._dirty_units.append(unit)
return True
    def update_unit(self, unit):
        """Updates an existing unit from the attributes of the given one.

        Returns:
            The updated unit, or False when no unit with that id exists.
        """
        existing_unit = self.find_unit_by_id(unit.unit_id)
        if not existing_unit:
            return False
        # Attributes common to all unit types.
        existing_unit.title = unit.title
        existing_unit.release_date = unit.release_date
        existing_unit.now_available = unit.now_available
        # Type-specific attributes.
        if verify.UNIT_TYPE_LINK == existing_unit.type:
            existing_unit.href = unit.href
        if verify.UNIT_TYPE_ASSESSMENT == existing_unit.type:
            existing_unit.weight = unit.weight
            existing_unit.html_content = unit.html_content
            existing_unit.html_check_answers = unit.html_check_answers
            existing_unit.html_review_form = unit.html_review_form
            existing_unit.workflow_yaml = unit.workflow_yaml
        self._dirty_units.append(existing_unit)
        return existing_unit
    def update_lesson(self, lesson):
        """Updates an existing lesson from the attributes of the given one.

        Returns:
            The updated lesson, or False when no lesson with that id exists.
        """
        existing_lesson = self.find_lesson_by_id(
            lesson.unit_id, lesson.lesson_id)
        if not existing_lesson:
            return False
        existing_lesson.title = lesson.title
        existing_lesson.unit_id = lesson.unit_id
        existing_lesson.objectives = lesson.objectives
        existing_lesson.video = lesson.video
        existing_lesson.notes = lesson.notes
        existing_lesson.activity_title = lesson.activity_title
        # NOTE(review): scored, duration, now_available and has_activity are
        # not copied here -- confirm whether that is intentional.
        self._index()
        self._dirty_lessons.append(existing_lesson)
        return existing_lesson
def reorder_units(self, order_data):
"""Reorder the units and lessons based on the order data given.
Args:
order_data: list of dict. Format is
The order_data is in the following format:
[
{'id': 0, 'lessons': [{'id': 0}, {'id': 1}, {'id': 2}]},
{'id': 1},
{'id': 2, 'lessons': [{'id': 0}, {'id': 1}]}
...
]
"""
reordered_units = []
unit_ids = set()
for unit_data in order_data:
unit_id = unit_data['id']
unit = self.find_unit_by_id(unit_id)
assert unit
reordered_units.append(self.find_unit_by_id(unit_id))
unit_ids.add(unit_id)
assert len(unit_ids) == len(self._units)
self._units = reordered_units
reordered_lessons = []
lesson_ids = set()
for unit_data in order_data:
unit_id = unit_data['id']
unit = self.find_unit_by_id(unit_id)
assert unit
if verify.UNIT_TYPE_UNIT != unit.type:
continue
for lesson_data in unit_data['lessons']:
lesson_id = lesson_data['id']
reordered_lessons.append(
self.find_lesson_by_id(None, lesson_id))
lesson_ids.add((unit_id, lesson_id))
assert len(lesson_ids) == len(self._lessons)
self._lessons = reordered_lessons
self._index()
    def _get_assessment_as_dict(self, filename):
        """Gets the content of an assessment file as a Python dict."""
        path = self._app_context.fs.impl.physical_to_logical(filename)
        root_name = 'assessment'
        assessment_content = self.app_context.fs.get(path)
        # The file is JavaScript; convert and evaluate it to a Python dict.
        content, noverify_text = verify.convert_javascript_to_python(
            assessment_content, root_name)
        assessment = verify.evaluate_python_expression_from_text(
            content, root_name, verify.Assessment().scope, noverify_text)
        return assessment
    def get_assessment_content(self, unit):
        """Returns the schema for an assessment as a Python dict."""
        return self._get_assessment_as_dict(
            self.get_assessment_filename(unit.unit_id))
def get_assessment_model_version(self, unit):
filename = self.get_assessment_filename(unit.unit_id)
path = self._app_context.fs.impl.physical_to_logical(filename)
if self.app_context.fs.isfile(path):
return ASSESSMENT_MODEL_VERSION_1_4
else:
return ASSESSMENT_MODEL_VERSION_1_5
    def get_review_form_content(self, unit):
        """Returns the schema for a review form as a Python dict."""
        return self._get_assessment_as_dict(
            self.get_review_form_filename(unit.unit_id))
    def set_assessment_file_content(
        self, unit, assessment_content, dest_filename, errors=None):
        """Updates the content of an assessment file on the file system.

        Args:
            unit: the assessment unit that owns the file.
            assessment_content: JavaScript source defining 'assessment'.
            dest_filename: course-relative destination filename.
            errors: optional list collecting human-readable messages; on
                parse or validation failure a message is appended and the
                method returns without writing anything.
        """
        if errors is None:
            errors = []

        path = self._app_context.fs.impl.physical_to_logical(dest_filename)
        root_name = 'assessment'

        try:
            content, noverify_text = verify.convert_javascript_to_python(
                assessment_content, root_name)
            assessment = verify.evaluate_python_expression_from_text(
                content, root_name, verify.Assessment().scope, noverify_text)
        except Exception:  # pylint: disable-msg=broad-except
            # Deliberately broad: any parse/eval failure is reported through
            # 'errors' rather than raised to the caller.
            errors.append('Unable to parse %s:\n%s' % (
                root_name,
                str(sys.exc_info()[1])))
            return

        verifier = verify.Verifier()
        try:
            verifier.verify_assessment_instance(assessment, path)
        except verify.SchemaException:
            errors.append('Error validating %s\n' % root_name)
            return

        fs = self.app_context.fs
        fs.put(
            path, vfs.string_to_stream(assessment_content),
            is_draft=not unit.now_available)
def set_assessment_content(self, unit, assessment_content, errors=None):
"""Updates the content of an assessment."""
self.set_assessment_file_content(
unit,
assessment_content,
self.get_assessment_filename(unit.unit_id),
errors=errors
)
def set_review_form(self, unit, review_form, errors=None):
"""Sets the content of a review form."""
self.set_assessment_file_content(
unit,
review_form,
self.get_review_form_filename(unit.unit_id),
errors=errors
)
    def set_activity_content(self, lesson, activity_content, errors=None):
        """Updates the content of an activity.

        Args:
            lesson: the lesson that owns the activity file.
            activity_content: JavaScript source defining 'activity'.
            errors: optional list collecting human-readable messages; on
                parse or validation failure a message is appended and the
                method returns without writing anything.
        """
        if errors is None:
            errors = []

        path = self._app_context.fs.impl.physical_to_logical(
            self.get_activity_filename(lesson.unit_id, lesson.lesson_id))
        root_name = 'activity'

        try:
            content, noverify_text = verify.convert_javascript_to_python(
                activity_content, root_name)
            activity = verify.evaluate_python_expression_from_text(
                content, root_name, verify.Activity().scope, noverify_text)
        except Exception:  # pylint: disable-msg=broad-except
            # Deliberately broad: any parse/eval failure is reported through
            # 'errors' rather than raised to the caller.
            errors.append('Unable to parse %s:\n%s' % (
                root_name,
                str(sys.exc_info()[1])))
            return

        verifier = verify.Verifier()
        try:
            verifier.verify_activity_instance(activity, path)
        except verify.SchemaException:
            errors.append('Error validating %s\n' % root_name)
            return

        fs = self.app_context.fs
        fs.put(
            path, vfs.string_to_stream(activity_content),
            is_draft=not lesson.now_available)
    def import_from(self, src_course, errors):
        """Imports a content of another course into this course.

        Args:
            src_course: the source course model whose units, lessons and
                assessment/activity files are copied into this course.
            errors: list that collects human-readable error messages.

        Returns:
            (src_course, self) on success; (None, None) when the target
            course is read-only or not empty.
        """

        def copy_unit12_into_unit13(src_unit, dst_unit):
            """Copies unit object attributes between versions."""
            assert dst_unit.type == src_unit.type
            dst_unit.title = src_unit.title
            dst_unit.release_date = src_unit.release_date
            dst_unit.now_available = src_unit.now_available
            if verify.UNIT_TYPE_LINK == dst_unit.type:
                dst_unit.href = src_unit.href
            # Copy over the assessment. Note that we copy files directly and
            # avoid all logical validations of their content. This is done for
            # a purpose - at this layer we don't care what is in those files.
            if verify.UNIT_TYPE_ASSESSMENT == dst_unit.type:
                # Old-model assessments carry no weight; use legacy defaults.
                if src_unit.unit_id in DEFAULT_LEGACY_ASSESSMENT_WEIGHTS:
                    dst_unit.weight = (
                        DEFAULT_LEGACY_ASSESSMENT_WEIGHTS[src_unit.unit_id])
                filepath_mappings = [{
                    'src': src_course.get_assessment_filename(src_unit.unit_id),
                    'dst': self.get_assessment_filename(dst_unit.unit_id)
                }, {
                    'src': src_course.get_review_form_filename(
                        src_unit.unit_id),
                    'dst': self.get_review_form_filename(dst_unit.unit_id)
                }]
                for mapping in filepath_mappings:
                    src_filename = os.path.join(
                        src_course.app_context.get_home(), mapping['src'])
                    if src_course.app_context.fs.isfile(src_filename):
                        astream = src_course.app_context.fs.open(src_filename)
                        if astream:
                            dst_filename = os.path.join(
                                self.app_context.get_home(), mapping['dst'])
                            self.app_context.fs.put(dst_filename, astream)
                dst_unit.workflow_yaml = src_unit.workflow_yaml

        def copy_unit13_into_unit13(src_unit, dst_unit):
            """Copies unit13 attributes to a new unit."""
            copy_unit12_into_unit13(src_unit, dst_unit)
            # New-model assessments carry an explicit weight; preserve it.
            if verify.UNIT_TYPE_ASSESSMENT == dst_unit.type:
                dst_unit.weight = src_unit.weight

        def copy_lesson12_into_lesson13(
            src_unit, src_lesson, unused_dst_unit, dst_lesson):
            """Copies lessons object attributes between versions."""
            dst_lesson.objectives = src_lesson.objectives
            dst_lesson.video = src_lesson.video
            dst_lesson.notes = src_lesson.notes
            dst_lesson.duration = src_lesson.duration
            dst_lesson.has_activity = src_lesson.activity
            dst_lesson.activity_title = src_lesson.activity_title
            # Old model does not have this flag, but all lessons are available.
            dst_lesson.now_available = True
            # Copy over the activity. Note that we copy files directly and
            # avoid all logical validations of their content. This is done for a
            # purpose - at this layer we don't care what is in those files.
            if src_lesson.activity:
                src_filename = os.path.join(
                    src_course.app_context.get_home(),
                    src_course.get_activity_filename(
                        src_unit.unit_id, src_lesson.lesson_id))
                if src_course.app_context.fs.isfile(src_filename):
                    astream = src_course.app_context.fs.open(src_filename)
                    if astream:
                        dst_filename = os.path.join(
                            self.app_context.get_home(),
                            self.get_activity_filename(
                                None, dst_lesson.lesson_id))
                        self.app_context.fs.put(dst_filename, astream)

        # Refuse to import into a read-only or non-empty target course.
        if not is_editable_fs(self._app_context):
            errors.append(
                'Target course %s must be '
                'on read-write media.' % self.app_context.raw)
            return None, None

        if self.get_units():
            errors.append(
                'Target course %s must be '
                'empty.' % self.app_context.raw)
            return None, None

        # Iterate over course structure and assets and import each item.
        for unit in src_course.get_units():
            new_unit = self.add_unit(unit.type, unit.title)
            # TODO(johncox): Create a full flow for importing a
            # Course13 into a Course13
            if src_course.version == self.VERSION:
                copy_unit13_into_unit13(unit, new_unit)
            else:
                copy_unit12_into_unit13(unit, new_unit)
            for lesson in src_course.get_lessons(unit.unit_id):
                new_lesson = self.add_lesson(new_unit, lesson.title)
                copy_lesson12_into_lesson13(unit, lesson, new_unit, new_lesson)

        return src_course, self
def to_json(self):
"""Creates JSON representation of this instance."""
persistent = PersistentCourse13(
next_id=self._next_id, units=self._units, lessons=self._lessons)
return transforms.dumps(
persistent.to_dict(),
indent=4, sort_keys=True,
default=lambda o: o.__dict__)
class Workflow(object):
    """Stores workflow specifications for assessments.

    The specification is held as a raw YAML string and parsed on demand;
    validate() checks it without raising to the caller.
    """

    def __init__(self, yaml_str):
        """Sets yaml_str (the workflow spec), without doing any validation."""
        self._yaml_str = yaml_str

    def to_yaml(self):
        """Returns the raw YAML workflow specification string."""
        return self._yaml_str

    def to_dict(self):
        """Parses the YAML spec into a dict; an empty spec yields {}."""
        if not self._yaml_str:
            return {}
        obj = yaml.safe_load(self._yaml_str)
        assert isinstance(obj, dict)
        return obj

    def _convert_date_string_to_datetime(self, date_str):
        """Returns a datetime object."""
        if not date_str:
            return None
        return datetime.strptime(date_str, ISO_8601_DATE_FORMAT)

    def get_grader(self):
        """Returns the associated grader."""
        return self.to_dict().get(GRADER_KEY)

    def get_matcher(self):
        """Returns the review matcher (meaningful for human-graded work)."""
        return self.to_dict().get(MATCHER_KEY)

    def get_submission_due_date(self):
        """Returns the submission due date as a datetime, or None."""
        date_str = self.to_dict().get(SUBMISSION_DUE_DATE_KEY)
        if date_str is None:
            return None
        return self._convert_date_string_to_datetime(date_str)

    def get_review_due_date(self):
        """Returns the review due date as a datetime, or None."""
        date_str = self.to_dict().get(REVIEW_DUE_DATE_KEY)
        if date_str is None:
            return None
        return self._convert_date_string_to_datetime(date_str)

    def get_review_min_count(self):
        """Returns the minimum number of reviews required, or None."""
        return self.to_dict().get(REVIEW_MIN_COUNT_KEY)

    def get_review_window_mins(self):
        """Returns the review window length in minutes, or None."""
        return self.to_dict().get(REVIEW_WINDOW_MINS_KEY)

    def _ensure_value_is_nonnegative_int(self, workflow_dict, key, errors):
        """Checks that workflow_dict[key] is a non-negative integer."""
        value = workflow_dict[key]
        if not isinstance(value, int):
            errors.append('%s should be an integer' % key)
        elif value < 0:
            errors.append('%s should be a non-negative integer' % key)

    def validate(self, errors=None):
        """Tests whether the current Workflow object is valid.

        Args:
            errors: optional list; failure messages are appended to it.

        Returns:
            True when the spec is valid, False otherwise (never raises).
        """
        if errors is None:
            errors = []
        try:
            # Validate the workflow specification (in YAML format).
            assert self._yaml_str, 'missing key: %s.' % GRADER_KEY
            workflow_dict = yaml.safe_load(self._yaml_str)
            assert isinstance(workflow_dict, dict), (
                'expected the YAML representation of a dict')
            assert GRADER_KEY in workflow_dict, 'missing key: %s.' % GRADER_KEY
            assert workflow_dict[GRADER_KEY] in ALLOWED_GRADERS, (
                'invalid grader, should be one of: %s' %
                ', '.join(ALLOWED_GRADERS))

            workflow_errors = []
            submission_due_date = None
            if SUBMISSION_DUE_DATE_KEY in workflow_dict.keys():
                try:
                    submission_due_date = self._convert_date_string_to_datetime(
                        workflow_dict[SUBMISSION_DUE_DATE_KEY])
                except Exception as e:  # pylint: disable-msg=broad-except
                    workflow_errors.append(
                        'dates should be formatted as YYYY-MM-DD hh:mm '
                        '(e.g. 1997-07-16 19:20) and be specified in the UTC '
                        'timezone')
            if workflow_errors:
                raise Exception('%s.' % '; '.join(workflow_errors))

            # Human-graded assessments carry extra required keys.
            if workflow_dict[GRADER_KEY] == HUMAN_GRADER:
                missing_keys = []
                for key in HUMAN_GRADED_ASSESSMENT_KEY_LIST:
                    if key not in workflow_dict:
                        missing_keys.append(key)
                    # Present-but-empty string values count as missing.
                    elif (isinstance(workflow_dict[key], basestring) and not
                          workflow_dict[key]):
                        missing_keys.append(key)
                assert not missing_keys, (
                    'missing key(s) for a human-reviewed assessment: %s.' %
                    ', '.join(missing_keys))

                if (workflow_dict[MATCHER_KEY] not in
                    review.ALLOWED_MATCHERS):
                    workflow_errors.append(
                        'invalid matcher, should be one of: %s' %
                        ', '.join(review.ALLOWED_MATCHERS))
                self._ensure_value_is_nonnegative_int(
                    workflow_dict, REVIEW_MIN_COUNT_KEY, workflow_errors)
                self._ensure_value_is_nonnegative_int(
                    workflow_dict, REVIEW_WINDOW_MINS_KEY, workflow_errors)
                try:
                    review_due_date = self._convert_date_string_to_datetime(
                        workflow_dict[REVIEW_DUE_DATE_KEY])
                    # NOTE(review): submission_due_date may still be None
                    # here; in Python 2, None compares as less than any
                    # datetime, so this check is silently skipped then.
                    if submission_due_date > review_due_date:
                        workflow_errors.append(
                            'submission due date should be earlier than '
                            'review due date')
                except Exception as e:  # pylint: disable-msg=broad-except
                    workflow_errors.append(
                        'dates should be formatted as YYYY-MM-DD hh:mm '
                        '(e.g. 1997-07-16 19:20) and be specified in the UTC '
                        'timezone')
                if workflow_errors:
                    raise Exception('%s.' % '; '.join(workflow_errors))

            return True
        except Exception as e:  # pylint: disable-msg=broad-except
            errors.append('Error validating workflow specification: %s' % e)
            return False
class Course(object):
    """Manages a course and all of its components.

    This is a facade over a versioned course model (CourseModel12 read-only
    legacy courses, CourseModel13 editable courses); most methods delegate
    to self._model.
    """

    @classmethod
    def get_environ(cls, app_context):
        """Returns currently defined course settings as a dictionary."""
        course_yaml = None
        course_yaml_dict = None
        course_data_filename = app_context.get_config_filename()
        if app_context.fs.isfile(course_data_filename):
            course_yaml = app_context.fs.open(course_data_filename)
        # No course.yaml: fall back entirely to defaults.
        if not course_yaml:
            return deep_dict_merge(DEFAULT_COURSE_YAML_DICT,
                                   COURSE_TEMPLATE_DICT)
        try:
            course_yaml_dict = yaml.safe_load(
                course_yaml.read().decode('utf-8'))
        except Exception as e:  # pylint: disable-msg=broad-except
            logging.info(
                'Error: course.yaml file at %s not accessible, '
                'loading defaults. %s', course_data_filename, e)
        if not course_yaml_dict:
            return deep_dict_merge(DEFAULT_COURSE_YAML_DICT,
                                   COURSE_TEMPLATE_DICT)
        # Layer file contents over existing-course defaults and the template.
        return deep_dict_merge(deep_dict_merge(
            course_yaml_dict, DEFAULT_EXISTING_COURSE_YAML_DICT),
            COURSE_TEMPLATE_DICT)

    @property
    def version(self):
        """Version string of the underlying course model."""
        return self._model.VERSION

    @classmethod
    def create_new_default_course(cls, app_context):
        """Creates a new empty course in the latest (1.3) format."""
        return CourseModel13(app_context)

    @classmethod
    def custom_new_default_course_for_test(cls, app_context):
        # There is an expectation in our tests of automatic import
        # of data/*.csv files. This method can be used in tests to achieve
        # exactly that.
        model = CourseModel12.load(app_context)
        if model:
            return model
        return CourseModel13(app_context)

    @classmethod
    def _load(cls, app_context):
        """Loads course data from persistence storage into this instance."""
        if not is_editable_fs(app_context):
            # Read-only media can only hold legacy 1.2 courses.
            model = CourseModel12.load(app_context)
            if model:
                return model
        else:
            model = CourseModel13.load(app_context)
            if model:
                return model
        return cls.create_new_default_course(app_context)

    def __init__(self, handler, app_context=None):
        """Creates a Course bound to an app_context (or the handler's)."""
        self._app_context = app_context if app_context else handler.app_context
        self._namespace = self._app_context.get_namespace_name()
        self._model = self._load(self._app_context)
        # Lazily-created collaborators; see the getters below.
        self._tracker = None
        self._reviews_processor = None

    @property
    def app_context(self):
        return self._app_context

    def to_json(self):
        """Returns JSON representation of the underlying model."""
        return self._model.to_json()

    def get_progress_tracker(self):
        """Returns (lazily creating) the completion tracker for this course."""
        if not self._tracker:
            self._tracker = progress.UnitLessonCompletionTracker(self)
        return self._tracker

    def get_reviews_processor(self):
        """Returns (lazily creating) the peer-review processor."""
        if not self._reviews_processor:
            self._reviews_processor = review.ReviewsProcessor(self)
        return self._reviews_processor

    def get_units(self):
        return self._model.get_units()

    def get_units_of_type(self, unit_type):
        """Returns all units whose type matches unit_type."""
        return [unit for unit in self.get_units() if unit_type == unit.type]

    def get_lessons(self, unit_id):
        return self._model.get_lessons(unit_id)

    def get_lessons_for_all_units(self):
        """Returns all lessons of all units, in course order."""
        lessons = []
        for unit in self.get_units():
            for lesson in self.get_lessons(unit.unit_id):
                lessons.append(lesson)
        return lessons

    def save(self):
        return self._model.save()

    def find_unit_by_id(self, unit_id):
        return self._model.find_unit_by_id(unit_id)

    def find_lesson_by_id(self, unit, lesson_id):
        return self._model.find_lesson_by_id(unit, lesson_id)

    def is_last_assessment(self, unit):
        """Checks whether the given unit is the last of all the assessments."""
        for current_unit in reversed(self.get_units()):
            if current_unit.type == verify.UNIT_TYPE_ASSESSMENT:
                return current_unit.unit_id == unit.unit_id
        return False

    def add_unit(self):
        """Adds new unit to a course."""
        return self._model.add_unit('U', 'New Unit')

    def add_link(self):
        """Adds new link (other) to a course."""
        return self._model.add_unit('O', 'New Link')

    def add_assessment(self):
        """Adds new assessment to a course."""
        return self._model.add_unit('A', 'New Assessment')

    def add_lesson(self, unit):
        """Adds a new lesson to the given unit."""
        return self._model.add_lesson(unit, 'New Lesson')

    def update_unit(self, unit):
        return self._model.update_unit(unit)

    def update_lesson(self, lesson):
        return self._model.update_lesson(lesson)

    def move_lesson_to(self, lesson, unit):
        return self._model.move_lesson_to(lesson, unit)

    def delete_all(self):
        return self._model.delete_all()

    def delete_unit(self, unit):
        return self._model.delete_unit(unit)

    def delete_lesson(self, lesson):
        return self._model.delete_lesson(lesson)

    def get_score(self, student, assessment_id):
        """Gets a student's score for a particular assessment."""
        assert self.is_valid_assessment_id(assessment_id)
        scores = transforms.loads(student.scores) if student.scores else {}
        return scores.get(assessment_id) if scores else None

    def get_overall_score(self, student):
        """Gets the overall course score for a student.

        Returns a weighted average over machine-graded assessments only,
        or None when no weight is available.
        """
        score_list = self.get_all_scores(student)
        overall_score = 0
        total_weight = 0
        for unit in score_list:
            # Human-graded assessments do not contribute to the overall score.
            if not unit['human_graded']:
                total_weight += unit['weight']
                overall_score += unit['weight'] * unit['score']
        if total_weight == 0:
            return None
        return int(float(overall_score) / total_weight)

    def is_course_complete(self, student):
        """Returns true if the student has completed the course."""
        score_list = self.get_all_scores(student)
        for unit in score_list:
            if not unit['completed']:
                return False
        return True

    def update_final_grades(self, student):
        """Updates the final grades of the student."""
        if (models.CAN_SHARE_STUDENT_PROFILE.value and
            self.is_course_complete(student)):
            overall_score = self.get_overall_score(student)
            models.StudentProfileDAO.update(
                student.user_id, student.email, final_grade=overall_score)

    def get_overall_result(self, student):
        """Gets the overall result based on a student's score profile."""
        score = self.get_overall_score(student)
        if score is None:
            return None

        # This can be replaced with a custom definition for an overall result
        # string.
        return 'pass' if self.get_overall_score(student) >= 70 else 'fail'

    def get_all_scores(self, student):
        """Gets all score data for a student.

        Args:
            student: the student whose scores should be retrieved.

        Returns:
            an array of dicts, each representing an assessment. Each dict has
            the keys 'id', 'title', 'weight', 'completed', 'human_graded' and
            'score' (0 when no score is recorded), representing the unit id,
            the assessment title, the weight contributed by the assessment to
            the final score, whether the assessment (and, for human-reviewed
            assessments, enough reviews) is complete, whether it is
            human-graded, and the assessment score.
        """
        assessment_list = self.get_assessment_list()
        scores = transforms.loads(student.scores) if student.scores else {}
        unit_progress = self.get_progress_tracker().get_unit_progress(student)

        assessment_score_list = []
        for unit in assessment_list:
            # Compute the weight for this assessment.
            weight = 0
            if hasattr(unit, 'weight'):
                weight = unit.weight
            elif unit.unit_id in DEFAULT_LEGACY_ASSESSMENT_WEIGHTS:
                weight = DEFAULT_LEGACY_ASSESSMENT_WEIGHTS[unit.unit_id]

            completed = unit_progress[unit.unit_id]

            # If a human-reviewed assessment is completed, ensure that the
            # required reviews have also been completed.
            if completed and self.needs_human_grader(unit):
                reviews = self.get_reviews_processor().get_review_steps_by(
                    unit.unit_id, student.get_key())
                review_min_count = unit.workflow.get_review_min_count()
                if not review.ReviewUtils.has_completed_enough_reviews(
                    reviews, review_min_count):
                    completed = False

            assessment_score_list.append({
                'id': str(unit.unit_id),
                'title': unit.title,
                'weight': weight,
                'completed': completed,
                'human_graded': self.needs_human_grader(unit),
                'score': (scores[str(unit.unit_id)]
                          if str(unit.unit_id) in scores else 0),
            })

        return assessment_score_list

    def get_assessment_list(self):
        """Returns a list of units that are assessments."""
        # TODO(psimakov): Streamline this so that it does not require a full
        # iteration on each request, probably by modifying the index() method.
        assessment_list = []
        for unit in self.get_units():
            if verify.UNIT_TYPE_ASSESSMENT == unit.type:
                assessment_list.append(unit)
        return copy.deepcopy(assessment_list)

    def get_peer_reviewed_units(self):
        """Returns a list of units that are peer-reviewed assessments.

        Returns:
            A list of units that are peer-reviewed assessments. Each unit
            in the list has a unit_id of type string.
        """
        assessment_list = self.get_assessment_list()
        units = copy.deepcopy([unit for unit in assessment_list if (
            unit.workflow.get_grader() == HUMAN_GRADER and
            unit.workflow.get_matcher() == review.PEER_MATCHER)])
        for unit in units:
            unit.unit_id = str(unit.unit_id)
        return units

    def get_assessment_filename(self, unit_id):
        return self._model.get_assessment_filename(unit_id)

    def get_review_form_filename(self, unit_id):
        return self._model.get_review_form_filename(unit_id)

    def get_activity_filename(self, unit_id, lesson_id):
        return self._model.get_activity_filename(unit_id, lesson_id)

    def get_components(self, unit_id, lesson_id):
        """Returns a list of dicts representing the components in a lesson.

        Args:
            unit_id: the id of the unit containing the lesson
            lesson_id: the id of the lesson

        Returns:
            A list of dicts. Each dict represents one component and has two
            keys:
            - instanceid: the instance id of the component
            - cpt_name: the name of the component tag (e.g. gcb-googlegroup)
        """
        unit = self.find_unit_by_id(unit_id)
        lesson = self.find_lesson_by_id(unit, lesson_id)
        if not lesson.objectives:
            return []
        return common.tags.get_components_from_html(lesson.objectives)

    def get_assessment_components(self, unit_id):
        """Returns a list of dicts representing components in an assessment.

        Args:
            unit_id: the id of the assessment unit

        Returns:
            A list of dicts. Each dict represents one component and has two
            keys:
            - instanceid: the instance id of the component
            - cpt_name: the name of the component tag (e.g. gcb-googlegroup)
        """
        unit = self.find_unit_by_id(unit_id)
        if not getattr(unit, 'html_content', None):
            return []
        return common.tags.get_components_from_html(unit.html_content)

    def get_question_components(self, unit_id, lesson_id):
        """Returns a list of dicts representing the questions in a lesson."""
        components = self.get_components(unit_id, lesson_id)
        question_components = []
        for component in components:
            if component.get('cpt_name') == 'question':
                question_components.append(component)
        return question_components

    def get_question_group_components(self, unit_id, lesson_id):
        """Returns a list of dicts representing the q_groups in a lesson."""
        components = self.get_components(unit_id, lesson_id)
        question_group_components = []
        for component in components:
            if component.get('cpt_name') == 'question-group':
                question_group_components.append(component)
        return question_group_components

    def needs_human_grader(self, unit):
        """Whether the unit's workflow declares a human grader."""
        return unit.workflow.get_grader() == HUMAN_GRADER

    def reorder_units(self, order_data):
        return self._model.reorder_units(order_data)

    def get_assessment_content(self, unit):
        """Returns the schema for an assessment as a Python dict."""
        return self._model.get_assessment_content(unit)

    def get_assessment_model_version(self, unit):
        return self._model.get_assessment_model_version(unit)

    def get_review_form_content(self, unit):
        """Returns the schema for a review form as a Python dict."""
        return self._model.get_review_form_content(unit)

    def set_assessment_content(self, unit, assessment_content, errors=None):
        return self._model.set_assessment_content(
            unit, assessment_content, errors=errors)

    def set_review_form(self, unit, review_form, errors=None):
        return self._model.set_review_form(unit, review_form, errors=errors)

    def set_activity_content(self, lesson, activity_content, errors=None):
        return self._model.set_activity_content(
            lesson, activity_content, errors=errors)

    def is_valid_assessment_id(self, assessment_id):
        """Tests whether the given assessment id is valid."""
        for unit in self.get_units():
            if (verify.UNIT_TYPE_ASSESSMENT == unit.type and
                str(assessment_id) == str(unit.unit_id)):
                return True
        return False

    def is_valid_unit_lesson_id(self, unit_id, lesson_id):
        """Tests whether the given unit id and lesson id are valid."""
        for unit in self.get_units():
            if str(unit.unit_id) == str(unit_id):
                for lesson in self.get_lessons(unit_id):
                    if str(lesson.lesson_id) == str(lesson_id):
                        return True
        return False

    def import_from(self, app_context, errors=None):
        """Import course structure and assets from another courses."""
        src_course = Course(None, app_context=app_context)
        if errors is None:
            errors = []

        # Import 1.2 -> 1.3
        if (src_course.version == CourseModel12.VERSION and
            self.version == CourseModel13.VERSION):
            return self._model.import_from(src_course, errors)

        # import 1.3 -> 1.3
        if (src_course.version == CourseModel13.VERSION and
            self.version == CourseModel13.VERSION):
            return self._model.import_from(src_course, errors)

        errors.append(
            'Import of '
            'course %s (version %s) into '
            'course %s (version %s) '
            'is not supported.' % (
                app_context.raw, src_course.version,
                self.app_context.raw, self.version))

        return None, None

    def get_course_announcement_list_email(self):
        """Get Announcement email address for the course."""
        course_env = self.get_environ(self._app_context)
        if not course_env:
            return None
        if 'course' not in course_env:
            return None
        course_dict = course_env['course']
        if 'announcement_list_email' not in course_dict:
            return None
        announcement_list_email = course_dict['announcement_list_email']
        if announcement_list_email:
            return announcement_list_email
        return None

    def init_new_course_settings(self, title, admin_email):
        """Initializes new course.yaml file if it does not yet exists."""
        fs = self.app_context.fs.impl
        course_yaml = fs.physical_to_logical('/course.yaml')
        if fs.isfile(course_yaml):
            return False

        # Escape single quotes for embedding in a YAML single-quoted scalar.
        title = title.replace('\'', '\'\'')

        course_yaml_text = u"""# my new course.yaml
course:
  title: '%s'
  admin_user_emails: '[%s]'
  now_available: False
""" % (title, admin_email)

        fs.put(course_yaml, vfs.string_to_stream(course_yaml_text))
        return True
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting dynamically registering custom modules."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
class Module(object):
    """A class that holds module information.

    Instantiating a Module registers it with the global Registry; enabling
    it is a separate, explicit step.
    """

    def __init__(
        self, name, desc, global_routes, namespaced_routes,
        notify_module_enabled=None, notify_module_disabled=None):
        self._name = name
        self._desc = desc
        self._global_routes = global_routes
        self._namespaced_routes = namespaced_routes
        self._notify_module_enabled = notify_module_enabled
        self._notify_module_disabled = notify_module_disabled
        # Register on construction so the Registry sees every module.
        Registry.registered_modules[self._name] = self

    def disable(self):
        """Removes this module from the enabled set and fires its callback."""
        if self.name in Registry.enabled_module_names:
            Registry.enabled_module_names.remove(self.name)
        if self._notify_module_disabled:
            self._notify_module_disabled()

    def enable(self):
        """Adds this module to the enabled set and fires its callback."""
        Registry.enabled_module_names.add(self.name)
        if self._notify_module_enabled:
            self._notify_module_enabled()

    @property
    def enabled(self):
        return self.name in Registry.enabled_module_names

    @property
    def name(self):
        return self._name

    @property
    def desc(self):
        return self._desc

    @property
    def global_routes(self):
        # A disabled module contributes no routes.
        return self._global_routes if self.enabled else []

    @property
    def namespaced_routes(self):
        # A disabled module contributes no routes.
        return self._namespaced_routes if self.enabled else []
class Registry(object):
    """A registry that holds all custom modules."""

    # Maps module name -> Module instance; populated by Module.__init__.
    registered_modules = {}
    # Names of modules currently enabled.
    enabled_module_names = set()

    @classmethod
    def get_all_routes(cls):
        """Collects the routes of all enabled modules.

        Returns:
            A (global_routes, namespaced_routes) pair of lists.
        """
        global_routes = []
        namespaced_routes = []
        # Only populate the routing table with enabled modules.
        for module in cls.registered_modules.values():
            if module.enabled:
                global_routes.extend(module.global_routes)
                namespaced_routes.extend(module.namespaced_routes)
        return global_routes, namespaced_routes
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for Sending notifications."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
from google.appengine.api import mail
from google.appengine.api import users
class EmailManager(object):
    """Notification Manager. Sends emails out."""

    def __init__(self, course):
        self._course = course
        self._user = users.get_current_user()

    def send_mail(self, subject, body, reciever):
        """send email."""
        # The signed-in user is both sender and 'To'; actual recipients go
        # on BCC so list members do not see each other's addresses.
        sender_address = self._user.email()
        message = mail.EmailMessage()
        message.sender = sender_address
        message.to = sender_address
        message.bcc = reciever
        message.subject = subject
        message.html = body
        message.send()
        return True

    def send_announcement(self, subject, body):
        """Send an announcement to course announcement list."""
        announce_email = self._course.get_course_announcement_list_email()
        if not announce_email:
            # No announcement list configured for this course.
            return False
        return self.send_mail(subject, body, announce_email)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of converters between db models, Python and JSON dictionaries, etc."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import base64
import datetime
import json
from xml.etree import ElementTree
import entities
from google.appengine.api import datastore_types
from google.appengine.ext import db
# Wire format for dates exchanged through JSON payloads.
JSON_DATE_FORMAT = '%Y/%m/%d'
# Schema 'type' values understood by json_to_dict().
JSON_TYPES = ['string', 'date', 'text', 'html', 'boolean', 'integer', 'number',
              'array', 'object']
# Prefix to add to all JSON responses to guard against XSSI. Must be kept in
# sync with modules/oeditor/oeditor.html.
_JSON_XSSI_PREFIX = ")]}'\n"
# Python types passed through to JSON output unchanged.
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
# Non-simple types we know how to convert (see dict_to_json).
SUPPORTED_TYPES = (
    datastore_types.Key,
    datetime.date,
    db.GeoPt,
)
def dict_to_json(source_dict, unused_schema):
    """Converts Python dictionary into JSON dictionary using schema.

    Args:
        source_dict: dict of simple or SUPPORTED_TYPES values.
        unused_schema: accepted for symmetry with json_to_dict; ignored.

    Returns:
        A dict containing only JSON-serializable values.

    Raises:
        ValueError: when a value's type cannot be converted.
    """

    def _encode(key, value):
        """Converts one value to a JSON-friendly form."""
        if value is None or isinstance(value, SIMPLE_TYPES):
            return value
        if isinstance(value, datastore_types.Key):
            return str(value)
        if isinstance(value, datetime.date):
            return value.strftime(JSON_DATE_FORMAT)
        if isinstance(value, db.GeoPt):
            return {'lat': value.lat, 'lon': value.lon}
        raise ValueError(
            'Failed to encode key \'%s\' with value \'%s\'.' % (key, value))

    return dict(
        (key, _encode(key, value)) for key, value in source_dict.items())
def dumps(*args, **kwargs):
    """Wrapper around json.dumps.

    No additional behavior; present here so this module is a drop-in
    replacement for json.dumps|loads. Clients should never use
    json.dumps|loads directly. See usage docs at
    http://docs.python.org/2/library/json.html.

    Args:
        *args: positional arguments delegated to json.dumps.
        **kwargs: keyword arguments delegated to json.dumps.

    Returns:
        string. The converted JSON.
    """

    class SetAsListJSONEncoder(json.JSONEncoder):
        """Encoder that renders Python sets as JSON arrays."""

        def default(self, obj):
            if isinstance(obj, set):
                return list(obj)
            return super(SetAsListJSONEncoder, self).default(obj)

    # Respect a caller-supplied encoder; otherwise install ours.
    kwargs.setdefault('cls', SetAsListJSONEncoder)
    return json.dumps(*args, **kwargs)
def loads(s, prefix=_JSON_XSSI_PREFIX, **kwargs):
    """Wrapper around json.loads that handles XSSI-protected responses.

    To prevent XSSI we insert a prefix before our JSON responses during server-
    side rendering. This loads() removes the prefix and should always be used
    in place of json.loads. See usage docs at
    http://docs.python.org/2/library/json.html.

    Args:
        s: str or unicode. JSON contents to convert.
        prefix: string. The XSSI prefix we remove before conversion.
        **kwargs: keyword arguments delegated to json.loads.

    Returns:
        object. Python object reconstituted from the given JSON string.
    """
    if s.startswith(prefix):
        # Slice off exactly the prefix. str.lstrip(prefix) is wrong here: it
        # treats its argument as a *set of characters* and keeps stripping
        # any leading characters drawn from that set, not the prefix string.
        s = s[len(prefix):]
    return json.loads(s, **kwargs)
def json_to_dict(source_dict, schema):
    """Converts JSON dictionary into Python dictionary using schema.

    Args:
        source_dict: dict decoded from JSON input.
        schema: schema dict with a 'properties' map describing each key.

    Returns:
        A dict with values converted per the schema.

    Raises:
        ValueError: on a missing required key, an unsupported schema type,
            or a malformed boolean value.
    """

    def convert_bool(value, key):
        """Coerces a bool or 'true'/'false' string into a bool."""
        if isinstance(value, bool):
            return value
        if isinstance(value, basestring):
            value = value.lower()
            if value == 'true':
                return True
            if value == 'false':
                return False
        raise ValueError('Bad boolean value for %s: %s' % (key, value))

    output = {}
    for key, attr in schema['properties'].items():
        # Skip schema elements that don't exist in source.
        if key not in source_dict:
            if attr.get('optional') != 'true':
                raise ValueError('Missing required attribute: %s' % key)
            continue

        attr_type = attr['type']
        if attr_type not in JSON_TYPES:
            raise ValueError('Unsupported JSON type: %s' % attr_type)

        raw = source_dict[key]
        if attr_type == 'object':
            output[key] = json_to_dict(raw, attr)
        elif attr_type == 'date':
            output[key] = datetime.datetime.strptime(
                raw, JSON_DATE_FORMAT).date()
        elif attr_type == 'number':
            output[key] = float(raw)
        elif attr_type == 'boolean':
            output[key] = convert_bool(raw, key)
        elif attr_type == 'array':
            subschema = attr['items']
            output[key] = [json_to_dict(item, subschema) for item in raw]
        else:
            # Remaining types (string, text, html, integer) pass through.
            output[key] = raw
    return output
def entity_to_dict(entity, force_utf_8_encoding=False):
    """Puts model object attributes into a Python dictionary.

    Args:
        entity: a db model instance; ExportEntity instances additionally
            contribute their instance properties.
        force_utf_8_encoding: when True, string values are re-encoded as
            UTF-8; values that fail are wrapped in a base64 descriptor dict.

    Returns:
        dict of the entity's properties plus its key under 'key'.

    Raises:
        ValueError: when a property value has an unsupported type.
    """
    output = {}
    for_export = isinstance(entity, entities.ExportEntity)
    properties = entity.properties()
    if for_export:
        for name in entity.instance_properties():
            properties[name] = getattr(entity, name)
    for key, prop in properties.iteritems():
        value = getattr(entity, key)
        if value is None or isinstance(value, SIMPLE_TYPES) or isinstance(
            value, SUPPORTED_TYPES):
            output[key] = value
            # some values are raw bytes; force utf-8 or base64 encoding
            if force_utf_8_encoding and isinstance(value, basestring):
                try:
                    # NOTE: on a py2 byte string this implicitly decodes as
                    # ASCII first, so non-ASCII bytes raise UnicodeDecodeError.
                    output[key] = value.encode('utf-8')
                except UnicodeDecodeError:
                    output[key] = {
                        'type': 'binary',
                        'encoding': 'base64',
                        'content': base64.urlsafe_b64encode(value)}
        else:
            raise ValueError('Failed to encode: %s' % prop)

    # explicitly add entity key as a 'string' attribute
    output['key'] = str(entity.safe_key) if for_export else str(entity.key())

    if for_export:
        # The exported row already carries its safe key under 'key'.
        output.pop('safe_key')

    return output
def dict_to_entity(entity, source_dict):
    """Sets model object attributes from a Python dictionary.

    Args:
        entity: a db model instance to mutate.
        source_dict: dict of attribute name -> value.

    Returns:
        The same entity, for chaining.

    Raises:
        ValueError: when a value has an unsupported type.
    """
    for name, value in source_dict.items():
        if value is None or isinstance(value, SIMPLE_TYPES + SUPPORTED_TYPES):
            setattr(entity, name, value)
        else:
            raise ValueError('Failed to encode: %s' % value)
    return entity
def string_to_value(string, value_type):
    """Converts string representation to a value.

    Args:
        string: the stored string form of a setting.
        value_type: one of str, bool, int or long.

    Returns:
        The parsed value; empty strings map to '' or 0.

    Raises:
        ValueError: for an unsupported value_type.
    """
    if value_type == str:
        return string if string else ''
    if value_type == bool:
        # Only these exact spellings count as true.
        return string in ('1', 'True', 1)
    if value_type == int or value_type == long:
        return long(string) if string else 0
    raise ValueError('Unknown type: %s' % value_type)
def value_to_string(value, value_type):
    """Converts value to a string representation.

    Args:
        value: the value to render.
        value_type: one of str, bool, int or long.

    Returns:
        The string form of the value.

    Raises:
        ValueError: for an unsupported value_type.
    """
    if value_type == str:
        return value
    if value_type == bool:
        return 'True' if value else 'False'
    if value_type == int or value_type == long:
        return str(value)
    raise ValueError('Unknown type: %s' % value_type)
def dict_to_instance(adict, instance, defaults=None):
    """Populates instance attributes using data dictionary.

    Args:
        adict: dict of attribute name -> value.
        instance: object whose existing public attributes are overwritten.
        defaults: optional dict consulted when adict lacks an attribute.

    Raises:
        KeyError: when a public attribute is in neither adict nor defaults.
    """
    for name, unused_value in instance.__dict__.iteritems():
        # Private attributes (leading underscore) are left untouched.
        if name.startswith('_'):
            continue
        if name in adict:
            setattr(instance, name, adict[name])
        elif defaults and name in defaults:
            setattr(instance, name, defaults[name])
        else:
            raise KeyError(name)
def instance_to_dict(instance):
    """Populates data dictionary from instance attrs.

    Args:
        instance: object to read.

    Returns:
        dict mapping each public attribute name to its current value;
        private attributes (leading underscore) are skipped.
    """
    return dict(
        (attr, getattr(instance, attr))
        for attr, unused_value in instance.__dict__.iteritems()
        if not attr.startswith('_'))
def send_json_response(
    handler, status_code, message, payload_dict=None, xsrf_token=None):
    """Formats and sends out a JSON REST response envelope and body.

    Args:
        handler: the request handler whose response is written.
        status_code: int HTTP-style status placed in the envelope.
        message: human-readable status text.
        payload_dict: optional dict serialized into the 'payload' field.
        xsrf_token: optional token echoed back in the envelope.
    """
    headers = handler.response.headers
    headers['Content-Type'] = 'application/javascript; charset=utf-8'
    # Defend against content-sniffing and inline rendering of the response.
    headers['X-Content-Type-Options'] = 'nosniff'
    headers['Content-Disposition'] = 'attachment'
    response = {'status': status_code, 'message': message}
    if payload_dict:
        response['payload'] = dumps(payload_dict)
    if xsrf_token:
        response['xsrf_token'] = xsrf_token
    # The XSSI prefix must be stripped by the client before JSON parsing.
    handler.response.write(_JSON_XSSI_PREFIX + dumps(response))
def send_json_file_upload_response(handler, status_code, message):
    """Formats and sends out a JSON REST response envelope and body.

    NOTE: This method has lowered protections against XSSI (compared to
    send_json_response) and so it MUST NOT be used with dynamic data. Use ONLY
    constant data originating entirely on the server as arguments.

    Args:
        handler: the request handler.
        status_code: the HTTP status code for the response.
        message: the text of the message - must not be dynamic data.
    """
    # The correct MIME type for JSON is application/json but there are issues
    # with our AJAX file uploader in MSIE which require text/plain instead.
    # The user-agent header may be absent; treat that as non-MSIE rather than
    # raising TypeError on a None membership test.
    user_agent = handler.request.headers.get('user-agent') or ''
    if 'MSIE' in user_agent:
        content_type = 'text/plain; charset=utf-8'
    else:
        content_type = 'application/javascript; charset=utf-8'
    handler.response.headers['Content-Type'] = content_type
    handler.response.headers['X-Content-Type-Options'] = 'nosniff'
    response = {}
    response['status'] = status_code
    response['message'] = message
    handler.response.write(_JSON_XSSI_PREFIX + dumps(response))
class JsonFile(object):
    """A streaming file-ish interface for JSON content.

    Usage:

        writer = JsonFile('path')
        writer.open('w')
        writer.write(json_serializable_python_object)  # We serialize for you.
        writer.write(another_json_serializable_python_object)
        writer.close()  # Must close before read.
        reader = JsonFile('path')
        reader.open('r')  # Only 'r' and 'w' are supported.
        for entity in reader:
            do_something_with(entity)  # We deserialize back to Python for you.
        self.reader.reset()  # Reset read pointer to head.
        contents = self.reader.read()  # Returns {'rows': [...]}.
        for entity in contents['rows']:
            do_something_with(entity)  # Again, deserialized back to Python.
        reader.close()

    with syntax is not supported. Cannot be used inside the App Engine
    container where the filesystem is read-only.

    Internally, each call to write will take a Python object, serialize it, and
    write the contents as one line to the json file. On __iter__ we deserialize
    one line at a time, generator-style, to avoid OOM unless serialization/de-
    serialization of one object exhausts memory.
    """

    # When writing to files use \n instead of os.linesep; see
    # http://docs.python.org/2/library/os.html.
    _LINE_TEMPLATE = ',\n %s'
    _MODE_READ = 'r'
    _MODE_WRITE = 'w'
    _MODES = frozenset([_MODE_READ, _MODE_WRITE])
    # On disk the file reads '{"rows": [ <one JSON object per line> ]}'.
    _PREFIX = '{"rows": ['
    _SUFFIX = ']}'

    def __init__(self, path):
        # _first is True until the first write(); it controls whether the
        # row is emitted with a leading comma separator.
        self._first = True
        self._file = None
        self._path = path

    def __iter__(self):
        # Iteration protocol: the instance is its own iterator; see next().
        assert self._file
        return self

    def close(self):
        """Closes the file; must close before read."""
        assert self._file
        if not self._file.closed:  # Like file, allow multiple close calls.
            if self.mode == self._MODE_WRITE:
                # Terminate the JSON document so the file parses as a whole.
                self._file.write('\n' + self._SUFFIX)
            self._file.close()

    @property
    def mode(self):
        """Returns the mode the file was opened in."""
        assert self._file
        return self._file.mode

    @property
    def name(self):
        """Returns string name of the file."""
        assert self._file
        return self._file.name

    def next(self):
        """Retrieves the next line and deserializes it into a Python object."""
        assert self._file
        line = self._file.readline()
        # Skip the '{"rows": [' header line at the start of the file.
        if line.startswith(self._PREFIX):
            line = self._file.readline()
        # The ']}' trailer line marks end of data.
        if line.endswith(self._SUFFIX):
            raise StopIteration()
        line = line.strip()
        # Rows other than the last carry a trailing separator comma.
        if line.endswith(','):
            line = line[:-1]
        return loads(line)

    def open(self, mode):
        """Opens the file in the given mode string ('r, 'w' only)."""
        assert not self._file  # Re-opening an instance is not supported.
        assert mode in self._MODES
        self._file = open(self._path, mode)
        if self.mode == self._MODE_WRITE:
            self._file.write(self._PREFIX)

    def read(self):
        """Reads the file into a single Python object; may exhaust memory.

        Returns:
            dict. Format: {'rows': [...]} where the value is a list of de-
            serialized objects passed to write.
        """
        assert self._file
        return loads(self._file.read())

    def reset(self):
        """Resets file's position to head."""
        assert self._file
        self._file.seek(0)

    def write(self, python_object):
        """Writes serialized JSON representation of python_object to file.

        Args:
            python_object: object. Contents to write. Must be JSON-serializable.

        Raises:
            ValueError: if python_object cannot be JSON-serialized.
        """
        assert self._file
        template = self._LINE_TEMPLATE
        if self._first:
            # The first row is written without the leading comma.
            template = template[1:]
            self._first = False
        self._file.write(template % dumps(python_object))
def convert_dict_to_xml(element, python_object):
    """Recursively renders python_object into XML under element.

    dicts become child elements named after their keys; lists become a
    <list> of <item> children; anything else is first re-parsed as JSON if
    possible, otherwise rendered as the element's text.
    """
    if isinstance(python_object, dict):
        # dict.items is deliberately called unbound to match the original
        # lookup semantics for dict values.
        for child_name, child_value in dict.items(python_object):
            child = ElementTree.Element(child_name)
            element.append(child)
            convert_dict_to_xml(child, child_value)
    elif isinstance(python_object, list):
        list_element = ElementTree.Element('list')
        element.append(list_element)
        for item in python_object:
            item_element = ElementTree.Element('item')
            list_element.append(item_element)
            convert_dict_to_xml(item_element, item)
    else:
        try:
            convert_dict_to_xml(element, loads(python_object))
        except:  # pylint: disable-msg=bare-except
            # Not JSON (or not even a string): emit as text content.
            element.text = unicode(python_object)
def convert_json_rows_file_to_xml(json_fn, xml_fn):
    """To XML converter for JSON files created by JsonFile writer.

    Usage:
        convert_json_rows_file_to_xml('Student.json', 'Student.xml')

    Args:
        json_fn: filename of the JSON file (readable with JsonFile) to import.
        xml_fn: filename of the target XML file to export.

    The dict and list objects are unwrapped; all other types are converted to
    Unicode strings.
    """
    json_file = JsonFile(json_fn)
    json_file.open('r')
    try:
        # JsonFile does not support the 'with' statement, so close both
        # handles explicitly; previously neither was closed on error and
        # json_file was never closed at all.
        xml_file = open(xml_fn, 'w')
        try:
            xml_file.write('<rows>')
            for line in json_file:
                root = ElementTree.Element('row')
                convert_dict_to_xml(root, line)
                xml_file.write(ElementTree.tostring(root, encoding='utf-8'))
                xml_file.write('\n')
            xml_file.write('</rows>')
        finally:
            xml_file.close()
    finally:
        json_file.close()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages mapping of users to roles and roles to privileges."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import config
from google.appengine.api import users
# Delegated super-admin emails; each address is wrapped in square brackets,
# e.g. '[test@example.com]', and matched verbatim by Roles.is_super_admin().
GCB_ADMIN_LIST = config.ConfigProperty(
    'gcb_admin_user_emails', str, (
        'A list of email addresses for super-admin users. '
        'WARNING! Super-admin users have the highest level of access to your '
        'Google App Engine instance and to all data about all courses and '
        'students within that instance. Be very careful when modifying this '
        'property. Syntax: Surround each email address with [ and ]; for '
        'example, [test@example.com]. Separate the entries with either a new '
        'line or a space. Do not use regular expressions.'),
    '', multiline=True)

# Keys into the course environ dict consulted by Roles.is_course_admin().
KEY_COURSE = 'course'
KEY_ADMIN_USER_EMAILS = 'admin_user_emails'
class Roles(object):
    """A class that provides information about user roles."""

    @classmethod
    def is_direct_super_admin(cls):
        """Checks if current user is a super admin, without delegation."""
        return users.get_current_user() and users.is_current_user_admin()

    @classmethod
    def is_super_admin(cls):
        """Checks if current user is a super admin, possibly via delegation."""
        if cls.is_direct_super_admin():
            return True
        # Delegated admins are listed as '[email]' entries in GCB_ADMIN_LIST.
        user = users.get_current_user()
        if user and '[%s]' % user.email() in GCB_ADMIN_LIST.value:
            return True
        return False

    @classmethod
    def is_course_admin(cls, app_context):
        """Checks if a user is a course admin, possibly via delegation.

        Args:
            app_context: application context exposing get_environ().

        Returns:
            True if the current user is a super admin or is listed in the
            course's admin_user_emails setting; False otherwise.
        """
        if cls.is_super_admin():
            return True
        # Fetch the environ once instead of twice per call.
        environ = app_context.get_environ()
        if KEY_COURSE in environ:
            course_environ = environ[KEY_COURSE]
            if KEY_ADMIN_USER_EMAILS in course_environ:
                allowed = course_environ[KEY_ADMIN_USER_EMAILS]
                user = users.get_current_user()
                if allowed and user and '[%s]' % user.email() in allowed:
                    return True
        return False
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for processing text content."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from pyparsing import alphas
from pyparsing import Combine
from pyparsing import Each
from pyparsing import Group
from pyparsing import Literal
from pyparsing import nums
from pyparsing import Optional
from pyparsing import QuotedString
from pyparsing import Regex
from pyparsing import Suppress
from pyparsing import Word
from pyparsing import ZeroOrMore
from tools import verify
def sep(text):
    """Makes a separator: a literal that is matched but kept out of results."""
    literal = Literal(text)
    return Suppress(literal)
def key(name):
    """Makes grammar expression for a key: bare, single- or double-quoted."""
    bare = Literal(name)
    single_quoted = sep('\'') + Literal(name) + sep('\'')
    double_quoted = sep('"') + Literal(name) + sep('"')
    return bare ^ single_quoted ^ double_quoted
def list_of(term):
    """Makes a comma-delimited list of terms; empty and trailing comma OK."""
    separator = Suppress(Literal(','))
    trailing = Optional(Suppress(Literal(',')))
    return Optional(term + ZeroOrMore(separator + term) + trailing)
def chunks(l, n):
    """Partitions the list l into disjoint sub-lists of length n.

    Args:
        l: a sliceable sequence whose length is an exact multiple of n.
        n: positive chunk length.

    Returns:
        List of consecutive length-n slices of l.

    Raises:
        ValueError: if len(l) is not an exact multiple of n.
    """
    if len(l) % n != 0:
        # The original message was never interpolated (args tuple) and had a
        # typo ('multiple on'); format it properly.
        raise ValueError('List length is not a multiple of %s' % n)
    return [l[i:i + n] for i in range(0, len(l), n)]
def make_dict(unused_s, unused_l, toks):
    """Makes a dict from the list using even items as keys, odd as values."""
    pairs = chunks(toks, 2)
    return dict((pair[0], pair[1]) for pair in pairs)
def make_list(unused_s, unused_l, toks):
    """Makes a list out of a token tuple holding a list."""
    return [item.asList() for item in toks]
def make_bool(value):
    """Makes a zero-arg callable producing a verify.Term boolean wrapper."""
    return lambda: verify.Term(verify.BOOLEAN, value)
def make_int(value):
    """Converts the first parsed token to an int (parse action)."""
    # Previous docstring claimed this returns a lambda; it returns the value.
    return int(value[0])
def make_float(value):
    """Converts the first parsed token to a float (parse action)."""
    # Previous docstring claimed this returns a lambda; it returns the value.
    return float(value[0])
class AssessmentParser13(object):
    """Grammar and parser for the assessment."""

    # A single- or double-quoted string; backslash escapes, multiline allowed.
    string = (
        QuotedString('\'', escChar='\\', multiline=True) ^
        QuotedString('"', escChar='\\', multiline=True))

    # JavaScript booleans; each parses to a verify.Term(BOOLEAN, ...) value.
    boolean = (
        Literal('true').setParseAction(make_bool(True)) ^
        Literal('false').setParseAction(make_bool(False)))

    # Digits with optional fractional part; always converted to Python float.
    # NOTE: intentionally shadows the builtin name within this class body.
    float = Combine(
        Word(nums) + Optional(Literal('.') + Word(nums))
    ).setParseAction(make_float)

    integer = Word(nums).setParseAction(make_int)

    # A choice is a plain string, or correct("...") marking the right answer.
    choice_decl = (
        string ^
        Combine(
            sep('correct(') + string + sep(')')
        ).setParseAction(lambda x: verify.Term(verify.CORRECT, x[0]))
    )

    # A regex in /.../i literal form or regex("...") call form.
    regex = (
        Regex('/(.*)/i') ^
        Combine(
            sep('regex(') +
            QuotedString('"', escChar='\\') +
            sep(')')
        ).setParseAction(lambda x: verify.Term(verify.REGEX, x[0]))
    )

    # One question object; Each() allows the keys in any order, all optional.
    question_decl = (
        sep('{') +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('lesson') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('correctAnswerString') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('correctAnswerRegex') + sep(':') +
                regex + Optional(sep(','))) +
            Optional(
                key('correctAnswerNumeric') + sep(':') +
                float + Optional(sep(','))) +
            Optional(
                key('choiceScores') + sep(':') +
                sep('[') +
                Group(list_of(float)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(','))) +
            Optional(
                key('weight') + sep(':') + integer + Optional(sep(','))) +
            Optional(
                key('multiLine') + sep(':') +
                boolean + Optional(sep(','))) +
            Optional(
                key('choices') + sep(':') +
                sep('[') +
                Group(list_of(choice_decl)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(',')))
        ) +
        sep('}')).setParseAction(make_dict)

    # Top-level 'assessment = { ... };' statement.
    assessment_grammar = (
        sep('assessment') +
        sep('=') +
        sep('{') +
        Each(
            Optional(
                key('assessmentName') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('preamble') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('checkAnswers') + sep(':') +
                boolean + Optional(sep(','))) +
            Optional(
                key('questionsList') + sep(':') +
                sep('[') +
                Group(list_of(question_decl)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(',')))
        ) +
        sep('}') +
        Optional(sep(';'))).setParseAction(make_dict)

    @classmethod
    def parse_string(cls, content):
        """Parses assessment source text into pyparsing results."""
        return cls.assessment_grammar.parseString(content)

    @classmethod
    def parse_string_in_scope(cls, content, scope, root_name):
        """Parses assessment text following grammar."""
        if 'assessment' != root_name:
            raise Exception('Unsupported schema: %s', root_name)
        # we need to extract the results as a dictionary; so we remove the
        # outer array holding it
        ast = cls.parse_string(content).asList()
        if len(ast) == 1:
            ast = ast[0]
        # Merge caller scope, an empty builtins table and the parsed AST.
        return dict(
            scope.items() +
            {'__builtins__': {}}.items() +
            {root_name: ast}.items())
class ActivityParser13(object):
    """Grammar and parser for the activity."""

    variable = Word(alphas)
    integer = Word(nums).setParseAction(make_int)
    # A single- or double-quoted string; backslash escapes, multiline allowed.
    string = (
        QuotedString('\'', escChar='\\', multiline=True) ^
        QuotedString('"', escChar='\\', multiline=True))
    # JavaScript booleans; each parses to a verify.Term(BOOLEAN, ...) value.
    boolean = (
        Literal('true').setParseAction(make_bool(True)) ^
        Literal('false').setParseAction(make_bool(False)))
    # A regex in /.../i literal form or regex("...") call form.
    regex = (
        Regex('/(.*)/i') ^
        Combine(
            sep('regex(') +
            QuotedString('"', escChar='\\') +
            sep(')')
        ).setParseAction(lambda x: verify.Term(verify.REGEX, x[0]))
    )

    # One choice triple: [text, is_correct, feedback].
    choice_decl = Group(
        sep('[') +
        string + sep(',') +
        boolean + sep(',') +
        string +
        sep(']')
    )
    choices_decl = Group(
        sep('[') +
        Optional(list_of(choice_decl)) +
        sep(']')
    ).setParseAction(make_list)

    # questionType discriminator for 'multiple choice' questions.
    multiple_choice_decl = (
        key('questionType') + sep(':') + key('multiple choice') +
        Optional(sep(','))
    )
    multiple_choice = (
        sep('{') +
        multiple_choice_decl +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('choices') + sep(':') +
                choices_decl + Optional(sep(',')))
        ) +
        sep('}')
    ).setParseAction(make_dict)

    # questionType discriminator for 'freetext' questions.
    free_text_decl = (
        key('questionType') + sep(':') + key('freetext') +
        Optional(sep(','))
    )
    free_text = (
        sep('{') +
        free_text_decl +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('correctAnswerRegex') + sep(':') +
                regex + Optional(sep(','))) +
            Optional(
                key('correctAnswerOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('incorrectAnswerOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('showAnswerPrompt') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('showAnswerOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('outputHeight') + sep(':') +
                string + Optional(sep(',')))
        ) +
        sep('}')
    ).setParseAction(make_dict)

    # One question inside a 'multiple choice group'; correctIndex may be a
    # single integer or a list of integers.
    question_list_decl = (
        sep('{') +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('choices') + sep(':') +
                sep('[') +
                Group(list_of(string)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(','))) +
            Optional(
                key('correctIndex') + sep(':') +
                (integer ^ (
                    sep('[') +
                    Group(list_of(integer)).setParseAction(make_list) +
                    sep(']'))) +
                Optional(sep(','))) +
            Optional(
                key('multiSelect') + sep(':') +
                boolean + Optional(sep(','))),
        ) +
        sep('}')).setParseAction(make_dict)

    questions_list_decl = Group(
        sep('[') +
        Optional(list_of(question_list_decl)) +
        sep(']')
    ).setParseAction(make_list)

    # questionType discriminator for 'multiple choice group' questions.
    multiple_choice_group_decl = (
        key('questionType') + sep(':') + key('multiple choice group') +
        Optional(sep(','))
    )
    multiple_choice_group = (
        sep('{') +
        multiple_choice_group_decl +
        Each(
            Optional(
                key('questionGroupHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('allCorrectMinCount') + sep(':') +
                integer + Optional(sep(','))) +
            Optional(
                key('allCorrectOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('someIncorrectOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('questionsList') + sep(':') +
                questions_list_decl + Optional(sep(',')))
        ) +
        sep('}')
    ).setParseAction(make_dict)

    # Top-level 'activity = [ ... ];' statement: a list of HTML strings and
    # question objects of the three supported kinds.
    activity_grammar = (
        sep('activity') +
        sep('=') +
        sep('[') +
        Optional(list_of(
            string ^ multiple_choice ^ free_text ^ multiple_choice_group)) +
        sep(']') +
        Optional(sep(';')))

    @classmethod
    def parse_string(cls, content):
        """Parses activity source text into pyparsing results."""
        return cls.activity_grammar.parseString(content)

    @classmethod
    def parse_string_in_scope(cls, content, scope, root_name):
        """Parses activity text following grammar."""
        if 'activity' != root_name:
            raise Exception('Unsupported schema: %s', root_name)
        # Merge caller scope, an empty builtins table and the parsed AST list.
        return dict(
            scope.items() +
            {'__builtins__': {}}.items() +
            {root_name: cls.parse_string(content).asList()}.items())
# Here we register all the supported parsers, keyed by root symbol name.
SUPPORTED_PARSERS = {
    'activity': ActivityParser13, 'assessment': AssessmentParser13}
def verify_activity(activity_text):
    """Parses and semantically verifies activity."""
    scope = verify.Activity().scope
    activity = ActivityParser13.parse_string_in_scope(
        activity_text, scope, 'activity')
    assert activity
    verify.Verifier().verify_activity_instance(activity, 'test')
def verify_assessment(assessment_text):
    """Parses and semantically verifies assessment."""
    scope = verify.Assessment().scope
    assessment = AssessmentParser13.parse_string_in_scope(
        assessment_text, scope, 'assessment')
    assert assessment
    verify.Verifier().verify_assessment_instance(assessment, 'test')
def parse_string_in_scope(content, scope, root_name):
    """Dispatches parsing to the parser registered for root_name.

    Args:
        content: source text to parse.
        scope: dict of names merged into the parser's result scope.
        root_name: 'activity' or 'assessment'.

    Returns:
        dict combining scope with the parsed AST under root_name.

    Raises:
        Exception: if no parser is registered for root_name.
    """
    parser = SUPPORTED_PARSERS.get(root_name)
    if not parser:
        # Previously the message was never interpolated (args-tuple style).
        raise Exception('Unsupported schema: %s' % root_name)
    return parser.parse_string_in_scope(content, scope, root_name)
def test_activity_multiple_choice_group():
    """Tests parsing/verification of 'multiple choice group' activities."""
    activity_text = (
        """activity = [
'<p>This is text.</p>',
{
questionType: 'multiple choice group',
questionGroupHTML: '<p>This is text.</p>',
allCorrectMinCount: 55,
allCorrectOutput: '<p>This is text.</p>',
someIncorrectOutput: '<p>This is text.</p>',
questionsList: [
{questionHTML: '<p>This is text.</p>'},
{correctIndex: [1, 2, 3]},
{questionHTML: '<p>This is text.</p>',
correctIndex: 0, multiSelect: false,
choices: ['foo', 'bar'],},
]
},
{
"questionType": 'multiple choice group',
questionGroupHTML:
'<p>This section will test you on colors and numbers.</p>',
questionsList: [
{questionHTML: 'Pick all <i>odd</i> numbers:',
choices: ['1', '2', '3', '4', '5'], correctIndex: [0, 2, 4]},
{questionHTML: 'Pick one <i>even</i> number:',
choices: ['1', '2', '3', '4', '5'], correctIndex: [1, 3],
multiSelect: false},
{questionHTML: 'What color is the sky?',
choices: ['#00FF00', '#00FF00', '#0000FF'], correctIndex: 2}
],
allCorrectMinCount: 2,
allCorrectOutput: 'Great job! You know the material well.',
someIncorrectOutput: 'You must answer at least two questions correctly.'
}
];
""")
    verify_activity(activity_text)
def test_activity_multiple_choice():
    """Tests parsing/verification of a 'multiple choice' activity."""
    activity_text = (
        """activity = [
'<p>This is text.</p>',
{
questionType: 'multiple choice',
questionHTML: '<p>This is text.</p>',
choices: [
['<p>This is text.</p>', false, '<p>This is text.</p>'],
['<p>This is text.</p>', true, '<p>This is text.</p>'],
]
}
];
""")
    verify_activity(activity_text)
def test_activity_free_text():
    """Tests parsing/verification of 'freetext' activity questions."""
    activity_text = (
        """activity = [
'<p>This is text.</p>',
{
'questionType': 'freetext',
questionHTML: '<p>This is text.</p>',
showAnswerPrompt: '<p>This is text.</p>',
showAnswerOutput: '<p>This is text.</p>',
correctAnswerRegex: regex("/4|four/i"),
correctAnswerOutput: '<p>This is text.</p>',
incorrectAnswerOutput: '<p>This is text.</p>',
},
{
questionType: 'freetext',
questionHTML: '<p>What color is the snow?</p>',
correctAnswerRegex: regex("/white/i"),
correctAnswerOutput: 'Correct!',
incorrectAnswerOutput: 'Try again.',
showAnswerOutput: 'Our search expert says: white!' },
];
""")
    verify_activity(activity_text)
def test_assessment():
    """Tests parsing/verification of an assessment covering all key kinds."""
    # pylint: disable-msg=anomalous-backslash-in-string
    assessment_text = (
        """assessment = {
assessmentName: '12345',
preamble: '<p>This is text.</p>',
checkAnswers: false,
questionsList: [
{questionHTML: '<p>This is text.</p>',
choices:
["A and B", "D and B", correct("A and C"), "C and D", "I don't know"]
},
{questionHTML: '<p>This is text.</p>',
choiceScores: [0, 0.5, 1.0],
weight: 3,
choices: [correct("True"), "False", "I don't know"]
},
{questionHTML: '<p>This is text.</p>',
correctAnswerString: 'sunrise',
correctAnswerNumeric: 7.9
},
{questionHTML: '<p>This is text.</p>',
correctAnswerNumeric: 7,
correctAnswerRegex: regex("/354\s*[+]\s*651/")
}
],
};
""")
    # pylint: enable-msg=anomalous-backslash-in-string
    verify_assessment(assessment_text)
def test_activity_ast():
    """Test a mix of various activities using legacy and new parser."""
    activity_text = (
        """activity = [
'<p>This is just some <i>HTML</i> text!</p>',
{ questionType: 'multiple choice',
questionHTML: '<p>What letter am I thinking about now?</p>',
choices: [
['A', false, '"A" is wrong, try again.'],
['B', true, '"B" is correct!'],
['C', false, '"C" is wrong, try again.'],
['D', false, '"D" is wrong, try again.']
]
},
{ questionType: 'freetext',
questionHTML: '<p>What color is the snow?</p>',
correctAnswerRegex: regex("/white/i"),
correctAnswerOutput: 'Correct!',
incorrectAnswerOutput: 'Try again.',
showAnswerOutput: 'Our search expert says: white!' },
{ questionType: 'multiple choice group',
questionGroupHTML:
'<p>This section will test you on colors and numbers.</p>',
allCorrectMinCount: 2,
questionsList: [
{questionHTML: 'Pick all <i>odd</i> numbers:',
choices: ['1', '2', '3', '4', '5'], correctIndex: [0, 2, 4]},
{questionHTML: 'Pick one <i>even</i> number:',
choices: ['1', '2', '3', '4', '5'], correctIndex: [1, 3],
multiSelect: false},
{questionHTML: 'What color is the sky?',
choices: ['#00FF00', '#00FF00', '#0000FF'], correctIndex: 2}
],
allCorrectOutput: 'Great job! You know the material well.',
someIncorrectOutput: 'You must answer at least two questions correctly.'
}
];
""")
    verify_activity(activity_text)
    # Parse with the new pyparsing grammar and with the legacy eval-based
    # loader; the two ASTs must be identical.
    scope = verify.Activity().scope
    current_ast = ActivityParser13.parse_string_in_scope(
        activity_text, scope, 'activity')
    expected_ast = verify.legacy_eval_python_expression_for_test(
        activity_text, scope, 'activity')
    same = (
        len(current_ast.get('activity')) == 4 and
        current_ast.get('activity') == expected_ast.get('activity') and
        current_ast == expected_ast)
    if not same:
        # Dump both ASTs to aid debugging before failing the assertion.
        import pprint  # # pylint: disable-msg=g-import-not-at-top
        pprint.pprint(current_ast.get('activity'))
        pprint.pprint(expected_ast.get('activity'))
    assert same
def test_assessment_ast():
    """Test a mix of various activities using legacy and new parser."""
    # pylint: disable-msg=anomalous-backslash-in-string
    assessment_text = (
        """assessment = {
preamble: '<p>This is text.</p>',
questionsList: [
{'questionHTML': '<p>This is text.</p>',
choices:
["A and B", "D and B", correct("A and C"), "C and D", "I don't know"]
},
{"questionHTML": '<p>This is text.</p>',
choices: [correct("True"), "False", "I don't know"],
choiceScores: [0, 0.5, 1.0],
weight: 3
},
{questionHTML: '<p>This is text.</p>',
correctAnswerString: 'sunrise'
},
{questionHTML: '<p>This is text.</p>',
correctAnswerRegex: regex("/354\s*[+]\s*651/")
}
],
assessmentName: 'Pre',
checkAnswers: false
}
""")
    # pylint: enable-msg=anomalous-backslash-in-string
    verify_assessment(assessment_text)
    # Parse with both the pyparsing grammar and the legacy eval-based loader;
    # the resulting ASTs must match exactly.
    scope = verify.Assessment().scope
    current_ast = AssessmentParser13.parse_string_in_scope(
        assessment_text, scope, 'assessment')
    expected_ast = verify.legacy_eval_python_expression_for_test(
        assessment_text, scope, 'assessment')
    same = (
        len(current_ast.get('assessment')) == 4 and
        len(current_ast.get('assessment').get('questionsList')) == 4 and
        current_ast.get('assessment') == expected_ast.get('assessment') and
        current_ast == expected_ast)
    if not same:
        # Dump both ASTs to aid debugging before failing the assertion.
        import pprint  # # pylint: disable-msg=g-import-not-at-top
        pprint.pprint(current_ast.get('assessment'))
        pprint.pprint(expected_ast.get('assessment'))
    assert same
def test_list_of():
    """Test delimited list."""
    grammar = Optional(
        Literal('[') +
        Optional(list_of(Literal('a') ^ Literal('b'))) +
        Literal(']'))
    # Empty lists and trailing commas are accepted.
    assert str(['[', ']']) == str(grammar.parseString('[]'))
    assert str(['[', 'a', ']']) == str(grammar.parseString('[a]'))
    assert str(['[', 'b', ']']) == str(grammar.parseString('[b]'))
    assert str(['[', 'a', ']']) == str(grammar.parseString('[a,]'))
    assert str(['[', 'b', ']']) == str(grammar.parseString('[b,]'))
    assert str(['[', 'a', 'a', 'a', 'a', ']']) == str(
        grammar.parseString('[a, a, a, a]'))
    assert str(['[', 'a', 'a', 'a', 'a', ']']) == str(
        grammar.parseString('[a,a,a,a]'))
    assert str(['[', 'a', 'a', 'a', 'a', ']']) == str(
        grammar.parseString('[a,a,a,a,]'))
    assert str(['[', 'a', 'b', 'a', 'b', ']']) == str(
        grammar.parseString('[a,b,a,b]'))
    assert str(['[', 'b', 'a', 'b', 'a', ']']) == str(
        grammar.parseString('[b,a,b,a]'))
    assert str(['[', 'b', 'b', 'b', 'b', ']']) == str(
        grammar.parseString('[b,b,b,b]'))
    # Unparsable inputs yield an empty (falsy) result rather than raising,
    # because the whole grammar is wrapped in Optional.
    assert not grammar.parseString('')
    assert not grammar.parseString('[c]')
    assert not grammar.parseString('[a,c,b]')
def run_all_unit_tests():
    """Run all unit tests."""
    # Temporarily route verify.parse_content through the pyparsing-based
    # dispatcher so the verifier tests below exercise the new parsing path.
    original = verify.parse_content
    try:
        verify.parse_content = parse_string_in_scope
        test_list_of()
        test_activity_multiple_choice()
        test_activity_free_text()
        test_activity_multiple_choice_group()
        test_activity_ast()
        test_assessment()
        test_assessment_ast()
        # test existing verifier using parsing instead of exec/compile
        verify.test_sample_assets()
    finally:
        # Always restore the original hook, even when a test fails.
        verify.parse_content = original
# Allow running this module's self-tests directly from the command line.
if __name__ == '__main__':
    run_all_unit_tests()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models and helper utilities for the review workflow."""
__author__ = [
'johncox@google.com (John Cox)',
'sll@google.com (Sean Lip)',
]
import entities
import models
import transforms
from google.appengine.ext import db
class KeyProperty(db.StringProperty):
    """A property that stores a datastore key.

    App Engine's db.ReferenceProperty is dangerous because accessing a
    ReferenceProperty on a model instance implicitly causes an RPC. We always
    want to know about and be in control of our RPCs, so we use this property
    instead, store a key, and manually make datastore calls when necessary.
    This is analogous to the approach ndb takes, and it also allows us to do
    validation against a key's kind (see __init__).

    Keys are stored as indexed strings internally. Usage:

        class Foo(db.Model):
            pass

        class Bar(db.Model):
            foo_key = KeyProperty(kind=Foo)  # Validates key is of kind 'Foo'.

        foo_key = Foo().put()
        bar = Bar(foo_key=foo_key)
        bar_key = bar.put()
        foo = db.get(bar.foo_key)
    """

    def __init__(self, *args, **kwargs):
        """Constructs a new KeyProperty.

        Args:
            *args: positional arguments passed to superclass.
            **kwargs: keyword arguments passed to superclass. Additionally may
                contain kind, which if passed will be a string used to validate
                key kind. If omitted, any kind is considered valid.
        """
        # Pop 'kind' before delegating; db.StringProperty does not accept it.
        kind = kwargs.pop('kind', None)
        super(KeyProperty, self).__init__(*args, **kwargs)
        self._kind = kind

    def validate(self, value):
        """Validates passed db.Key value, validating kind passed to ctor."""
        # NOTE(review): the parent validator receives str(value), so a None
        # value reaches it as the non-empty string 'None' -- confirm this is
        # the intended way of satisfying a required=True parent check.
        super(KeyProperty, self).validate(str(value))
        if value is None:  # Nones are valid iff they pass the parent validator.
            return value
        if not isinstance(value, db.Key):
            raise db.BadValueError(
                'Value must be of type db.Key; got %s' % type(value))
        if self._kind and value.kind() != self._kind:
            raise db.BadValueError(
                'Key must be of kind %s; was %s' % (self._kind, value.kind()))
        return value
# For many classes we define both a _DomainObject subclass and a db.Model.
# When possible it is best to use the domain object, since db.Model carries with
# it the datastore API and allows clients to bypass business logic by making
# direct datastore calls.
class BaseEntity(entities.BaseEntity):
    """Abstract base entity for models related to reviews."""

    @classmethod
    def key_name(cls):
        """Returns a key_name for use with cls's constructor."""
        raise NotImplementedError

    @classmethod
    def _split_key(cls, key_name):
        """Takes a key_name and returns its components.

        Drops all parentheses, then splits on ':', e.g.
        '(a:b:(c:d:(e:f:g)):h)' -> ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'].
        """
        flattened = key_name.replace('(', '')
        flattened = flattened.replace(')', '')
        return flattened.split(':')
class Review(BaseEntity):
    """Datastore model for a student review of a Submission."""

    # Contents of the student's review. Max size is 1MB.
    contents = db.TextProperty()
    # Key of the student whose work is being reviewed.
    reviewee_key = KeyProperty(kind=models.Student.kind())
    # Key of the Student who wrote this review.
    reviewer_key = KeyProperty(kind=models.Student.kind())
    # Identifier of the unit this review is a part of.
    unit_id = db.StringProperty(required=True)

    def __init__(self, *args, **kwargs):
        """Constructs a new Review; the key_name is always derived here."""
        assert not kwargs.get('key_name'), (
            'Setting key_name manually is not supported')
        reviewee_key = kwargs.get('reviewee_key')
        reviewer_key = kwargs.get('reviewer_key')
        unit_id = kwargs.get('unit_id')
        assert reviewee_key, 'Missing required property: reviewee_key'
        assert reviewer_key, 'Missing required property: reviewer_key'
        assert unit_id, 'Missing required_property: unit_id'
        # The key_name is deterministic in (unit_id, reviewee, reviewer).
        kwargs['key_name'] = self.key_name(unit_id, reviewee_key, reviewer_key)
        super(Review, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, unit_id, reviewee_key, reviewer_key):
        """Creates a key_name string for datastore operations.

        In order to work with the review subsystem, entities must have a key
        name populated from this method.

        Args:
            unit_id: string. The id of the unit this review belongs to.
            reviewee_key: db.Key of models.models.Student. The student whose
                work is being reviewed.
            reviewer_key: db.Key of models.models.Student. The author of this
                review.

        Returns:
            String.
        """
        return '(review:%s:%s:%s)' % (unit_id, reviewee_key, reviewer_key)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Returns a copy of db_key with embedded Student keys transformed."""
        _, unit_id, reviewee_key_str, reviewer_key_str = cls._split_key(
            db_key.name())
        reviewee_key = db.Key(encoded=reviewee_key_str)
        reviewer_key = db.Key(encoded=reviewer_key_str)
        safe_reviewee_key = models.Student.safe_key(reviewee_key, transform_fn)
        safe_reviewer_key = models.Student.safe_key(reviewer_key, transform_fn)
        return db.Key.from_path(
            cls.kind(),
            cls.key_name(unit_id, safe_reviewee_key, safe_reviewer_key))

    def for_export(self, transform_fn):
        """Returns an export-safe copy with Student keys anonymized."""
        model = super(Review, self).for_export(transform_fn)
        model.reviewee_key = models.Student.safe_key(
            model.reviewee_key, transform_fn)
        model.reviewer_key = models.Student.safe_key(
            model.reviewer_key, transform_fn)
        return model
class Submission(BaseEntity):
    """Datastore model for a student work submission.

    Keyed by (unit_id, student), so each student has at most one submission
    per unit; write() overwrites any previous submission for the same pair.
    """

    # Contents of the student submission, as a JSON string. Max size is 1MB.
    contents = db.TextProperty()
    # Key of the Student who wrote this submission.
    reviewee_key = KeyProperty(kind=models.Student.kind())
    # Identifier of the unit this submission is a part of.
    unit_id = db.StringProperty(required=True)

    def __init__(self, *args, **kwargs):
        """Constructs a new Submission.

        The key_name is always derived from unit_id and reviewee_key, so
        callers may not pass key_name themselves.

        Raises:
            AssertionError: if key_name is passed, or if reviewee_key or
                unit_id is missing/falsy.
        """
        assert not kwargs.get('key_name'), (
            'Setting key_name manually is not supported')
        reviewee_key = kwargs.get('reviewee_key')
        unit_id = kwargs.get('unit_id')
        assert reviewee_key, 'Missing required property: reviewee_key'
        # Typo fix: message previously read 'required_property'.
        assert unit_id, 'Missing required property: unit_id'
        kwargs['key_name'] = self.key_name(unit_id, reviewee_key)
        super(Submission, self).__init__(*args, **kwargs)

    @classmethod
    def _get_student_key(cls, value):
        """Builds a Student db.Key from an id-or-name string."""
        return db.Key.from_path(models.Student.kind(), value)

    @classmethod
    def key_name(cls, unit_id, reviewee_key):
        """Creates a key_name string for datastore operations.

        In order to work with the review subsystem, entities must have a key
        name populated from this method.

        Args:
            unit_id: string. The id of the unit this submission belongs to.
            reviewee_key: db.Key of models.models.Student. The author of
                the submission.

        Returns:
            String.
        """
        return '(submission:%s:%s)' % (unit_id, reviewee_key.id_or_name())

    @classmethod
    def get_key(cls, unit_id, reviewee_key):
        """Returns a db.Key for a submission."""
        return db.Key.from_path(
            cls.kind(), cls.key_name(unit_id, reviewee_key))

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Returns a copy of db_key with the student identifier transformed."""
        _, unit_id, student_key_str = cls._split_key(db_key.name())
        student_key = db.Key.from_path(models.Student.kind(), student_key_str)
        safe_student_key = models.Student.safe_key(student_key, transform_fn)
        return db.Key.from_path(
            cls.kind(), cls.key_name(unit_id, safe_student_key))

    @classmethod
    def write(cls, unit_id, reviewee_key, contents):
        """Updates or creates a student submission, and returns the key.

        Args:
            unit_id: string. The id of the unit this submission belongs to.
            reviewee_key: db.Key of models.models.Student. The author of the
                submission.
            contents: object. The contents of the submission, as a Python
                object. This will be JSON-transformed before it is stored.

        Returns:
            db.Key of Submission.
        """
        return cls(
            unit_id=str(unit_id), reviewee_key=reviewee_key,
            contents=transforms.dumps(contents)
        ).put()

    @classmethod
    def get_contents(cls, unit_id, reviewee_key):
        """Returns the de-JSONified contents of a submission."""
        submission_key = cls.get_key(unit_id, reviewee_key)
        return cls.get_contents_by_key(submission_key)

    @classmethod
    def get_contents_by_key(cls, submission_key):
        """Returns the contents of a submission, or None if none exists."""
        submission = entities.get(submission_key)
        return transforms.loads(submission.contents) if submission else None

    def for_export(self, transform_fn):
        """Returns an ExportEntity with the student key made export-safe."""
        model = super(Submission, self).for_export(transform_fn)
        model.reviewee_key = models.Student.safe_key(
            model.reviewee_key, transform_fn)
        return model
class StudentWorkUtils(object):
    """Helpers for processing student work objects."""

    @classmethod
    def get_answer_list(cls, submission):
        """Extracts the ordered list of answer values from a submission.

        Args:
            submission: list of {'index': int, 'value': object} dicts, or a
                falsy value when there is no submission.

        Returns:
            list. The 'value' entries in index order; [] if no submission.
        """
        if not submission:
            return []
        answers = []
        for position, item in enumerate(submission):
            # Each entry must carry its own position; guard against gaps
            # or reordering in the stored payload.
            assert item['index'] == position
            answers.append(item['value'])
        return answers
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for managing persistent entities."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from counters import PerfCounter
from google.appengine.ext import db
# Datastore performance counters, incremented by the wrapper functions and
# BaseEntity overrides below.
DB_QUERY = PerfCounter(
    'gcb-models-db-query',
    'A number of times a query()/all() was executed on a datastore.')
DB_GET = PerfCounter(
    'gcb-models-db-get',
    'A number of times an object was fetched from datastore.')
DB_PUT = PerfCounter(
    'gcb-models-db-put',
    'A number of times an object was put into datastore.')
DB_DELETE = PerfCounter(
    'gcb-models-db-delete',
    'A number of times an object was deleted from datastore.')

# String. Name of the safe key property used for data export.
SAFE_KEY_NAME = 'safe_key'
def delete(keys):
    """Wrapper around db.delete that counts entities we attempted to delete.

    (Docstring fixed: it previously said 'get', copy-pasted from get().)

    Args:
        keys: db.Key or list of db.Key. Key(s) to delete.

    Returns:
        Result of db.delete(keys).
    """
    DB_DELETE.inc(increment=_count(keys))
    return db.delete(keys)
def get(keys):
    """Counting wrapper over db.get; bumps DB_GET by the batch size."""
    batch_size = _count(keys)
    DB_GET.inc(increment=batch_size)
    return db.get(keys)
def put(keys):
    """Counting wrapper over db.put; bumps DB_PUT by the batch size."""
    batch_size = _count(keys)
    DB_PUT.inc(increment=batch_size)
    return db.put(keys)
def _count(keys):
# App engine accepts key or list of key; count entities found.
return len(keys) if isinstance(keys, (list, tuple)) else 1
class ExportEntity(db.Expando):
    """An entity instantiated, but never saved; for data export only.

    Will not work with the webapp. Instances are produced by
    BaseEntity.for_export() and must be created with a 'safe_key' property.
    """

    def __init__(self, *args, **kwargs):
        # Every export copy must carry a safe_key; see BaseEntity.for_export().
        assert kwargs.get(SAFE_KEY_NAME)
        super(ExportEntity, self).__init__(*args, **kwargs)

    def get(self):
        # Datastore reads are deliberately disabled for export-only objects.
        raise NotImplementedError

    def put(self):
        # Datastore writes are deliberately disabled for export-only objects.
        raise NotImplementedError
class BaseEntity(db.Model):
    """A common class to all datastore entities.

    Routes db.Model CRUD entry points through the module performance counters
    and defines the export contract (safe_key/for_export) used by
    tools/etl.py.
    """

    # List of db.Property. The properties on this model that should be purged
    # before export via tools/etl.py because they contain private information
    # about a user. For fields that must be transformed rather than purged, see
    # BaseEntity.for_export().
    _PROPERTY_EXPORT_BLACKLIST = []

    @classmethod
    def all(cls, **kwds):
        # Counts one query construction, regardless of how many entities the
        # query later returns.
        DB_QUERY.inc()
        return super(BaseEntity, cls).all(**kwds)

    @classmethod
    def get(cls, keys):
        # NOTE: counts one get per call, even for a batch of keys (unlike the
        # module-level get(), which counts the batch size).
        DB_GET.inc()
        return super(BaseEntity, cls).get(keys)

    @classmethod
    def get_by_key_name(cls, key_names):
        DB_GET.inc()
        return super(BaseEntity, cls).get_by_key_name(key_names)

    @classmethod
    def safe_key(cls, db_key, unused_transform_fn):
        """Creates a copy of db_key that is safe for export.

        Keys may contain sensitive user data, like the user_id of a users.User.
        This method takes a db_key for an entity that is the same kind as cls.
        It returns a new instance of a key for that same kind with any sensitive
        data irreversibly transformed.

        The suggested irreversible transformation is cls.hash. The
        transformation must take a value and a client-defined secret. It must be
        deterministic and nonreversible.

        Args:
            db_key: db.Key of the same kind as cls. Key containing original
                values.
            unused_transform_fn: function that takes a single argument castable
                to string and returns a transformed string of that user data
                that is safe for export. If no user data is sensitive, the
                identity transform should be used. Used in subclass
                implementations.

        Returns:
            db.Key of the same kind as cls with sensitive data irreversibly
            transformed.
        """
        assert cls.kind() == db_key.kind()
        # Base implementation assumes no sensitive data; subclasses override.
        return db_key

    def _get_export_blacklist(self):
        """Collapses all _PROPERTY_EXPORT_BLACKLISTs in the class hierarchy."""
        blacklist = []
        for klass in self.__class__.__mro__:
            if hasattr(klass, '_PROPERTY_EXPORT_BLACKLIST'):
                # Treat as module-protected.
                # pylint: disable-msg=protected-access
                blacklist.extend(klass._PROPERTY_EXPORT_BLACKLIST)
        # De-duplicate entries shared across the hierarchy.
        return sorted(set(blacklist))

    def put(self):
        DB_PUT.inc()
        return super(BaseEntity, self).put()

    def delete(self):
        DB_DELETE.inc()
        super(BaseEntity, self).delete()

    def for_export(self, transform_fn):
        """Creates an ExportEntity populated from this entity instance.

        This method is called during export via tools/etl.py to make an entity
        instance safe for export via tools/etl.py when --privacy is passed. For
        this to obtain,

        1) Properties that need to be purged must be deleted from the instance.
           Subclasses can set these fields in _PROPERTY_EXPORT_BLACKLIST.
        2) Properties that need to be transformed should be modified in subclass
           implementations. In particular, properties with customizable JSON
           contents often need to be handled this way.

        Args:
            transform_fn: function that takes a single argument castable to
                string and returns a transformed string of that user data that
                is safe for export. If no user data is sensitive, the identity
                transform should be used.

        Returns:
            ExportEntity populated with the fields from self, plus a new field
            called 'safe_key', containing a string representation of the value
            returned by cls.safe_key().
        """
        properties = {}
        # key is a reserved property and cannot be mutated; write to safe_key
        # instead, but refuse to handle entities that set safe_key themselves.
        # TODO(johncox): reserve safe_key so it cannot be used in the first
        # place?
        assert SAFE_KEY_NAME not in self.properties().iterkeys()
        properties[SAFE_KEY_NAME] = self.safe_key(self.key(), transform_fn)
        # Copy every property not on the (collapsed) blacklist.
        for name in self.properties():
            if name not in [prop.name for prop in self._get_export_blacklist()]:
                properties[name] = getattr(self, name)
        return ExportEntity(**properties)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages performance counters of an application and/or its modules."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
def incr_counter_global_value(unused_name, unused_delta):
    """Hook for global aggregation; the default implementation is a no-op."""
def get_counter_global_value(unused_name):
    """Hook for global aggregation; returns None until one is installed."""
    return None
class PerfCounter(object):
    """A generic, in-process integer counter registered under a unique name."""

    def __init__(self, name, doc_string):
        self._name = name
        self._doc_string = doc_string
        self._value = 0
        # Make the counter discoverable by name for reporting code.
        Registry.registered[self.name] = self

    def inc(
        self, increment=1, context=None):  # pylint: disable-msg=unused-argument
        """Adds increment to this counter and notifies the global hook."""
        self._value += increment
        incr_counter_global_value(self.name, increment)

    @property
    def name(self):
        """Unique counter name."""
        return self._name

    @property
    def doc_string(self):
        """Human-readable description of what is counted."""
        return self._doc_string

    @property
    def global_value(self):
        """Value aggregated across all processes (None if no aggregator)."""
        return get_counter_global_value(self.name)

    @property
    def value(self):
        """Value for this process only."""
        return self._value
class Registry(object):
    """Holds all registered counters."""

    # Map of counter name -> PerfCounter, populated by PerfCounter.__init__().
    registered = {}
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for managing long running jobs."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from datetime import datetime
import logging
import time
import traceback
import entities
import transforms
from google.appengine import runtime
from google.appengine.api import namespace_manager
from google.appengine.ext import db
from google.appengine.ext import deferred
# A job can be in one of these states (stored in DurableJobEntity.status_code).
STATUS_CODE_QUEUED = 0  # created via submit(), not yet started
STATUS_CODE_STARTED = 1  # main() has begun executing
STATUS_CODE_COMPLETED = 2  # run() returned; output holds its JSON result
STATUS_CODE_FAILED = 3  # run() raised; output holds the traceback text
class DurableJob(object):
    """A class that represents a deferred durable job at runtime.

    Each job instance is bound to one course namespace and persists its
    lifecycle state in a DurableJobEntity keyed by a per-class job name.
    """

    # The methods in DurableJobEntity are module-level protected
    # pylint: disable-msg=protected-access

    def __init__(self, app_context):
        self._namespace = app_context.get_namespace_name()
        # One entity per (job class, namespace) pair.
        self._job_name = 'job-%s-%s' % (
            self.__class__.__name__, self._namespace)

    def run(self):
        """Override this method to provide actual business logic."""

    def main(self):
        """Main method of the deferred task.

        Runs inside the target namespace; marks the job started, executes
        run(), and records either the JSON-serialized result or the failure
        traceback, plus the elapsed whole seconds.
        """
        logging.info('Job started: %s', self._job_name)
        time_started = time.time()
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(self._namespace)
            try:
                db.run_in_transaction(DurableJobEntity._start_job,
                                      self._job_name)
                result = self.run()
                db.run_in_transaction(DurableJobEntity._complete_job,
                                      self._job_name, transforms.dumps(result),
                                      long(time.time() - time_started))
                logging.info('Job completed: %s', self._job_name)
            # DeadlineExceededError is listed explicitly because it does not
            # necessarily derive from Exception.
            except (Exception, runtime.DeadlineExceededError) as e:
                logging.error(traceback.format_exc())
                logging.error('Job failed: %s\n%s', self._job_name, e)
                db.run_in_transaction(DurableJobEntity._fail_job,
                                      self._job_name, traceback.format_exc(),
                                      long(time.time() - time_started))
                # Prevent the task queue from retrying a failed job forever.
                raise deferred.PermanentTaskFailure(e)
        finally:
            namespace_manager.set_namespace(old_namespace)

    def submit(self):
        """Submits this job for deferred execution."""
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(self._namespace)
            db.run_in_transaction(DurableJobEntity._create_job, self._job_name)
            deferred.defer(self.main)
        finally:
            namespace_manager.set_namespace(old_namespace)

    def non_transactional_submit(self):
        # NOTE(review): _create_job asserts db.is_in_transaction(); calling it
        # directly here, outside a transaction, looks like it would trip that
        # assert — confirm intended behavior.
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(self._namespace)
            DurableJobEntity._create_job(self._job_name)
            deferred.defer(self.main)
        finally:
            namespace_manager.set_namespace(old_namespace)

    def load(self):
        """Loads the last known state of this job from the datastore."""
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(self._namespace)
            entity = DurableJobEntity._get_by_name(self._job_name)
            return entity
        finally:
            namespace_manager.set_namespace(old_namespace)
class DurableJobEntity(entities.BaseEntity):
    """A class that represents a persistent database entity of durable job.

    One entity per job name; DurableJob transitions it through the
    STATUS_CODE_* states inside datastore transactions.
    """

    # When the record last changed state.
    updated_on = db.DateTimeProperty(indexed=True)
    # Wall-clock duration of the run, in whole seconds.
    execution_time_sec = db.IntegerProperty(indexed=False)
    # One of the STATUS_CODE_* values.
    status_code = db.IntegerProperty(indexed=False)
    # JSON result on success, or traceback text on failure.
    output = db.TextProperty(indexed=False)

    @classmethod
    def _get_by_name(cls, name):
        return DurableJobEntity.get_by_key_name(name)

    @classmethod
    def _update(cls, name, status_code, output, execution_time_sec):
        """Updates job state in a datastore. Must run inside a transaction."""
        assert db.is_in_transaction()
        job = DurableJobEntity._get_by_name(name)
        if not job:
            # The entity should have been created by _create_job; losing it is
            # logged but not fatal.
            logging.error('Job was not started or was deleted: %s', name)
            return
        job.updated_on = datetime.now()
        job.execution_time_sec = execution_time_sec
        job.status_code = status_code
        job.output = output
        job.put()

    @classmethod
    def _create_job(cls, name):
        """Creates new or reset a state of existing job in a datastore."""
        assert db.is_in_transaction()
        job = DurableJobEntity._get_by_name(name)
        if not job:
            job = DurableJobEntity(key_name=name)
        # Re-submitting resets any previous result back to QUEUED.
        job.updated_on = datetime.now()
        job.execution_time_sec = 0
        job.status_code = STATUS_CODE_QUEUED
        job.output = None
        job.put()

    @classmethod
    def _start_job(cls, name):
        return cls._update(name, STATUS_CODE_STARTED, None, 0)

    @classmethod
    def _complete_job(cls, name, output, execution_time_sec):
        return cls._update(
            name, STATUS_CODE_COMPLETED, output, execution_time_sec)

    @classmethod
    def _fail_job(cls, name, output, execution_time_sec):
        return cls._update(name, STATUS_CODE_FAILED, output, execution_time_sec)

    @property
    def has_finished(self):
        # True once the job reached a terminal state (success or failure).
        return self.status_code in [STATUS_CODE_COMPLETED, STATUS_CODE_FAILED]
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages dynamic properties of an application and/or its modules.
An application must explicitly declare properties and provide a type, doc string
and default value for each. The default property values are overridden by
the new values found in the environment variable with the same name. Those are
further overridden by the values found in the datastore. We also try to do all
of this with performance in mind.
"""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import logging
import os
import threading
import time
import appengine_config
import entities
import transforms
from google.appengine.api import namespace_manager
from google.appengine.ext import db
# The default update interval supported (seconds).
DEFAULT_UPDATE_INTERVAL_SEC = 60

# The longest update interval supported (seconds).
MAX_UPDATE_INTERVAL_SEC = 60 * 5

# Allowed property types for ConfigProperty values.
TYPE_INT = int
TYPE_STR = str
TYPE_BOOL = bool
ALLOWED_TYPES = frozenset([TYPE_INT, TYPE_STR, TYPE_BOOL])
class ConfigProperty(object):
    """A property with name, type, doc_string and a default value.

    Value resolution precedence (see get_value): test overrides, then
    datastore overrides, then environment variables, then the default.
    """

    def __init__(
        self, name, value_type, doc_string,
        default_value=None, multiline=False, validator=None):
        # Only int, str and bool are supported value types.
        if value_type not in ALLOWED_TYPES:
            raise Exception('Bad value type: %s' % value_type)

        self._validator = validator
        self._multiline = multiline
        self._name = name
        self._type = value_type
        self._doc_string = doc_string
        # NOTE(review): value_type(None) raises TypeError for int when no
        # default_value is supplied — confirm all call sites pass a default.
        self._default_value = value_type(default_value)

        # The declared default must itself pass the validator.
        errors = []
        if self._validator and self._default_value:
            self._validator(self._default_value, errors)
        if errors:
            raise Exception('Default value is invalid: %s.' % errors)

        # Register globally by name so overrides and UIs can find it.
        Registry.registered[name] = self

    @property
    def validator(self):
        return self._validator

    @property
    def multiline(self):
        return self._multiline

    @property
    def name(self):
        return self._name

    @property
    def value_type(self):
        return self._type

    @property
    def doc_string(self):
        return self._doc_string

    @property
    def default_value(self):
        return self._default_value

    def get_environ_value(self):
        """Tries to get value from the environment variables.

        Returns:
            Tuple (has_value, value): has_value is False when the variable is
            unset or its value fails to cast to the property type.
        """
        # Look for a name in lower or upper case.
        name = None
        if self._name.lower() in os.environ:
            name = self._name.lower()
        else:
            if self._name.upper() in os.environ:
                name = self._name.upper()

        if name:
            try:
                return True, transforms.string_to_value(
                    os.environ[name], self.value_type)
            except Exception:  # pylint: disable-msg=broad-except
                # Uncastable values are removed so we do not retry them.
                logging.error(
                    'Property %s failed to cast to type %s; removing.',
                    self._name, self._type)
                del os.environ[name]
        return False, None

    def get_value(self, db_overrides=None):
        """Gets value from overrides (datastore, environment) or default."""

        # Try testing overrides.
        overrides = Registry.test_overrides
        if overrides and self.name in overrides:
            return overrides[self.name]

        # Try datastore overrides.
        if db_overrides and self.name in db_overrides:
            return db_overrides[self.name]

        # Try environment variable overrides.
        has_value, environ_value = self.get_environ_value()
        if has_value:
            return environ_value

        # Use default value as last resort.
        return self._default_value

    @property
    def value(self):
        # Fetches (possibly cached) datastore overrides on every access.
        return self.get_value(db_overrides=Registry.get_overrides())
class Registry(object):
    """Holds all registered properties and their various overrides.

    Datastore overrides are cached in db_overrides and refreshed at most
    every UPDATE_INTERVAL_SEC seconds; refreshes are skipped while a refresh
    is already in progress on this thread or a db transaction is open.
    """

    # Map of property name -> ConfigProperty, filled by ConfigProperty.
    registered = {}
    # Value overrides installed by tests; consulted first.
    test_overrides = {}
    # Cached name -> value overrides loaded from the datastore.
    db_overrides = {}
    # Names of properties whose datastore entity is marked is_draft.
    names_with_draft = {}
    # Timestamp (seconds) of the last refresh attempt.
    last_update_time = 0
    # Count of refresh attempts.
    update_index = 0
    # Detects reentrant refreshes on the same thread.
    threadlocal = threading.local()
    REENTRY_ATTR_NAME = 'busy'

    @classmethod
    def get_overrides(cls, force_update=False):
        """Returns current property overrides, maybe cached.

        Args:
            force_update: bool. If True, reload from the datastore even if
                the cached copy is still fresh.

        Returns:
            dict mapping property name to its overridden value.
        """
        now = long(time.time())
        age = now - cls.last_update_time
        max_age = UPDATE_INTERVAL_SEC.get_value(db_overrides=cls.db_overrides)

        # do not update if call is reentrant or outer db transaction exists
        busy = hasattr(cls.threadlocal, cls.REENTRY_ATTR_NAME) or (
            db.is_in_transaction())

        if (not busy) and (force_update or age < 0 or age >= max_age):
            # Value of '0' disables all datastore overrides.
            if UPDATE_INTERVAL_SEC.get_value() == 0:
                cls.db_overrides = {}
                return cls.db_overrides

            # Load overrides from a datastore.
            setattr(cls.threadlocal, cls.REENTRY_ATTR_NAME, True)
            try:
                old_namespace = namespace_manager.get_namespace()
                try:
                    # Overrides live in the default namespace regardless of
                    # the current course namespace.
                    namespace_manager.set_namespace(
                        appengine_config.DEFAULT_NAMESPACE_NAME)
                    cls._load_from_db()
                finally:
                    namespace_manager.set_namespace(old_namespace)
            except Exception as e:  # pylint: disable-msg=broad-except
                logging.error(
                    'Failed to load properties from a database: %s.', str(e))
            finally:
                delattr(cls.threadlocal, cls.REENTRY_ATTR_NAME)
                # Avoid overload and update timestamp even if we failed.
                cls.last_update_time = now
                cls.update_index += 1

        return cls.db_overrides

    @classmethod
    def _load_from_db(cls):
        """Loads dynamic properties from db."""
        logging.info('Reloading properties.')
        overrides = {}
        drafts = set()
        for item in ConfigPropertyEntity.all().fetch(1000):
            name = item.key().name()
            if name not in cls.registered:
                logging.error(
                    'Property is not registered (skipped): %s', name)
                continue

            target = cls.registered[name]
            if target and item.is_draft:
                drafts.add(name)
            if target and not item.is_draft:
                # Enforce value type.
                try:
                    value = transforms.string_to_value(
                        item.value, target.value_type)
                except Exception:  # pylint: disable-msg=broad-except
                    logging.error(
                        'Property %s failed to cast to a type %s; removing.',
                        target.name, target.value_type)
                    continue

                # Enforce value validator.
                if target.validator:
                    errors = []
                    try:
                        target.validator(value, errors)
                    except Exception as e:  # pylint: disable-msg=broad-except
                        # Bug fix: list.append() takes exactly one argument.
                        # The old code passed the format string and its args
                        # as two arguments, raising TypeError whenever a
                        # validator itself raised.
                        errors.append(
                            'Error validating property %s.\n%s' %
                            (target.name, e))
                    if errors:
                        logging.error(
                            'Property %s has invalid value:\n%s',
                            target.name, '\n'.join(errors))
                        continue

                overrides[name] = value

        cls.db_overrides = overrides
        cls.names_with_draft = drafts
class ConfigPropertyEntity(entities.BaseEntity):
    """A class that represents a named configuration property.

    Keyed by property name; see Registry._load_from_db().
    """

    # String form of the override value; cast via transforms.string_to_value.
    value = db.TextProperty(indexed=False)
    # Draft entities are tracked but never applied as overrides.
    is_draft = db.BooleanProperty(indexed=False)
def run_all_unit_tests():
    """Runs all unit tests for this modules."""
    # Registers two throwaway properties in the global Registry.
    str_prop = ConfigProperty('gcb-str-prop', str, ('doc for str_prop'), 'foo')
    int_prop = ConfigProperty('gcb-int-prop', int, ('doc for int_prop'), 123)

    assert str_prop.default_value == 'foo'
    assert str_prop.value == 'foo'

    assert int_prop.default_value == 123
    assert int_prop.value == 123

    # Check os.environ override works.
    os.environ[str_prop.name] = 'bar'
    assert str_prop.value == 'bar'
    del os.environ[str_prop.name]
    assert str_prop.value == 'foo'

    # Check os.environ override with type casting.
    os.environ[int_prop.name] = '12345'
    assert int_prop.value == 12345

    # Check setting of value is disallowed (value is a read-only property).
    try:
        str_prop.value = 'foo'
        raise Exception()
    except AttributeError:
        pass

    # Check value of bad type is disregarded; get_environ_value() also
    # deletes the uncastable environment variable as a side effect.
    os.environ[int_prop.name] = 'foo bar'
    assert int_prop.value == int_prop.default_value
def validate_update_interval(value, errors):
    """Validator for UPDATE_INTERVAL_SEC: accepts 0 < value < MAX, exclusive."""
    value = int(value)
    if not 0 < value < MAX_UPDATE_INTERVAL_SEC:
        errors.append(
            'Expected a value between 0 and %s, exclusive.' % (
                MAX_UPDATE_INTERVAL_SEC))
# Refresh interval used by Registry.get_overrides().
# NOTE(review): the editor text below says 'between 1 and %s, inclusive', but
# validate_update_interval() rejects the value MAX_UPDATE_INTERVAL_SEC itself —
# confirm which bound is intended.
UPDATE_INTERVAL_SEC = ConfigProperty(
    'gcb_config_update_interval_sec', int, (
        'An update interval (in seconds) for reloading runtime properties '
        'from a datastore. Using this editor, you can set this value to an '
        'integer between 1 and %s, inclusive. To completely disable reloading '
        'properties from a datastore, you must set the value to 0. However, '
        'you can only set the value to 0 by directly modifying the app.yaml '
        'file.' % MAX_UPDATE_INTERVAL_SEC),
    default_value=DEFAULT_UPDATE_INTERVAL_SEC,
    validator=validate_update_interval)

if __name__ == '__main__':
    run_all_unit_tests()
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom configurations and functions for Google App Engine."""
__author__ = 'psimakov@google.com (Pavel Simakov)'
import os
import sys
# Whether we are running in the production environment. The GAE dev server
# reports SERVER_SOFTWARE starting with 'Development'.
PRODUCTION_MODE = not os.environ.get(
    'SERVER_SOFTWARE', 'Development').startswith('Development')

# Set this flag to true to enable bulk downloads of Javascript/CSS files in lib
BUNDLE_LIB_FILES = True

# this is the official location of this app for computing of all relative paths
BUNDLE_ROOT = os.path.dirname(__file__)

# make all Windows and Linux paths have the same separator '/'
BUNDLE_ROOT = BUNDLE_ROOT.replace('\\', '/')

# Default namespace name is '' and not None.
DEFAULT_NAMESPACE_NAME = ''
class _Library(object):
    """DDO that represents a Python library contained in a .zip file."""

    def __init__(self, zipfile, relative_path=None):
        # relative_path points inside the archive when the importable code
        # does not live at the zip root.
        self._zipfile = zipfile
        self._relative_path = relative_path

    @property
    def file_path(self):
        """Path to the library's .zip file on disk."""
        return os.path.join(BUNDLE_ROOT, 'lib', self._zipfile)

    @property
    def full_path(self):
        """Import path: the zip file plus the archive-relative path, if any."""
        if not self._relative_path:
            return self.file_path
        return os.path.join(self.file_path, self._relative_path)
# Third-party library zip files added to sys.path by gcb_init_third_party().
THIRD_PARTY_LIBS = [
    _Library('babel-0.9.6.zip'),
    _Library('html5lib-0.95.zip'),
    _Library('httplib2-0.8.zip', relative_path='httplib2-0.8/python2'),
    _Library('gaepytz-2011h.zip'),
    _Library(
        'google-api-python-client-1.1.zip',
        relative_path='google-api-python-client-1.1'),
    # .zip repackaged from .tar.gz download.
    _Library('mrs-mapreduce-0.9.zip', relative_path='mrs-mapreduce-0.9'),
    # .zip repackaged from .tar.gz download.
    _Library('python-gflags-2.0.zip', relative_path='python-gflags-2.0'),
    _Library('pyparsing-1.5.7.zip'),
]
def gcb_force_default_encoding(encoding):
    """Force default encoding to a specific value.

    Eclipse silently sets default encoding to 'utf-8', while GAE forces
    'ascii'. We need to control this directly for consistency.
    """
    if sys.getdefaultencoding() == encoding:
        return
    # setdefaultencoding() is removed by site.py; reload(sys) restores it.
    reload(sys)
    sys.setdefaultencoding(encoding)
def gcb_init_third_party():
    """Add all third party libraries to system path."""
    for library in THIRD_PARTY_LIBS:
        # Fail fast if a bundled archive is missing from lib/.
        if not os.path.exists(library.file_path):
            raise Exception('Library does not exist: %s' % library.file_path)
        sys.path.insert(0, library.full_path)


# Executed at import time so libraries are importable immediately.
gcb_init_third_party()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers that are not directly related to course content."""
__author__ = 'Saifu Angto (saifu@google.com)'
import base64
import hmac
import os
import time
import urlparse
import appengine_config
from common import jinja_utils
from models import models
from models import transforms
from models.config import ConfigProperty
from models.config import ConfigPropertyEntity
from models.courses import Course
from models.models import Student
from models.models import StudentProfileDAO
from models.models import TransientStudent
from models.roles import Roles
import webapp2
from google.appengine.api import namespace_manager
from google.appengine.api import users
# The name of the template dict key that stores a course's base location.
COURSE_BASE_KEY = 'gcb_course_base'

# The name of the template dict key that stores data from course.yaml.
COURSE_INFO_KEY = 'course_info'

# Shared sentinel used when no real Student is logged in.
TRANSIENT_STUDENT = TransientStudent()

# Length in bytes of generated XSRF secrets.
XSRF_SECRET_LENGTH = 20

XSRF_SECRET = ConfigProperty(
    'gcb_xsrf_secret', str, (
        'Text used to encrypt tokens, which help prevent Cross-site request '
        'forgery (CSRF, XSRF). You can set the value to any alphanumeric text, '
        'preferably using 16-64 characters. Once you change this value, the '
        'server rejects all subsequent requests issued using an old value for '
        'this variable.'),
    'course builder XSRF secret')

# Whether to record page load/unload events in a database.
CAN_PERSIST_PAGE_EVENTS = ConfigProperty(
    'gcb_can_persist_page_events', bool, (
        'Whether or not to record student page interactions in a '
        'datastore. Without event recording, you cannot analyze student '
        'page interactions. On the other hand, no event recording reduces '
        'the number of datastore operations and minimizes the use of Google '
        'App Engine quota. Turn event recording on if you want to analyze '
        'this data.'),
    False)

# Whether to record tag events in a database.
CAN_PERSIST_TAG_EVENTS = ConfigProperty(
    'gcb_can_persist_tag_events', bool, (
        'Whether or not to record student tag interactions in a '
        'datastore. Without event recording, you cannot analyze student '
        'tag interactions. On the other hand, no event recording reduces '
        'the number of datastore operations and minimizes the use of Google '
        'App Engine quota. Turn event recording on if you want to analyze '
        'this data.'),
    False)

# Whether to record events in a database.
CAN_PERSIST_ACTIVITY_EVENTS = ConfigProperty(
    'gcb_can_persist_activity_events', bool, (
        'Whether or not to record student activity interactions in a '
        'datastore. Without event recording, you cannot analyze student '
        'activity interactions. On the other hand, no event recording reduces '
        'the number of datastore operations and minimizes the use of Google '
        'App Engine quota. Turn event recording on if you want to analyze '
        'this data.'),
    False)

# Date format string for displaying datetimes in UTC.
# Example: 2013-03-21 13:00 UTC
HUMAN_READABLE_DATETIME_FORMAT = '%Y-%m-%d, %H:%M UTC'

# Date format string for displaying dates. Example: 2013-03-21
HUMAN_READABLE_DATE_FORMAT = '%Y-%m-%d'

# Time format string for displaying times. Example: 01:16:40 UTC.
HUMAN_READABLE_TIME_FORMAT = '%H:%M:%S UTC'
class PageInitializer(object):
    """Abstract interface for populating page-header template values."""

    @classmethod
    def initialize(cls, template_value):
        """Populates template_value; concrete subclasses must override."""
        raise NotImplementedError
class DefaultPageInitializer(PageInitializer):
    """Page initializer that performs no header setup."""

    @classmethod
    def initialize(cls, template_value):
        """Intentionally a no-op."""
class PageInitializerService(object):
    """Holds the PageInitializer implementation currently in effect."""

    # Module default; replaced via set() by installations that need custom
    # page headers.
    _page_initializer = DefaultPageInitializer

    @classmethod
    def set(cls, page_initializer):
        """Installs a new active PageInitializer."""
        cls._page_initializer = page_initializer

    @classmethod
    def get(cls):
        """Returns the active PageInitializer."""
        return cls._page_initializer
class ReflectiveRequestHandler(object):
    """Uses reflection to handle custom get() and post() requests.

    Use this class as a mix-in with any webapp2.RequestHandler to allow request
    dispatching to multiple get() and post() methods based on the 'action'
    parameter.

    Open your existing webapp2.RequestHandler, add this class as a mix-in.
    Define the following class variables:

        default_action = 'list'
        get_actions = ['default_action', 'edit']
        post_actions = ['save']

    Add instance methods named get_list(self), get_edit(self), post_save(self).
    These methods will now be called automatically based on the 'action'
    GET/POST parameter.
    """

    def create_xsrf_token(self, action):
        """Returns an XSRF token bound to the given action."""
        return XsrfTokenManager.create_xsrf_token(action)

    def get(self):
        """Handles GET; dispatches to get_<action>() or responds 404."""
        action = self.request.get('action')
        if not action:
            action = self.default_action

        if action not in self.get_actions:
            self.error(404)
            return

        # Bug fix: getattr without a default raises AttributeError for a
        # declared-but-unimplemented action, which made the 404 branch below
        # unreachable. Pass None so misconfiguration yields a 404 instead.
        handler = getattr(self, 'get_%s' % action, None)
        if not handler:
            self.error(404)
            return
        return handler()

    def post(self):
        """Handles POST; validates XSRF then dispatches to post_<action>()."""
        action = self.request.get('action')
        if not action or action not in self.post_actions:
            self.error(404)
            return

        # Same fix as in get(): default to None so the 404 check is live.
        handler = getattr(self, 'post_%s' % action, None)
        if not handler:
            self.error(404)
            return

        # Each POST request must have valid XSRF token.
        xsrf_token = self.request.get('xsrf_token')
        if not XsrfTokenManager.is_xsrf_token_valid(xsrf_token, action):
            self.error(403)
            return

        return handler()
class ApplicationHandler(webapp2.RequestHandler):
    """A handler that is aware of the application context."""

    @classmethod
    def is_absolute(cls, url):
        """Tells whether 'url' carries a scheme (http, https, ...)."""
        return bool(urlparse.urlparse(url).scheme)

    @classmethod
    def get_base_href(cls, handler):
        """Computes current course <base> href."""
        base = handler.app_context.get_slug()
        if not base.endswith('/'):
            base = '%s/' % base

        # For IE to work with the <base> tag, its href must be an absolute URL.
        if cls.is_absolute(base):
            return base
        parts = urlparse.urlparse(handler.request.url)
        return urlparse.urlunparse(
            (parts.scheme, parts.netloc, base, None, None, None))

    def __init__(self, *args, **kwargs):
        super(ApplicationHandler, self).__init__(*args, **kwargs)
        # Values accumulated here are handed to the template on render.
        self.template_value = {}

    def get_template(self, template_file, additional_dirs=None):
        """Computes location of template files for the current namespace."""
        values = self.template_value
        app_context = self.app_context
        values[COURSE_INFO_KEY] = app_context.get_environ()
        values['is_course_admin'] = Roles.is_course_admin(app_context)
        values['is_read_write_course'] = app_context.fs.is_read_write()
        values['is_super_admin'] = Roles.is_super_admin()
        values[COURSE_BASE_KEY] = self.get_base_href(self)
        template_environ = app_context.get_template_environ(
            values[COURSE_INFO_KEY]['course']['locale'], additional_dirs)
        template_environ.filters['gcb_tags'] = (
            jinja_utils.get_gcb_tags_filter(self))
        return template_environ.get_template(template_file)

    def canonicalize_url(self, location):
        """Adds the current namespace URL prefix to the relative 'location'."""
        slug = self.app_context.get_slug()
        needs_prefix = (
            not self.is_absolute(location) and
            not location.startswith(slug) and
            slug and slug != '/')
        return '%s%s' % (slug, location) if needs_prefix else location

    def redirect(self, location, normalize=True):
        """Redirects, canonicalizing 'location' unless normalize is False."""
        target = self.canonicalize_url(location) if normalize else location
        super(ApplicationHandler, self).redirect(target)
class BaseHandler(ApplicationHandler):
    """Base handler: course access plus per-request page personalization."""

    def __init__(self, *args, **kwargs):
        super(BaseHandler, self).__init__(*args, **kwargs)
        # Lazily-created Course for the current app_context; see get_course().
        self.course = None

    def get_course(self):
        """Returns the Course for this handler, creating it on first use."""
        if not self.course:
            self.course = Course(self)
        return self.course

    def find_unit_by_id(self, unit_id):
        """Gets a unit with a specific id or fails with an exception."""
        return self.get_course().find_unit_by_id(unit_id)

    def get_units(self):
        """Gets all units in the course."""
        return self.get_course().get_units()

    def get_lessons(self, unit_id):
        """Gets all lessons (in order) in the specific course unit."""
        return self.get_course().get_lessons(unit_id)

    def get_progress_tracker(self):
        """Gets the progress tracker for the course."""
        return self.get_course().get_progress_tracker()

    def get_user(self):
        """Get the current user."""
        return users.get_current_user()

    def personalize_page_and_get_user(self):
        """If the user exists, add personalized fields to the navbar.

        Returns:
            The signed-in users.User, or None when nobody is signed in (in
            which case the page is set up for a transient visitor with a
            login URL instead).
        """
        user = self.get_user()
        PageInitializerService.get().initialize(self.template_value)

        if hasattr(self, 'app_context'):
            self.template_value['can_register'] = self.app_context.get_environ(
                )['reg_form']['can_register']

        if user:
            self.template_value['email'] = user.email()
            self.template_value['logoutUrl'] = (
                users.create_logout_url(self.request.uri))
            self.template_value['transient_student'] = False

            # configure page events
            self.template_value['record_tag_events'] = (
                CAN_PERSIST_TAG_EVENTS.value)
            self.template_value['record_page_events'] = (
                CAN_PERSIST_PAGE_EVENTS.value)
            self.template_value['record_events'] = (
                CAN_PERSIST_ACTIVITY_EVENTS.value)
            self.template_value['event_xsrf_token'] = (
                XsrfTokenManager.create_xsrf_token('event-post'))
        else:
            self.template_value['loginUrl'] = users.create_login_url(
                self.request.uri)
            self.template_value['transient_student'] = True
            return None

        return user

    def personalize_page_and_get_enrolled(
        self, supports_transient_student=False):
        """If the user is enrolled, add personalized fields to the navbar.

        Args:
            supports_transient_student: when True and the course is marked
                browsable, a signed-out or unenrolled visitor gets the
                shared TRANSIENT_STUDENT instead of being redirected away.

        Returns:
            The enrolled Student, TRANSIENT_STUDENT for permitted transient
            access, or None after issuing a redirect (login or /preview).
        """
        user = self.personalize_page_and_get_user()
        if user is None:
            student = TRANSIENT_STUDENT
        else:
            student = Student.get_enrolled_student_by_email(user.email())
            if not student:
                self.template_value['transient_student'] = True
                student = TRANSIENT_STUDENT

        if student.is_transient:
            if supports_transient_student and (
                    self.app_context.get_environ()['course']['browsable']):
                return TRANSIENT_STUDENT
            elif user is None:
                # Not signed in at all: send to login and come back here.
                self.redirect(
                    users.create_login_url(self.request.uri), normalize=False
                )
                return None
            else:
                # Signed in but not enrolled: send to the preview page.
                self.redirect('/preview')
                return None

        # Patch Student models which (for legacy reasons) do not have a user_id
        # attribute set.
        if not student.user_id:
            student.user_id = user.user_id()
            student.put()

        return student

    def assert_xsrf_token_or_fail(self, request, action):
        """Asserts the current request has proper XSRF token or fails.

        Returns:
            True when the token is valid; False after replying 403.
        """
        token = request.get('xsrf_token')
        if not token or not XsrfTokenManager.is_xsrf_token_valid(token, action):
            self.error(403)
            return False
        return True

    def render(self, template_file):
        """Renders a template with the accumulated template_value dict."""
        template = self.get_template(template_file)
        self.response.out.write(template.render(self.template_value))
class BaseRESTHandler(BaseHandler):
    """Base REST handler."""

    def assert_xsrf_token_or_fail(self, token_dict, action, args_dict):
        """Asserts that current request has proper XSRF token or fails.

        Unlike the BaseHandler variant, failure is reported as a JSON 403
        response carrying args_dict.
        """
        token = token_dict.get('xsrf_token')
        if token and XsrfTokenManager.is_xsrf_token_valid(token, action):
            return True
        transforms.send_json_response(
            self, 403,
            'Bad XSRF token. Please reload the page and try again',
            args_dict)
        return False

    def validation_error(self, message, key=None):
        """Deliver a validation message."""
        if not key:
            transforms.send_json_response(self, 412, message)
        else:
            transforms.send_json_response(
                self, 412, message, payload_dict={'key': key})
class PreviewHandler(BaseHandler):
    """Handler for viewing course preview."""

    def get(self):
        """Handles GET requests.

        Enrolled students (and anyone, when the course is browsable) are
        redirected to the main course page; everyone else gets the preview
        page, possibly with the registration section.
        """
        user = self.personalize_page_and_get_user()
        if user is None:
            student = TRANSIENT_STUDENT
        else:
            student = Student.get_enrolled_student_by_email(user.email())
            if not student:
                student = TRANSIENT_STUDENT

        # If the course is browsable, or the student is logged in and
        # registered, redirect to the main course page.
        if ((student and not student.is_transient) or
            self.app_context.get_environ()['course']['browsable']):
            self.redirect('/course')
            return

        self.template_value['transient_student'] = True
        self.template_value['can_register'] = self.app_context.get_environ(
            )['reg_form']['can_register']
        self.template_value['navbar'] = {'course': True}
        self.template_value['units'] = self.get_units()
        self.template_value['show_registration_page'] = True

        # Surface the intro video/image only when a non-empty URL is
        # configured for it in the course environment.
        course = self.app_context.get_environ()['course']
        self.template_value['video_exists'] = bool(
            'main_video' in course and
            'url' in course['main_video'] and
            course['main_video']['url'])
        self.template_value['image_exists'] = bool(
            'main_image' in course and
            'url' in course['main_image'] and
            course['main_image']['url'])

        if user:
            profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
            additional_registration_fields = self.app_context.get_environ(
                )['reg_form']['additional_registration_fields']
            # A returning user with a global profile and no extra questions
            # to answer can register in one click, so skip the form.
            if profile is not None and not additional_registration_fields:
                self.template_value['show_registration_page'] = False
                self.template_value['register_xsrf_token'] = (
                    XsrfTokenManager.create_xsrf_token('register-post'))
        self.render('preview.html')
class RegisterHandler(BaseHandler):
    """Handler for course registration."""

    def get(self):
        """Handles GET request: shows the registration form.

        Redirects away when not signed in, already enrolled, or when
        registration is closed.
        """
        user = self.personalize_page_and_get_user()
        if not user:
            self.redirect(
                users.create_login_url(self.request.uri), normalize=False)
            return

        student = Student.get_enrolled_student_by_email(user.email())
        if student:
            # Already enrolled; nothing to register for.
            self.redirect('/course')
            return

        can_register = self.app_context.get_environ(
            )['reg_form']['can_register']
        if not can_register:
            self.redirect('/course#registration_closed')
            return

        # pre-fill nick name from the profile if available
        self.template_value['current_name'] = ''
        profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
        if profile and profile.nick_name:
            self.template_value['current_name'] = profile.nick_name

        self.template_value['navbar'] = {}
        self.template_value['transient_student'] = True
        self.template_value['register_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('register-post'))

        self.render('register.html')

    def post(self):
        """Handles POST requests: enrolls the current user as a Student."""
        user = self.personalize_page_and_get_user()
        if not user:
            self.redirect(
                users.create_login_url(self.request.uri), normalize=False)
            return

        if not self.assert_xsrf_token_or_fail(self.request, 'register-post'):
            return

        can_register = self.app_context.get_environ(
            )['reg_form']['can_register']
        if not can_register:
            self.redirect('/course#registration_closed')
            return

        if 'name_from_profile' in self.request.POST.keys():
            # NOTE(review): assumes a profile always exists when the form
            # posts 'name_from_profile'; a None profile would raise
            # AttributeError here -- confirm against the registration form.
            profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
            name = profile.nick_name
        else:
            name = self.request.get('form01')

        Student.add_new_student_for_current_user(
            name, transforms.dumps(self.request.POST.items()))

        # Render registration confirmation page
        self.redirect('/course#registration_confirmation')
class ForumHandler(BaseHandler):
    """Handler for forum page."""

    def get(self):
        """Renders the forum for enrolled (or permitted transient) users."""
        student = self.personalize_page_and_get_enrolled(
            supports_transient_student=True)
        if student:
            self.template_value['navbar'] = {'forum': True}
            self.render('forum.html')
class StudentProfileHandler(BaseHandler):
    """Handles the click to 'Progress' link in the nav bar."""

    def get(self):
        """Renders the student's profile/progress page."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        course = self.get_course()
        profile = student.profile
        # The global profile's nick name wins over the per-course name.
        display_name = profile.nick_name if profile else student.name

        values = self.template_value
        values['navbar'] = {'progress': True}
        values['student'] = student
        values['student_name'] = display_name
        values['date_enrolled'] = student.enrolled_on.strftime(
            HUMAN_READABLE_DATE_FORMAT)
        values['score_list'] = course.get_all_scores(student)
        values['overall_score'] = course.get_overall_score(student)
        values['student_edit_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('student-edit'))
        # Name editing is only offered when the shared profile is disabled.
        values['can_edit_name'] = (
            not models.CAN_SHARE_STUDENT_PROFILE.value)
        self.render('student_profile.html')
class StudentEditStudentHandler(BaseHandler):
    """Handles edits to student records by students."""

    def post(self):
        """Applies a student-initiated name change, then returns home."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'student-edit'):
            return

        Student.rename_current(self.request.get('name'))
        self.redirect('/student/home')
class StudentUnenrollHandler(BaseHandler):
    """Handler for students to unenroll themselves."""

    def get(self):
        """Shows the 'are you sure?' confirmation page."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        values = self.template_value
        values['student'] = student
        values['navbar'] = {}
        values['student_unenroll_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('student-unenroll'))
        self.render('unenroll_confirmation_check.html')

    def post(self):
        """Marks the current student unenrolled and confirms it."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'student-unenroll'):
            return

        Student.set_enrollment_status_for_current(False)

        values = self.template_value
        values['navbar'] = {}
        values['transient_student'] = True
        self.render('unenroll_confirmation.html')
class XsrfTokenManager(object):
    """Provides XSRF protection via stateless, HMAC-signed action tokens.

    A token has the form '<issued_on>/<urlsafe-base64 digest>', where the
    digest is an HMAC (keyed by XSRF_SECRET) over the user id, the action
    and the issue time. Validation simply recomputes the digest, so no
    server-side token storage is required. (Older comments mentioning
    memcache are out of date: nothing here touches memcache.)
    """

    # Max age of the token (4 hours).
    XSRF_TOKEN_AGE_SECS = 60 * 60 * 4

    # Token delimiters.
    DELIMITER_PRIVATE = ':'
    DELIMITER_PUBLIC = '/'

    # Default user id to use when no user is signed in.
    USER_ID_DEFAULT = 'default'

    @classmethod
    def init_xsrf_secret_if_none(cls):
        """Verifies that non-default XSRF secret exists; creates one if not."""

        # Any non-default value is fine.
        if XSRF_SECRET.value and XSRF_SECRET.value != XSRF_SECRET.default_value:
            return

        # All property manipulations must run in the default namespace.
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(
                appengine_config.DEFAULT_NAMESPACE_NAME)

            # Look in the datastore directly, bypassing the config cache.
            entity = ConfigPropertyEntity.get_by_key_name(XSRF_SECRET.name)
            if not entity:
                entity = ConfigPropertyEntity(key_name=XSRF_SECRET.name)

            # Any non-default non-None value is fine.
            if (entity.value and not entity.is_draft and
                (str(entity.value) != str(XSRF_SECRET.default_value))):
                return

            # Initialize to random value.
            entity.value = base64.urlsafe_b64encode(
                os.urandom(XSRF_SECRET_LENGTH))
            entity.is_draft = False
            entity.put()
        finally:
            namespace_manager.set_namespace(old_namespace)

    @classmethod
    def _create_token(cls, action_id, issued_on):
        """Creates a string representation (digest) of a token.

        Args:
            action_id: the action this token authorizes.
            issued_on: issue time as a Unix timestamp in seconds.

        Returns:
            '<issued_on>/<urlsafe-base64 HMAC digest>'; only the issue time
            travels in plain text.
        """
        cls.init_xsrf_secret_if_none()

        # The digest binds the actor user id, the action and the issue time
        # together with the private delimiter between the parts.

        # Lookup user id.
        user = users.get_current_user()
        if user:
            user_id = user.user_id()
        else:
            user_id = cls.USER_ID_DEFAULT

        # Round time to seconds.
        issued_on = long(issued_on)

        digester = hmac.new(str(XSRF_SECRET.value))
        digester.update(str(user_id))
        digester.update(cls.DELIMITER_PRIVATE)
        digester.update(str(action_id))
        digester.update(cls.DELIMITER_PRIVATE)
        digester.update(str(issued_on))

        digest = digester.digest()
        token = '%s%s%s' % (
            issued_on, cls.DELIMITER_PUBLIC, base64.urlsafe_b64encode(digest))

        return token

    @classmethod
    def create_xsrf_token(cls, action):
        """Issues a token for 'action' stamped with the current time."""
        return cls._create_token(action, time.time())

    @classmethod
    def is_xsrf_token_valid(cls, token, action):
        """Validate a given XSRF token by recomputing its digest.

        Returns:
            True only for a well-formed, unexpired token whose digest
            matches; any parsing or computation error yields False rather
            than propagating.
        """
        try:
            parts = token.split(cls.DELIMITER_PUBLIC)
            if len(parts) != 2:
                return False

            issued_on = long(parts[0])
            age = time.time() - issued_on
            if age > cls.XSRF_TOKEN_AGE_SECS:
                return False

            authentic_token = cls._create_token(action, issued_on)
            if authentic_token == token:
                return True

            return False
        except Exception:  # pylint: disable-msg=broad-except
            return False
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""Enables hosting of multiple courses in one application instance.
We used to allow hosting of only one course in one Google App Engine instance.
Now we allow hosting of many courses simultaneously. To configure multiple
courses one must set an environment variable in app.yaml file, for example:
...
env_variables:
GCB_COURSES_CONFIG: 'course:/coursea:/courses/a, course:/courseb:/courses/b'
...
This variable holds a ',' or newline separated list of course entries. Each
course entry has four ':' separated parts: the word 'course', the URL prefix,
and the file system location for the site files. If the third part is empty,
the course assets are stored in a datastore instead of the file system. The
fourth, optional part, is the name of the course namespace.
The URL prefix specifies, how will the course URL appear in the browser. In the
example above, the courses will be mapped to http://www.example.com[/coursea]
and http://www.example.com[/courseb].
The file system location of the files specifies, which files to serve for the
course. For each course we expect three sub-folders: 'assets', 'views', and
'data'. The 'data' folder must contain the CSV files that define the course
layout, the 'assets' and 'views' should contain the course specific files and
jinja2 templates respectively. In the example above, the course files are
expected to be placed into folders '/courses/a' and '/courses/b' of your Google
App Engine installation respectively. If this value is absent a datastore is
used to store course assets, not the file system.
By default Course Builder handles static '/assets' files using a custom
handler. You may choose to handle '/assets' files of your course as 'static'
files using Google App Engine handler. You can do so by creating a new static
file handler entry in your app.yaml and placing it before our main course
handler.
If you have an existing course developed using Course Builder and do NOT want
to host multiple courses, there is nothing for you to do. A following default
rule is silently created for you:
...
env_variables:
GCB_COURSES_CONFIG: 'course:/:/'
...
It sets the '/' as the base URL for the course, uses root folder of your Google
App Engine installation to look for course /assets/..., /data/..., and
/views/... and uses blank datastore and memcache namespace. All in all,
everything behaves just as it did in the prior version of Course Builder when
only one course was supported.
If you have an existing course developed using Course Builder and DO want to start
hosting multiple courses here are the steps. First, define the courses
configuration environment variable as described above. Second, copy existing
'assets', 'data' and 'views' folders of your course into the new location, for
example '/courses/mycourse'.
If you have an existing course built on a previous version of Course Builder
and you now decided to use new URL prefix, which is not '/', you will need
to update your old course html template and JavaScript files. You typically
would have to make two modifications. First, replace all absolute URLs with
the relative URLs. For example, if you had <a href='/forum'>..</a>, you will
need to replace it with <a href='forum'>..</a>. Second, you need to add <base>
tag at the top of you course 'base.html' and 'base_registration.html' files,
like this:
...
<head>
<base href="{{ gcb_course_base }}" />
...
Current Course Builder release already has all these modifications.
Note, that each 'course' runs in a separate Google App Engine namespace. The
name of the namespace is derived from the course files location. In the example
above, the course files are stored in the folder '/courses/a', which will be mapped
to the namespace name 'gcb-courses-a'. The namespaces can't contain '/', so we
replace them with '-' and prefix the namespace with the project abbreviation
'gcb'. Remember these namespace names, you will need to use them if/when
accessing server administration panel, viewing objects in the datastore, etc.
Don't move the files to another folder after your course starts as a new folder
name will create a new namespace name and old data will no longer be used. You
are free to rename the course URL prefix at any time. Once again, if you are
not hosting multiple courses, your course will run in a default namespace
(None).
Good luck!
"""
import logging
import mimetypes
import os
import posixpath
import re
import threading
import urlparse
import zipfile
import appengine_config
from common import safe_dom
from models import transforms
from models.config import ConfigProperty
from models.config import ConfigPropertyEntity
from models.config import Registry
from models.counters import PerfCounter
from models.courses import Course
from models.roles import Roles
from models.vfs import AbstractFileSystem
from models.vfs import DatastoreBackedFileSystem
from models.vfs import LocalReadOnlyFileSystem
import webapp2
from webapp2_extras import i18n
import utils
from google.appengine.api import namespace_manager
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import zipserve
# base name for all course namespaces
GCB_BASE_COURSE_NAMESPACE = 'gcb-course'

# these folder and file names are reserved
GCB_ASSETS_FOLDER_NAME = os.path.normpath('/assets/')
GCB_VIEWS_FOLDER_NAME = os.path.normpath('/views/')
GCB_DATA_FOLDER_NAME = os.path.normpath('/data/')
GCB_CONFIG_FILENAME = os.path.normpath('/course.yaml')

# modules do have files that must be inheritable, like oeditor.html
GCB_MODULES_FOLDER_NAME = os.path.normpath('/modules/')

# Files in these folders are inheritable between file systems.
GCB_INHERITABLE_FOLDER_NAMES = [
    os.path.join(GCB_ASSETS_FOLDER_NAME, 'css/'),
    os.path.join(GCB_ASSETS_FOLDER_NAME, 'img/'),
    os.path.join(GCB_ASSETS_FOLDER_NAME, 'lib/'),
    GCB_VIEWS_FOLDER_NAME,
    GCB_MODULES_FOLDER_NAME]

# supported site types; currently only 'course' rules are accepted
SITE_TYPE_COURSE = 'course'

# default 'Cache-Control' HTTP header for static files (10 minutes)
DEFAULT_CACHE_CONTROL_MAX_AGE = 600
DEFAULT_CACHE_CONTROL_PUBLIC = 'public'

# default HTTP headers for dynamic responses (a date in the past plus
# 'no-cache' makes browsers revalidate every time)
DEFAULT_EXPIRY_DATE = 'Mon, 01 Jan 1990 00:00:00 GMT'
DEFAULT_PRAGMA = 'no-cache'

# enable debug output via debug()
DEBUG_INFO = False

# thread local storage for current request PATH_INFO
PATH_INFO_THREAD_LOCAL = threading.local()

# performance counters
STATIC_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-static',
    'A number of times request was served via static handler.')
DYNAMIC_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-dynamic',
    'A number of times request was served via dynamic handler.')
ZIP_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-zip',
    'A number of times request was served via zip handler.')
NO_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-none',
    'A number of times request was not matched to any handler.')
HTTP_BYTES_IN = PerfCounter(
    'gcb-sites-bytes-in',
    'A number of bytes received from clients by the handler.')
HTTP_BYTES_OUT = PerfCounter(
    'gcb-sites-bytes-out',
    'A number of bytes sent out from the handler to clients.')
HTTP_STATUS_200 = PerfCounter(
    'gcb-sites-http-20x',
    'A number of times HTTP status code 20x was returned.')
HTTP_STATUS_300 = PerfCounter(
    'gcb-sites-http-30x',
    'A number of times HTTP status code 30x was returned.')
HTTP_STATUS_400 = PerfCounter(
    'gcb-sites-http-40x',
    'A number of times HTTP status code 40x was returned.')
HTTP_STATUS_500 = PerfCounter(
    'gcb-sites-http-50x',
    'A number of times HTTP status code 50x was returned.')

# Maps a status code rounded down to its hundreds class (200, 300, ...) to
# the counter tracking that class; see count_stats().
COUNTER_BY_HTTP_CODE = {
    200: HTTP_STATUS_200, 300: HTTP_STATUS_300, 400: HTTP_STATUS_400,
    500: HTTP_STATUS_500}
def count_stats(handler):
    """Records statistics about the request and the response.

    Updates the bytes-in/bytes-out counters and the per-status-class HTTP
    counter. Never raises: any failure is logged and swallowed so stats
    collection cannot break request serving.

    Args:
        handler: a webapp2-style handler carrying .request and .response.
    """
    try:
        # Record request bytes in.
        if handler.request and handler.request.content_length:
            HTTP_BYTES_IN.inc(handler.request.content_length)

        # Record response HTTP status code, bucketed by hundreds class.
        if handler.response and handler.response.status_int:
            # '//' makes the integer floor-division explicit (same result as
            # '/' under Python 2 for ints).
            rounded_status_code = (handler.response.status_int // 100) * 100
            # Use .get() so an unexpected class (e.g. 1xx) reaches the
            # 'unknown code' branch below; plain indexing raised KeyError,
            # which skipped the remaining stats and made that branch
            # unreachable.
            counter = COUNTER_BY_HTTP_CODE.get(rounded_status_code)
            if not counter:
                logging.error(
                    'Unknown HTTP status code: %s.',
                    handler.response.status_int)
            else:
                counter.inc()

        # Record response bytes out.
        if handler.response and handler.response.content_length:
            HTTP_BYTES_OUT.inc(handler.response.content_length)
    except Exception as e:  # pylint: disable-msg=broad-except
        logging.error('Failed to count_stats(): %s.', str(e))
def has_path_info():
    """Checks if PATH_INFO is defined for the thread local.

    Returns:
        True when set_path_info() was called on this thread and not yet
        balanced by unset_path_info().
    """
    return hasattr(PATH_INFO_THREAD_LOCAL, 'path')
def set_path_info(path):
    """Stores PATH_INFO in thread local and enters the course namespace.

    Remembers the current namespace so unset_path_info() can restore it;
    the two calls must be balanced on each thread.

    Args:
        path: the request PATH_INFO; must be non-empty.

    Raises:
        Exception: if path is empty, or a path is already set on this thread.
    """
    if not path:
        raise Exception('Use \'unset()\' instead.')
    if has_path_info():
        raise Exception('Expected no path set.')

    PATH_INFO_THREAD_LOCAL.path = path

    PATH_INFO_THREAD_LOCAL.old_namespace = namespace_manager.get_namespace()
    namespace_manager.set_namespace(
        ApplicationContext.get_namespace_name_for_request())
def get_path_info():
    """Gets PATH_INFO from thread local.

    Raises AttributeError when no path was stored via set_path_info().
    """
    return PATH_INFO_THREAD_LOCAL.path
def unset_path_info():
    """Removes PATH_INFO from thread local and restores the old namespace.

    Raises:
        Exception: if no path is currently set (unbalanced call).
    """
    if not has_path_info():
        raise Exception('Expected valid path already set.')

    # Restore the namespace captured by set_path_info() before clearing.
    namespace_manager.set_namespace(
        PATH_INFO_THREAD_LOCAL.old_namespace)

    del PATH_INFO_THREAD_LOCAL.old_namespace
    del PATH_INFO_THREAD_LOCAL.path
def debug(message):
    """Logs 'message' only when the module-level DEBUG_INFO flag is on."""
    if DEBUG_INFO:
        logging.info(message)
def _validate_appcontext_list(contexts, strict=False):
"""Validates a list of application contexts."""
# Check rule order is enforced. If we allowed any order and '/a' was before
# '/aa', the '/aa' would never match.
for i in range(len(contexts)):
for j in range(i + 1, len(contexts)):
above = contexts[i]
below = contexts[j]
if below.get_slug().startswith(above.get_slug()):
raise Exception(
'Please reorder course entries to have '
'\'%s\' before \'%s\'.' % (
below.get_slug(), above.get_slug()))
# Make sure '/' is mapped.
if strict:
is_root_mapped = False
for context in contexts:
if context.slug == '/':
is_root_mapped = True
break
if not is_root_mapped:
raise Exception(
'Please add an entry with \'/\' as course URL prefix.')
def get_all_courses(rules_text=None):
    """Reads all course rewrite rule definitions from environment variable.

    Args:
        rules_text: optional explicit rules text; when None, the value of
            the GCB_COURSES_CONFIG property is used.

    Returns:
        A list of ApplicationContext, one per rule, in declaration order.
        Results are cached keyed on the normalized rules text.

    Raises:
        Exception: on a malformed rule, duplicate slug or namespace, invalid
            namespace name, or bad rule ordering.
    """
    # Normalize text definition: rules may be ',' or newline separated.
    if not rules_text:
        rules_text = GCB_COURSES_CONFIG.value
    rules_text = rules_text.replace(',', '\n')

    # Use cached value if exists.
    cached = ApplicationContext.ALL_COURSE_CONTEXTS_CACHE.get(rules_text)
    if cached:
        return cached

    # Compute the list of contexts.
    rules = rules_text.split('\n')
    slugs = {}
    namespaces = {}
    all_contexts = []
    for rule in rules:
        rule = rule.strip()
        # Skip blank lines and '#' comment lines.
        if not rule or rule.startswith('#'):
            continue
        parts = rule.split(':')

        # validate length: 'course:<slug>:<folder>[:<namespace>]'
        if len(parts) < 3:
            raise Exception('Expected rule definition of the form '
                            ' \'type:slug:folder[:ns]\', got %s: ' % rule)

        # validate type
        if parts[0] != SITE_TYPE_COURSE:
            raise Exception('Expected \'%s\', found: \'%s\'.'
                            % (SITE_TYPE_COURSE, parts[0]))
        site_type = parts[0]

        # validate slug: must be a bare path (no scheme, query, fragment...)
        slug = parts[1]
        slug_parts = urlparse.urlparse(slug)
        if slug != slug_parts[2]:
            raise Exception(
                'Bad rule: \'%s\'. '
                'Course URL prefix \'%s\' must be a simple URL fragment.' % (
                    rule, slug))
        if slug in slugs:
            raise Exception(
                'Bad rule: \'%s\'. '
                'Course URL prefix \'%s\' is already defined.' % (rule, slug))
        slugs[slug] = True

        # validate folder name
        if parts[2]:
            # Non-empty folder: serve course files read-only from local disk.
            # Note: create_fs is invoked later in this same iteration, so the
            # lambda's closure over 'folder' binds the current value.
            folder = parts[2]
            # pylint: disable-msg=g-long-lambda
            create_fs = lambda unused_ns: LocalReadOnlyFileSystem(
                logical_home_folder=folder)
        else:
            # Empty folder: course assets live in the datastore, inheriting
            # shared folders (css/img/lib/views/modules) from local disk.
            folder = '/'
            # pylint: disable-msg=g-long-lambda
            create_fs = lambda ns: DatastoreBackedFileSystem(
                ns=ns,
                logical_home_folder=appengine_config.BUNDLE_ROOT,
                inherits_from=LocalReadOnlyFileSystem(logical_home_folder='/'),
                inheritable_folders=GCB_INHERITABLE_FOLDER_NAMES)

        # validate or derive namespace: e.g. folder '/courses/a' derives
        # 'gcb-course-courses-a' when no explicit namespace is given.
        namespace = appengine_config.DEFAULT_NAMESPACE_NAME
        if len(parts) == 4:
            namespace = parts[3]
        else:
            if folder and folder != '/':
                namespace = '%s%s' % (GCB_BASE_COURSE_NAMESPACE,
                                      folder.replace('/', '-'))
        try:
            namespace_manager.validate_namespace(namespace)
        except Exception as e:
            raise Exception(
                'Error validating namespace "%s" in rule "%s"; %s.' % (
                    namespace, rule, e))

        if namespace in namespaces:
            raise Exception(
                'Bad rule \'%s\'. '
                'Namespace \'%s\' is already defined.' % (rule, namespace))
        namespaces[namespace] = True

        all_contexts.append(ApplicationContext(
            site_type, slug, folder, namespace,
            AbstractFileSystem(create_fs(namespace)),
            raw=rule))

    _validate_appcontext_list(all_contexts)

    # Cache result to avoid re-parsing over and over.
    ApplicationContext.ALL_COURSE_CONTEXTS_CACHE = {rules_text: all_contexts}

    return all_contexts
def get_course_for_current_request():
    """Chooses app_context that matches current request context path."""
    # Without a stored PATH_INFO there is nothing to match against.
    if not has_path_info():
        return None
    path = get_path_info()

    # Match a path to an app_context.
    # TODO(psimakov): linear search is unacceptable
    for app_context in get_all_courses():
        slug = app_context.get_slug()
        if slug == '/' or path == slug or path.startswith('%s/' % slug):
            return app_context

    debug('No mapping for: %s' % path)
    return None
def get_app_context_for_namespace(namespace):
    """Chooses the app_context that matches the current namespace."""
    # TODO(psimakov): linear search is unacceptable
    matches = (
        context for context in get_all_courses()
        if context.get_namespace_name() == namespace)
    app_context = next(matches, None)
    if app_context is None:
        debug('No app_context in namespace: %s' % namespace)
    return app_context
def path_join(base, path):
    """Joins 'base' and 'path' ('path' is interpreted as a relative path).

    This method is like os.path.join(), but 'path' is interpreted relatively.
    E.g., os.path.join('/a/b', '/c') yields '/c', but this function yields
    '/a/b/c'.

    Args:
        base: The base path.
        path: The path to append to base; this is treated as a relative path.

    Returns:
        The path obtained by appending 'path' to 'base'.
    """
    if os.path.isabs(path):
        # Strip any Windows drive letter and the leading separator so the
        # remainder is treated as relative to 'base'.
        path = os.path.splitdrive(path)[1][1:]
    return AbstractFileSystem.normpath(os.path.join(base, path))
def abspath(home_folder, filename):
    """Creates an absolute URL for a filename in a home folder."""
    relative = path_join(home_folder, filename)
    return path_join(appengine_config.BUNDLE_ROOT, relative)
def unprefix(path, prefix):
    """Remove the prefix from path. Append '/' if an empty string results."""
    if not path.startswith(prefix):
        raise Exception('Not prefixed.')

    # The root prefix leaves the path untouched.
    if prefix == '/':
        return path

    stripped = path[len(prefix):]
    return stripped if stripped else '/'
def set_static_resource_cache_control(handler):
    """Properly sets Cache-Control for a WebOb/webapp2 response."""
    cache_control = handler.response.cache_control
    cache_control.no_cache = None
    cache_control.public = DEFAULT_CACHE_CONTROL_PUBLIC
    cache_control.max_age = DEFAULT_CACHE_CONTROL_MAX_AGE
def set_default_response_headers(handler):
    """Sets the default no-cache headers for outgoing dynamic responses."""
    # This conditional is needed for the unit tests to pass, since their
    # handlers do not have a response attribute.
    if not handler.response:
        return
    # Only dynamic responses (utils.ApplicationHandler instances) get these
    # headers; static resources manage their own caching headers.
    if not isinstance(handler, utils.ApplicationHandler):
        return

    response = handler.response
    response.cache_control.no_cache = True
    response.cache_control.must_revalidate = True
    response.expires = DEFAULT_EXPIRY_DATE
    response.pragma = DEFAULT_PRAGMA
def make_zip_handler(zipfilename):
    """Creates a handler class that serves files from a zip file.

    Args:
        zipfilename: path of the zip archive to serve from.

    Returns:
        A zipserve.ZipHandler subclass bound to 'zipfilename'.
    """

    class CustomZipHandler(zipserve.ZipHandler):
        """Custom ZipHandler that properly controls caching."""

        def get(self, *args):
            """Handles GET request."""
            path = None

            # try to use path passed explicitly
            if args and len(args) >= 1:
                path = args[0]

            # use path_translated if no name was passed explicitly
            if not path:
                path = self.path_translated

            # we need to remove leading slash and all filenames inside zip
            # file must be relative
            if path and path.startswith('/') and len(path) > 1:
                path = path[1:]

            if not path:
                self.error(404)
                return

            ZIP_HANDLER_COUNT.inc()
            self.ServeFromZipFile(zipfilename, path)
            count_stats(self)

        def SetCachingHeaders(self):  # pylint: disable=C6409
            """Properly controls caching."""
            # Zip-served assets are static: allow public caching instead of
            # the no-cache headers used for dynamic responses.
            set_static_resource_cache_control(self)

    return CustomZipHandler
class CssComboZipHandler(zipserve.ZipHandler):
    """A handler which combines several files served from a zip file.

    The paths for the files within the zip file are presented
    as query parameters.
    """

    # Process-wide cache of opened zipfile.ZipFile objects keyed by file
    # name. An empty string is cached to remember files that failed to open
    # so we do not retry on every request.
    zipfile_cache = {}

    def get(self):
        raise NotImplementedError()

    def SetCachingHeaders(self):  # pylint: disable=C6409
        """Properly controls caching."""
        set_static_resource_cache_control(self)

    def serve_from_zip_file(self, zipfilename, static_file_handler):
        """Assemble the download by reading files from the zip file.

        Args:
            zipfilename: the zip file to read members from.
            static_file_handler: URL prefix used to absolutize CSS url()s.
        """
        zipfile_object = self.zipfile_cache.get(zipfilename)
        if zipfile_object is None:
            try:
                zipfile_object = zipfile.ZipFile(zipfilename)
            except (IOError, RuntimeError, zipfile.BadZipfile) as err:
                # If the zipfile can't be opened, that's probably a
                # configuration error in the app, so it's logged as an error.
                logging.error('Can\'t open zipfile %s: %s', zipfilename, err)
                zipfile_object = ''  # Special value to cache negative results.
            self.zipfile_cache[zipfilename] = zipfile_object
        if not zipfile_object:
            self.error(404)
            return

        # Use a single content type only when every requested member agrees
        # on it; otherwise fall back to text/plain.
        all_content_types = set()
        for name in self.request.GET:
            all_content_types.add(mimetypes.guess_type(name))
        if len(all_content_types) == 1:
            content_type = all_content_types.pop()[0]
        else:
            content_type = 'text/plain'
        self.response.headers['Content-Type'] = content_type
        self.SetCachingHeaders()

        for name in self.request.GET:
            try:
                content = zipfile_object.read(name)
                if content_type == 'text/css':
                    content = self._fix_css_paths(
                        name, content, static_file_handler)
                self.response.out.write(content)
            except (KeyError, RuntimeError):
                # A missing member is skipped, not fatal: the rest of the
                # combined download is still served.
                logging.error('Not found %s in %s', name, zipfilename)

    def _fix_css_paths(self, path, css, static_file_handler):
        """Transform relative url() settings in CSS to absolute.

        This is necessary because a url setting, e.g., url(foo.png), is
        interpreted as relative to the location of the CSS file. However
        in the case of a bundled CSS file, obtained from a URL such as
            http://place.com/cb/combo?a/b/c/foo.css
        the browser would believe that the location for foo.png was
            http://place.com/cb/foo.png
        and not
            http://place.com/cb/a/b/c/foo.png
        Thus we transform the url from
            url(foo.png)
        to
            url(/static_file_service/a/b/c/foo.png)

        Args:
            path: the path to the CSS file within the ZIP file
            css: the content of the CSS file
            static_file_handler: the base handler to serve the referenced file

        Returns:
            The CSS with all relative URIs rewritten to absolute URIs.
        """
        base = static_file_handler + posixpath.split(path)[0] + '/'
        css = css.decode('utf-8')
        # BUG FIX: the previous pattern r'url\(([^http|^https]\S+)\)' used
        # [^http|^https] as a character CLASS, i.e. "first char is none of
        # h, t, p, s, |" -- so relative URLs starting with those letters
        # (e.g. url(header.png)) were wrongly left untouched. A negative
        # lookahead skips only genuinely absolute http(s) URIs.
        css = re.sub(r'url\((?!https?://)(\S+)\)', r'url(%s\1)' % base, css)
        return css
def make_css_combo_zip_handler(zipfilename, static_file_handler):
    """Creates a CssComboZipHandler subclass bound to a specific zip file.

    Args:
        zipfilename: the zip file the returned handler serves from.
        static_file_handler: URL prefix forwarded to serve_from_zip_file.

    Returns:
        A CssComboZipHandler subclass (a class, not an instance).
    """

    class CustomCssComboZipHandler(CssComboZipHandler):

        def get(self):
            self.serve_from_zip_file(zipfilename, static_file_handler)

    return CustomCssComboZipHandler
class AssetHandler(webapp2.RequestHandler):
    """Handles serving of static resources located on the file system."""

    def __init__(self, app_context, filename):
        # NOTE(review): webapp2.RequestHandler.__init__ is not called here;
        # request/response/app_context are assigned by the dispatcher after
        # construction (see get_handler_for_course_type) -- confirm intended.
        self.app_context = app_context
        self.filename = filename

    def get_mime_type(self, filename, default='application/octet-stream'):
        """Returns the guessed MIME type for filename, or default."""
        guess = mimetypes.guess_type(filename)[0]
        if guess is None:
            return default
        return guess

    def _can_view(self, fs, stream):
        """Checks if current user can view stream."""
        # Draft assets are only visible to course admins.
        public = not fs.is_draft(stream)
        return public or Roles.is_course_admin(self.app_context)

    def get(self):
        """Handles GET requests: serves the file or errors with 404/403."""
        debug('File: %s' % self.filename)

        if not self.app_context.fs.isfile(self.filename):
            self.error(404)
            return

        stream = self.app_context.fs.open(self.filename)
        if not self._can_view(self.app_context.fs, stream):
            self.error(403)
            return

        set_static_resource_cache_control(self)
        self.response.headers['Content-Type'] = self.get_mime_type(
            self.filename)
        self.response.write(stream.read())
class ApplicationContext(object):
    """An application context for a request/response."""

    # Here we store a map of a text definition of the courses to be parsed, and
    # a fully validated array of ApplicationContext objects that they define.
    # This is cached in process and automatically recomputed when text
    # definition changes.
    ALL_COURSE_CONTEXTS_CACHE = {}

    @classmethod
    def get_namespace_name_for_request(cls):
        """Gets the name of the namespace to use for this request.

        (Examples of such namespaces are NDB and memcache.)

        Returns:
            The namespace for the current request, or the default namespace
            name if no course matches the current request context path.
        """
        course = get_course_for_current_request()
        if course:
            return course.namespace
        return appengine_config.DEFAULT_NAMESPACE_NAME

    @classmethod
    def after_create(cls, instance):
        """Override this method to manipulate freshly created instance."""
        pass

    def __init__(self, site_type, slug, homefolder, namespace, fs, raw=None):
        """Creates new application context.

        Args:
            site_type: Specifies the type of context. Must be 'course' for now.
            slug: A common context path prefix for all URLs in the context.
            homefolder: A folder with the assets belonging to this context.
            namespace: A name of a datastore namespace for use by this context.
            fs: A file system object to be used for accessing homefolder.
            raw: A raw representation of this course rule (course:/:/).
        """
        self.type = site_type
        self.slug = slug
        self.homefolder = homefolder
        self.namespace = namespace
        self._fs = fs
        self._raw = raw
        # Gives subclasses a hook to post-process the new instance.
        self.after_create(self)

    @ property
    def raw(self):
        # The raw course rule text this context was created from.
        return self._raw

    @ property
    def fs(self):
        # File system abstraction used to access this context's homefolder.
        return self._fs

    @property
    def now_available(self):
        # Truthy when the course settings mark the course as available.
        course = self.get_environ().get('course')
        return course and course.get('now_available')

    def get_title(self):
        """Returns the course title from the course settings."""
        return self.get_environ()['course']['title']

    def get_namespace_name(self):
        return self.namespace

    def get_home_folder(self):
        return self.homefolder

    def get_slug(self):
        return self.slug

    def get_config_filename(self):
        """Returns absolute location of a course configuration file."""
        filename = abspath(self.get_home_folder(), GCB_CONFIG_FILENAME)
        debug('Config file: %s' % filename)
        return filename

    def get_environ(self):
        """Returns the course settings dict for this context."""
        return Course.get_environ(self)

    def get_home(self):
        """Returns absolute location of a course folder."""
        path = abspath(self.get_home_folder(), '')
        return path

    def get_template_home(self):
        """Returns absolute location of a course template folder."""
        path = abspath(self.get_home_folder(), GCB_VIEWS_FOLDER_NAME)
        return path

    def get_data_home(self):
        """Returns absolute location of a course data folder."""
        path = abspath(self.get_home_folder(), GCB_DATA_FOLDER_NAME)
        return path

    def get_template_environ(self, locale, additional_dirs):
        """Create and configure jinja template evaluation environment.

        Args:
            locale: the locale to install gettext translations for.
            additional_dirs: optional extra template directories, searched
                after the course's own template folder.

        Returns:
            A configured jinja environment.
        """
        template_dir = self.get_template_home()
        dirs = [template_dir]
        if additional_dirs:
            dirs += additional_dirs
        jinja_environment = self.fs.get_jinja_environ(dirs)
        i18n.get_i18n().set_locale(locale)
        jinja_environment.install_gettext_translations(i18n)
        return jinja_environment
def _courses_config_validator(rules_text, errors):
    """Validates a textual definition of courses entries."""
    try:
        # Parsing and validating both raise on a bad definition; either
        # failure is reported as a single error string.
        app_contexts = get_all_courses(rules_text=rules_text)
        _validate_appcontext_list(app_contexts)
    except Exception as e:  # pylint: disable-msg=broad-except
        errors.append(str(e))
def validate_new_course_entry_attributes(name, title, admin_email, errors):
    """Validates new course attributes, appending one message per failure.

    Args:
        name: unique name for the course; letters/digits/underscores.
        title: human-readable course title.
        admin_email: email address of the course administrator.
        errors: list that failure messages are appended to.
    """
    if not name or len(name) < 3:
        errors.append(
            'The unique name associated with the course must be at least '
            'three characters long.')
    # NOTE(review): re.IGNORECASE also accepts uppercase letters, while the
    # message claims lowercase only -- confirm which is intended before
    # tightening.
    if not re.match('[_a-z0-9]+$', name, re.IGNORECASE):
        errors.append(
            'The unique name associated with the course should contain only '
            'lowercase letters, numbers, or underscores.')
    if not title or len(title) < 3:
        errors.append('The course title is too short.')
    if not admin_email or '@' not in admin_email:
        errors.append('Please enter a valid email address.')
@db.transactional()
def _add_new_course_entry_to_persistent_configuration(raw):
    """Adds new raw course entry definition to the datastore settings.

    This loads all current datastore course entries and adds a new one. It
    also find the best place to add the new entry at the further down the list
    the better, because entries are applied in the order of declaration.

    Args:
        raw: The course entry rule: 'course:/foo::ns_foo'.

    Returns:
        True if added, False if not. False almost always means a duplicate rule.
    """
    # Get all current entries from a datastore.
    entity = ConfigPropertyEntity.get_by_key_name(GCB_COURSES_CONFIG.name)
    if not entity:
        entity = ConfigPropertyEntity(key_name=GCB_COURSES_CONFIG.name)
        entity.is_draft = False
    if not entity.value:
        entity.value = GCB_COURSES_CONFIG.value
    lines = entity.value.splitlines()

    # Add new entry to the rest of the entries. Since entries are matched
    # in the order of declaration, try to find insertion point further down.
    # Walk insertion points from the end toward the front, keeping the first
    # position at which the resulting rule list validates.
    final_lines_text = None
    for index in reversed(range(0, len(lines) + 1)):
        # Create new rule list putting new item at index position.
        new_lines = lines[:]
        new_lines.insert(index, raw)
        new_lines_text = '\n'.join(new_lines)

        # Validate the rule list definition.
        errors = []
        _courses_config_validator(new_lines_text, errors)
        if not errors:
            final_lines_text = new_lines_text
            break

    # Save updated course entries.
    if final_lines_text:
        entity.value = final_lines_text
        entity.put()
        return True
    return False
def add_new_course_entry(unique_name, title, admin_email, errors):
    """Validates course attributes and adds the course.

    Args:
        unique_name: unique name used for both the course slug and namespace.
        title: human-readable course title.
        admin_email: email of the course administrator.
        errors: list that error messages are appended to.

    Returns:
        The raw course entry string on success; None otherwise.
    """
    # Validate.
    validate_new_course_entry_attributes(
        unique_name, title, admin_email, errors)
    if errors:
        return

    # Create new entry and check it is valid.
    raw = 'course:/%s::ns_%s' % (unique_name, unique_name)
    try:
        get_all_courses(rules_text=raw)
    except Exception as e:  # pylint: disable-msg=broad-except
        errors.append('Failed to add entry: %s.\n%s' % (raw, e))
    if errors:
        return

    # Add new entry to persistence.
    if not _add_new_course_entry_to_persistent_configuration(raw):
        errors.append(
            'Unable to add new entry \'%s\'. Entry with the '
            'same name \'%s\' already exists.' % (raw, unique_name))
        return
    return raw
# Config property holding the newline-separated course entry rules; its
# value is validated by _courses_config_validator before being accepted.
GCB_COURSES_CONFIG = ConfigProperty(
    'gcb_courses_config', str,
    safe_dom.NodeList().append(
        safe_dom.Element('p').add_text("""
A newline separated list of course entries. Each course entry has
four parts, separated by colons (':'). The four parts are:""")
    ).append(
        safe_dom.Element('ol').add_child(
            safe_dom.Element('li').add_text(
                'The word \'course\', which is a required element.')
        ).add_child(
            safe_dom.Element('li').add_text("""
A unique course URL prefix. Examples could be '/cs101' or '/art'.
Default: '/'""")
        ).add_child(
            safe_dom.Element('li').add_text("""
A file system location of course asset files. If location is left empty,
the course assets are stored in a datastore instead of the file system. A course
with assets in a datastore can be edited online. A course with assets on file
system must be re-deployed to Google App Engine manually.""")
        ).add_child(
            safe_dom.Element('li').add_text("""
A course datastore namespace where course data is stored in App Engine.
Note: this value cannot be changed after the course is created."""))
    ).append(
        safe_dom.Text(
            'For example, consider the following two course entries:')
    ).append(safe_dom.Element('br')).append(
        safe_dom.Element('blockquote').add_text(
            'course:/cs101::/ns_cs101'
        ).add_child(
            safe_dom.Element('br')
        ).add_text('course:/:/')
    ).append(
        safe_dom.Element('p').add_text("""
Assuming you are hosting Course Builder on http:/www.example.com, the first
entry defines a course on a http://www.example.com/cs101 and both its assets
and student data are stored in the datastore namespace 'ns_cs101'. The second
entry defines a course hosted on http://www.example.com/, with its assets
stored in the '/' folder of the installation and its data stored in the default
empty datastore namespace.""")
    ).append(
        safe_dom.Element('p').add_text("""
A line that starts with '#' is ignored. Course entries are applied in the
order they are defined.""")
    ), 'course:/:/:', multiline=True, validator=_courses_config_validator)
class ApplicationRequestHandler(webapp2.RequestHandler):
    """Handles dispatching of all URL's to proper handlers."""

    # WARNING! never set this value to True, unless for the production load
    # tests; setting this value to True will allow any anonymous third party to
    # act as a Course Builder superuser
    CAN_IMPERSONATE = False

    # the name of the impersonation header
    IMPERSONATE_HEADER_NAME = 'Gcb-Impersonate'

    def dispatch(self):
        """Routes through impersonation only when explicitly enabled."""
        if self.CAN_IMPERSONATE:
            self.impersonate_and_dispatch()
        else:
            super(ApplicationRequestHandler, self).dispatch()

    def impersonate_and_dispatch(self):
        """Dispatches request with user impersonation."""
        impersonate_info = self.request.headers.get(
            self.IMPERSONATE_HEADER_NAME)
        if not impersonate_info:
            # No impersonation header; dispatch normally.
            super(ApplicationRequestHandler, self).dispatch()
            return

        # Header value is a JSON object carrying 'email' and 'user_id'.
        impersonate_info = transforms.loads(impersonate_info)
        email = impersonate_info.get('email')
        user_id = impersonate_info.get('user_id')

        def get_impersonated_user():
            """A method that returns impersonated user."""
            try:
                return users.User(email=email, _user_id=user_id)
            except users.UserNotFoundError:
                return None

        # Monkey-patch users.get_current_user for the duration of this
        # dispatch only; the finally clause always restores the original.
        old_get_current_user = users.get_current_user
        try:
            logging.info('Impersonating %s.', email)
            users.get_current_user = get_impersonated_user
            super(ApplicationRequestHandler, self).dispatch()
            return
        finally:
            users.get_current_user = old_get_current_user

    @classmethod
    def bind_to(cls, urls, urls_map):
        """Recursively builds a map from a list of (URL, Handler) tuples."""
        for url in urls:
            path_prefix = url[0]
            handler = url[1]
            urls_map[path_prefix] = handler

            # add child handlers
            if hasattr(handler, 'get_child_routes'):
                cls.bind_to(handler.get_child_routes(), urls_map)

    @classmethod
    def bind(cls, urls):
        """Rebuilds the class-level URL routing table from scratch."""
        urls_map = {}
        cls.bind_to(urls, urls_map)
        cls.urls_map = urls_map

    def get_handler(self):
        """Finds a course suitable for handling this request."""
        course = get_course_for_current_request()
        if not course:
            return None

        path = get_path_info()
        if not path:
            return None

        # Strip the course slug before matching against the URL map.
        return self.get_handler_for_course_type(
            course, unprefix(path, course.get_slug()))

    def can_handle_course_requests(self, context):
        """Reject all, but authors requests, to an unpublished course."""
        return context.now_available or Roles.is_course_admin(context)

    def _get_handler_factory_for_path(self, path):
        """Picks a handler to handle the path."""

        # Checks if path maps in its entirety.
        if path in ApplicationRequestHandler.urls_map:
            return ApplicationRequestHandler.urls_map[path]

        # Check if partial path maps. For now, let only zipserve.ZipHandler
        # handle partial matches. We want to find the longest possible match.
        parts = path.split('/')
        candidate = None
        partial_path = ''
        for part in parts:
            if part:
                partial_path += '/' + part
                if partial_path in ApplicationRequestHandler.urls_map:
                    handler = ApplicationRequestHandler.urls_map[partial_path]
                    if (
                            isinstance(handler, zipserve.ZipHandler) or
                            issubclass(handler, zipserve.ZipHandler)):
                        # Keep overwriting: the last (longest) match wins.
                        candidate = handler
        return candidate

    def get_handler_for_course_type(self, context, path):
        """Gets the right handler for the given context and path."""
        if not self.can_handle_course_requests(context):
            return None

        # TODO(psimakov): Add docs (including args and returns).
        norm_path = os.path.normpath(path)

        # Handle static assets here.
        if norm_path.startswith(GCB_ASSETS_FOLDER_NAME):
            abs_file = abspath(context.get_home_folder(), norm_path)
            handler = AssetHandler(self, abs_file)
            handler.request = self.request
            handler.response = self.response
            # Overwrites the app_context passed to AssetHandler.__init__.
            handler.app_context = context

            debug('Course asset: %s' % abs_file)
            STATIC_HANDLER_COUNT.inc()
            return handler

        # Handle all dynamic handlers here.
        handler_factory = self._get_handler_factory_for_path(path)
        if handler_factory:
            handler = handler_factory()
            handler.app_context = context
            handler.request = self.request
            handler.response = self.response

            # This variable represents the path after the namespace prefix is
            # removed. The full path is still stored in self.request.path. For
            # example, if self.request.path is '/new_course/foo/bar/baz/...',
            # the path_translated would be '/foo/bar/baz/...'.
            handler.path_translated = path

            debug('Handler: %s > %s' % (path, handler.__class__.__name__))
            DYNAMIC_HANDLER_COUNT.inc()
            return handler

        NO_HANDLER_COUNT.inc()
        return None

    def get(self, path):
        """Handles GET by delegating to the resolved course handler."""
        try:
            set_path_info(path)
            handler = self.get_handler()
            if not handler:
                self.error(404)
            else:
                set_default_response_headers(handler)
                handler.get()
        finally:
            count_stats(self)
            unset_path_info()

    def post(self, path):
        """Handles POST by delegating to the resolved course handler."""
        try:
            set_path_info(path)
            handler = self.get_handler()
            if not handler:
                self.error(404)
            else:
                set_default_response_headers(handler)
                handler.post()
        finally:
            count_stats(self)
            unset_path_info()

    def put(self, path):
        """Handles PUT by delegating to the resolved course handler."""
        try:
            set_path_info(path)
            handler = self.get_handler()
            if not handler:
                self.error(404)
            else:
                set_default_response_headers(handler)
                handler.put()
        finally:
            count_stats(self)
            unset_path_info()

    def delete(self, path):
        """Handles DELETE by delegating to the resolved course handler."""
        try:
            set_path_info(path)
            handler = self.get_handler()
            if not handler:
                self.error(404)
            else:
                set_default_response_headers(handler)
                handler.delete()
        finally:
            count_stats(self)
            unset_path_info()
def assert_mapped(src, dest):
    """Asserts URL src maps to the course with slug dest (None = no course)."""
    try:
        set_path_info(src)
        course = get_course_for_current_request()
        if dest:
            assert course.get_slug() == dest
        else:
            assert course is None
    finally:
        # Always clear the per-request path state for the next assertion.
        unset_path_info()
def assert_handled(src, target_handler):
    """Asserts that src dispatches to target_handler; returns the handler."""
    try:
        set_path_info(src)
        dispatcher = ApplicationRequestHandler()
        # For unit tests to work we want all requests to be handled regardless
        # of course.now_available flag value. Here we patch for that.
        dispatcher.can_handle_course_requests = lambda context: True
        resolved = dispatcher.get_handler()
        if resolved is None and target_handler is None:
            return None
        assert isinstance(resolved, target_handler)
        return resolved
    finally:
        unset_path_info()
def assert_fails(func):
    """Asserts that calling func raises; raises itself when func succeeds."""
    try:
        func()
    except Exception:  # pylint: disable=W0703
        # Any exception is the expected outcome.
        return
    raise Exception('Function \'%s\' was expected to fail.' % func)
def setup_courses(course_config):
    """Helper method that allows a test to setup courses on the fly."""
    # Overrides the courses config property for the current process only.
    Registry.test_overrides[GCB_COURSES_CONFIG.name] = course_config
def reset_courses():
    """Cleanup method to complement setup_courses()."""
    # Restores the property's declared default rather than deleting the
    # override.
    Registry.test_overrides[
        GCB_COURSES_CONFIG.name] = GCB_COURSES_CONFIG.default_value
def test_unprefix():
    """Checks unprefix() on root, nested and file-name paths."""
    cases = (
        ('/', '/', '/'),
        ('/a/b/c', '/a/b', '/c'),
        ('/a/b/index.html', '/a/b', '/index.html'),
        ('/a/b', '/a/b', '/'),
    )
    for path, prefix, expected in cases:
        assert unprefix(path, prefix) == expected
def test_rule_validations():
    """Test rules validator."""
    courses = get_all_courses(rules_text='course:/:/')
    assert 1 == len(courses)

    # Check comments.
    setup_courses('course:/a:/nsa, course:/b:/nsb')
    assert 2 == len(get_all_courses())
    setup_courses('course:/a:/nsa, # course:/a:/nsb')
    assert 1 == len(get_all_courses())

    # Check slug collisions are not allowed.
    setup_courses('course:/a:/nsa, course:/a:/nsb')
    assert_fails(get_all_courses)

    # Check namespace collisions are not allowed.
    setup_courses('course:/a:/nsx, course:/b:/nsx')
    assert_fails(get_all_courses)

    # Check rule order is enforced. If we allowed any order and '/a' was before
    # '/aa', the '/aa' would never match.
    setup_courses('course:/a:/nsa, course:/aa:/nsaa, course:/aaa:/nsaaa')
    assert_fails(get_all_courses)

    # Check namespace names.
    setup_courses('course:/a::/nsx')
    assert_fails(get_all_courses)

    # Check slug validity.
    # NOTE(review): a slug containing a space is accepted here while '?' is
    # rejected below -- confirm the space case is intended.
    setup_courses('course:/a /b::nsa')
    get_all_courses()
    setup_courses('course:/a?/b::nsa')
    assert_fails(get_all_courses)

    # Cleanup.
    reset_courses()
def test_rule_definitions():
    """Test various rewrite rule definitions."""

    # Check that the default site is created when no rules are specified.
    assert len(get_all_courses()) == 1

    # Test one rule parsing.
    setup_courses('course:/google/pswg:/sites/pswg')
    rules = get_all_courses()
    assert len(get_all_courses()) == 1
    rule = rules[0]
    assert rule.get_slug() == '/google/pswg'
    assert rule.get_home_folder() == '/sites/pswg'

    # Test two rule parsing.
    setup_courses('course:/a/b:/c/d, course:/e/f:/g/h')
    assert len(get_all_courses()) == 2

    # Test that two of the same slugs are not allowed.
    setup_courses('foo:/a/b:/c/d, bar:/a/b:/c/d')
    assert_fails(get_all_courses)

    # Test that only 'course' is supported.
    setup_courses('foo:/a/b:/c/d, bar:/e/f:/g/h')
    assert_fails(get_all_courses)

    # Cleanup.
    reset_courses()

    # Test namespaces: a course with no explicit namespace derives one from
    # its home folder ('/c/d' -> 'gcb-course-c-d').
    set_path_info('/')
    try:
        setup_courses('course:/:/c/d')
        assert ApplicationContext.get_namespace_name_for_request() == (
            'gcb-course-c-d')
    finally:
        unset_path_info()

    # Cleanup.
    reset_courses()
def test_url_to_rule_mapping():
    """Tests mapping of a URL to a rule."""

    # default mapping
    assert_mapped('/favicon.ico', '/')
    assert_mapped('/assets/img/foo.png', '/')

    # explicit mapping
    setup_courses('course:/a/b:/c/d, course:/e/f:/g/h')

    assert_mapped('/a/b', '/a/b')
    assert_mapped('/a/b/', '/a/b')
    assert_mapped('/a/b/c', '/a/b')
    assert_mapped('/a/b/c', '/a/b')

    assert_mapped('/e/f', '/e/f')
    assert_mapped('/e/f/assets', '/e/f')
    assert_mapped('/e/f/views', '/e/f')

    # Paths without a leading slash or outside any slug match no course.
    assert_mapped('e/f', None)
    assert_mapped('foo', None)

    # Cleanup.
    reset_courses()
def test_url_to_handler_mapping_for_course_type():
    """Tests mapping of a URL to a handler for course type."""

    # setup rules
    setup_courses('course:/a/b:/c/d, course:/e/f:/g/h')

    # setup helper classes; only the ZipHandler subclasses are eligible for
    # partial-path matching in _get_handler_factory_for_path().
    class FakeHandler0(object):
        def __init__(self):
            self.app_context = None

    class FakeHandler1(object):
        def __init__(self):
            self.app_context = None

    class FakeHandler2(zipserve.ZipHandler):
        def __init__(self):
            self.app_context = None

    class FakeHandler3(zipserve.ZipHandler):
        def __init__(self):
            self.app_context = None

    class FakeHandler4(zipserve.ZipHandler):
        def __init__(self):
            self.app_context = None

    # Setup handler.
    handler0 = FakeHandler0
    handler1 = FakeHandler1
    handler2 = FakeHandler2
    urls = [('/', handler0), ('/foo', handler1), ('/bar', handler2)]
    ApplicationRequestHandler.bind(urls)

    # Test proper handler mappings.
    assert_handled('/a/b', FakeHandler0)
    assert_handled('/a/b/', FakeHandler0)
    assert_handled('/a/b/foo', FakeHandler1)
    assert_handled('/a/b/bar', FakeHandler2)

    # Test partial path match.
    assert_handled('/a/b/foo/bee', None)
    assert_handled('/a/b/bar/bee', FakeHandler2)

    # Test assets mapping.
    handler = assert_handled('/a/b/assets/img/foo.png', AssetHandler)
    assert AbstractFileSystem.normpath(
        handler.app_context.get_template_home()).endswith(
            AbstractFileSystem.normpath('/c/d/views'))

    # This is allowed as we don't go out of /assets/...
    handler = assert_handled(
        '/a/b/assets/foo/../models/models.py', AssetHandler)
    assert AbstractFileSystem.normpath(handler.filename).endswith(
        AbstractFileSystem.normpath('/c/d/assets/models/models.py'))

    # This is not allowed as we do go out of /assets/...
    assert_handled('/a/b/assets/foo/../../models/models.py', None)

    # Test negative cases
    assert_handled('/foo', None)
    assert_handled('/baz', None)

    # Site 'views' and 'data' are not accessible
    assert_handled('/a/b/view/base.html', None)
    assert_handled('/a/b/data/units.csv', None)

    # Default mapping
    reset_courses()
    handler3 = FakeHandler3
    handler4 = FakeHandler4
    urls = [
        ('/', handler0),
        ('/foo', handler1),
        ('/bar', handler2),
        ('/zip', handler3),
        ('/zip/a/b', handler4)]
    ApplicationRequestHandler.bind(urls)

    # Positive cases
    assert_handled('/', FakeHandler0)
    assert_handled('/foo', FakeHandler1)
    assert_handled('/bar', FakeHandler2)
    handler = assert_handled('/assets/js/main.js', AssetHandler)
    assert AbstractFileSystem.normpath(
        handler.app_context.get_template_home()).endswith(
            AbstractFileSystem.normpath('/views'))

    # Partial URL matching cases test that the most specific match is found.
    assert_handled('/zip', FakeHandler3)
    assert_handled('/zip/a', FakeHandler3)
    assert_handled('/zip/a/b', FakeHandler4)
    assert_handled('/zip/a/b/c', FakeHandler4)

    # Negative cases
    assert_handled('/baz', None)
    assert_handled('/favicon.ico', None)
    assert_handled('/e/f/index.html', None)
    assert_handled('/foo/foo.css', None)

    # Clean up.
    ApplicationRequestHandler.bind([])
def test_namespace_collisions_are_detected():
    """Test that namespace collisions are detected and are not allowed."""
    setup_courses('foo:/a/b:/c/d, bar:/a/b:/c-d')
    assert_fails(get_all_courses)
    reset_courses()
def test_path_construction():
    """Checks that path_join() works correctly."""
    norm = os.path.normpath

    # Test cases common to all platforms: trailing/leading slashes must not
    # change the joined result.
    for base, tail in (
            ('/a/b', '/c'), ('/a/b/', '/c'), ('/a/b', 'c'), ('/a/b/', 'c')):
        assert norm(path_join(base, tail)) == norm('/a/b/c')

    # Windows-specific test cases: a drive-letter tail must not replace
    # the base path.
    drive, unused_path = os.path.splitdrive('c:\\windows')
    if drive:
        for base in ('/a/b', '/a/b/'):
            assert norm(path_join(base, 'c:/d')) == norm('/a/b/d')
def run_all_unit_tests():
    """Runs this module's self-tests; impersonation must be disabled."""
    assert not ApplicationRequestHandler.CAN_IMPERSONATE
    test_namespace_collisions_are_detected()
    test_unprefix()
    test_rule_definitions()
    test_url_to_rule_mapping()
    test_url_to_handler_mapping_for_course_type()
    test_path_construction()
    test_rule_validations()
if __name__ == '__main__':
    # Run the module's self-tests when executed directly. DEBUG_INFO
    # presumably enables debug() output -- defined earlier in this file.
    DEBUG_INFO = True
    run_all_unit_tests()
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for generating various frontend pages."""
__author__ = 'Saifu Angto (saifu@google.com)'
import datetime
import urllib
import urlparse
from models import courses
from models import models
from models import student_work
from models import transforms
from models.counters import PerfCounter
from models.models import Student
from models.models import StudentProfileDAO
from models.review import ReviewUtils
from models.roles import Roles
from models.student_work import StudentWorkUtils
from modules.review import domain
from tools import verify
from utils import BaseHandler
from utils import BaseRESTHandler
from utils import CAN_PERSIST_ACTIVITY_EVENTS
from utils import CAN_PERSIST_PAGE_EVENTS
from utils import CAN_PERSIST_TAG_EVENTS
from utils import HUMAN_READABLE_DATETIME_FORMAT
from utils import TRANSIENT_STUDENT
from utils import XsrfTokenManager
from google.appengine.ext import db
# Performance counters for event ingestion by the handlers in this module.
COURSE_EVENTS_RECEIVED = PerfCounter(
    'gcb-course-events-received',
    'A number of activity/assessment events received by the server.')

COURSE_EVENTS_RECORDED = PerfCounter(
    'gcb-course-events-recorded',
    'A number of activity/assessment events recorded in a datastore.')

# Page-type identifiers stored in template_value['page_type'].
UNIT_PAGE_TYPE = 'unit'
ACTIVITY_PAGE_TYPE = 'activity'
def get_first_lesson(handler, unit_id):
    """Returns the first lesson in the unit, or None if the unit has none."""
    lessons = handler.get_course().get_lessons(unit_id)
    if not lessons:
        return None
    return lessons[0]
def extract_unit_and_lesson(handler):
    """Loads the unit and lesson named in the request, with fallbacks.

    Returns:
        A (unit, lesson) pair; (None, None) when no unit can be resolved,
        and lesson may be None when the unit has no lessons.
    """
    course = handler.get_course()

    # Use the explicitly requested unit, else the first unit of type 'U'.
    unit = course.find_unit_by_id(handler.request.get('unit'))
    if not unit:
        for candidate in course.get_units():
            if verify.UNIT_TYPE_UNIT == candidate.type:
                unit = candidate
                break
    if not unit:
        return None, None

    # Use the explicitly requested lesson, else the unit's first lesson.
    lesson_id = handler.request.get('lesson')
    if lesson_id:
        lesson = course.find_lesson_by_id(unit, lesson_id)
    else:
        lesson = get_first_lesson(handler, unit.unit_id)
    return unit, lesson
def get_unit_and_lesson_id_from_url(handler, url):
    """Extracts unit and lesson ids from a URL's query string.

    Returns:
        A (unit_id, lesson_id) pair; (None, None) when the URL names no unit.
        A missing lesson falls back to the unit's first lesson.
    """
    query = urlparse.parse_qs(urlparse.urlparse(url).query)
    if 'unit' not in query:
        return None, None
    unit_id = query['unit'][0]
    if 'lesson' in query:
        return unit_id, query['lesson'][0]
    return unit_id, get_first_lesson(handler, unit_id).lesson_id
def create_readonly_assessment_params(content, answers):
    """Creates parameters for a readonly assessment in the view templates."""
    assessment = content['assessment']
    return {
        'preamble': assessment['preamble'],
        'questionsList': assessment['questionsList'],
        'answers': answers,
    }
class CourseHandler(BaseHandler):
    """Handler for generating course page."""

    @classmethod
    def get_child_routes(cls):
        """Add child handlers for REST."""
        return [('/rest/events', EventsRESTHandler)]

    def augment_assessment_units(self, student):
        """Adds additional fields to assessment units."""
        course = self.get_course()
        rp = course.get_reviews_processor()
        for unit in self.template_value['units']:
            # Only assessment units (type 'A') get the extra fields.
            if unit.type == 'A':
                unit.needs_human_grader = course.needs_human_grader(unit)
                if unit.needs_human_grader:
                    review_steps = rp.get_review_steps_by(
                        unit.unit_id, student.get_key())
                    review_min_count = unit.workflow.get_review_min_count()

                    unit.matcher = unit.workflow.get_matcher()
                    unit.review_progress = ReviewUtils.get_review_progress(
                        review_steps, review_min_count,
                        course.get_progress_tracker()
                    )

                    unit.is_submitted = rp.does_submission_exist(
                        unit.unit_id, student.get_key())

    def get(self):
        """Handles GET requests."""
        user = self.personalize_page_and_get_user()
        if user is None:
            student = TRANSIENT_STUDENT
        else:
            student = Student.get_enrolled_student_by_email(user.email())
            profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
            self.template_value['has_global_profile'] = profile is not None
            # A signed-in but unenrolled user browses as a transient student.
            if not student:
                student = TRANSIENT_STUDENT

        # Non-browsable courses send anonymous visitors to the preview page.
        if (student.is_transient and
                not self.app_context.get_environ()['course']['browsable']):
            self.redirect('/preview')
            return

        self.template_value['units'] = self.get_units()
        self.template_value['show_registration_page'] = True

        if student and not student.is_transient:
            self.augment_assessment_units(student)
        elif user:
            # Skip the registration page for users who already have a global
            # profile, unless the course asks extra registration questions.
            profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
            additional_registration_fields = self.app_context.get_environ(
                )['reg_form']['additional_registration_fields']
            if profile is not None and not additional_registration_fields:
                self.template_value['show_registration_page'] = False
                self.template_value['register_xsrf_token'] = (
                    XsrfTokenManager.create_xsrf_token('register-post'))

        self.template_value['transient_student'] = student.is_transient
        self.template_value['progress'] = (
            self.get_progress_tracker().get_unit_progress(student))

        course = self.app_context.get_environ()['course']
        self.template_value['video_exists'] = bool(
            'main_video' in course and
            'url' in course['main_video'] and
            course['main_video']['url'])
        self.template_value['image_exists'] = bool(
            'main_image' in course and
            'url' in course['main_image'] and
            course['main_image']['url'])

        self.template_value['is_progress_recorded'] = (
            CAN_PERSIST_ACTIVITY_EVENTS.value)
        self.template_value['navbar'] = {'course': True}
        self.render('course.html')
class UnitHandler(BaseHandler):
    """Handler for generating unit page."""

    def _show_activity_on_separate_page(self, lesson):
        # An activity gets its own page only when it exists AND is listed
        # in the navigation; otherwise it is inlined in the unit page.
        return lesson.activity and lesson.activity_listed

    def get(self):
        """Renders a unit page (unit.html) with prev/next navigation.

        Supports transient (unenrolled) students.  Redirects to the home
        page when the unit is unavailable and the user is not an admin.
        """
        student = self.personalize_page_and_get_enrolled(
            supports_transient_student=True)
        if not student:
            return

        # Extract incoming args
        unit, lesson = extract_unit_and_lesson(self)
        unit_id = unit.unit_id

        # If the unit is not currently available, and the user is not an admin,
        # redirect to the main page.
        if (not unit.now_available and
            not Roles.is_course_admin(self.app_context)):
            self.redirect('/')
            return

        # Set template values for nav bar and page type.
        self.template_value['navbar'] = {'course': True}
        self.template_value['page_type'] = UNIT_PAGE_TYPE

        lessons = self.get_lessons(unit_id)

        # Set template values for a unit and its lesson entities
        self.template_value['unit'] = unit
        self.template_value['unit_id'] = unit_id
        self.template_value['lesson'] = lesson
        if lesson:
            self.template_value['objectives'] = lesson.objectives

        self.template_value['lessons'] = lessons

        # If this unit contains no lessons, return.
        if not lesson:
            self.render('unit.html')
            return

        lesson_id = lesson.lesson_id
        self.template_value['lesson_id'] = lesson_id

        # These attributes are needed in order to render questions (with
        # progress indicators) in the lesson body. They are used by the
        # custom component renderers in the assessment_tags module.
        self.student = student
        self.unit_id = unit_id
        self.lesson_id = lesson_id
        self.lesson_is_scored = lesson.scored

        index = lesson.index - 1  # indexes are 1-based

        # Format back button.  Points at the previous lesson's activity page
        # when that activity is shown separately, else at the lesson itself.
        if index == 0:
            self.template_value['back_button_url'] = ''
        else:
            prev_lesson = lessons[index - 1]
            if self._show_activity_on_separate_page(prev_lesson):
                self.template_value['back_button_url'] = (
                    'activity?unit=%s&lesson=%s' % (
                        unit_id, prev_lesson.lesson_id))
            else:
                self.template_value['back_button_url'] = (
                    'unit?unit=%s&lesson=%s' % (unit_id, prev_lesson.lesson_id))

        # Format next button.  The current lesson's separate activity page
        # (if any) comes before the next lesson.
        if self._show_activity_on_separate_page(lesson):
            self.template_value['next_button_url'] = (
                'activity?unit=%s&lesson=%s' % (
                    unit_id, lesson_id))
        else:
            if index >= len(lessons) - 1:
                self.template_value['next_button_url'] = ''
            else:
                next_lesson = lessons[index + 1]
                self.template_value['next_button_url'] = (
                    'unit?unit=%s&lesson=%s' % (
                        unit_id, next_lesson.lesson_id))

        # Set template values for student progress
        self.template_value['is_progress_recorded'] = (
            CAN_PERSIST_ACTIVITY_EVENTS.value and not student.is_transient)
        if CAN_PERSIST_ACTIVITY_EVENTS.value:
            self.template_value['lesson_progress'] = (
                self.get_progress_tracker().get_lesson_progress(
                    student, unit_id))

            # Mark this page as accessed. This is done after setting the
            # student progress template value, so that the mark only shows up
            # after the student visits the page for the first time.
            self.get_course().get_progress_tracker().put_html_accessed(
                student, unit_id, lesson_id)

        self.render('unit.html')
class ActivityHandler(BaseHandler):
    """Handler for generating activity page and receiving submissions."""

    def get(self):
        """Renders a lesson's standalone activity page (activity.html).

        Mirrors UnitHandler.get: supports transient students, hides
        unavailable units from non-admins, and records an
        'activity accessed' progress mark when event persistence is on.
        """
        student = self.personalize_page_and_get_enrolled(
            supports_transient_student=True)
        if not student:
            return

        # Extract incoming args
        unit, lesson = extract_unit_and_lesson(self)
        unit_id = unit.unit_id

        # If the unit is not currently available, and the user is not an admin,
        # redirect to the main page.
        if (not unit.now_available and
            not Roles.is_course_admin(self.app_context)):
            self.redirect('/')
            return

        # Set template values for nav bar and page type.
        self.template_value['navbar'] = {'course': True}
        self.template_value['page_type'] = ACTIVITY_PAGE_TYPE

        lessons = self.get_lessons(unit_id)

        # Set template values for a unit and its lesson entities
        self.template_value['unit'] = unit
        self.template_value['unit_id'] = unit_id
        self.template_value['lesson'] = lesson
        self.template_value['lessons'] = lessons

        # If this unit contains no lessons, return.
        if not lesson:
            self.render('activity.html')
            return

        lesson_id = lesson.lesson_id
        self.template_value['lesson_id'] = lesson_id
        # The activity content itself is served as a JS file.
        self.template_value['activity_script_src'] = (
            self.get_course().get_activity_filename(unit_id, lesson_id))

        index = lesson.index - 1  # indexes are 1-based

        # Format back button.  Always returns to the owning lesson page.
        self.template_value['back_button_url'] = (
            'unit?unit=%s&lesson=%s' % (unit_id, lesson_id))

        # Format next button.
        if index >= len(lessons) - 1:
            self.template_value['next_button_url'] = ''
        else:
            next_lesson = lessons[index + 1]
            self.template_value['next_button_url'] = (
                'unit?unit=%s&lesson=%s' % (
                    unit_id, next_lesson.lesson_id))

        # Set template values for student progress
        self.template_value['is_progress_recorded'] = (
            CAN_PERSIST_ACTIVITY_EVENTS.value and not student.is_transient)
        if CAN_PERSIST_ACTIVITY_EVENTS.value:
            self.template_value['lesson_progress'] = (
                self.get_progress_tracker().get_lesson_progress(
                    student, unit_id))

            # Mark this page as accessed. This is done after setting the
            # student progress template value, so that the mark only shows up
            # after the student visits the page for the first time.
            self.get_course().get_progress_tracker().put_activity_accessed(
                student, unit_id, lesson_id)

        # Activity answers are posted back as events; token guards the POST.
        self.template_value['event_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('event-post'))

        self.render('activity.html')
class AssessmentHandler(BaseHandler):
    """Handler for generating assessment page."""

    def get(self):
        """Renders an assessment page in active or readonly mode.

        Dispatches on the assessment model version (1.4 JS-based vs 1.5
        HTML-based) to version-specific configure_* helpers.  Readonly
        mode is used after the due date has passed or, for human-graded
        assessments, once a submission exists.
        """
        student = self.personalize_page_and_get_enrolled(
            supports_transient_student=True)
        if not student:
            return

        # Extract incoming args, binding to self if needed.
        self.unit_id = self.request.get('name')
        course = self.get_course()
        unit = course.find_unit_by_id(self.unit_id)
        if not unit:
            self.error(404)
            return

        model_version = course.get_assessment_model_version(unit)
        assert model_version in courses.SUPPORTED_ASSESSMENT_MODEL_VERSIONS
        self.template_value['model_version'] = model_version

        # Bind the version-specific view helpers once, up front.
        if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:
            configure_readonly_view = self.configure_readonly_view_1_4
            configure_active_view = self.configure_active_view_1_4
            get_review_received = self.get_review_received_1_4
        elif model_version == courses.ASSESSMENT_MODEL_VERSION_1_5:
            configure_readonly_view = self.configure_readonly_view_1_5
            configure_active_view = self.configure_active_view_1_5
            get_review_received = self.get_review_received_1_5
        else:
            raise ValueError('Bad assessment model version: %s' % model_version)

        self.template_value['navbar'] = {'course': True}
        self.template_value['unit_id'] = self.unit_id
        self.template_value['assessment_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('assessment-post'))
        self.template_value['event_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('event-post'))

        self.template_value['grader'] = unit.workflow.get_grader()

        readonly_view = False
        due_date_exceeded = False

        submission_due_date = unit.workflow.get_submission_due_date()
        if submission_due_date:
            self.template_value['submission_due_date'] = (
                submission_due_date.strftime(HUMAN_READABLE_DATETIME_FORMAT))

            time_now = datetime.datetime.now()
            if time_now > submission_due_date:
                readonly_view = True
                due_date_exceeded = True
                self.template_value['due_date_exceeded'] = True

        if course.needs_human_grader(unit) and not student.is_transient:
            self.template_value['matcher'] = unit.workflow.get_matcher()

            rp = course.get_reviews_processor()
            review_steps_by = rp.get_review_steps_by(
                unit.unit_id, student.get_key())

            # Determine if the student can see others' reviews of his/her work.
            if (ReviewUtils.has_completed_enough_reviews(
                    review_steps_by, unit.workflow.get_review_min_count())):
                submission_and_review_steps = (
                    rp.get_submission_and_review_steps(
                        unit.unit_id, student.get_key()))
                submission_contents = submission_and_review_steps[0]
                review_steps_for = submission_and_review_steps[1]

                # Only completed, non-removed reviews with content are shown.
                review_keys_for_student = []
                for review_step in review_steps_for:
                    can_show_review = (
                        review_step.state == domain.REVIEW_STATE_COMPLETED
                        and not review_step.removed
                        and review_step.review_key
                    )

                    if can_show_review:
                        review_keys_for_student.append(review_step.review_key)

                reviews_for_student = rp.get_reviews_by_keys(
                    unit.unit_id, review_keys_for_student)

                self.template_value['reviews_received'] = [get_review_received(
                    unit, review) for review in reviews_for_student]
            else:
                submission_contents = student_work.Submission.get_contents(
                    unit.unit_id, student.get_key())

            # Determine whether to show the assessment in readonly mode.
            if submission_contents or due_date_exceeded:
                readonly_view = True
                configure_readonly_view(unit, submission_contents)

        if not readonly_view:
            submission_contents = None
            if not student.is_transient:
                # Reinstate any previous auto-graded submission for editing.
                submission_contents = student_work.Submission.get_contents(
                    unit.unit_id, student.get_key())
            configure_active_view(unit, submission_contents)

        self.render('assessment.html')

    def configure_readonly_view_1_4(self, unit, submission_contents):
        # 1.4 assessments are JS-defined; build readonly display params from
        # the assessment content plus the student's saved answer list.
        self.template_value['readonly_student_assessment'] = (
            create_readonly_assessment_params(
                self.get_course().get_assessment_content(unit),
                StudentWorkUtils.get_answer_list(submission_contents)))

    def configure_readonly_view_1_5(self, unit, submission_contents):
        # 1.5 assessments are HTML; the template renders unit.html_content
        # with the saved answers serialized as JSON.
        self.template_value['readonly_student_assessment'] = True
        self.template_value['html_content'] = unit.html_content
        self.template_value['html_saved_answers'] = transforms.dumps(
            submission_contents)

    def configure_active_view_1_4(self, unit, submission_contents):
        self.template_value['assessment_script_src'] = (
            self.get_course().get_assessment_filename(unit.unit_id))
        if submission_contents:
            # If a previous submission exists, reinstate it.
            self.template_value['saved_answers'] = transforms.dumps(
                StudentWorkUtils.get_answer_list(submission_contents))

    def configure_active_view_1_5(self, unit, submission_contents):
        self.template_value['html_content'] = unit.html_content
        self.template_value['html_check_answers'] = unit.html_check_answers
        if submission_contents:
            # If a previous submission exists, reinstate it.
            self.template_value['html_saved_answers'] = transforms.dumps(
                submission_contents)

    def get_review_received_1_4(self, unit, review):
        # Readonly rendering params for one peer review (1.4 model).
        return create_readonly_assessment_params(
            self.get_course().get_review_form_content(unit),
            StudentWorkUtils.get_answer_list(review))

    def get_review_received_1_5(self, unit, review):
        # Readonly rendering data for one peer review (1.5 model).
        return {
            'content': unit.html_review_form,
            'saved_answers': transforms.dumps(review)
        }
class ReviewDashboardHandler(BaseHandler):
    """Handler for generating the index of reviews that a student has to do."""

    def populate_template(self, unit, review_steps):
        """Adds variables to the template for the review dashboard."""
        self.template_value['assessment_name'] = unit.title
        self.template_value['unit_id'] = unit.unit_id
        self.template_value['event_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('event-post'))
        self.template_value['review_dashboard_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('review-dashboard-post'))

        self.template_value['REVIEW_STATE_COMPLETED'] = (
            domain.REVIEW_STATE_COMPLETED)

        self.template_value['review_steps'] = review_steps
        self.template_value['review_min_count'] = (
            unit.workflow.get_review_min_count())

        review_due_date = unit.workflow.get_review_due_date()
        if review_due_date:
            self.template_value['review_due_date'] = review_due_date.strftime(
                HUMAN_READABLE_DATETIME_FORMAT)

        # NOTE(review): review_due_date may be None here; this relies on
        # Python 2 cross-type comparison semantics — confirm the workflow
        # always provides a due date for peer-reviewed units.
        time_now = datetime.datetime.now()
        self.template_value['due_date_exceeded'] = (time_now > review_due_date)

    def get(self):
        """Renders the review dashboard for an enrolled student."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        course = self.get_course()
        rp = course.get_reviews_processor()
        unit, _ = extract_unit_and_lesson(self)
        if not unit:
            self.error(404)
            return

        self.template_value['navbar'] = {'course': True}

        # Only peer-reviewed (human-graded) units have a review dashboard.
        if not course.needs_human_grader(unit):
            self.error(404)
            return

        # Check that the student has submitted the corresponding assignment.
        if not rp.does_submission_exist(unit.unit_id, student.get_key()):
            self.template_value['error_code'] = (
                'cannot_review_before_submitting_assignment')
            self.render('error.html')
            return

        review_steps = rp.get_review_steps_by(unit.unit_id, student.get_key())
        self.populate_template(unit, review_steps)
        required_review_count = unit.workflow.get_review_min_count()

        # The student can request a new submission if:
        # - all his/her current reviews are in Draft/Completed state, and
        # - he/she is not in the state where the required number of reviews
        #   has already been requested, but not all of these are completed.
        self.template_value['can_request_new_review'] = (
            len(review_steps) < required_review_count or
            ReviewUtils.has_completed_all_assigned_reviews(review_steps)
        )
        self.render('review_dashboard.html')

    def post(self):
        """Allows a reviewer to request a new review."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        if not self.assert_xsrf_token_or_fail(
                self.request, 'review-dashboard-post'):
            return

        course = self.get_course()
        unit, unused_lesson = extract_unit_and_lesson(self)
        if not unit:
            self.error(404)
            return

        rp = course.get_reviews_processor()
        review_steps = rp.get_review_steps_by(unit.unit_id, student.get_key())
        self.template_value['navbar'] = {'course': True}

        if not course.needs_human_grader(unit):
            self.error(404)
            return

        # Check that the student has submitted the corresponding assignment.
        if not rp.does_submission_exist(unit.unit_id, student.get_key()):
            self.template_value['error_code'] = (
                'cannot_review_before_submitting_assignment')
            self.render('error.html')
            return

        # Check that the review due date has not passed.
        time_now = datetime.datetime.now()
        review_due_date = unit.workflow.get_review_due_date()
        if time_now > review_due_date:
            self.template_value['error_code'] = (
                'cannot_request_review_after_deadline')
            self.render('error.html')
            return

        # Check that the student can request a new review.
        review_min_count = unit.workflow.get_review_min_count()
        can_request_new_review = (
            len(review_steps) < review_min_count or
            ReviewUtils.has_completed_all_assigned_reviews(review_steps))
        if not can_request_new_review:
            self.template_value['review_min_count'] = review_min_count
            self.template_value['error_code'] = 'must_complete_more_reviews'
            self.render('error.html')
            return

        # Fallback flag: shown if get_new_review below raises (e.g. no
        # submissions available to assign) and we re-render the dashboard.
        self.template_value['no_submissions_available'] = True

        try:
            review_step_key = rp.get_new_review(unit.unit_id, student.get_key())
            redirect_params = {
                'key': review_step_key,
                'unit': unit.unit_id,
            }
            self.redirect('/review?%s' % urllib.urlencode(redirect_params))
        except Exception:  # pylint: disable-msg=broad-except
            review_steps = rp.get_review_steps_by(
                unit.unit_id, student.get_key())
            self.populate_template(unit, review_steps)
            self.render('review_dashboard.html')
class ReviewHandler(BaseHandler):
    """Handler for generating the submission page for individual reviews."""

    def get(self):
        """Renders one review's page: the submission plus the review form.

        The review form is readonly once the review is completed or the
        review due date has passed.  404s on any invalid key, missing
        step, or reviewer mismatch to avoid leaking review existence.
        """
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        course = self.get_course()
        rp = course.get_reviews_processor()
        unit, unused_lesson = extract_unit_and_lesson(self)
        if not course.needs_human_grader(unit):
            self.error(404)
            return

        review_step_key = self.request.get('key')
        if not unit or not review_step_key:
            self.error(404)
            return

        try:
            review_step_key = db.Key(encoded=review_step_key)
            review_step = rp.get_review_steps_by_keys(
                unit.unit_id, [review_step_key])[0]
        except Exception:  # pylint: disable-msg=broad-except
            self.error(404)
            return

        if not review_step:
            self.error(404)
            return

        # Check that the student is allowed to review this submission.
        if not student.has_same_key_as(review_step.reviewer_key):
            self.error(404)
            return

        model_version = course.get_assessment_model_version(unit)
        assert model_version in courses.SUPPORTED_ASSESSMENT_MODEL_VERSIONS
        self.template_value['model_version'] = model_version

        # Bind the version-specific view helpers once, up front.
        if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:
            configure_assessment_view = self.configure_assessment_view_1_4
            configure_readonly_review = self.configure_readonly_review_1_4
            configure_active_review = self.configure_active_review_1_4
        elif model_version == courses.ASSESSMENT_MODEL_VERSION_1_5:
            configure_assessment_view = self.configure_assessment_view_1_5
            configure_readonly_review = self.configure_readonly_review_1_5
            configure_active_review = self.configure_active_review_1_5
        else:
            raise ValueError('Bad assessment model version: %s' % model_version)

        self.template_value['navbar'] = {'course': True}
        self.template_value['unit_id'] = unit.unit_id
        self.template_value['key'] = review_step_key

        submission_key = review_step.submission_key
        submission_contents = student_work.Submission.get_contents_by_key(
            submission_key)

        configure_assessment_view(unit, submission_contents)

        review_due_date = unit.workflow.get_review_due_date()
        if review_due_date:
            self.template_value['review_due_date'] = review_due_date.strftime(
                HUMAN_READABLE_DATETIME_FORMAT)

        review_key = review_step.review_key
        rev = rp.get_reviews_by_keys(
            unit.unit_id, [review_key])[0] if review_key else None

        # NOTE(review): review_due_date may be None if the workflow sets no
        # due date; the comparisons below rely on Python 2 cross-type
        # ordering — confirm peer-review workflows always set one.
        time_now = datetime.datetime.now()
        show_readonly_review = (
            review_step.state == domain.REVIEW_STATE_COMPLETED or
            time_now > review_due_date)

        self.template_value['due_date_exceeded'] = (time_now > review_due_date)

        if show_readonly_review:
            configure_readonly_review(unit, rev)
        else:
            # Populate the review form,
            configure_active_review(unit, rev)

        self.template_value['assessment_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('review-post'))
        self.template_value['event_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('event-post'))

        self.render('review.html')

    def configure_assessment_view_1_4(self, unit, submission_contents):
        # Readonly display of the reviewee's submission (1.4 JS model).
        readonly_student_assessment = create_readonly_assessment_params(
            self.get_course().get_assessment_content(unit),
            StudentWorkUtils.get_answer_list(submission_contents))
        self.template_value[
            'readonly_student_assessment'] = readonly_student_assessment

    def configure_assessment_view_1_5(self, unit, submission_contents):
        # Readonly display of the reviewee's submission (1.5 HTML model).
        self.template_value['html_review_content'] = unit.html_content
        self.template_value['html_reviewee_answers'] = transforms.dumps(
            submission_contents)

    def configure_readonly_review_1_4(self, unit, review_contents):
        readonly_review_form = create_readonly_assessment_params(
            self.get_course().get_review_form_content(unit),
            StudentWorkUtils.get_answer_list(review_contents))
        self.template_value['readonly_review_form'] = readonly_review_form

    def configure_readonly_review_1_5(self, unit, review_contents):
        self.template_value['readonly_review_form'] = True
        self.template_value['html_review_form'] = unit.html_review_form
        self.template_value['html_review_answers'] = transforms.dumps(
            review_contents)

    def configure_active_review_1_4(self, unit, review_contents):
        self.template_value['assessment_script_src'] = (
            self.get_course().get_review_form_filename(unit.unit_id))
        # Reinstate a draft review if one exists; empty list otherwise.
        saved_answers = (
            StudentWorkUtils.get_answer_list(review_contents)
            if review_contents else [])
        self.template_value['saved_answers'] = transforms.dumps(saved_answers)

    def configure_active_review_1_5(self, unit, review_contents):
        self.template_value['html_review_form'] = unit.html_review_form
        self.template_value['html_review_answers'] = transforms.dumps(
            review_contents)

    def post(self):
        """Handles POST requests, when a reviewer submits a review."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        if not self.assert_xsrf_token_or_fail(self.request, 'review-post'):
            return

        course = self.get_course()
        rp = course.get_reviews_processor()

        unit_id = self.request.get('unit_id')
        unit = self.find_unit_by_id(unit_id)
        if not unit or not course.needs_human_grader(unit):
            self.error(404)
            return

        review_step_key = self.request.get('key')
        if not review_step_key:
            self.error(404)
            return

        try:
            review_step_key = db.Key(encoded=review_step_key)
            review_step = rp.get_review_steps_by_keys(
                unit.unit_id, [review_step_key])[0]
        except Exception:  # pylint: disable-msg=broad-except
            self.error(404)
            return

        # Check that the student is allowed to review this submission.
        if not student.has_same_key_as(review_step.reviewer_key):
            self.error(404)
            return

        self.template_value['navbar'] = {'course': True}
        self.template_value['unit_id'] = unit.unit_id

        # Check that the review due date has not passed.
        time_now = datetime.datetime.now()
        review_due_date = unit.workflow.get_review_due_date()
        if time_now > review_due_date:
            self.template_value['time_now'] = time_now.strftime(
                HUMAN_READABLE_DATETIME_FORMAT)
            self.template_value['review_due_date'] = (
                review_due_date.strftime(HUMAN_READABLE_DATETIME_FORMAT))
            self.template_value['error_code'] = 'review_deadline_exceeded'
            self.render('error.html')
            return

        # 'is_draft' == 'false' means the reviewer is finalizing the review.
        mark_completed = (self.request.get('is_draft') == 'false')
        self.template_value['is_draft'] = (not mark_completed)

        review_payload = self.request.get('answers')
        review_payload = transforms.loads(
            review_payload) if review_payload else []
        try:
            rp.write_review(
                unit.unit_id, review_step_key, review_payload, mark_completed)
            course.update_final_grades(student)
        except domain.TransitionError:
            # The review was already finalized; re-submission is not allowed.
            self.template_value['error_code'] = 'review_already_submitted'
            self.render('error.html')
            return

        self.render('review_confirmation.html')
class EventsRESTHandler(BaseRESTHandler):
    """Provides REST API for an Event."""

    def get(self):
        """Returns a 404 error; this handler should not be GET-accessible."""
        self.error(404)
        return

    def post(self):
        """Receives event and puts it into datastore.

        Silently drops the event (no error response) when event
        persistence is disabled, the XSRF token is invalid, or there is
        no signed-in user — clients fire these asynchronously and do not
        expect a reply.
        """
        COURSE_EVENTS_RECEIVED.inc()

        # Deliberate best-effort: bail out quietly unless at least one
        # event-persistence flag is enabled.
        can = (
            CAN_PERSIST_ACTIVITY_EVENTS.value or
            CAN_PERSIST_PAGE_EVENTS.value or
            CAN_PERSIST_TAG_EVENTS.value)
        if not can:
            return

        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(request, 'event-post', {}):
            return

        user = self.get_user()
        if not user:
            return

        source = request.get('source')
        payload_json = request.get('payload')

        models.EventEntity.record(source, user, payload_json)
        COURSE_EVENTS_RECORDED.inc()

        self.process_event(user, source, payload_json)

    def process_event(self, user, source, payload_json):
        """Processes an event after it has been recorded in the event stream."""
        student = models.Student.get_enrolled_student_by_email(user.email())
        if not student:
            return

        payload = transforms.loads(payload_json)

        if 'location' not in payload:
            return
        source_url = payload['location']

        # Route recognized event sources to the matching progress-tracker
        # update; unrecognized sources are recorded but not processed.
        if source == 'attempt-activity':
            unit_id, lesson_id = get_unit_and_lesson_id_from_url(
                self, source_url)
            if unit_id is not None and lesson_id is not None:
                self.get_course().get_progress_tracker().put_block_completed(
                    student, unit_id, lesson_id, payload['index'])
        elif source == 'tag-assessment':
            unit_id, lesson_id = get_unit_and_lesson_id_from_url(
                self, source_url)
            cpt_id = payload['instanceid']
            if (unit_id is not None and lesson_id is not None and
                cpt_id is not None):
                self.get_course().get_progress_tracker(
                    ).put_component_completed(
                        student, unit_id, lesson_id, cpt_id)
        elif source == 'attempt-lesson':
            # Records progress for scored lessons.
            unit_id, lesson_id = get_unit_and_lesson_id_from_url(
                self, source_url)
            if unit_id is not None and lesson_id is not None:
                self.get_course().get_progress_tracker().put_html_completed(
                    student, unit_id, lesson_id)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to manage all aspects of student assessments."""
__author__ = 'pgbovine@google.com (Philip Guo)'
import datetime
import logging
from models import courses
from models import models
from models import review
from models import student_work
from models import transforms
from models import utils
from models.models import Student
from models.models import StudentAnswersEntity
from tools import verify
from utils import BaseHandler
from utils import HUMAN_READABLE_DATETIME_FORMAT
from google.appengine.ext import db
def store_score(course, student, assessment_type, score):
    """Stores a student's score on a particular assessment.

    The latest answers are always saved by the caller, but a score is
    only persisted when it beats the previously recorded score for the
    same assessment.

    Args:
        course: the course containing the assessment.
        student: the student whose data is stored.
        assessment_type: the type of the assessment.
        score: the student's score on this assessment.

    Returns:
        the result of the assessment, if appropriate.
    """
    # FIXME: Course creators can edit this code to implement custom
    # assessment scoring and storage behavior
    # TODO(pgbovine): Note that the latest version of answers are always saved,
    # but scores are only saved if they're higher than the previous attempt.
    # This can lead to unexpected analytics behavior. Resolve this.
    previous_score = course.get_score(student, assessment_type)
    no_prior_score = previous_score is None
    # Stored scores may come back as strings, so cast before comparing.
    if no_prior_score or score > int(previous_score):
        utils.set_score(student, assessment_type, score)
class AnswerHandler(BaseHandler):
    """Handler for saving assessment answers."""

    # Find student entity and save answers
    @db.transactional(xg=True)
    def update_assessment_transaction(
        self, email, assessment_type, new_answers, score):
        """Stores answer and updates user scores.

        Runs as a cross-group datastore transaction so the Student and
        StudentAnswersEntity writes commit (or roll back) together.

        Args:
            email: the student's email address.
            assessment_type: the title of the assessment.
            new_answers: the latest set of answers supplied by the student.
            score: the numerical assessment score.

        Returns:
            the student instance.
        """
        student = Student.get_enrolled_student_by_email(email)
        course = self.get_course()

        # It may be that old Student entities don't have user_id set; fix it.
        if not student.user_id:
            student.user_id = self.get_user().user_id()

        answers = StudentAnswersEntity.get_by_key_name(student.user_id)
        if not answers:
            answers = StudentAnswersEntity(key_name=student.user_id)
        answers.updated_on = datetime.datetime.now()

        utils.set_answer(answers, assessment_type, new_answers)

        # Only persists when this score beats the stored one.
        store_score(course, student, assessment_type, score)

        student.put()
        answers.put()

        # Also record the event, which is useful for tracking multiple
        # submissions and history.
        models.EventEntity.record(
            'submit-assessment', self.get_user(), transforms.dumps({
                'type': 'assessment-%s' % assessment_type,
                'values': new_answers, 'location': 'AnswerHandler'}))

        return student

    def get(self):
        """Handles GET requests.

        This method is here because if a student logs out when on the
        reviewed_assessment_confirmation page, that student is redirected to
        the GET method of the corresponding handler. It might be a good idea to
        merge this class with lessons.AssessmentHandler, which currently only
        has a GET handler.
        """
        self.redirect('/course')

    def post(self):
        """Handles POST requests: validates, scores and stores a submission."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        if not self.assert_xsrf_token_or_fail(self.request, 'assessment-post'):
            return

        course = self.get_course()
        assessment_type = self.request.get('assessment_type')
        if not assessment_type:
            self.error(404)
            logging.error('No assessment type supplied.')
            return

        unit = course.find_unit_by_id(assessment_type)
        if unit is None or unit.type != verify.UNIT_TYPE_ASSESSMENT:
            self.error(404)
            logging.error('No assessment named %s exists.', assessment_type)
            return

        self.template_value['navbar'] = {'course': True}
        self.template_value['assessment'] = assessment_type
        self.template_value['assessment_name'] = unit.title
        self.template_value['is_last_assessment'] = (
            course.is_last_assessment(unit))

        # Convert answers from JSON to dict.
        answers = self.request.get('answers')
        answers = transforms.loads(answers) if answers else []

        grader = unit.workflow.get_grader()

        # Scores are not recorded for human-reviewed assignments.
        score = 0
        if grader == courses.AUTO_GRADER:
            score = int(round(float(self.request.get('score'))))

        # Record assessment transaction.
        student = self.update_assessment_transaction(
            student.key().name(), assessment_type, answers, score)

        if grader == courses.HUMAN_GRADER:
            rp = course.get_reviews_processor()

            # Guard against duplicate submissions of a human-graded assessment.
            previously_submitted = rp.does_submission_exist(
                unit.unit_id, student.get_key())

            if not previously_submitted:
                # Check that the submission due date has not passed.
                # NOTE(review): assumes the workflow defines a due date for
                # human-graded units; comparison against None relies on
                # Python 2 cross-type ordering — confirm.
                time_now = datetime.datetime.now()
                submission_due_date = unit.workflow.get_submission_due_date()
                if time_now > submission_due_date:
                    self.template_value['time_now'] = time_now.strftime(
                        HUMAN_READABLE_DATETIME_FORMAT)
                    self.template_value['submission_due_date'] = (
                        submission_due_date.strftime(
                            HUMAN_READABLE_DATETIME_FORMAT))
                    self.template_value['error_code'] = (
                        'assignment_deadline_exceeded')
                    self.render('error.html')
                    return

                submission_key = student_work.Submission.write(
                    unit.unit_id, student.get_key(), answers)
                rp.start_review_process_for(
                    unit.unit_id, submission_key, student.get_key())
                # Record completion event in progress tracker.
                course.get_progress_tracker().put_assessment_completed(
                    student, assessment_type)

            self.template_value['previously_submitted'] = previously_submitted

            matcher = unit.workflow.get_matcher()
            self.template_value['matcher'] = matcher
            if matcher == review.PEER_MATCHER:
                self.template_value['review_dashboard_url'] = (
                    'reviewdashboard?unit=%s' % unit.unit_id
                )

            self.render('reviewed_assessment_confirmation.html')
            return
        else:
            # Record completion event in progress tracker.
            course.get_progress_tracker().put_assessment_completed(
                student, assessment_type)

            # Save the submission in the datastore, overwriting the earlier
            # version if it exists.
            submission_key = student_work.Submission.write(
                unit.unit_id, student.get_key(), answers)

            course.update_final_grades(student)

            self.template_value['result'] = course.get_overall_result(student)
            self.template_value['score'] = score
            self.template_value['overall_score'] = course.get_overall_score(
                student)
            self.render('test_confirmation.html')
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom Jinja2 filters used in Course Builder."""
__author__ = 'John Orr (jorr@google.com)'
import jinja2
from models import config
from models import models
from webapp2_extras import i18n
import safe_dom
import tags
# Admin-togglable flag: when True, create_jinja_environment() below wires a
# MemcachedBytecodeCache so compiled template bytecode is reused across
# requests instead of being recompiled.
CAN_USE_JINJA2_TEMPLATE_CACHE = config.ConfigProperty(
    'gcb_can_use_jinja2_template_cache', bool, safe_dom.Text(
        'Whether jinja2 can cache bytecode of compiled templates in memcache.'),
    default_value=True)
def finalize(x):
    """A finalize method which will correctly handle safe_dom elements.

    Installed as the Jinja2 environment's `finalize` hook (see
    create_jinja_environment), so it runs on every value a template
    expression emits.

    Args:
        x: any value produced by a template expression.

    Returns:
        A Markup-wrapped sanitized string when x is a safe_dom Node or
        NodeList (so autoescaping does not double-escape it); otherwise
        x unchanged.
    """
    # isinstance accepts a tuple of types: one call replaces the original
    # `isinstance(x, Node) or isinstance(x, NodeList)` chain.
    if isinstance(x, (safe_dom.Node, safe_dom.NodeList)):
        return jinja2.utils.Markup(x.sanitized)
    return x
def js_string_raw(data):
    """Escape a string so that it can be put in a JS quote.

    Non-string values pass through untouched.  Backslash is escaped
    first so later substitutions are not double-escaped; angle brackets
    and ampersand become unicode escapes to keep the result HTML-safe.
    """
    if not isinstance(data, basestring):
        return data
    # Ordered table of (raw, escaped) pairs; order matters for '\\'.
    escapes = (
        ('\\', '\\\\'),
        ('\r', '\\r'),
        ('\n', '\\n'),
        ('\b', '\\b'),
        ('"', '\\"'),
        ("'", "\\'"),
        ('<', '\\u003c'),
        ('>', '\\u003e'),
        ('&', '\\u0026'),
    )
    for raw, escaped in escapes:
        data = data.replace(raw, escaped)
    return data
def js_string(data):
    """Escape *data* for a JS string literal and mark the result safe."""
    escaped = js_string_raw(data)
    return jinja2.utils.Markup(escaped)
def get_gcb_tags_filter(handler):
    """Builds a 'gcb_tags' Jinja filter bound to the given request handler."""

    def gcb_tags(data):
        """Apply GCB custom tags, if enabled. Otherwise pass as if by 'safe'."""
        if not isinstance(data, basestring):
            return data
        if tags.CAN_USE_DYNAMIC_TAGS.value:
            rendered = tags.html_to_safe_dom(data, handler)
        else:
            rendered = data
        return jinja2.utils.Markup(rendered)

    return gcb_tags
def create_jinja_environment(loader, locale=None):
    """Create proper jinja environment.

    Args:
        loader: a jinja2 template loader.
        locale: optional locale code; when set, gettext translations for it
            are installed into the environment.

    Returns:
        a jinja2.Environment with autoescaping, the safe_dom-aware finalize
        hook, the i18n extension, and the js_string filter installed.
    """
    cache = None
    if CAN_USE_JINJA2_TEMPLATE_CACHE.value:
        # Key the bytecode cache by namespace so courses don't share entries.
        prefix = 'jinja2:bytecode:%s:/' % models.MemcacheManager.get_namespace()
        cache = jinja2.MemcachedBytecodeCache(
            models.MemcacheManager, timeout=models.DEFAULT_CACHE_TTL_SECS,
            prefix=prefix)
    jinja_environment = jinja2.Environment(
        autoescape=True, finalize=finalize, extensions=['jinja2.ext.i18n'],
        bytecode_cache=cache, loader=loader)
    jinja_environment.filters['js_string'] = js_string
    if locale:
        i18n.get_i18n().set_locale(locale)
    jinja_environment.install_gettext_translations(i18n)
    return jinja_environment
def get_template(template_name, dirs, locale=None, handler=None):
    """Sets up an environment and gets jinja template.

    Args:
        template_name: template file name, relative to one of dirs.
        dirs: directories searched by the file-system loader.
        locale: optional locale forwarded to create_jinja_environment.
        handler: request handler handed to the gcb_tags filter so custom
            tags can render against the current request.

    Returns:
        the compiled jinja2 template object.
    """
    jinja_environment = create_jinja_environment(
        jinja2.FileSystemLoader(dirs), locale=locale)
    jinja_environment.filters['gcb_tags'] = get_gcb_tags_filter(handler)
    return jinja_environment.get_template(template_name)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapping from schema to backend properties."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
import collections
import json
from models.property import Property
from models.property import Registry
class SchemaField(Property):
    """SchemaField defines a simple field in REST API."""

    def __init__(
        self, name, label, property_type, select_data=None, description=None,
        optional=False, hidden=False, editable=True,
        extra_schema_dict_values=None):
        """Initializes the field.

        Args:
            name: field name as used in the REST payload.
            label: human-readable label for the editor form.
            property_type: JSON schema type name, e.g. 'string' or 'date'.
            select_data: optional iterable of (value, label) choices.
            description: optional help text.
            optional: whether the field may be omitted.
            hidden: render as a hidden input when True.
            editable: render read-only when False.
            extra_schema_dict_values: optional dict of extra inputEx options.
        """
        Property.__init__(
            self, name, label, property_type, select_data=select_data,
            description=description, optional=optional,
            extra_schema_dict_values=extra_schema_dict_values)
        self._hidden = hidden
        self._editable = editable

    def get_json_schema_dict(self):
        """Get the JSON schema for this field."""
        prop = {}
        prop['type'] = self._property_type
        if self._optional:
            prop['optional'] = self._optional
        if self._description:
            prop['description'] = self._description
        return prop

    def _get_schema_dict(self, prefix_key):
        """Get Schema annotation dictionary for this field."""
        if self._extra_schema_dict_values:
            # NOTE(review): this aliases (and below mutates) the very dict the
            # caller passed as extra_schema_dict_values instead of copying it;
            # repeated calls keep writing into the same object. Preserved
            # as-is since callers may rely on it.
            schema = self._extra_schema_dict_values
        else:
            schema = {}
        schema['label'] = self._label
        if self._hidden:
            schema['_type'] = 'hidden'
        elif not self._editable:
            schema['_type'] = 'uneditable'
        elif self._select_data and '_type' not in schema:
            schema['_type'] = 'select'
        # BUG FIX: was "'date' is self._property_type", an identity comparison
        # that only works when both strings happen to be interned; use
        # equality so any 'date'-typed field gets its date formats.
        if self._property_type == 'date':
            schema['dateFormat'] = 'Y/m/d'
            schema['valueFormat'] = 'Y/m/d'
        elif self._select_data:
            choices = []
            for value, label in self._select_data:
                choices.append(
                    {'value': value, 'label': unicode(label)})
            schema['choices'] = choices
        if self._description:
            schema['description'] = self._description
        return [(prefix_key + ['_inputex'], schema)]
class FieldArray(SchemaField):
    """A REST API array field whose items are simple fields or objects."""

    def __init__(
        self, name, label, description=None, item_type=None,
        extra_schema_dict_values=None):
        super(FieldArray, self).__init__(
            name, label, 'array', description=description,
            extra_schema_dict_values=extra_schema_dict_values)
        self._item_type = item_type

    def get_json_schema_dict(self):
        """Extends the base field schema with the item type's schema."""
        schema = super(FieldArray, self).get_json_schema_dict()
        schema['items'] = self._item_type.get_json_schema_dict()
        return schema

    def _get_schema_dict(self, prefix_key):
        """Returns annotations for this array followed by its item type's."""
        # pylint: disable-msg=protected-access
        item_entries = self._item_type._get_schema_dict(prefix_key + ['items'])
        # pylint: enable-msg=protected-access
        return super(FieldArray, self)._get_schema_dict(prefix_key) + item_entries
class FieldRegistry(Registry):
    """FieldRegistry is an object with SchemaField properties in REST API."""

    def add_sub_registry(
        self, name, title=None, description=None, registry=None):
        """Add a sub registry to for this Registry."""
        if not registry:
            registry = FieldRegistry(title, description=description)
        self._sub_registories[name] = registry
        return registry

    def get_json_schema_dict(self):
        """Returns the JSON schema dict for this registry and its children."""
        schema_dict = dict(self._registry)
        schema_dict['properties'] = collections.OrderedDict()
        for schema_field in self._properties:
            schema_dict['properties'][schema_field.name] = (
                schema_field.get_json_schema_dict())
        for key in self._sub_registories.keys():
            schema_dict['properties'][key] = (
                self._sub_registories[key].get_json_schema_dict())
        return schema_dict

    def get_json_schema(self):
        """Get the json schema for this API."""
        return json.dumps(self.get_json_schema_dict())

    def _get_schema_dict(self, prefix_key):
        """Get schema dict for this API."""
        title_key = list(prefix_key)
        title_key.append('title')
        schema_dict = [(title_key, self._title)]
        if self._extra_schema_dict_values:
            key = list(prefix_key)
            key.append('_inputex')
            schema_dict.append([key, self._extra_schema_dict_values])
        base_key = list(prefix_key)
        base_key.append('properties')
        # pylint: disable-msg=protected-access
        for schema_field in self._properties:
            key = base_key + [schema_field.name]
            schema_dict += schema_field._get_schema_dict(key)
        # pylint: enable-msg=protected-access
        for key in self._sub_registories.keys():
            sub_registry_key_prefix = list(base_key)
            sub_registry_key_prefix.append(key)
            sub_registry = self._sub_registories[key]
            # pylint: disable-msg=protected-access
            for entry in sub_registry._get_schema_dict(sub_registry_key_prefix):
                schema_dict.append(entry)
            # pylint: enable-msg=protected-access
        return schema_dict

    def get_schema_dict(self):
        """Get schema dict for this API."""
        return self._get_schema_dict(list())

    def _add_entry(self, key_part_list, value, entity):
        """Writes value into nested dicts, consuming key_part_list from the end.

        Note: key_part_list is expected in reversed order (deepest key first
        in the list, popped last) and is mutated by this call.
        """
        if len(key_part_list) == 1:
            entity[key_part_list[0]] = value
            return
        key = key_part_list.pop()
        # 'in' replaces the deprecated, Python-2-only dict.has_key().
        if key not in entity:
            entity[key] = {}
        else:
            # isinstance replaces the brittle type(x) == type(dict()) check
            # and also accepts dict subclasses such as OrderedDict.
            assert isinstance(entity[key], dict)
        self._add_entry(key_part_list, value, entity[key])

    def convert_json_to_entity(self, json_entry, entity):
        """Copies colon-delimited JSON keys into a nested entity dict."""
        assert isinstance(json_entry, dict)
        for key in json_entry.keys():
            if isinstance(json_entry[key], dict):
                self.convert_json_to_entity(json_entry[key], entity)
            else:
                key_parts = key.split(':')
                key_parts.reverse()
                self._add_entry(key_parts, json_entry[key], entity)

    def _get_field_value(self, key_part_list, entity):
        """Reads a value from nested entity dicts; returns None when absent."""
        if len(key_part_list) == 1:
            if key_part_list[0] in entity:
                return entity[key_part_list[0]]
            return None
        key = key_part_list.pop()
        if key in entity:
            return self._get_field_value(key_part_list, entity[key])
        return None

    def convert_entity_to_json_entity(self, entity, json_entry):
        """Populates json_entry from entity using the registered fields."""
        for schema_field in self._properties:
            field_name = schema_field.name
            field_name_parts = field_name.split(':')
            field_name_parts.reverse()
            value = self._get_field_value(field_name_parts, entity)
            # 'is not None' replaces type(value) != type(None); a present but
            # falsy value (0, '', False) is still copied, as before.
            if value is not None:
                json_entry[field_name] = value
        for key in self._sub_registories.keys():
            json_entry[key] = {}
            self._sub_registories[key].convert_entity_to_json_entity(
                entity, json_entry[key])
| Python |
"""Classes to build sanitized HTML."""
__author__ = 'John Orr (jorr@google.com)'
import cgi
import re
def escape(strg):
    # cgi.escape(quote=1) handles &, <, > and double quotes; single quotes and
    # backticks are additionally entity-escaped because some contexts treat
    # them as attribute delimiters.
    # NOTE(review): cgi.escape is Python-2-only (removed in Python 3.8).
    return cgi.escape(strg, quote=1).replace("'", '&#39;').replace('`', '&#96;')
class Node(object):
    """Base class for the sanitizing module."""

    @property
    def sanitized(self):
        # Subclasses must return their content as a sanitized string.
        raise NotImplementedError()

    def __str__(self):
        # Rendering a node is identical to sanitizing it.
        return self.sanitized
class NodeList(object):
    """An ordered collection of Nodes that can be sanitized in bulk."""

    def __init__(self):
        self.list = []

    def __len__(self):
        return len(self.list)

    def append(self, node):
        """Appends a node (must not be None); returns self for chaining."""
        assert node is not None, 'Cannot add an empty value to the node list'
        self.list.append(node)
        return self

    @property
    def sanitized(self):
        """Concatenates the sanitized output of every contained node."""
        return ''.join(node.sanitized for node in self.list)

    def __str__(self):
        return self.sanitized
class Text(Node):
    """Holds untrusted text which will be sanitized when accessed."""

    def __init__(self, unsafe_string):
        # Stored verbatim; escaping is deferred until .sanitized is read.
        self._value = unsafe_string

    @property
    def sanitized(self):
        # Entity-escapes &, <, >, quotes, apostrophes and backticks.
        return escape(self._value)
class Element(Node):
    """Embodies an HTML element which will be sanitized when accessed."""

    _ALLOWED_NAME_PATTERN = re.compile('^[a-zA-Z][a-zA-Z0-9]*$')
    _VOID_ELEMENTS = frozenset([
        'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
        'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'])

    @staticmethod
    def _check_name(kind, candidate):
        """Asserts that a tag or attribute name matches the allowed pattern."""
        assert Element._ALLOWED_NAME_PATTERN.match(candidate), (
            '%s %s is not allowed' % (kind, candidate))

    def __init__(self, tag_name, **attr):
        """Initializes an element with given tag name and attributes.

        Tag name will be restricted to alpha chars, attribute names
        will be quote-escaped.

        Args:
            tag_name: the name of the element, which must match
                _ALLOWED_NAME_PATTERN.
            **attr: the names and value of the attributes. Names must match
                _ALLOWED_NAME_PATTERN and values will be quote-escaped.
        """
        Element._check_name('tag name', tag_name)
        for attr_name in attr:
            Element._check_name('attribute name', attr_name)
        self._tag_name = tag_name
        self._attr = attr
        self._children = []

    def add_attribute(self, **attr):
        """Adds or overwrites attributes; returns self for chaining."""
        for attr_name, value in attr.items():
            Element._check_name('attribute name', attr_name)
            self._attr[attr_name] = value
        return self

    def add_child(self, node):
        """Appends one child node; returns self for chaining."""
        self._children.append(node)
        return self

    def add_children(self, node_list):
        """Appends every node of a NodeList; returns self for chaining."""
        self._children.extend(node_list.list)
        return self

    def add_text(self, text):
        """Appends escaped text content; returns self for chaining."""
        return self.add_child(Text(text))

    @property
    def sanitized(self):
        """Sanitizes the element and all of its descendants."""
        Element._check_name('tag name', self._tag_name)
        parts = ['<%s' % self._tag_name]
        # Attributes are emitted in sorted order for deterministic output.
        for attr_name, value in sorted(self._attr.items()):
            if attr_name == 'className':
                # 'class' is a Python keyword, so callers pass 'className'.
                attr_name = 'class'
            if value is None:
                value = ''
            parts.append(' %s="%s"' % (attr_name, escape(value)))
        if self._children:
            parts.append('>')
            parts.extend(child.sanitized for child in self._children)
            parts.append('</%s>' % self._tag_name)
        elif self._tag_name.lower() in Element._VOID_ELEMENTS:
            parts.append('/>')
        else:
            # Non-void element without children still needs a closing tag.
            parts.append('></%s>' % self._tag_name)
        return ''.join(parts)
class ScriptElement(Element):
    """Represents an HTML <script> element."""

    def __init__(self, **attr):
        super(ScriptElement, self).__init__('script', **attr)

    def add_child(self, unused_node):
        # Arbitrary child nodes are forbidden; script bodies must go through
        # add_text so they can be checked for a premature '</script>'.
        raise ValueError()

    def add_children(self, unused_nodes):
        # Forbidden for the same reason as add_child.
        raise ValueError()

    def add_text(self, text):
        """Add the script body."""

        # The body is wrapped in a private Node that is NOT HTML-escaped
        # (script text is code, not markup); it only rejects a literal
        # '</script>', which would terminate the element early.
        class Script(Node):

            def __init__(self, script):
                self._script = script

            @property
            def sanitized(self):
                if '</script>' in self._script:
                    raise ValueError('End script tag forbidden')
                return self._script

        self._children.append(Script(text))
class Entity(Node):
    """Holds an XML entity."""

    # Accepts named (&amp;), decimal (&#65;) and hex (&#x41;) entities only.
    ENTITY_PATTERN = re.compile('^&([a-zA-Z]+|#[0-9]+|#x[0-9a-fA-F]+);$')

    def __init__(self, entity):
        assert Entity.ENTITY_PATTERN.match(entity)
        self._entity = entity

    @property
    def sanitized(self):
        # Re-validated on access in case the value was mutated after __init__.
        assert Entity.ENTITY_PATTERN.match(self._entity)
        return self._entity
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for custom HTML tags."""
__author__ = 'John Orr (jorr@google.com)'
import inspect
import logging
import mimetypes
import os
import pkgutil
from xml.etree import cElementTree
import appengine_config
from common import schema_fields
from extensions import tags
import html5lib
from models import config
import webapp2
import safe_dom
# Admin setting controlling whether <gcb-...> custom tags in lesson content
# are expanded at render time (see html_to_safe_dom below).
CAN_USE_DYNAMIC_TAGS = config.ConfigProperty(
    'gcb_can_use_dynamic_tags', bool, safe_dom.Text(
        'Whether lesson content can make use of custom HTML tags such as '
        '<gcb-youtube videoid="...">. If this is enabled some legacy content '
        'may be rendered differently. '),
    default_value=True)

# User-visible error strings rendered in place of a broken tag.
DUPLICATE_INSTANCE_ID_MESSAGE = (
    'Error processing custom HTML tag: duplicate tag id')
INVALID_HTML_TAG_MESSAGE = 'Invalid HTML tag'
class BaseTag(object):
    """Base class for the custom HTML tags."""

    @classmethod
    def name(cls):
        # Display name of the tag; defaults to the class name.
        return cls.__name__

    @classmethod
    def vendor(cls):
        # Vendor identifier; defaults to the defining module's name.
        return cls.__module__

    @classmethod
    def required_modules(cls):
        """Lists the inputEx modules required by the editor."""
        return []

    def render(self, unused_node, unused_handler):
        """Receive a node and return a node."""
        # Placeholder output; concrete tags override this.
        return cElementTree.XML('<div>[Unimplemented custom tag]</div>')

    def get_icon_url(self):
        """Return the URL for the icon to be displayed in the rich text editor.

        Images should be placed in a folder called 'resources' inside the main
        package for the tag definitions.

        Returns:
            the URL for the icon to be displayed in the editor.
        """
        # Inline data: URI of a generic placeholder icon.
        return """
data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs
4c6QAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB90EGgAIFHpT6h
8AAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAC30lEQVRo3u1ZP2sqQRCfVVGUXC
FqoZAmbSBYxFikMojBD2ErkgdC/AxpAn4A2wRMKptgCrWwSApBEG2DCidcI0gIxogXnXnFI5I87y6Jd6
seOHDN7LL7+83u/Nk5hoh/wMTCEJHMTMDGGDMzfrCAyWVL4DdCZLy72YwCxhgDIoKXlxcQRREeHx9BFE
WYTqfg9XohGAxCKBSCnZ0dcDqdhlrFEKlWq8QYIwD49ovFYjQajYiICBF17auLACLSbDaj3d3dObizsz
Nqt9v09PRE8Xhck0gul9NtONADnojI7XbPAXW73YV55XJZk8TFxcX6TuDk5GQORBAE1StxeXmpSaJery
99lWBZ69dqtQUgpVJJcW6/39cksL+/v/oTiEajC0DsdjvNZjPF+Q6HQ5PEsrJ0Huj1egs6WZbh+flZcX
4kEtFcr1KprDaRybKsqL++vlbU+/1+zfVEUVwtAZ/Pp6h/f39X1COi5nqBQGC1iaxUKine5eFwqDg/Fo
tx8QFdYfTm5uYLiPv7e0JExZD4OV/8/+3t7a0vkcmyTJIk0Xg8Vs0Dr6+vmta/vb1dbR74rTw8PKiCPz
093V8m/qmEQiFF8IeHh7oLOq4EEJGazaam5ddajf5ElKJPNps1BDxXAohIjUbjC3CPx0OTycTQfbiewO
f3QDKZ5LIHVwIf4PP5vGFXZmUErq6uCAAok8lw9TFuBFKp1LxE4GF53eX0d10KSZLg+Pj4X/+SY/ePCw
HGGIzHYzg6OuLfG+W18MHBAYTDYf7daeLRLtv2RrcE9DdvC4UC5PN5mE6n3DvGhtU+RETn5+cLxVsikT
BHIru7u1N9uKTTaS4EDItCiAhWq1V13OVywWg02lwfGA6HmuNvb2+b7cQWi8XcUUgQBPB6varjWmMbE0
Y7nY5q4VYsFs0RRvv9PgmCMI8+VquVWq0WtzBqaC308bMPAGAwGAAiqvZQt8XcthbaELGZ/AbBX0kdVa
SPB+uxAAAAAElFTkSuQmCC
"""

    def get_schema(self, unused_handler):
        """Return the list of fields which will be displayed in the editor.

        This method assembles the list of fields which will be displayed in
        the rich text editor when a user double-clicks on the icon for the tag.
        The fields are a list of SchemaField objects in a FieldRegistry
        container. Each SchemaField has the actual attribute name as used in
        the tag, the display name for the form, and the type (usually
        string).

        Returns:
            the list of fields to be displayed in the editor.
        """
        reg = schema_fields.FieldRegistry('Unimplemented Custom Tag')
        return reg

    def unavailable_schema(self, message):
        """Utility to generate a schema for a "not available" message."""
        reg = schema_fields.FieldRegistry(self.name())
        # A single read-only pseudo-field shows the message; the 'visu' hook
        # disables the editor's Save button.
        reg.add_property(
            schema_fields.SchemaField(
                'unused_id', '', 'string', optional=True,
                editable=False, extra_schema_dict_values={
                    'value': message,
                    'visu': {
                        'visuType': 'funcName',
                        'funcName': 'disableSave'}}))
        return reg
class ResourcesHandler(webapp2.RequestHandler):
    """Content handler for resources associated with custom tags."""

    def get(self):
        """Respond to HTTP GET methods."""
        path = self.request.path
        if path.startswith('/'):
            path = path[1:]
        path = os.path.normpath(path)
        # Only files that live directly inside a 'resources' directory may be
        # served.
        # NOTE(review): normpath does not stop '../' segments that keep the
        # parent directory named 'resources'; consider also rejecting paths
        # containing os.pardir.
        if os.path.basename(os.path.dirname(path)) != 'resources':
            self.error(404)
            # BUG FIX: without this return the handler fell through and
            # served the file despite having set a 404 status.
            return
        resource_file = os.path.join(appengine_config.BUNDLE_ROOT, path)
        mimetype = mimetypes.guess_type(resource_file)[0]
        if mimetype is None:
            mimetype = 'application/octet-stream'
        try:
            self.response.status = 200
            self.response.headers['Content-Type'] = mimetype
            self.response.cache_control.no_cache = None
            self.response.cache_control.public = 'public'
            self.response.cache_control.max_age = 600
            # BUG FIX: the file handle was never closed; close it explicitly.
            stream = open(resource_file)
            try:
                self.response.write(stream.read())
            finally:
                stream.close()
        except IOError:
            self.error(404)
class EditorBlacklists(object):
    """Lists tags which should not be supported by various editors."""

    COURSE_SCOPE = set()
    ASSESSMENT_SCOPE = set()

    @classmethod
    def register(cls, tag_name, editor_set):
        """Marks tag_name as unsupported within the given editor scope."""
        editor_set.add(tag_name)

    @classmethod
    def unregister(cls, tag_name, editor_set):
        """Removes tag_name from the scope; a no-op when it is absent."""
        editor_set.discard(tag_name)
class Registry(object):
    """A class that holds all dynamically registered tags."""

    _bindings = {}

    @classmethod
    def add_tag_binding(cls, tag_name, clazz):
        """Registers a tag name to class binding."""
        cls._bindings[tag_name] = clazz

    @classmethod
    def remove_tag_binding(cls, tag_name):
        """Unregisters a tag binding; a no-op for unknown tag names."""
        cls._bindings.pop(tag_name, None)

    @classmethod
    def get_all_tags(cls):
        """Returns a shallow copy of the current bindings."""
        return dict(cls._bindings)
def get_tag_bindings():
    """Return the bindings of tag names to implementing classes.

    Tag bindings work by looking for classes which extend BaseTag and which
    belong to packages inside extensions/tags. The tag name is then composed
    from the package name and the class name, after lower-casing and separated
    with a dash. E.g., the class
        extensions.tags.gcb.YouTube
    is bound to the tag name gcb-youtube.

    Returns:
        the bindings of tag names to implementing classes.
    """
    bindings = {}
    # Import every package under extensions/tags and scan it for BaseTag
    # subclasses.
    for loader, name, ispkg in pkgutil.walk_packages(tags.__path__):
        if ispkg:
            mod = loader.find_module(name).load_module(name)
            # NOTE: this inner 'name' deliberately rebinds the outer package
            # name; from here on it is the class name.
            for name, clazz in inspect.getmembers(mod, inspect.isclass):
                if issubclass(clazz, BaseTag):
                    tag_name = ('%s-%s' % (mod.__name__, name)).lower()
                    bindings[tag_name] = clazz
    # Dynamically registered tags override statically discovered ones with
    # the same name. (Python-2-only idiom: dict keys()/items() concatenation.)
    return dict(bindings.items() + Registry.get_all_tags().items())
def html_string_to_element_tree(html_string):
    """Parses an HTML fragment into a cElementTree via html5lib."""
    parser = html5lib.HTMLParser(
        tree=html5lib.treebuilders.getTreeBuilder('etree', cElementTree),
        namespaceHTMLElements=False)
    # The fragment is wrapped in a <div> so [0] yields a single root element
    # containing the whole input.
    return parser.parseFragment('<div>%s</div>' % html_string)[0]
def html_to_safe_dom(html_string, handler):
    """Render HTML text as a tree of safe_dom elements.

    Custom tags found in tag bindings are rendered through their tag class;
    everything else is copied into safe_dom nodes, which escape their content
    on access. A tag that fails to render is replaced, along with its whole
    subtree, by an inline error message.

    Args:
        html_string: the (possibly empty) HTML text to convert.
        handler: request handler forwarded to each custom tag's render().

    Returns:
        a safe_dom.NodeList mirroring the parsed HTML.
    """
    tag_bindings = get_tag_bindings()
    node_list = safe_dom.NodeList()
    if not html_string:
        return node_list

    def _generate_error_message_node_list(elt, error_message):
        """Generates a node_list representing an error message."""
        logging.error(
            '[%s, %s]: %s.', elt.tag, dict(**elt.attrib), error_message)
        node_list = safe_dom.NodeList()
        node_list.append(safe_dom.Element(
            'span', className='gcb-error-tag'
        ).add_text(error_message))
        # Preserve any text that followed the failing element.
        if elt.tail:
            node_list.append(safe_dom.Text(elt.tail))
        return node_list

    def _process_html_tree(elt, used_instance_ids):
        """Recursively converts one element (and subtree) to safe_dom nodes."""
        # Return immediately with an error message if a duplicate instanceid is
        # detected.
        if 'instanceid' in elt.attrib:
            if elt.attrib['instanceid'] in used_instance_ids:
                return _generate_error_message_node_list(
                    elt, DUPLICATE_INSTANCE_ID_MESSAGE)
            used_instance_ids.add(elt.attrib['instanceid'])
        # Otherwise, attempt to parse this tag and all its child tags.
        original_elt = elt
        try:
            # Custom tags are expanded first; their render() may return a
            # completely different element (even a <script>).
            if elt.tag in tag_bindings:
                elt = tag_bindings[elt.tag]().render(elt, handler)
            if elt.tag.lower() == 'script':
                out_elt = safe_dom.ScriptElement()
            else:
                out_elt = safe_dom.Element(elt.tag)
            out_elt.add_attribute(**elt.attrib)
            if elt.text:
                out_elt.add_text(elt.text)
            for child in elt:
                out_elt.add_children(
                    _process_html_tree(child, used_instance_ids))
            node_list = safe_dom.NodeList()
            node_list.append(out_elt)
            # 'tail' text belongs to the parent, so take it from the ORIGINAL
            # element, not the possibly-rewritten one.
            if original_elt.tail:
                node_list.append(safe_dom.Text(original_elt.tail))
            return node_list
        except Exception as e:  # pylint: disable-msg=broad-except
            # Any rendering failure degrades to a visible error marker rather
            # than breaking the whole page.
            return _generate_error_message_node_list(
                original_elt, '%s: %s' % (INVALID_HTML_TAG_MESSAGE, e))

    root = html_string_to_element_tree(html_string)
    if root.text:
        node_list.append(safe_dom.Text(root.text))
    used_instance_ids = set([])
    for elt in root:
        node_list.append(_process_html_tree(elt, used_instance_ids))
    return node_list
def get_components_from_html(html):
    """Returns a list of dicts representing the components in a lesson.

    Args:
        html: a block of html that may contain some HTML tags representing
            custom components.

    Returns:
        A list of dicts. Each dict represents one component and has two
        keys:
        - instanceid: the instance id of the component
        - cpt_name: the name of the component tag (e.g. gcb-googlegroup)
    """
    tree_builder = html5lib.treebuilders.getTreeBuilder('etree', cElementTree)
    parser = html5lib.HTMLParser(tree=tree_builder, namespaceHTMLElements=False)
    content = parser.parseFragment('<div>%s</div>' % html)[0]
    components = []
    # Every element carrying an 'instanceid' attribute is a component.
    for component in content.findall('.//*[@instanceid]'):
        component_info = {'cpt_name': component.tag}
        component_info.update(component.attrib)
        components.append(component_info)
    return components
| Python |
# Google In-App Payments merchant credentials. These are placeholders: fill
# in your real seller id and secret locally, and never commit real values to
# source control.
SELLER_ID = "ADD YOUR SELLER ID"
SELLER_SECRET = "ADD YOUR SELLER SECRET"
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
# pylint: disable-msg=C6409,C6203
"""In-App Payments - Online Store Python Sample"""
# standard library imports
from cgi import escape
import os
import time
# third-party imports
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import jwt
# application-specific imports
from sellerinfo import SELLER_ID
from sellerinfo import SELLER_SECRET
class MainHandler(webapp.RequestHandler):
    """Handles /"""

    def get(self):
        """Renders the store page with one signed JWT per purchasable item."""
        curr_time = int(time.time())
        # Tokens expire one hour after issuance.
        exp_time = curr_time + 3600
        request_info = {'currencyCode': 'USD',
                        'sellerData': 'Custom Data'}
        jwt_info = {'iss': SELLER_ID,
                    'aud': 'Google',
                    'typ': 'google/payments/inapp/item/v1',
                    'iat': curr_time,
                    'exp': exp_time,
                    'request': request_info}
        # create JWT for first item. jwt_info holds a REFERENCE to
        # request_info, so each in-place update changes the payload signed by
        # the next encode() call.
        # NOTE(review): 'Aniversary' looks like a typo for 'Anniversary' in a
        # user-visible item name — confirm before changing, the name is part
        # of the signed payload.
        request_info.update({'name': 'Drive In Aniversary Poster', 'price': '20.00'})
        token_1 = jwt.encode(jwt_info, SELLER_SECRET)
        # create JWT for second item
        request_info.update({'name': 'Golden Gate Bridge Poster', 'price': '25.00'})
        token_2 = jwt.encode(jwt_info, SELLER_SECRET)
        # update store web page
        template_vals = {'jwt_1': token_1,
                        'jwt_2': token_2}
        path = os.path.join(os.path.dirname(__file__), 'templates', 'index.html')
        self.response.out.write(template.render(path, template_vals))
class PostbackHandler(webapp.RequestHandler):
    """Handles server postback - received at /postback"""

    def post(self):
        """Handles post request.

        Verifies the JWT Google posts after a purchase and, when every check
        passes, writes the order id back to acknowledge the payment. When any
        check fails nothing is written, so the payment is not completed.
        """
        encoded_jwt = self.request.get('jwt', None)
        if encoded_jwt is not None:
            # jwt.decode won't accept unicode, cast to str
            # http://github.com/progrium/pyjwt/issues/4
            decoded_jwt = jwt.decode(str(encoded_jwt), SELLER_SECRET)
            # validate the payment request and respond back to Google:
            # issuer/audience are reversed relative to the outgoing token.
            if decoded_jwt['iss'] == 'Google' and decoded_jwt['aud'] == SELLER_ID:
                if ('response' in decoded_jwt and
                    'orderId' in decoded_jwt['response'] and
                    'request' in decoded_jwt):
                    order_id = decoded_jwt['response']['orderId']
                    request_info = decoded_jwt['request']
                    if ('currencyCode' in request_info and 'sellerData' in request_info
                        and 'name' in request_info and 'price' in request_info):
                        # optional - update local database
                        # respond back to complete payment
                        self.response.out.write(order_id)
# URL routing for the sample store: storefront and the payment postback.
application = webapp.WSGIApplication([
    ('/', MainHandler),
    ('/postback', PostbackHandler),
], debug=True)


def main():
    """Runs the WSGI application."""
    run_wsgi_app(application)


if __name__ == '__main__':
    main()
| Python |
""" JSON Web Token implementation
Minimum implementation based on this spec:
http://self-issued.info/docs/draft-jones-json-web-token-01.html
"""
import base64
import hashlib
import hmac
try:
import json
except ImportError:
import simplejson as json
__all__ = ['encode', 'decode', 'DecodeError']


class DecodeError(Exception):
    # Raised for malformed tokens, bad segment encoding, unsupported
    # algorithms, and signature verification failures.
    pass


# Supported HMAC-SHA2 signers; msg and key must both be byte strings.
signing_methods = {
    'HS256': lambda msg, key: hmac.new(key, msg, hashlib.sha256).digest(),
    'HS384': lambda msg, key: hmac.new(key, msg, hashlib.sha384).digest(),
    'HS512': lambda msg, key: hmac.new(key, msg, hashlib.sha512).digest(),
}
def base64url_decode(input):
    """Decodes a base64url string, restoring any stripped '=' padding.

    Args:
        input: base64url-encoded string (JWT segments carry no padding).

    Returns:
        the decoded byte string.
    """
    # BUG FIX: the previous unconditional "input += '=' * (4 - len(input) % 4)"
    # appended FOUR '=' characters when the input length was already a
    # multiple of 4; strict base64 decoders reject that. Pad only when needed.
    remainder = len(input) % 4
    if remainder:
        input += '=' * (4 - remainder)
    return base64.urlsafe_b64decode(input)
def base64url_encode(input):
    """Base64url-encodes a byte string and strips the '=' padding.

    Padding is dropped per the JWT compact serialization convention; '='
    only ever appears as trailing padding, so a global replace is safe.
    """
    return base64.urlsafe_b64encode(input).replace('=', '')
def header(jwt):
    """Returns the decoded JOSE header of a JWT without verifying anything."""
    encoded_header, _, _ = jwt.partition('.')
    try:
        return json.loads(base64url_decode(encoded_header))
    except (ValueError, TypeError):
        raise DecodeError("Invalid header encoding")
def encode(payload, key, algorithm='HS256'):
    """Encodes and signs payload as a compact-serialized JWT.

    Args:
        payload: JSON-serializable dict of claims.
        key: signing key; coerced to unicode then UTF-8 bytes.
        algorithm: one of the keys of signing_methods.

    Returns:
        the token string 'header.payload.signature'.

    Raises:
        NotImplementedError: if algorithm is not supported.
    """
    segments = []
    header = {"typ": "JWT", "alg": algorithm}
    segments.append(base64url_encode(json.dumps(header)))
    segments.append(base64url_encode(json.dumps(payload)))
    # The signature covers exactly 'header.payload'.
    signing_input = '.'.join(segments)
    try:
        ascii_key = unicode(key).encode('utf8')
        # KeyError from the lookup means the algorithm is unknown.
        signature = signing_methods[algorithm](signing_input, ascii_key)
    except KeyError:
        raise NotImplementedError("Algorithm not supported")
    segments.append(base64url_encode(signature))
    return '.'.join(segments)
def decode(jwt, key='', verify=True):
    """Decodes a JWT and optionally verifies its HMAC signature.

    Args:
        jwt: the compact-serialized token string.
        key: shared secret used for verification.
        verify: when False the signature is NOT checked (dangerous).

    Returns:
        the decoded claims dict.

    Raises:
        DecodeError: for malformed tokens, undecodable segments, unknown
            algorithms, or a signature mismatch.
    """
    try:
        signing_input, crypto_segment = jwt.rsplit('.', 1)
        header_segment, payload_segment = signing_input.split('.', 1)
    except ValueError:
        raise DecodeError("Not enough segments")
    try:
        header = json.loads(base64url_decode(header_segment))
        payload = json.loads(base64url_decode(payload_segment))
        signature = base64url_decode(crypto_segment)
    except (ValueError, TypeError):
        raise DecodeError("Invalid segment encoding")
    if verify:
        try:
            ascii_key = unicode(key).encode('utf8')
            # NOTE(review): the algorithm is taken from the attacker-supplied
            # header, and '==' is not a constant-time comparison. A hardened
            # implementation would pin the expected algorithm and compare with
            # hmac.compare_digest (Python 2.7.7+).
            if not signature == signing_methods[header['alg']](signing_input, ascii_key):
                raise DecodeError("Signature verification failed")
        except KeyError:
            raise DecodeError("Algorithm not supported")
    return payload
| Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains various initialization routines.
Many of these are HACKs that must be called very early in the application.
That's also why I have to be so careful about the order of imports.
"""
import os
import sys
def fix_sys_path():
    """Ensures the bundled third-party directory is first on sys.path."""
    here = os.path.dirname(__file__)
    third_party = os.path.join(here, os.pardir, 'third-party')
    # Idempotent: repeated calls never insert a duplicate entry.
    if third_party not in sys.path:
        sys.path.insert(0, third_party)
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IrrduinoServer
This is the server component of Irrduino. It's a Python Google App Engine
application.
"""
from irrduinoserver import initialization
initialization.fix_sys_path()
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from irrduinoserver.routes import ROUTES
def main():
    """Run the WSGI application."""
    # NOTE(review): debug=True exposes tracebacks to clients; confirm this is
    # intended outside development.
    application = webapp.WSGIApplication(ROUTES, debug=True)
    run_wsgi_app(application)


if __name__ == '__main__':
    main()
| Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the welcome page for the app."""
from google.appengine.ext import webapp
from irrduinoserver.utils import web as webutils
from irrduinoserver.utils import irrduino as irrduinoutils
from irrduinoserver.utils import ui as uiutils
SECS_PER_MINUTE = 60
# Manual watering requests are limited to 1..600 seconds (10 minutes).
MAX_TIME_MINS = 10
MIN_TIME_SECS = 1
MAX_TIME_SECS = MAX_TIME_MINS * SECS_PER_MINUTE
class IrrigateHandler(webapp.RequestHandler):
    """Serves the manual irrigation tab and executes watering commands."""

    def get(self, template_params=None):
        """Renders the irrigate page.

        Args:
            template_params: optional dict of extra template values; post()
                uses it to surface the controller's status response.
        """
        if template_params is None:
            template_params = {}
        template_params["tabs"] = uiutils.generate_tabs("irrigate")
        template_params["zones"] = sorted(irrduinoutils.ZONES.items())
        # Build (seconds, minutes) pairs for the duration drop-down.
        template_params["secs_and_mins"] = \
            [(mins * SECS_PER_MINUTE, mins)
             for mins in xrange(1, MAX_TIME_MINS + 1)]
        webutils.render_to_response(self, "irrigate.html", template_params)

    def post(self):
        """Control the sprinklers.

        Use assertions for IrrduinoController errors and ValueError exceptions
        for unexpected user input errors.
        """
        if self.request.get("get-system-status"):
            response = irrduinoutils.execute("/status")
            assert response
        elif self.request.get("water-zone"):
            zone = int(self.request.get("zone"))
            secs = int(self.request.get("secs"))
            if not zone in irrduinoutils.ZONES:
                raise ValueError("Invalid zone: %s" % zone)
            if not (MIN_TIME_SECS <= secs <= MAX_TIME_SECS):
                raise ValueError("Invalid secs: %s" % secs)
            response = irrduinoutils.execute("/zone%s/on/%ss" % (zone, secs))
            assert response["zone%s" % zone] == "on"
            assert int(response["time"]) == secs
        elif self.request.get("turn-off-everything"):
            response = irrduinoutils.execute("/off")
            # BUG FIX: this assertion was accidentally duplicated; one check
            # is sufficient.
            assert response["zones"] == "ALL OFF"
        else:
            raise ValueError("Invalid submit button")
        # Re-render the page with the controller's response as status.
        self.get({"status": response})
| Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is like FarmVille, but it actually works."""
from google.appengine.ext import webapp
from irrduinoserver.utils import web as webutils
from irrduinoserver.utils import irrduino as irrduinoutils
class LawnVilleHandler(webapp.RequestHandler):
    """Serves the LawnVille game page."""

    def get(self):
        # The template only needs the configured zones, sorted by zone id.
        template_params = {}
        template_params["zones"] = sorted(irrduinoutils.ZONES.items())
        webutils.render_to_response(self, "lawnville.html", template_params)
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This handles the about page for the app."""
from google.appengine.ext import webapp
from irrduinoserver.utils import web as webutils
from irrduinoserver.utils import ui as uiutils
class AboutHandler(webapp.RequestHandler):
    """Serves the static about page."""

    def get(self):
        # Only the tab bar state is dynamic on this page.
        template_params = {}
        template_params["tabs"] = uiutils.generate_tabs("about")
        webutils.render_to_response(self, "about.html", template_params)
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Output log information."""
from google.appengine.ext import webapp
from irrduinoserver import model
from irrduinoserver.utils import web as webutils
from irrduinoserver.utils import irrduino as irrduinoutils
from irrduinoserver.utils import ui as uiutils
class LogHandler(webapp.RequestHandler):
  """Shows recent zone runs (GET) and records new runs from the board (POST)."""

  def get(self):
    """Give the user information about the zone runs.
    This also supports ?format=JSON.
    """
    template_params = {}
    template_params["tabs"] = uiutils.generate_tabs("log")
    template_params["zone_runs"] = model.get_recent_zone_runs(
        num_zone_runs_to_show=16)
    # Precompute a display-friendly (PST-shifted) timestamp for each run.
    for zone_run in template_params["zone_runs"]:
      zone_run.localized_date = uiutils.localize_date(zone_run.created_at)
    if webutils.is_format_json(self):
      # JSON clients get plain dicts instead of datastore entities.
      template_params["zone_runs"] = map(
          webutils.entity_to_dict, template_params["zone_runs"])
      webutils.render_json_to_response(self, template_params)
    else:
      webutils.render_to_response(self, "log.html", template_params)

  def post(self):
    """Accept data from IrrduinoController.
    Store it in the datastore and just respond "OK".

    Expects integer "zone" and "runtime" (seconds) form parameters;
    responds with an error message for anything out of range.
    """
    try:
      zone = int(self.request.get("zone"))
      if zone not in irrduinoutils.ZONES:
        raise ValueError("Invalid zone: %s" % zone)
      runtime = int(self.request.get("runtime"))
      if runtime <= 0:
        raise ValueError("runtime out of range: %s" % runtime)
    # int() raises TypeError for None and ValueError for bad strings.
    except (ValueError, TypeError), e:
      webutils.error_response(self, msg="Invalid request: %r" % e)
    else:
      # Input is valid; persist the run.
      zone_run = model.ZoneRun(zone=zone, runtime_seconds=runtime)
      zone_run.put()
      self.response.out.write("OK")
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle reporting."""
from google.appengine.ext import webapp
from irrduinoserver import model
from irrduinoserver.utils import web as webutils
from irrduinoserver.utils import irrduino as irrduinoutils
from irrduinoserver.utils import ui as uiutils
# Conversion factor from seconds to minutes (flow rates are per minute).
MINS_PER_SEC = 1 / 60.0

class ReportsHandler(webapp.RequestHandler):
  """Renders water-usage and water-cost charts built from recorded zone runs."""

  def get(self):
    """Give the user information about the zone runs.

    Builds two Google Chart Tools row strings: gallons per zone per day,
    and estimated cost per day.
    """
    template_params = {}
    template_params["tabs"] = uiutils.generate_tabs("reports")
    zone_runs = model.get_recent_zone_runs()
    # Shuffle the data into:
    #   organized_by_date[date][nth_zone] = gallons
    organized_by_date = {}
    for zone_run in zone_runs:
      created_at = zone_run.created_at
      # Python months are 1-based, whereas Google Chart Tools expects them to
      # be 0-based.
      date = (created_at.year, created_at.month - 1, created_at.day)
      if not date in organized_by_date:
        organized_by_date[date] = [0] * len(irrduinoutils.ZONES)
      zone_data = irrduinoutils.ZONES.get(zone_run.zone)
      # You can tell IrrduinoController to water a zone even if that zone
      # isn't hooked up. Ignore such records.
      if zone_data is None:
        continue
      # gallons = seconds * (minutes / second) * (gallons / minute)
      gallons = (zone_run.runtime_seconds * MINS_PER_SEC *
                 zone_data["gallons_per_minute"])
      organized_by_date[date][zone_data["nth"]] += gallons
    # Shuffle the data into:
    #   "[[new Date(year, month, day), zone0_gallons, ...],
    #     ...]"
    date_gallons_per_zone_list = []
    sorted_organized_by_date_items = sorted(organized_by_date.items())
    for ((year, month, day), gallons_per_zone) in \
        sorted_organized_by_date_items:
      gallons_per_zone_str = ", ".join(map(str, gallons_per_zone))
      date_gallons_per_zone_list.append("[new Date(%s, %s, %s), %s]" %
                                        (year, month, day,
                                         gallons_per_zone_str))
    # Shuffle the data into:
    #   "[[new Date(year, month, day), cost], ...]"
    date_cost_list = []
    for ((year, month, day), gallons_per_zone) in \
        sorted_organized_by_date_items:
      gallons = sum(gallons_per_zone)
      # Water is billed per cubic foot, so convert gallons first.
      cost = (gallons * irrduinoutils.CUBIC_FEET_PER_GALLON *
              irrduinoutils.COST_PER_CUBIC_FOOT)
      date_cost_list.append("[new Date(%s, %s, %s), %s]" %
                            (year, month, day, cost))
    template_params["zones"] = sorted(irrduinoutils.ZONES.items())
    template_params["water_usage_rows"] = \
        "[%s]" % ",\n".join(date_gallons_per_zone_list)
    template_params["water_cost_rows"] = \
        "[%s]" % ",\n".join(date_cost_list)
    webutils.render_to_response(self, "reports.html", template_params)
| Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the routing for the application."""
from irrduinoserver.handlers.abouthandler import AboutHandler
from irrduinoserver.handlers.lawnvillehandler import LawnVilleHandler
from irrduinoserver.handlers.reportshandler import ReportsHandler
from irrduinoserver.handlers.irrigatehandler import IrrigateHandler
from irrduinoserver.handlers.loghandler import LogHandler
# URL routing table: maps each URL path to its webapp.RequestHandler class.
ROUTES = [
  ('/', IrrigateHandler),
  ('/reports', ReportsHandler),
  ('/log', LogHandler),
  ('/about', AboutHandler),
  ('/lawnville', LawnVilleHandler),
]
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains stuff related to the datastore."""
from google.appengine.ext import db
class ZoneRun(db.Model):
  """A zone run is when you water a zone for some amount of time."""
  # Irrduino zone number (see irrduinoutils.ZONES for metadata).
  zone = db.IntegerProperty()
  # How long the zone ran, in seconds.
  runtime_seconds = db.IntegerProperty()
  # Stamped automatically when the record is first stored.
  created_at = db.DateTimeProperty(auto_now_add=True)
def get_recent_zone_runs(num_zone_runs_to_show=100):
  """Return the newest ZoneRun entities, most recent first.

  Args:
    num_zone_runs_to_show: maximum number of entities to fetch.
  """
  query = ZoneRun.gql(
      "ORDER BY created_at DESC LIMIT %s" % num_zone_runs_to_show)
  return list(query)
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains helpers related to rendering, etc."""
import datetime
import os
import pdb
import simplejson
import sys
import time
from google.appengine.ext import db
from google.appengine.ext.webapp import template
import httplib2
# Timeout, in seconds, for outbound HTTP requests to the Arduino board.
HTTP_TIMEOUT = 10
# Property values of these types go into JSON output without conversion.
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
# Two-space indentation for pretty-printed JSON responses.
JSON_INDENTATION = " " * 2
def render_to_string(template_name, template_params=None):
  """Render the named template to a string.

  Args:
    template_name: file name of a template in the templates directory.
    template_params: optional dict of values made visible to the template.
  """
  params = template_params if template_params is not None else {}
  template_dir = os.path.join(os.path.dirname(__file__), os.pardir,
                              'templates')
  return template.render(os.path.join(template_dir, template_name), params)
def render_to_response(handler, template_name, template_params=None,
                       content_type=None):
  """Render a template into the handler's response body.

  Optionally, set the "Content-Type" header first.
  """
  if content_type is not None:
    handler.response.headers['Content-Type'] = content_type
  body = render_to_string(template_name, template_params)
  handler.response.out.write(body)
def error_response(handler, status=400, msg="Error", content_type="text/plain"):
  """Send an error to the client with the given status code and message."""
  response = handler.response
  response.set_status(status)
  response.headers['Content-Type'] = content_type
  response.out.write(msg)
def render_json_to_response(handler, obj):
  """Serialize obj as pretty-printed JSON and write it to the response."""
  handler.response.headers['Content-Type'] = 'application/json'
  body = simplejson.dumps(obj, indent=JSON_INDENTATION)
  handler.response.out.write(body)
def rescue_default(callback, default=""):
  """Return callback(), or default if it raises a benign lookup error.

  It's convenient to wrap an expression in a lambda to create the callback.
  Only IndexError, AttributeError, and ValueError are caught; any other
  exception propagates.
  """
  try:
    result = callback()
  except (IndexError, AttributeError, ValueError):
    return default
  return result
def require_params(handler, params, errors=None):
  """Append an error message for each empty/missing request parameter.

  Returns the error list (the caller-supplied one, if any was given).
  """
  collected = [] if errors is None else errors
  for name in params:
    if not handler.request.get(name):
      collected.append("Please enter a value for %s." % name)
  return collected
def pdb_set_trace():
  """Drop into pdb under Google App Engine.

  The dev server swaps out the standard streams, so restore the real ones
  before starting the debugger. You'll need to restart the server after
  using this. Sorry.
  """
  sys.stdin = sys.__stdin__
  sys.stdout = sys.__stdout__
  sys.stderr = sys.__stderr__
  pdb.set_trace()
def create_http_with_timeout():
  """Call httplib2.Http and set a timeout.

  Returns a new httplib2.Http client configured with HTTP_TIMEOUT seconds.
  """
  return httplib2.Http(timeout=HTTP_TIMEOUT)
def is_format_json(handler):
  """Return True if the request asked for JSON output via ?format=json."""
  requested = handler.request.get("format")
  return requested.lower() == "json"
def entity_to_dict(entity):
  """Convert a data entity to a JSON-serializable dict.

  Taken from: http://stackoverflow.com/questions/1531501/json-serialization-of-google-app-engine-models

  @param entity: a db.Model instance
  @raise ValueError: if a property type has no JSON mapping
  """
  output = {}
  for key, prop in entity.properties().iteritems():
    value = getattr(entity, key)
    if value is None or isinstance(value, SIMPLE_TYPES):
      output[key] = value
    elif isinstance(value, datetime.date):
      # Convert date/datetime to ms-since-epoch ("new Date()").
      ms = time.mktime(value.utctimetuple()) * 1000
      ms += getattr(value, 'microseconds', 0) / 1000
      output[key] = int(ms)
    elif isinstance(value, db.GeoPt):
      output[key] = {'lat': value.lat, 'lon': value.lon}
    elif isinstance(value, db.Model):
      # Recurse into referenced entities. The original called an undefined
      # to_dict() here, which raised NameError for reference properties.
      output[key] = entity_to_dict(value)
    else:
      # %r already produces a repr; don't double-wrap the value in repr().
      raise ValueError('Cannot encode: %r' % prop)
  return output
| Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains helpers related to controlling Irrduino.
See irrduino/IrrduinoController/web-ctrl-api.txt for documentation about
talking to the server.
"""
import simplejson
from irrduinoserver.utils import web as webutils
# Base URL of the IrrduinoController board; replace before deploying.
SERVER_ROOT = "http://YOUR-SERVER-NAME"
# See: https://docs.google.com/a/google.com/spreadsheet/ccc?key=0AuX1PmdkirJmdGNWRlpOTDY3WjVNUkczR2pMVGtnS1E&hl=en_US#gid=0
# Zone number -> metadata:
#   nth: zero-based column index used by the reports code
#   location / name: human-readable labels
#   gallons_per_minute: measured flow rate, used for usage/cost estimates
#   coordinates / center: image-map polygon and label position, in pixels
ZONES = {
  1: {"nth": 0, "location": "Back Yard", "name": "Garden",
      "gallons_per_minute": 1.50,
      "coordinates": "602,198,782,267,781,282,735,301,556,226,557,212",
      "center": (670, 244)},
  2: {"nth": 1, "location": "Back Yard", "name": "Lawn 1",
      "gallons_per_minute": 2.99,
      "coordinates": "497,210,793,329,726,363,428,238,472,222",
      "center": (615, 279)},
  3: {"nth": 2, "location": "Back Yard", "name": "Lawn 2",
      "gallons_per_minute": 5.46,
      "coordinates": "421,242,721,364,647,398,349,271",
      "center": (531, 311)},
  4: {"nth": 3, "location": "Back Yard", "name": "Lawn 3",
      "gallons_per_minute": 5.46, "coordinates":
      "574,426,536,409,535,383,474,382,313,318,313,284,345,269,641,397",
      "center": (463, 343)},
  5: {"nth": 4, "location": "Back Yard", "name": "Patio Plants",
      "gallons_per_minute": 0.52,
      "coordinates": "535,429,528,447,487,468,449,446,447,394,477,390,528,411",
      "center": (515, 451)},
  7: {"nth": 5, "location": "Front Yard", "name": "Left Side Lawn",
      "gallons_per_minute": 6.13,
      "coordinates": "148,354,294,410,148,475,2,413",
      "center": (151, 404)},
  8: {"nth": 6, "location": "Front Yard", "name": "Right Side Lawn",
      "gallons_per_minute": 6.13,
      "coordinates": "300,413,446,471,292,532,150,476",
      "center": (304, 465)}
}
# Water price used to estimate cost from usage (local utility rate).
COST_PER_CUBIC_FOOT = 0.0252
CUBIC_FEET_PER_GALLON = 0.134
def execute(path):
  """Do an HTTP GET to control the Arduino board.

  @param path: absolute URL path on the Irrduino server, e.g. "/off"
  @return: the board's JSON response, parsed
  @raise RuntimeError: if the board responds with a non-200 status
  """
  assert path.startswith("/")
  http = webutils.create_http_with_timeout()
  (headers, body) = http.request(SERVER_ROOT + path)
  # httplib2 exposes the status under the "status" key as a *string*, so
  # the original comparison against the int 200 was always unequal and
  # every call raised. Normalize to int before comparing.
  if int(headers["status"]) != 200:
    raise RuntimeError("Irrduino error", headers, body)
  json = simplejson.loads(body)
  return json
| Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains helpers related to user interface rendering."""
from datetime import timedelta
def generate_tabs(tab_name="watering"):
  """Build the tab-bar HTML, marking the named tab as selected."""
  # Intentionally not in the tab bar:
  #   ("LawnVille", "javascript:window.open('/lawnville', 'lawnville', 'width=800, height=600, status=no, toolbar=no, menubar=no, location=no, resizable=no, scrollbars=no')")
  tab_specs = (
      ("Irrigate", "/"),
      ("Log", "/log"),
      ("Reports", "/reports"),
      ("About", "/about"),
  )
  selected_name = tab_name.lower()
  items = []
  for (name, url) in tab_specs:
    css = " class=selected" if name.lower() == selected_name else ""
    items.append('<li%s><a href="%s">%s</a></li>' % (css, url, name))
  return "".join(items)
def localize_date(date, utc_offset_hours=-8):
  """Shift a naive UTC datetime into local wall-clock time.

  A quick and dirty hack for displaying time in PST: a fixed offset is
  applied, so this breaks as soon as we hit daylight savings.

  @param date: the datetime to shift
  @param utc_offset_hours: hours relative to UTC; defaults to -8 (PST)
      for backward compatibility, but is now configurable.
  """
  return date + timedelta(hours=utc_offset_hours)
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
# Matches one line of "hg annotate -c" output: a 12-hex-digit changeset
# hash, a colon, then the annotated source line.
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
'''
Runs an external process and returns its output as a list of lines.
@param args: the arguments to run
'''
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
universal_newlines = True,
shell = False)
output = process.communicate()[0]
return output.splitlines()
def FillMercurialRevisions(filename, parsed_file):
  '''
  Fills the revs attribute of all strings in the given parsed file with
  a list of revisions that touched the lines corresponding to that string.

  @param filename: the name of the file to get history for
  @param parsed_file: the parsed file to modify
  @raise ValueError: if hg produces a line we cannot parse
  '''
  # Take output of hg annotate to get revision of each line
  output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
  # Create a map of line -> revision (key is list index, line 0 doesn't exist)
  line_revs = ['dummy']
  for line in output_lines:
    rev_match = REVISION_REGEX.match(line)
    if not rev_match:
      # The original raised a plain string, which is itself a TypeError on
      # Python >= 2.6; raise a real exception instead.
      raise ValueError('Unexpected line of output from hg: %s' % line)
    line_revs.append(rev_match.group('hash'))
  # Renamed from "str" to avoid shadowing the builtin.
  for string_info in parsed_file.itervalues():
    # Get the lines that correspond to each string
    start_line = string_info['startLine']
    end_line = string_info['endLine']
    # Get the revisions that touched those lines
    revs = [line_revs[line_number]
            for line_number in range(start_line, end_line + 1)]
    # Merge with any revisions that were already there
    # (for explicit revision specification)
    if 'revs' in string_info:
      revs += string_info['revs']
    # Assign the revisions to the string
    string_info['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
  '''
  Tells whether a revision superceeds another.

  This essentially means that the older revision is an ancestor of the newer
  one. This also returns True if the two revisions are the same.

  @param rev1: the revision that may be superceeding the other
  @param rev2: the revision that may be superceeded
  @return: True if rev1 superceeds rev2 or they're the same
  '''
  if rev1 == rev2:
    return True
  # rev1 superceeds rev2 when rev2 shows up among rev1's ancestors.
  # TODO: Add filename
  ancestor_args = ['hg', 'log', '-r', 'ancestors(%s)' % rev1,
                   '--template', '{node|short}\n', filename]
  return rev2 in _GetOutputLines(ancestor_args)
def NewestRevision(filename, rev1, rev2):
  '''
  Returns which of two revisions is closest to the head of the repository.

  If none of them is the ancestor of the other, then we return either one.

  @param rev1: the first revision
  @param rev2: the second revision
  '''
  return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0]
print 'Commands are:'
print ' cleanup'
print ' translate'
print ' validate'
sys.exit(1)
def Translate(languages):
  '''
  Asks the user to interactively translate any missing or oudated strings from
  the files for the given languages.

  @param languages: the languages to translate
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()
  missing = validator.missing_in_lang()
  outdated = validator.outdated_in_lang()
  for lang in languages:
    # The validator only records a language in these maps when it actually
    # has problems, so plain indexing raised KeyError for fully-translated
    # languages (and always for 'en'). Also, missing entries are sets while
    # outdated entries are lists, so concatenating them directly would be a
    # TypeError; normalize both to lists first.
    untranslated = list(missing.get(lang, [])) + list(outdated.get(lang, []))
    if len(untranslated) == 0:
      continue
    translator = mytracks.translate.Translator(lang)
    translator.Translate(untranslated)
def Validate(languages):
'''
Computes and displays errors in the string files for the given languages.
@param languages: the languages to compute for
'''
validator = mytracks.validate.Validator(languages)
validator.Validate()
error_count = 0
if (validator.valid()):
print 'All files OK'
else:
for lang, missing in validator.missing_in_master().iteritems():
print 'Missing in master, present in %s: %s:' % (lang, str(missing))
error_count = error_count + len(missing)
for lang, missing in validator.missing_in_lang().iteritems():
print 'Missing in %s, present in master: %s:' % (lang, str(missing))
error_count = error_count + len(missing)
for lang, outdated in validator.outdated_in_lang().iteritems():
print 'Outdated in %s: %s:' % (lang, str(outdated))
error_count = error_count + len(outdated)
return error_count
if __name__ == '__main__':
argv = sys.argv
argc = len(argv)
if argc < 2:
Usage()
languages = mytracks.files.GetAllLanguageFiles()
if argc == 3:
langs = set(argv[2:])
if not langs.issubset(languages):
raise 'Language(s) not found'
# Filter just to the languages specified
languages = dict((lang, lang_file)
for lang, lang_file in languages.iteritems()
if lang in langs or lang == 'en' )
cmd = argv[1]
if cmd == 'translate':
Translate(languages)
elif cmd == 'validate':
error_count = Validate(languages)
else:
Usage()
error_count = 0
print '%d errors found.' % error_count
| Python |
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
'''
classdocs
'''
def __init__(self, language):
'''
Constructor
'''
self._language = language
def Translate(self, string_names):
print string_names | Python |
'''
Module which compares languague files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
  # Compares each language's strings.xml against the English master file and
  # records missing and outdated keys, using Mercurial history to decide
  # staleness.

  def __init__(self, languages):
    '''
    Builds a strings file validator.

    Parses strings.xml for every language up front and annotates each string
    with the Mercurial revisions that touched it.

    Params:
    @param languages: a dictionary mapping each language to its corresponding directory
    '''
    self._langs = {}
    self._master = None
    self._language_paths = languages
    parser = StringsParser()
    for lang, lang_dir in languages.iteritems():
      filename = os.path.join(lang_dir, 'strings.xml')
      parsed_file = parser.Parse(filename)
      mytracks.history.FillMercurialRevisions(filename, parsed_file)
      # 'en' is the master; every other language is compared against it.
      if lang == 'en':
        self._master = parsed_file
      else:
        self._langs[lang] = parsed_file
    self._Reset()

  def Validate(self):
    '''
    Computes whether all the data in the files for the given languages is valid.

    Results are exposed via valid(), missing_in_master(), missing_in_lang()
    and outdated_in_lang().
    '''
    self._Reset()
    self._ValidateMissingKeys()
    self._ValidateOutdatedKeys()

  def valid(self):
    '''Returns True if the last Validate() found no problems.'''
    return (len(self._missing_in_master) == 0 and
            len(self._missing_in_lang) == 0 and
            len(self._outdated_in_lang) == 0)

  def missing_in_master(self):
    '''Maps language -> keys present in that language but not in master.'''
    return self._missing_in_master

  def missing_in_lang(self):
    '''Maps language -> keys present in master but not in that language.'''
    return self._missing_in_lang

  def outdated_in_lang(self):
    '''Maps language -> keys whose master copy changed after the translation.'''
    return self._outdated_in_lang

  def _Reset(self):
    # These are maps from language to string name list
    self._missing_in_master = {}
    self._missing_in_lang = {}
    self._outdated_in_lang = {}

  def _ValidateMissingKeys(self):
    '''
    Computes whether there are missing keys on either side.
    '''
    master_keys = frozenset(self._master.iterkeys())
    for lang, file in self._langs.iteritems():
      keys = frozenset(file.iterkeys())
      missing_in_master = keys - master_keys
      missing_in_lang = master_keys - keys
      # Only record languages that actually have problems.
      if len(missing_in_master) > 0:
        self._missing_in_master[lang] = missing_in_master
      if len(missing_in_lang) > 0:
        self._missing_in_lang[lang] = missing_in_lang

  def _ValidateOutdatedKeys(self):
    '''
    Computers whether any of the language keys are outdated with relation to the
    master keys.
    '''
    for lang, file in self._langs.iteritems():
      outdated = []
      for key, str in file.iteritems():
        # Get all revisions that touched master and language files for this
        # string.
        master_str = self._master[key]
        master_revs = master_str['revs']
        lang_revs = str['revs']
        if not master_revs or not lang_revs:
          print 'WARNING: No revision for %s in %s' % (key, lang)
          continue
        master_file = os.path.join(self._language_paths['en'], 'strings.xml')
        lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
        # Assume that the repository has a single head (TODO: check that),
        # and as such there is always one revision which superceeds all others.
        master_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
            master_revs)
        lang_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
            lang_revs)
        # If the master version is newer than the lang version
        if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
          outdated.append(key)
      if len(outdated) > 0:
        self._outdated_in_lang[lang] = outdated
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Location of the Android resources, relative to the project root.
MYTRACKS_RES_DIR = 'MyTracks/res'
# Resource directory holding the master (English) strings.
ANDROID_MASTER_VALUES = 'values'
# Glob pattern matching the per-language resource directories.
ANDROID_VALUES_MASK = 'values-*'
def GetMyTracksDir():
  '''
  Returns the directory in which the MyTracks directory is located.

  Walks up from the current working directory until a directory that
  contains MYTRACKS_RES_DIR is found.

  @raise RuntimeError: if no enclosing My Tracks project is found
  '''
  path = os.getcwd()
  while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
    # Go up one level; when split() stops changing the path we've hit the
    # filesystem root (unlike the old `path == '/'` test, this also
    # terminates on Windows drive roots).
    parent = os.path.split(path)[0]
    if parent == path:
      # The original raised a plain string (a TypeError on Python >= 2.6).
      raise RuntimeError('Not in My Tracks project')
    path = parent
  return path
def GetAllLanguageFiles():
  '''
  Returns a mapping from all found languages to their respective directories.

  @raise RuntimeError: if no language directories or no master directory
      can be found
  '''
  mytracks_path = GetMyTracksDir()
  res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
  language_dirs = glob(res_dir)
  master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR,
                            ANDROID_MASTER_VALUES)
  # The original raised plain strings here, which is a TypeError on
  # Python >= 2.6; raise real exceptions instead.
  if len(language_dirs) == 0:
    raise RuntimeError('No languages found!')
  if not os.path.isdir(master_dir):
    raise RuntimeError('Couldn\'t find master file')
  # Extract the language code from each "values-<lang>" directory name.
  # (Renamed the loop variable so it no longer shadows the builtin dir().)
  language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0],
                      lang_dir)
                     for lang_dir in language_dirs]
  language_tuples.append(('en', master_dir))
  return dict(language_tuples)
| Python |
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
  '''
  Parser for string XML files.

  This object is not thread-safe and should be used for parsing a single file
  at a time, only.
  '''

  # Matches the KEEP_PARENT annotation inside an XML comment, with an
  # optional 12-hex-digit revision.
  _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
                                  r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
                                  r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
                                  re.MULTILINE | re.DOTALL)

  def Parse(self, file):
    '''
    Parses the given file and returns a dictionary mapping keys to an object
    with attributes for that key, such as the value, start/end line and explicit
    revisions.

    In addition to the standard XML format of the strings file, this parser
    supports an annotation inside comments, in one of these formats:
      <!-- KEEP_PARENT name="bla" -->
      <!-- KEEP_PARENT name="bla" rev="123456789012" -->
    Such an annotation indicates that we're explicitly inheriting form the
    master file (and the optional revision says that this decision is compatible
    with the master file up to that revision).

    @param file: the name of the file to parse
    '''
    self._Reset()
    # Unfortunately expat is the only parser that will give us line numbers
    parser = ParserCreate()
    parser.StartElementHandler = self._StartElementHandler
    parser.EndElementHandler = self._EndElementHandler
    parser.CharacterDataHandler = self._CharacterDataHandler
    parser.CommentHandler = self._CommentHandler
    self._xml_parser = parser
    file_obj = open(file)
    parser.ParseFile(file_obj)
    file_obj.close()
    return self._all_strings

  def _Reset(self):
    # State for the <string> element currently being read, plus the
    # accumulated results.
    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None
    self._all_strings = {}

  def _StartElementHandler(self, name, attrs):
    # Only named <string> elements are interesting.
    if name != 'string' or 'name' not in attrs:
      return
    assert not self._currentString
    assert not self._currentStringName
    entry = {
        'startLine': self._xml_parser.CurrentLineNumber,
    }
    if 'rev' in attrs:
      entry['revs'] = [attrs['rev']]
    self._currentString = entry
    self._currentStringName = attrs['name']
    self._currentStringValue = ''

  def _EndElementHandler(self, name):
    if name != 'string':
      return
    assert self._currentString
    assert self._currentStringName
    entry = self._currentString
    entry['value'] = self._currentStringValue
    entry['endLine'] = self._xml_parser.CurrentLineNumber
    self._all_strings[self._currentStringName] = entry
    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None

  def _CharacterDataHandler(self, data):
    # Accumulate text only while inside a <string> element.
    if not self._currentString:
      return
    self._currentStringValue += data

  def _CommentHandler(self, data):
    keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
    if not keep_parent_match:
      return
    line = self._xml_parser.CurrentLineNumber
    entry = {
        'keepParent': True,
        'startLine': line,
        'endLine': line,
    }
    rev = keep_parent_match.group('rev')
    if rev:
      entry['revs'] = [rev]
    self._all_strings[keep_parent_match.group('name')] = entry
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
# read source directories to scan from command line
SOURCE = sys.argv[1:]
# regex to find /* */ style comment blocks
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# regex used to detect if comment block is a license
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
# regex used to detect if comment block mentions a copyright
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)
# MIME types that are never scanned for license comments
EXCLUDE_TYPES = [
  "application/xml",
  "image/png",
]
# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
KNOWN_LICENSES = {}
class License:
  """One distinct license text and the files it applies to."""

  def __init__(self, license_text):
    self.license_text = license_text
    self.filenames = []

  def add_file(self, filename):
    # add filename to the list of files that have the same license text,
    # keeping the list free of duplicates
    if filename not in self.filenames:
      self.filenames.append(filename)
# Strips every non-word character when normalizing license text into a key.
LICENSE_KEY = re.compile(r"[^\w]")
def find_license(license_text):
  """Return the License entry for this text, creating one if it's new.

  The lookup key is the text lowercased with all non-word characters
  stripped, so trivially reformatted copies of a license collapse into
  a single entry.
  """
  # TODO(alice): a lot these licenses are almost identical Apache licenses.
  # Most of them differ in origin/modifications. Consider combining similar
  # licenses.
  key = LICENSE_KEY.sub("", license_text).lower()
  if key not in KNOWN_LICENSES:
    KNOWN_LICENSES[key] = License(license_text)
  return KNOWN_LICENSES[key]
def discover_license(exact_path, filename):
  """Scan one file for license text and record it in KNOWN_LICENSES.

  Two cases are handled: standalone LICENSE files (the license is applied
  to the filename prefix) and /* ... */ comment blocks inside source files.
  """
  # when filename ends with LICENSE, assume applies to filename prefixed
  if filename.endswith("LICENSE"):
    with open(exact_path) as file:
      license_text = file.read()
    target_filename = filename[:-len("LICENSE")]
    if target_filename.endswith("."): target_filename = target_filename[:-1]
    find_license(license_text).add_file(target_filename)
    return None
  # try searching for license blocks in raw file.
  # guess_type() returns a (type, encoding) tuple; the original compared the
  # whole tuple against EXCLUDE_TYPES (a list of strings), so the exclusion
  # never matched. Compare just the MIME type.
  mimetype = mimetypes.guess_type(filename)[0]
  if mimetype in EXCLUDE_TYPES: return None
  with open(exact_path) as file:
    raw_file = file.read()
  # include comments that have both "license" and "copyright" in the text
  for comment in COMMENT_BLOCK.finditer(raw_file):
    comment = comment.group(1)
    if COMMENT_LICENSE.search(comment) is None: continue
    if COMMENT_COPYRIGHT.search(comment) is None: continue
    find_license(comment).add_file(filename)
# Walk every requested source tree and collect license text from each file.
for source in SOURCE:
  for root, dirs, files in os.walk(source):
    for name in files:
      discover_license(os.path.join(root, name), name)
# Emit a single HTML page to stdout: for each distinct license, the list of
# covered files followed by the license text itself.
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
  print "<h3>Notices for files:</h3><ul>"
  filenames = license.filenames
  filenames.sort()
  for filename in filenames:
    print "<li>%s</li>" % (filename)
  print "</ul>"
  print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import ConfigParser
import cookielib
import fnmatch
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1

# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024

# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_UNKNOWN = "Unknown"

# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = ['application/javascript', 'application/x-javascript',
                  'application/x-freemind']

# Maps lowercase/short VCS spellings accepted on the command line to the
# canonical VCS_* constant.
VCS_ABBREVIATIONS = {
    VCS_MERCURIAL.lower(): VCS_MERCURIAL,
    "hg": VCS_MERCURIAL,
    VCS_SUBVERSION.lower(): VCS_SUBVERSION,
    "svn": VCS_SUBVERSION,
    VCS_GIT.lower(): VCS_GIT,
}

# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.

  Args:
    prompt: The prompt string, without trailing ": ".

  Returns:
    The email address entered (or the remembered one on empty input).
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      last_email = last_email_file.readline().strip("\n")
      last_email_file.close()
      prompt += " [%s]" % last_email
    except IOError, e:
      # Best-effort: an unreadable cache just means no suggestion is shown.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      last_email_file.write(email)
      last_email_file.close()
    except IOError, e:
      # Best-effort: failing to persist the address is not fatal.
      pass
  else:
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  If 'verbosity' is greater than 0, print the message.

  Args:
    msg: The string to print.
  """
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1."""
  print >>sys.stderr, msg
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # The parsed key=value response body from ClientLogin.
    self.args = args
    # The ClientLogin error code, e.g. "BadAuthentication" or
    # "CaptchaRequired" (see _Authenticate for the full set handled).
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
        NOTE(review): mutable default argument — shared across instances;
        safe only as long as callers never mutate it in place.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # Subclasses supply the opener (cookie handling lives there).
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request, applying host override and extra headers."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email: The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # Body is newline-separated key=value pairs; we need the "Auth" token.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 carries a ClientLogin error code in the body; surface it
        # as a ClientLoginError so _Authenticate can branch on e.reason.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener ignores redirects, so the expected success outcome is
      # a 302 back to continue_location (delivered as an HTTPError).
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response (or a 302) and
    directs us to authenticate ourselves with ClientLogin.
    """
    # Up to 3 attempts; only "BadAuthentication" re-prompts, every other
    # ClientLogin error code prints a message and aborts the loop.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        # Unrecognized error code: propagate.
        raise
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401 or e.code == 302:
            # Auth cookie expired or missing: re-authenticate and retry.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      # Restore the process-wide default socket timeout.
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Returns:
      A urllib2.OpenerDirector object.
    """
    # Note: no HTTPRedirectHandler on purpose — _GetAuthCookie relies on
    # seeing the raw 302 from /_ah/login.
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface definition. Options are grouped by concern:
# logging, review server, issue metadata, and patch/upload behavior.
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
                 default=False,
                 help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
                 help="Base repository URL (listed as \"Base URL\" when "
                      "viewing issue). If omitted, will be guessed automatically "
                      "for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Base revision/branch/tree to diff against. Use "
                      "rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
group.add_option("--vcs", action="store", dest="vcs",
                 metavar="VCS", default=None,
                 help=("Version control system (optional, usually upload.py "
                       "already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
                 dest="emulate_svn_auto_props", default=False,
                 help=("Emulate Subversion's auto properties feature."))
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Args:
    options: Parsed command-line options; reads .server, .host, .email and
      .save_cookies.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)

  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      # Use lazy %-args so the message is only formatted when the log
      # level actually emits it.
      logging.info("Using debug user %s. Override with --email", email)
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server

  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.

  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  # Plain form fields: header, blank separator line, then the value.
  for (name, value) in fields:
    parts.extend(['--' + BOUNDARY,
                  'Content-Disposition: form-data; name="%s"' % name,
                  '',
                  value])
  # File fields additionally carry a filename and a guessed Content-Type.
  for (name, filename, value) in files:
    parts.extend(['--' + BOUNDARY,
                  'Content-Disposition: form-data; name="%s"; filename="%s"' %
                  (name, filename),
                  'Content-Type: %s' % GetContentType(filename),
                  '',
                  value])
  # Closing boundary, plus a trailing CRLF.
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, CRLF.join(parts)
def GetContentType(filename):
  """Helper to guess the content-type from the filename."""
  guessed, _ = mimetypes.guess_type(filename)
  if not guessed:
    # Unknown extension: fall back to the generic binary type.
    return 'application/octet-stream'
  return guessed
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True,
                           env=os.environ):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).
    env: Environment for the child process (defaults to this process's
      environment).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines,
                       env=env)
  if print_output:
    # Echo stdout line by line while also capturing it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # Read stderr only after the process exits to avoid interleaving.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
  """Runs a command, exiting the program if it fails.

  Thin wrapper over RunShellWithReturnCode that treats a non-zero exit
  status — or, unless silent_ok is set, empty output — as a fatal error.

  Returns:
    The command's stdout as a string.
  """
  output, exit_code = RunShellWithReturnCode(command, print_output,
                                             universal_newlines, env)
  if exit_code:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not silent_ok and not output:
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""

  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options

  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")

  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty. For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple. Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files

  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""

    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      if len(content) > MAX_UPLOAD_SIZE:
        # Oversized files are flagged to the server but sent empty.
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)

    # Invert the (file_id_str, filename) pairs into a filename -> id map.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker means the server does not need the base content;
      # the numeric file id follows the last underscore.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)

  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")

  def IsBinary(self, filename):
    """Returns true if the guessed mimetype isn't in the text group."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False  # e.g. README, "real" binaries usually have an extension
    # special case for text files which don't start with text/
    if mimetype in TEXT_MIMETYPES:
      return False
    return not mimetype.startswith("text/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "REV" or "REV1:REV2"; group(3) is None for the single form.
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
    self.svnls_cache = {}
    # Base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    return self.svn_base

  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting setups get hand-crafted web-view base URLs.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def GenerateDiff(self, args):
    """Runs "svn diff" and returns its output, aborting if it looks empty."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date': ['Date', 'LastChangedDate'],
      'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
      'Author': ['Author', 'LastChangedBy'],
      'HeadURL': ['HeadURL', 'URL'],
      'Id': ['Id'],
      # Aliases
      'LastChangedDate': ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy': ['LastChangedBy', 'Author'],
      'URL': ['URL', 'HeadURL'],
    }

    def repl(m):
      # Expanded keyword ("$Kw:: value $") collapses to blanks of the same
      # width; plain "$Kw: value$" collapses to "$Kw$".
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)
    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)

  def GetUnknownFiles(self):
    """Returns the "?" (unversioned) lines from "svn status"."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Presence in old/new listings determines deleted/modified/added.
      if relfilename in old_files and relfilename not in new_files:
        status = "D "
      elif relfilename in old_files and relfilename in new_files:
        status = "M "
      else:
        status = "A "
    return status

  def GetBaseFile(self, filename):
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        # Binary content must not have newlines translated.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            # Collapse expanded SVN keywords so the diff applies cleanly.
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> (hash before, hash after) of base file.
    # Hashes for "no such file" are represented as None.
    self.hashes = {}
    # Map of new filename -> old filename for renames.
    self.renames = {}
  def GenerateDiff(self, extra_args):
    """Returns "git diff" output converted to an svn-style unified diff.

    Args:
      extra_args: Extra arguments passed through to "git diff".
    """
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the files, so we can upload them along with our diff.
    # Special used by git to indicate "no such content".
    NULL_HASH = "0"*40
    extra_args = extra_args[:]
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    # --no-ext-diff is broken in some versions of Git, so try to work around
    # this by overriding the environment (but there is still a problem if the
    # git config key "diff.external" is used).
    env = os.environ.copy()
    if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
    gitdiff = RunShell(["git", "diff", "--no-ext-diff", "--full-index", "-M"]
                       + extra_args, env=env)

    def IsFileNew(filename):
      # A file is new when its "before" hash was the null hash.
      return filename in self.hashes and self.hashes[filename][0] is None

    def AddSubversionPropertyChange(filename):
      """Add svn's property change information into the patch if given file is
      new file.

      We use Subversion's auto-props setting to retrieve its property.
      See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
      Subversion's [auto-props] setting.
      """
      if self.options.emulate_svn_auto_props and IsFileNew(filename):
        svnprops = GetSubversionPropertyChanges(filename)
        if svnprops:
          svndiff.append("\n" + svnprops + "\n")

    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/(.*)$", line)
      if match:
        # Add auto property here for previously seen file.
        if filename is not None:
          AddSubversionPropertyChange(filename)
        filecount += 1
        # Intentionally use the "after" filename so we can show renames.
        filename = match.group(2)
        svndiff.append("Index: %s\n" % filename)
        if match.group(1) != match.group(2):
          self.renames[match.group(2)] = match.group(1)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.(\w+)", line)
        if match:
          before, after = (match.group(1), match.group(2))
          if before == NULL_HASH:
            before = None
          if after == NULL_HASH:
            after = None
          self.hashes[filename] = (before, after)
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    # Add auto property for the last seen file.
    assert filename is not None
    AddSubversionPropertyChange(filename)
    return "".join(svndiff)
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetFileContent(self, file_hash, is_binary):
"""Returns the content of a file identified by its git hash."""
data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
universal_newlines=not is_binary)
if retcode:
ErrorExit("Got error status from 'git show %s'" % file_hash)
return data
def GetBaseFile(self, filename):
hash_before, hash_after = self.hashes.get(filename, (None,None))
base_content = None
new_content = None
is_binary = self.IsBinary(filename)
status = None
if filename in self.renames:
status = "A +" # Match svn attribute name for renames.
if filename not in self.hashes:
# If a rename doesn't change the content, we never get a hash.
base_content = RunShell(["git", "show", "HEAD:" + filename])
elif not hash_before:
status = "A"
base_content = ""
elif not hash_after:
status = "D"
else:
status = "M"
is_image = self.IsImage(filename)
# Grab the before/after content if we need it.
# We should include file contents if it's text or it's an image.
if not is_binary or is_image:
# Grab the base content if we don't have it already.
if base_content is None and hash_before:
base_content = self.GetFileContent(hash_before, is_binary)
# Only include the "after" file if it's an image; otherwise it
# it is reconstructed from the diff.
if is_image and hash_after:
new_content = self.GetFileContent(hash_after, is_binary)
return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    """Args:
      options: Command line options.
      repo_dir: Absolute path of the repository root ("hg root").
    """
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default to the first parent of the working directory.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), (filename, self.subdir)
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Returns "hg diff" output rewritten to resemble "svn diff" output.

    Each file's diff gets an "Index: <filename>" header plus a separator
    line so the upload server needs no Mercurial-specific handling.
    """
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    # "?"-status lines mark files Mercurial is not tracking.
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for filename.

    new_content is kept only for binary images; for everything else it is
    cleared at the end, since the server reconstructs text from the diff.
    """
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    # A numeric revision may be given as "rev:node"; "hg cat" wants the rev.
    if ":" in self.base_rev:
      base_rev = self.base_rev.split(":", 1)[0]
    else:
      base_rev = self.base_rev
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
      pertaining to filename.
  """
  pieces = []
  current_name = None
  current_lines = []
  for text_line in data.splitlines(True):
    starting_name = None
    if text_line.startswith('Index:'):
      _, starting_name = text_line.split(':', 1)
      starting_name = starting_name.strip()
    elif text_line.startswith('Property changes on:'):
      _, prop_name = text_line.split(':', 1)
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the same
      # otherwise the file shows up twice.
      prop_name = prop_name.strip().replace('\\', '/')
      if prop_name != current_name:
        # File has property changes but no modifications, create a new diff.
        starting_name = prop_name
    if starting_name:
      # A new file begins: flush the previous one, if any.
      if current_name and current_lines:
        pieces.append((current_name, ''.join(current_lines)))
      current_name = starting_name
      current_lines = [text_line]
    else:
      current_lines.append(text_line)
  if current_name and current_lines:
    pieces.append((current_name, ''.join(current_lines)))
  return pieces
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Args:
    issue: The issue id (string or int).
    rpc_server: An object with a Send(url, body, content_type=...) method.
    patchset: The patchset id the patches belong to.
    data: Full diff text; split per file with SplitPatch.
    options: Parsed command line options (download_base is consulted).

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # Skip over-sized per-file patches rather than failing the whole upload.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    lines = response_body.splitlines()
    # The server answers "OK\n<patch key>" on success.
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCSName():
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns a string indicating which VCS is detected.

  Returns:
    A pair (vcs, output).  vcs is a string indicating which VCS was detected
    and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, or VCS_UNKNOWN.
    output is a string containing any interesting output from the vcs
    detection routine, or None if there is nothing interesting.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return (VCS_MERCURIAL, out.strip())
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise

  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return (VCS_SUBVERSION, None)

  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return (VCS_GIT, None)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise

  return (VCS_UNKNOWN, None)
def GuessVCS(options):
  """Helper to guess the version control system.

  This verifies any user-specified VersionControlSystem (by command line
  or environment variable).  If the user didn't specify one, this examines
  the current directory, guesses which VersionControlSystem we're using,
  and returns an instance of the appropriate class.  Exit with an error
  if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  vcs = options.vcs
  if not vcs:
    # Fall back to the CODEREVIEW_VCS environment variable.
    vcs = os.environ.get("CODEREVIEW_VCS")
  if vcs:
    v = VCS_ABBREVIATIONS.get(vcs.lower())
    if v is None:
      ErrorExit("Unknown version control system %r specified." % vcs)
    (vcs, extra_output) = (v, None)
  else:
    (vcs, extra_output) = GuessVCSName()

  if vcs == VCS_MERCURIAL:
    if extra_output is None:
      # GuessVCSName was bypassed, so ask hg for the repo root directly.
      extra_output = RunShell(["hg", "root"]).strip()
    return MercurialVCS(options, extra_output)
  elif vcs == VCS_SUBVERSION:
    return SubversionVCS(options)
  elif vcs == VCS_GIT:
    return GitVCS(options)

  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def CheckReviewer(reviewer):
  """Validate a reviewer -- either a nickname or an email address.

  Args:
    reviewer: A nickname or an email address.

  Calls ErrorExit() if it is an invalid email address.
  """
  if "@" not in reviewer:
    # No domain part: treat it as a nickname, which needs no validation.
    return
  local_and_domain = reviewer.split("@")
  # A valid address has exactly one "@" and a dot somewhere in the domain.
  if len(local_and_domain) != 2 or "." not in local_and_domain[1]:
    ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
  """Returns Subversion's [auto-props] configuration as a dictionary.

  Returns:
    A dict mapping each file pattern from the [auto-props] section to its
    parsed (key, value) property pairs.  An empty dict is returned when the
    config file does not exist or when 'enable-auto-props' is not enabled
    in the [miscellany] section.
  """
  # Todo(hayato): Windows users might use different path for configuration file.
  config_path = os.path.expanduser("~/.subversion/config")
  if not os.path.exists(config_path):
    return {}
  parser = ConfigParser.ConfigParser()
  parser.read(config_path)
  enabled = (parser.has_section("miscellany") and
             parser.has_option("miscellany", "enable-auto-props") and
             parser.getboolean("miscellany", "enable-auto-props") and
             parser.has_section("auto-props"))
  if not enabled:
    return {}
  auto_props = {}
  for file_pattern in parser.options("auto-props"):
    auto_props[file_pattern] = ParseSubversionPropertyValues(
        parser.get("auto-props", file_pattern))
  return auto_props
def ParseSubversionPropertyValues(props):
  """Parses one [auto-props] value into (svn_prop_key, svn_prop_value) pairs.

  Properties are separated by ";".  A property given without an "=value"
  part receives "*" as its value, following Subversion's convention.

  >>> ParseSubversionPropertyValues('svn:eol-style=LF')
  [('svn:eol-style', 'LF')]
  >>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
  [('svn:mime-type', 'image/jpeg')]
  >>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
  [('svn:eol-style', 'LF'), ('svn:executable', '*')]
  """
  pairs = []
  for prop in props.split(";"):
    pieces = prop.split("=")
    assert len(pieces) <= 2
    if len(pieces) == 2:
      pairs.append((pieces[0], pieces[1]))
    else:
      # No explicit value: "*" is Subversion's conventional default.
      pairs.append((pieces[0], "*"))
  return pairs
def GetSubversionPropertyChanges(filename):
  """Returns svn's 'Property changes on ...' text for filename, or None.

  Args:
    filename: filename whose property might be set by [auto-props] config.

  Returns:
    A 'Property changes on |filename| ...' string when |filename| matches
    at least one [auto-props] pattern; None otherwise.
  """
  global svn_auto_props_map
  # Lazily load and cache the [auto-props] configuration on first use.
  if svn_auto_props_map is None:
    svn_auto_props_map = LoadSubversionAutoProperties()

  matched_props = []
  for file_pattern, props in svn_auto_props_map.items():
    if fnmatch.fnmatch(filename, file_pattern):
      matched_props.extend(props)
  if not matched_props:
    return None
  return FormatSubversionPropertyChanges(filename, matched_props)
def FormatSubversionPropertyChanges(filename, props):
"""Returns Subversion's 'Property changes on ...' strings using given filename
and properties.
Args:
filename: filename
props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
Returns:
A string which can be used in the patch file for Subversion.
See the following doctest for example.
>>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
Property changes on: foo.cc
___________________________________________________________________
Added: svn:eol-style
+ LF
<BLANKLINE>
"""
prop_changes_lines = [
"Property changes on: %s" % filename,
"___________________________________________________________________"]
for key, value in props:
prop_changes_lines.append("Added: " + key)
prop_changes_lines.append(" + " + value)
return "\n".join(prop_changes_lines) + "\n"
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  base = options.base_url
  if isinstance(vcs, SubversionVCS):
    # Guessing the base field is only supported for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    guessed_base = vcs.GuessBase(options.download_base)
    if base:
      if guessed_base and base != guessed_base:
        print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
            (base, guessed_base)
    else:
      base = guessed_base

  # NOTE(review): this assignment looks redundant -- the condition already
  # requires options.download_base to be truthy.  Confirm against upstream
  # upload.py whether a different flag was intended here.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      CheckReviewer(reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      CheckReviewer(cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  if options.private:
    if options.issue:
      print "Warning: Private flag ignored when updating an existing issue."
    else:
      form_fields.append(("private", "1"))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  # Very large diffs are uploaded file-by-file via UploadSeparatePatches
  # below instead of inline with the issue creation request.
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # The server's reply is "<msg>\n<patchset id>\n<patch lines...>".
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the final path component of the issue URL in msg.
  issue = msg[msg.rfind("/")+1:]

  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result

  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Command-line entry point: runs RealMain over sys.argv.

  Exits with status 1 on Ctrl-C instead of dumping a traceback.
  """
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)


if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds the messages that are missing translations in the locale *.po files.
Usage:
From the personfinder root directory:
tools/find_missing_translations.py
PO file format:
http://www.gnu.org/software/hello/manual/gettext/PO-Files.html
"""
import codecs
import copy
import optparse
import os
import re
import sys
# Path of the gettext catalog, relative to each per-language locale directory.
TRANSLATION_FILE = 'LC_MESSAGES/django.po'
# Keywords that begin the message-id and message-string lines of a PO entry.
MSG_ID_TOKEN = 'msgid'
MSG_STR_TOKEN = 'msgstr'
# Comment marker gettext uses to flag an uncertain ("fuzzy") translation.
FUZZY_TOKEN = '#, fuzzy'
def check_key_value(option, opt, value):
    """optparse type checker for '<key>:<value>' argument strings.

    Args:
        option: the optparse.Option being processed (unused).
        opt: the option string seen on the command line, used in errors.
        value: the raw argument text to validate.

    Returns:
        The (key, value) tuple parsed from the argument.

    Raises:
        optparse.OptionValueError: if value is not exactly '<key>:<value>'.
    """
    pieces = value.split(':')
    if len(pieces) != 2:
        raise optparse.OptionValueError(
            "option %s: invalid value: %s should be of the form '<key>:<value>'"
            % (opt, value))
    return tuple(pieces)
class ExtraOptions(optparse.Option):
    """Extends base class to allow stringified key-value pairs as a type."""
    # Register 'key_values' as an additional optparse value type; arguments
    # are validated and converted to (key, value) tuples by check_key_value.
    TYPES = optparse.Option.TYPES + ("key_values",)
    TYPE_CHECKER = copy.copy(optparse.Option.TYPE_CHECKER)
    TYPE_CHECKER["key_values"] = check_key_value
def OptParseDefinitions():
    """Builds the command-line parser and parses sys.argv.

    Returns:
        The (options, args) pair produced by optparse.
    """
    parser = optparse.OptionParser(option_class=ExtraOptions)
    flag_specs = [
        ('--locale_dir',
         dict(default='locale',
              help='directory for the translation files (*.po, *.mo)')),
        ('--template',
         dict(action='store_true', default=False,
              help='format output as a template to be filled in')),
        ('--fuzzy_ok',
         dict(action='store_true', default=False,
              help='don\'t report fuzzy translations as missing')),
        ('--exclude',
         dict(action='append', type='key_values', default=[],
              help='skip these source files')),
        ('--verbose',
         dict(action='store_true', default=False,
              help='if true, print out a bunch of useless guff.')),
    ]
    for flag, kwargs in flag_specs:
        parser.add_option(flag, **kwargs)
    return parser.parse_args()
def get_translation_files(locale_dir):
    """Yields (lang, .po file path) tuples for the given --locale_dir.

    Only languages whose directory actually contains the expected
    catalog file are yielded.
    """
    for language in os.listdir(locale_dir):
        candidate = os.path.join(locale_dir, language, TRANSLATION_FILE)
        if os.path.isfile(candidate):
            yield language, candidate
def get_untranslated_msg_ids_from_file(po_file, fuzzy_ok):
    """Yields msg id's defined in the po_file that are missing translations.

    Args:
        po_file: path of the PO catalog to scan.
        fuzzy_ok: if True, fuzzy entries are not treated as missing.

    Yields:
        (msg id, comment) pairs for each entry with an empty msgstr, or
        with a fuzzy msgstr when fuzzy_ok is false.
    """
    def get_messages(po_file):
        """Yields (msg id, msg str, comment, is_fuzzy) tuples as defined in the
        po_file."""
        msg_id, msg_str, comment, is_fuzzy = '', '', '', False
        for line in codecs.open(po_file, 'r', 'utf8'):
            if line.startswith('#'):
                # comments start a new "block", so yield a result at this
                # point if we've completed a block
                if msg_id:
                    yield msg_id, msg_str, comment, is_fuzzy
                    msg_id, msg_str, comment, is_fuzzy = '', '', '', False
                if line.startswith(FUZZY_TOKEN):
                    is_fuzzy = True
                else:
                    comment += line
                continue
            if line:
                if line.startswith(MSG_ID_TOKEN):
                    msg_id = line.replace(MSG_ID_TOKEN, '').strip().strip('"')
                    current = 'id'
                elif line.startswith(MSG_STR_TOKEN):
                    msg_str = line.replace(MSG_STR_TOKEN, '').strip().strip('"')
                    current = 'str'
                else:
                    # Continuation lines extend whichever field came last.
                    # NOTE(review): 'current' is only bound after a
                    # msgid/msgstr line has been seen; a file beginning with
                    # a bare continuation line would raise UnboundLocalError
                    # here -- confirm inputs always start with msgid.
                    if current == 'id':
                        msg_id += line.strip().strip('"')
                    elif current == 'str':
                        msg_str += line.strip().strip('"')
                    else:
                        print >>sys.stderr, (
                            'Parsing error in %r, line %r' % (po_file, line))
        # Flush the final block at end of file.
        yield msg_id, msg_str, comment, is_fuzzy

    for msg_id, msg_str, comment, is_fuzzy in get_messages(po_file):
        if msg_id and (not msg_str or (is_fuzzy and not fuzzy_ok)):
            yield msg_id, comment
# Extracts the source filename from a "#: path/to/file:123" PO comment line.
_FILENAME_FROM_COMMENT = re.compile("#: ([^:]*):\d+")


def find_missing_translations(locale_dir, template, fuzzy_ok, excluded_files,
                              verbose=False):
    """Output to stdout the message id's that are missing translations.

    Args:
        locale_dir: directory containing the per-language locale trees.
        template: if True, print entries as a fill-in PO template.
        fuzzy_ok: if True, fuzzy translations are not reported as missing.
        excluded_files: iterable of (lang, source filename) pairs to skip.
        verbose: if True, print each missing message id.
    """
    for lang, po_file in get_translation_files(locale_dir):
        if lang != 'en':
            print "LANGUAGE = %s" % lang
            num_missing = 0
            for msg_id, comment in get_untranslated_msg_ids_from_file(po_file,
                                                                      fuzzy_ok):
                filename_match = _FILENAME_FROM_COMMENT.match(comment)
                if filename_match:
                    if (lang, filename_match.group(1)) in excluded_files:
                        continue
                num_missing += 1
                # NOTE(review): '\"' is the same string as '"', so this
                # replace is a no-op; escaping quotes ('\\"') was probably
                # intended -- confirm before relying on the output format.
                quoted_msg = msg_id.replace('"', '\"')
                if template:
                    print '\n%s%s "%s"\n%s ""' % (
                        comment, MSG_ID_TOKEN, quoted_msg, MSG_STR_TOKEN)
                else:
                    if verbose:
                        print ' missing: "%s"' % quoted_msg
            if not num_missing:
                print " ok"
def main():
    """Parses command-line options and reports missing translations."""
    options, args = OptParseDefinitions()
    # All inputs are taken via flags; positional arguments are not accepted.
    assert not args
    print "verbose = %s" % options.verbose
    find_missing_translations(options.locale_dir, options.template,
                              options.fuzzy_ok, options.exclude,
                              options.verbose)


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python2.5
# Copyright 2009-2010 by Ka-Ping Yee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interactive Python console connected to an app's datastore.
Instead of running this script directly, use the 'console' shell script,
which sets up the PYTHONPATH and other necessary environment variables."""
import code
import getpass
import logging
import optparse
import os
import sys
import urllib
import yaml
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext import db
# Make some useful environment variables available.
# These are exported by the 'console' wrapper script mentioned in the module
# docstring; a missing variable raises KeyError at import time, failing fast.
APP_DIR = os.environ['APP_DIR']
APPENGINE_DIR = os.environ['APPENGINE_DIR']
PROJECT_DIR = os.environ['PROJECT_DIR']
TOOLS_DIR = os.environ['TOOLS_DIR']
TESTS_DIR = os.environ['TESTS_DIR']
def key_repr(key):
    """Returns a compact '<Key: Kind id/Kind id>' form of a datastore key.

    Ancestors are listed first; each level shows its numeric id when set,
    otherwise the repr of its string name.
    """
    parts = []
    level = key
    while level:
        parts.append('%s %s' % (level.kind(), level.id() or repr(level.name())))
        level = level.parent()
    return '<Key: %s>' % '/'.join(reversed(parts))
def model_repr(model):
    """Returns a compact '<Kind: id>' form of a datastore model instance."""
    if not model.is_saved():
        # Unsaved instances have no key yet, so only the kind is shown.
        return '<%s: unsaved>' % model.kind()
    key = model.key()
    return '<%s: %s>' % (key.kind(), key.id() or repr(key.name()))
def get_app_id():
    """Gets the app_id from the app.yaml configuration file.

    Returns:
        The 'application' value read from APP_DIR/app.yaml.
    """
    # Close the file explicitly instead of leaking the handle; the original
    # relied on garbage collection to close it.  (try/finally rather than
    # 'with' keeps this runnable on the Python 2.5 noted in the shebang.)
    config_file = open(APP_DIR + '/app.yaml')
    try:
        return yaml.safe_load(config_file)['application']
    finally:
        config_file.close()
def connect(server, app_id=None, username=None, password=None):
    """Sets up a connection to an app that has the remote_api handler.

    Args:
        server: 'host:port' string of the target app server.
        app_id: application ID; defaults to the value in app.yaml.
        username: account email; prompted for interactively if omitted.
        password: account password; prompted for (no echo) if omitted.
    """
    if not app_id:
        app_id = get_app_id()
    print 'Application ID: %s' % app_id
    print 'Server: %s' % server
    if not username:
        username = raw_input('Username: ')
    else:
        print 'Username: %s' % username
    # Sets up users.get_current_user() inside of the console
    os.environ['USER_EMAIL'] = username
    if not password:
        password = getpass.getpass('Password: ')
    remote_api_stub.ConfigureRemoteDatastore(
        app_id, '/remote_api', lambda: (username, password), server)
    db.Query().count()  # force authentication to happen now
def main():
    """Entry point: parses flags, connects to the datastore, starts a REPL."""
    default_address = 'localhost'
    default_port = 8080
    default_app_id = get_app_id()
    default_username = os.environ['USER'] + '@google.com'
    parser = optparse.OptionParser(usage='''%%prog [options] [server]
Starts an interactive console connected to an App Engine datastore.
The [server] argument is a shorthand for setting the hostname, port
number, and application ID. For example:
    %%prog xyz.appspot.com # uses port 80, app ID 'xyz'
    %%prog localhost:6789 # uses port 6789, app ID %r''' % default_app_id)
    parser.add_option('-a', '--address',
                      help='appserver hostname (default: localhost)')
    parser.add_option('-p', '--port', type='int',
                      help='appserver port number (default: 8080)')
    parser.add_option('-A', '--application',
                      help='application ID (default: %s)' % default_app_id)
    parser.add_option('-u', '--username',
                      help='username (default: %s)' % default_username)
    parser.add_option('-c', '--command',
                      help='Python commands to execute')
    options, args = parser.parse_args()
    # Handle shorthand for address, port number, and app ID.
    if args:
        default_address, default_port = urllib.splitport(args[0])
        default_port = int(default_port or 80)
        if default_address != 'localhost':
            default_app_id = default_address.split('.')[0]
    # Apply defaults. (We don't use optparse defaults because we want to let
    # explicit settings override our defaults.)
    address = options.address or default_address
    port = options.port or default_port
    app_id = options.application or default_app_id
    username = options.username or default_username
    password = None
    # Use a dummy password when connecting to a development app server.
    if address == 'localhost':
        password = 'foo'
    # Connect to the app server.
    # NOTE(review): logging.basicConfig takes 'stream=', not 'file='; this
    # keyword is most likely silently ignored -- confirm intended behavior.
    logging.basicConfig(file=sys.stderr, level=logging.INFO)
    connect('%s:%d' % (address, port), app_id, username, password)
    # Set up more useful representations for interactive data manipulation
    # and debugging. Alas, the App Engine runtime relies on the specific
    # output of repr(), so this isn't safe in production, only debugging.
    db.Key.__repr__ = key_repr
    db.Model.__repr__ = model_repr
    # Make some useful functions available in the interactive console.
    import model
    import setup
    # NOTE(review): mutating locals() inside a function is undefined in
    # CPython; these names may not actually appear in the console namespace
    # unless passed explicitly -- verify in the target interpreter.
    locals().update(model.__dict__)
    locals().update(setup.__dict__)
    if options.command:
        exec options.command
    else:
        code.interact('', None, locals())


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unix command-line utility: import CSV files into the datastore."""
import remote_api
import csv
import importer
import sys
# Maximum number of per-record errors to print before truncating the list.
SHOW_ERRORS = 5


def import_from_file(host, subdomain, kind, converter, filename):
    """Imports the CSV records in filename into the datastore.

    Args:
        host: connection handle returned by remote_api.connect().
        subdomain: Person Finder subdomain to import the records into.
        kind: entity kind name; used only in the progress message.
        converter: importer factory that turns a CSV row dict into an entity.
        filename: path of the CSV file to read.
    """
    # NOTE(review): 'source_domain' is neither a parameter nor a local; it
    # resolves to the module-level global bound in the __main__ block below,
    # so this function only works when the script is run directly.  Consider
    # passing source_domain in explicitly -- TODO confirm intent.
    print '%s: importing %s records from %s' % (host, kind, filename)
    written, skipped, total = importer.import_records(
        subdomain, source_domain, converter,
        importer.utf8_decoder(csv.DictReader(open(filename))))
    for error, record in skipped[:SHOW_ERRORS]:
        print ' - %s: %r' % (error, record)
    if len(skipped) > SHOW_ERRORS:
        print ' (more errors not shown)'
    print 'wrote %d of %d (skipped %d with errors)' % (
        written, total, len(skipped))
if __name__ == '__main__':
    # Exactly six arguments must follow the program name.  The original
    # check (len(sys.argv) < 6) let a five-argument call through and then
    # crashed on the unpacking below with a ValueError instead of showing
    # the usage message; extra arguments crashed the same way.
    if len(sys.argv) != 7:
        raise SystemExit(
            'Usage: %s app_id host subdomain source_domain person.csv note.csv'
            % sys.argv[0])
    app_id, host, subdomain, source_domain, person_file, note_file = \
        sys.argv[1:]
    host = remote_api.connect(host, app_id)
    if person_file:
        import_from_file(
            host, subdomain, 'Person', importer.create_person, person_file)
    if note_file:
        import_from_file(
            host, subdomain, 'Note', importer.create_note, note_file)
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line utility: imports a site export file into the datastore.
You may want first clear the current datastore before running this;
see http://code.google.com/appengine/docs/python/tools/devserver.html#Using_the_Datastore
for instructions.
Once that's done, with the server running, do
$ tools/site_export_importer.py path/to/export_file.zip
"""
# import this first to ensure to add necessary paths to find other project
# imports
import remote_api
# python standard library
import logging
import optparse
import pfif
import sys
import StringIO
import zipfile
# personfinder modules
from model import *
import importer
def open_file_inside_zip(zip_path):
    """Returns a file-like object reading the sole entry of the zip archive
    at zip_path.  Raises IOError if the archive holds more than one entry."""
    archive = zipfile.ZipFile(zip_path)
    entries = archive.infolist()
    if len(entries) > 1:
        raise IOError('zip archive had %d entries (expected 1)' % len(entries))
    sole_entry = entries[0]
    logging.info('Reading from zip entry: %s', sole_entry.filename)
    return StringIO.StringIO(archive.read(sole_entry.filename))
def next_n(a_list, batch_size):
    """Generator yielding successive chunks of up to batch_size items from
    a_list; the final chunk may be shorter."""
    chunk = []
    for element in a_list:
        chunk.append(element)
        if len(chunk) == batch_size:
            yield chunk
            chunk = []
    # Flush whatever is left over (empty input yields nothing).
    if chunk:
        yield chunk
def maybe_add_required_keys(a_dict, required_keys, dummy_value=u'?'):
    """Fills in dummy_value for every key in required_keys whose value in
    a_dict is missing or blank (as judged by importer.strip); returns the
    mutated dict."""
    for key in required_keys:
        if importer.strip(a_dict.get(key)):
            continue
        logging.info(
            '%s is missing from %s; will add dummy value(%s)',
            key, a_dict, dummy_value)
        a_dict[key] = dummy_value
    return a_dict
def create_person(person_dict):
    """Converts person_dict into a Person entity, retrying with dummy
    first/last names if the strict conversion fails; returns None when the
    record cannot be made valid."""
    # TODO(kpy): Pass a subdomain argument to importer.create_person.
    try:
        return importer.create_person(person_dict)
    except AssertionError:
        pass
    # First attempt failed validation: patch in placeholder names and retry.
    try:
        patched = maybe_add_required_keys(
            person_dict, (u'first_name', u'last_name'))
        return importer.create_person(patched)
    except AssertionError:
        logging.info(
            'skipping person %s as it cannot be made valid', person_dict)
        return None
def create_note(note_dict):
    """Converts note_dict into a Note entity, retrying without the key
    requirement if the strict conversion fails; returns None when the
    record cannot be made valid."""
    # TODO(kpy): Pass a subdomain argument to importer.create_note.
    try:
        return importer.create_note(note_dict)
    except AssertionError:
        pass
    # Relax the key requirement and retry once.
    try:
        return importer.create_note(note_dict, requires_key=False)
    except AssertionError:
        logging.info(
            'skipping note %s as it cannot be made valid', note_dict)
        return None
def maybe_update_index(entity):
    """Refreshes the entity's search index entries (old and new schemes)
    when the entity supports indexing; silently skips other kinds."""
    update = getattr(entity, 'update_index', None)
    if update is not None:
        update(['old', 'new'])
def add_entities(entity_dicts, create_function, batch_size, kind, store_all):
    """Adds the data in entity_dicts to storage as entities created by
    calling create_function.  Uses next_n to group the entity_dicts into
    batches that get stored using model.db.put(...), after being converted
    into entities using create_function.

    Args:
        entity_dicts: an iterable of dictionaries containing data to be stored
        create_function: a function that converts a dictionary to a new entity
        batch_size: size of the batches used to write the entities to storage
        kind: the text name of the entities for logging
        store_all: accepted for signature parity with the caller; currently
            unused inside this function
    """
    # Ceiling division via integer (floor) division under Python 2.
    batch_count = (len(entity_dicts) + batch_size - 1)/batch_size
    for i, batch in enumerate(next_n(entity_dicts, batch_size)):
        entities = [create_function(d) for d in batch]
        # create_function returns None for records it could not make valid.
        entities = [e for e in entities if e]
        for e in entities:
            maybe_update_index(e)
        db.put(entities)
        # Log every tenth batch plus the final one.
        if i % 10 == 0 or i == batch_count - 1:
            logging.info('%s update: just added batch %d/%d', kind, i + 1,
                         batch_count)
def import_site_export(export_path, remote_api_host,
                       app_id, batch_size, store_all):
    """Parses a site export file and writes its records to the datastore.

    Args:
        export_path: path to a PFIF XML file, or a .zip containing one entry.
        remote_api_host: host to connect to via remote_api.
        app_id: application id for the remote_api connection.
        batch_size: number of entities per db.put batch.
        store_all: if true, also import records originating from this app's
            own home domain (may overwrite existing records with same keys).
    """
    # Log in, then use the pfif parser to parse the export file. Use the
    # importer methods to convert the dicts to entities then add them as in
    # import.py, but less strict, to ensure that all exported data is available.
    remote_api.connect(remote_api_host, app_id)
    logging.info('%s: importing exported records from %s',
                 remote_api_host, export_path)
    if not export_path.endswith('.zip'):
        export_fd = open(export_path)
    else:
        export_fd = open_file_inside_zip(export_path)
    persons, notes = pfif.parse_file(export_fd)
    logging.info('loaded %d persons, %d notes', len(persons), len(notes))
    if not store_all:
        # Keep only records cloned from other domains, dropping home-domain
        # originals so they cannot be overwritten.
        persons = [d for d in persons if is_clone(d.get('person_record_id'))]
        notes = [d for d in notes if is_clone(d.get('note_record_id'))]
        logging.info(
            '... down to %d persons, %d notes after excluding %r records',
            len(persons), len(notes), HOME_DOMAIN)
    logging.info('... adding persons')
    add_entities(persons, create_person, batch_size, 'person', store_all)
    logging.info('... adding notes')
    add_entities(notes, create_note, batch_size, 'note', store_all)
def parse_command_line():
    """Parses the options and positional arguments for this script.

    Returns:
        (options, args) from optparse, where args holds exactly one element:
        the path to the export file.  Exits with a usage error otherwise.
    """
    parser = optparse.OptionParser()
    parser.add_option('--import_batch_size',
                      default=100,
                      help='size of batches used during data import')
    # BUG FIX: the original help string was garbled ("because this can so
    # can cause"); reworded for clarity.
    parser.add_option('--store_home_domain_records',
                      action='store_true',
                      dest='store_all',
                      default=False,
                      help=('Allows importing of records in this app\'s home'
                            ' domain. Disabled by default because it can'
                            ' cause existing records with the same key as an'
                            ' imported record to be overwritten'))
    parser.add_option('--host',
                      default='localhost:8080',
                      help='HOST endpoint to post to for importing data. '
                           '(Required)')
    parser.add_option('--app_id',
                      help='Application ID of endpoint (Optional for '
                           '*.appspot.com)')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error('One argument required - the path to the export file')
    return options, args
# Confirmation prompt shown before an import that may overwrite records in
# this app's home domain (see --store_home_domain_records).
ARE_YOU_SURE = ('You have specified --store_home_domain_records:\n'
                'This will override records in local storage if there are'
                ' feed records with matching numeric ids.\nContinue? (Y/n) ')
def main():
    """Command-line entry point: parse arguments, confirm the destructive
    home-domain mode if requested, then run the import."""
    logging.basicConfig(level=logging.INFO)
    options, args = parse_command_line()
    if options.store_all:
        reply = raw_input(ARE_YOU_SURE)
        # Anything starting with 'n' or 'N' aborts; everything else proceeds.
        if reply and reply[0] in ('n', 'N'):
            logging.info("... exiting")
            sys.exit(0)
    import_site_export(
        args[0], options.host, options.app_id,
        options.import_batch_size, options.store_all)

if __name__ == '__main__':
    main()
| Python |
# Copyright 2009-2010 by Ka-Ping Yee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model import *
from utils import *
def setup_datastore():
    """Installs the initial subdomains and configuration in the datastore.

    Existing subject types and messages are updated in place; existing
    Subject or Report information is neither changed nor deleted."""
    setup_subdomains()
    setup_configs()
def wipe_datastore(*kinds):
    """Deletes every entity of the given kinds, 200 keys at a time.

    With no arguments, wipes all kinds except Accounts and Secrets."""
    default_kinds = [Person, Note, Photo, Authorization,
                     Subdomain, config.ConfigEntry, UserActionLog]
    for kind in kinds or default_kinds:
        query_options = {'keys_only': True}
        if kind in (Person, Note):
            # Include expired records so they get cleaned out too.
            query_options['filter_expired'] = False
        batch = kind.all(**query_options).fetch(200)
        while batch:
            logging.info('%s: deleting %d...' % (kind.kind(), len(batch)))
            db.delete(batch)
            batch = kind.all(**query_options).fetch(200)
def reset_datastore():
    """Clears out everything except Accounts and Secrets, then reinstalls
    the initial subdomains and configuration."""
    wipe_datastore()
    setup_datastore()
def setup_subdomains():
    """Creates the Subdomain entities served by this deployment."""
    for name in ['haiti', 'chile', 'china', 'pakistan', 'lang-test']:
        Subdomain(key_name=name).put()
def setup_configs():
    """Installs the configuration settings for Haiti, Chile, China, Pakistan."""
    # Keywords shared by every subdomain's <meta name="keywords"> content.
    COMMON_KEYWORDS = ['person', 'people', 'finder', 'person finder',
                       'people finder', 'crisis', 'survivor', 'family']

    # NOTE: the following two CAPTCHA keys are dummy keys for testing only. They
    # should be replaced with secret keys upon launch.
    config.set(captcha_private_key='6LfiOr8SAAAAAFyxGzWkhjo_GRXxYoDEbNkt60F2',
               captcha_public_key='6LfiOr8SAAAAAM3wRtnLdgiVfud8uxCqVVJWCs-z')

    # Google Language API key registered for person-finder.appspot.com
    config.set(language_api_key='ABQIAAAAkyNXK1D6CLHJNPVQfiU8DhQowImlwyPaNDI' +
               'ohCJwgv-5lcExKBTP5o1_bXlgQjGi0stsXRtN-p8fdw')

    # Settings for the Haiti earthquake repository.
    config.set_for_subdomain(
        'haiti',
        # Appended to "Google Person Finder" in page titles.
        subdomain_titles={
            'en': 'Haiti Earthquake',
            'fr': u'S\xe9isme en Ha\xefti',
            'ht': u'Tranbleman T\xe8 an Ayiti',
            'es': u'Terremoto en Hait\xed'
        },
        # List of language codes that appear in the language menu.
        language_menu_options=['en', 'ht', 'fr', 'es'],
        # Content for the <meta name="keywords"> tag.
        keywords=', '.join([
            'haiti', 'earthquake', 'haiti earthquake', 'haitian',
            u'ha\xefti', u's\xe9isme', 'tremblement', 'tremblement de terre',
            'famille', 'recherche de personnes', 'terremoto'
        ] + COMMON_KEYWORDS),
        # If false, hide the last_name field and use only first_name.
        use_family_name=True,
        # Presentation order for the given name and family name.
        family_name_first=False,
        # If false, hide the home_zip field.
        use_postal_code=True,
        # Require at least this many letters in each word of a text query.
        min_query_word_length=2,
        # Default map viewport for the location field in the note form.
        map_default_zoom=7,
        map_default_center=[18.968637, -72.284546],
        map_size_pixels=[400, 280],
        # If true, the feeds and read API require an authorization key.
        read_auth_key_required=False,
        # If true, the search API requires an authorization key.
        search_auth_key_required=False
    )

    # Settings for the Chile earthquake repository.
    config.set_for_subdomain(
        'chile',
        subdomain_titles={
            'en': 'Chile Earthquake',
            'es': 'Terremoto en Chile'
        },
        language_menu_options=['en', 'es'],
        keywords=', '.join([
            'chile', 'earthquake', 'chile earthquake', 'chilean',
            'terremoto', 'terremoto de chile',
            'sobreviviente', 'buscador de personas'
        ] + COMMON_KEYWORDS),
        use_family_name=True,
        family_name_first=False,
        use_postal_code=True,
        min_query_word_length=2,
        map_default_zoom=6,
        map_default_center=[-35, -72],  # near Curico, Chile
        map_size_pixels=[400, 500],
        read_auth_key_required=False,
        search_auth_key_required=False
    )

    # Settings for the China (Qinghai/Yushu) earthquake repository.
    config.set_for_subdomain(
        'china',
        subdomain_titles={
            'en': 'China Earthquake',
            'zh-TW': u'\u4e2d\u570b\u5730\u9707',
            'zh-CN': u'\u4e2d\u56fd\u5730\u9707'
        },
        language_menu_options=['en', 'zh-TW', 'zh-CN'],
        keywords=', '.join([
            'china', 'earthquake', 'china earthquake', 'chinese',
            'qinghai', 'yushu'] + COMMON_KEYWORDS),
        use_family_name=True,
        family_name_first=True,
        use_postal_code=True,
        min_query_word_length=1,
        map_default_zoom=7,
        map_default_center=[33.005822, 97.006636],  # near Yushu, China
        map_size_pixels=[400, 280],
        read_auth_key_required=False,
        search_auth_key_required=False
    )

    # Settings for the Pakistan floods repository.
    config.set_for_subdomain(
        'pakistan',
        subdomain_titles={
            'en': 'Pakistan Floods',
            'ur': u'\u067e\u0627\u06a9\u0633\u062a\u0627\u0646\u06cc \u0633\u06cc\u0644\u0627\u0628'
        },
        language_menu_options=['en', 'ur'],
        keywords=', '.join([
            'pakistan', 'flood', 'pakistan flood', 'pakistani'
        ] + COMMON_KEYWORDS),
        use_family_name=False,
        family_name_first=False,
        use_postal_code=False,
        min_query_word_length=1,
        map_default_zoom=6,
        map_default_center=[33.36, 73.26],  # near Rawalpindi, Pakistan
        map_size_pixels=[400, 500],
        read_auth_key_required=False,
        search_auth_key_required=False
    )

    # Synthetic repository used to exercise every available UI language.
    config.set_for_subdomain(
        'lang-test',
        # We set empty titles to avoid going over the 500-char limit
        # of the field
        subdomain_titles=dict(zip(LANGUAGE_ENDONYMS.keys(),
                                  [''] * len(LANGUAGE_ENDONYMS))),
        language_menu_options=list(LANGUAGE_EXONYMS.keys()),
        keywords=', '.join(COMMON_KEYWORDS),
        use_family_name=True,
        family_name_first=True,
        use_postal_code=True,
        min_query_word_length=1,
        map_default_zoom=6,
        map_default_center=[0, 0],
        map_size_pixels=[400, 500],
        read_auth_key_required=False,
        search_auth_key_required=False
    )
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unix command-line utility to download PFIF records from Atom feeds."""
__author__ = 'kpy@google.com (Ka-Ping Yee)'
import csv
import os
import sys
import time
# This script is in a tools directory below the root project directory.
TOOLS_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_DIR = os.path.dirname(TOOLS_DIR)
APP_DIR = os.path.join(PROJECT_DIR, 'app')
# Make imports work for Python modules that are part of this app.
sys.path.append(APP_DIR)
import pfif
import urllib
import urlparse
# Parsers for both types of records.
class PersonParser:
    """Parses a PFIF document, keeping only the person records."""
    def parse_file(self, file):
        persons, notes = pfif.parse_file(file)
        return persons

class NoteParser:
    """Parses a PFIF document, keeping only the note records."""
    def parse_file(self, file):
        persons, notes = pfif.parse_file(file)
        return notes

# Maps record type name to the parser class for that type.
parsers = {'person': PersonParser, 'note': NoteParser}
# Writers for both types of records.
class CsvWriter:
    """Streams PFIF records to a CSV file; subclasses define `fields`."""

    def __init__(self, filename):
        self.file = open(filename, 'w')
        self.writer = csv.DictWriter(self.file, self.fields)
        # Emit the header row by hand (no writeheader in this csv version).
        header = dict((name, name) for name in self.fields)
        self.writer.writerow(header)
        sys.stderr.write('Writing CSV to: %s\n' % filename)

    def write(self, records):
        """Appends the given record dicts, UTF-8 encoding every value."""
        for record in records:
            encoded = dict((name, value.encode('utf-8'))
                           for name, value in record.items())
            self.writer.writerow(encoded)
        self.file.flush()

    def close(self):
        self.file.close()
class PersonCsvWriter(CsvWriter):
    # CSV columns: the PFIF 1.2 person fields, in specification order.
    fields = pfif.PFIF_1_2.fields['person']
class NoteCsvWriter(CsvWriter):
    # CSV columns: the PFIF 1.2 note fields, in specification order.
    fields = pfif.PFIF_1_2.fields['note']
class XmlWriter:
    """Streams PFIF 1.2 XML to a file; subclasses supply write_record."""

    def __init__(self, filename):
        self.file = open(filename, 'w')
        self.file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        self.file.write('<pfif:pfif xmlns:pfif="%s">\n' % pfif.PFIF_1_2.ns)
        sys.stderr.write('Writing PFIF 1.2 XML to: %s\n' % filename)

    def write(self, records):
        """Appends one XML element per record."""
        for record in records:
            self.write_record(self.file, record, indent=' ')
        self.file.flush()

    def close(self):
        # Close the root element before closing the file.
        self.file.write('</pfif:pfif>\n')
        self.file.close()
class PersonXmlWriter(XmlWriter):
    # Serializer for <pfif:person> elements.
    write_record = pfif.PFIF_1_2.write_person
class NoteXmlWriter(XmlWriter):
    # Serializer for <pfif:note> elements.
    write_record = pfif.PFIF_1_2.write_note
# Maps output format ('xml'/'csv'), then record type, to a writer class.
writers = {
    'xml': {'person': PersonXmlWriter, 'note': NoteXmlWriter},
    'csv': {'person': PersonCsvWriter, 'note': NoteCsvWriter}
}
def download_batch(url, auth_key, min_entry_date, skip, parser):
    """Fetches and parses one batch of records from an Atom feed.

    Args:
        url: feed URL; the query string is appended to it.
        auth_key: authorization key, or '' when the feed is unprotected.
        min_entry_date: only records with entry_date >= this are requested.
        skip: number of records to skip past min_entry_date.
        parser: a PersonParser or NoteParser instance.

    Returns:
        The list of parsed records.

    Raises:
        RuntimeError: if the fetch fails five times in a row.
    """
    query_params = {
        'min_entry_date': min_entry_date,
        'skip': skip,
        'max_results': 200
    }
    # If an authorization key was given, add it to the query parameters.
    if auth_key:
        query_params['key'] = auth_key
    query = urllib.urlencode(query_params)
    if '?' in url:
        url += '&' + query
    else:
        url += '?' + query
    for attempt in range(5):
        try:
            return parser.parse_file(urllib.urlopen(url))
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the tool unkillable
            # mid-retry.  Transient fetch/parse errors are simply retried.
            continue
    raise RuntimeError('Failed to fetch %r after 5 attempts' % url)
def download_all_since(url, auth_key, min_entry_date, parser, writer):
    """Fetches and parses batches of records repeatedly until all records
    with an entry_date >= min_entry_date are retrieved.

    Progress is reported on stderr; parsed records are streamed to writer.
    """
    start_time = time.time()
    # Trailing comma: the Python 2 print statement leaves the line open so
    # the fetch result can be appended to the same stderr line.
    print >>sys.stderr, ' entry_date >= %s:' % min_entry_date,
    records = download_batch(url, auth_key, min_entry_date, 0, parser)
    total = 0
    while records:
        writer.write(records)
        total += len(records)
        speed = total/float(time.time() - start_time)
        print >>sys.stderr, 'fetched %d (total %d, %.1f rec/s)' % (
            len(records), total, speed)
        # Advance the cursor: resume at the newest entry_date seen, skipping
        # however many records share that exact timestamp to avoid refetching.
        min_entry_date = max(r['entry_date'] for r in records)
        skip = len([r for r in records if r['entry_date'] == min_entry_date])
        print >>sys.stderr, ' entry_date >= %s:' % min_entry_date,
        records = download_batch(url, auth_key, min_entry_date, skip, parser)
    print >>sys.stderr, 'done.'
def main():
    """Validates command-line arguments, then streams the requested feed
    to the named output file in the requested format."""
    if (len(sys.argv) not in [6,7] or
        sys.argv[1] not in ['person', 'note'] or
        sys.argv[4] not in ['xml', 'csv']):
        raise SystemExit('''
Usage: %s <type> <feed_url> <min_entry_date> <format> <filename> [auth_key]
    type: 'person' or 'note'
    feed_url: URL of the Person Finder Atom feed (as a shorthand, you can
        give just the domain name and the rest of the URL will be assumed)
    min_entry_date: retrieve only entries with entry_date >= this timestamp
        (specify the timestamp in RFC 3339 format)
    format: 'xml' or 'csv'
    filename: filename of the file to write
    auth_key (optional): authorization key if data is protected with a read key
''' % sys.argv[0])
    type, feed_url, min_entry_date, format, filename = sys.argv[1:6]
    # retrieve authorization key if it has been specified
    auth_key = ''
    if len(sys.argv) == 7:
        auth_key = sys.argv[6]
    # If given a plain domain name, assume the usual feed path.
    if '/' not in feed_url:
        feed_url = 'https://' + feed_url + '/feeds/' + type
    print >>sys.stderr, 'Using feed URL: %s' % feed_url
    # If given a date only, assume midnight UTC.
    if 'T' not in min_entry_date:
        min_entry_date += 'T00:00:00Z'
    print >>sys.stderr, 'Using min_entry_date: %s' % min_entry_date
    parser = parsers[type]()
    writer = writers[format][type](filename)
    print >>sys.stderr, 'Fetching %s records since %s:' % (type, min_entry_date)
    download_all_since(feed_url, auth_key, min_entry_date, parser, writer)
    writer.close()

if __name__ == '__main__':
    main()
| Python |
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for administration in the interactive console."""
from model import *
from utils import *
import logging
class Mapper(object):
    """Base class for batched map operations over one datastore kind.

    Subclasses set KIND (and optionally FILTERS) and override map(); run()
    then walks every matching entity in key order, applying the returned
    updates and deletes in batches.
    """
    # Subclasses should replace this with a model class (eg, model.Person).
    KIND = None

    # Subclasses can replace this with a list of (property, value) tuples
    # to filter by.
    FILTERS = []

    def map(self, entity):
        """Updates a single entity.

        Implementers should return a tuple containing two iterables
        (to_update, to_delete)."""
        return ([], [])

    def get_query(self):
        """Returns a query over the specified kind, with any appropriate
        filters applied."""
        q = self.KIND.all()
        for prop, value in self.FILTERS:
            q.filter("%s =" % prop, value)
        q.order("__key__")
        return q

    def run(self, batch_size=100):
        """Executes the map procedure over all matching entities."""
        q = self.get_query()
        entities = q.fetch(batch_size)
        while entities:
            to_put = []
            to_delete = []
            for entity in entities:
                map_updates, map_deletes = self.map(entity)
                to_put.extend(map_updates)
                to_delete.extend(map_deletes)
            if to_put:
                db.put(to_put)
                logging.info('entities written: %d' % len(to_put))
            if to_delete:
                db.delete(to_delete)
                logging.info('entities deleted: %d' % len(to_delete))
            # Rebuild the query and resume past the last key seen, so the
            # scan stays consistent even though entities were just mutated.
            q = self.get_query()
            q.filter("__key__ >", entities[-1].key())
            entities = q.fetch(batch_size)
class Reindexer(Mapper):
    """Mapper that rewrites every Person's search-index entries."""
    KIND = Person
    def map(self, entity):
        # This updates both old and new index and we need it for now,
        # as first stage of deployment.
        entity.update_index(['old','new'])
        # Use the next line to index only with new index
        #indexing.update_index_properties(entity)
        return [entity], []
def Person_repr(person):
    """Readable repr for Person entities in the interactive console."""
    fields = (person.record_id, person.first_name, person.last_name)
    return '<Person %s %r %r>' % fields
def Note_repr(note):
    """Readable repr for Note entities in the interactive console."""
    fields = (note.record_id, note.person_record_id,
              note.author_name, note.entry_date)
    return '<Note %s for %s by %r at %s>' % fields
# Install the readable reprs on the model classes for console sessions.
Person.__repr__ = Person_repr
Note.__repr__ = Note_repr
def expand_id(subdomain, id):
    """Expands a bare short id into a full record id of the form
    '<subdomain>.<HOME_DOMAIN>/person.<id>'; ids that already contain '/'
    are returned unchanged."""
    id = str(id)
    if '/' in id:
        return id
    return subdomain + '.' + HOME_DOMAIN + '/person.' + id
def clear_found(subdomain, id):
    """Resets the 'found' flag on the given person record.

    BUG FIX: the original signature was clear_found(id) and it called
    get_person(id), but get_person requires (subdomain, id) -- so every
    call raised TypeError.  The subdomain parameter makes it callable,
    matching the other helpers in this module.
    """
    person = get_person(subdomain, id)
    person.found = False
    db.put(person)
def get_person(subdomain, id):
    """Fetches a Person by short or full record id within a subdomain."""
    return Person.get(subdomain, expand_id(subdomain, id))
def get_notes(subdomain, id):
    """Returns all Notes attached to the given person record."""
    return list(Note.all_in_subdomain(subdomain).filter(
        'person_record_id =', expand_id(subdomain, id)))
def delete_person(subdomain, id):
    """Deletes a Person along with its Notes and locally stored Photo."""
    db.delete(get_entities_for_person(subdomain, id))
def get_entities_for_person(subdomain, id):
    """Collects the Person, its Notes, and its Photo (only when photo_url
    points at this app's own /photo handler)."""
    person = get_person(subdomain, id)
    notes = get_notes(subdomain, id)
    entities = [person] + notes
    if person.photo_url:
        if person.photo_url.startswith('/photo?id='):
            # NOTE(review): the id extracted here is a string, while
            # get_by_id conventionally takes a numeric id -- confirm this
            # lookup actually finds the Photo.
            id = person.photo_url.split('=', 1)[1]
            photo = Photo.get_by_id(id)
            if photo:
                entities.append(photo)
    return entities
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from model import *
from utils import *
from google.appengine.api import images
from google.appengine.runtime.apiproxy_errors import RequestTooLargeError
import indexing
import prefix
from django.utils.translation import ugettext as _
# Longest allowed edge, in pixels, for uploaded photos; larger images are
# scaled down in Create.post().
MAX_IMAGE_DIMENSION = 300
def validate_date(string):
    """Strictly parses a 'YYYY-MM-DD' string into a datetime (midnight).

    This is a special case for manual entry of source_date on the create
    form: unlike the lenient validators in utils.py, badly formatted or
    nonexistent dates raise an exception here."""
    fields = [int(part) for part in string.strip().split('-')]
    year, month, day = fields
    return datetime(year, month, day)
def days_to_date(days):
    """Returns the UTC datetime that is `days` days from now."""
    return get_utcnow() + timedelta(days=days)
class Create(Handler):
    """Handler for /create: shows the new-record form (GET) and validates
    and stores a new Person, optionally with a photo and an initial Note
    (POST)."""

    def get(self):
        # create_mode switches the shared template into entry-form mode.
        self.params.create_mode = True
        self.render('templates/create.html',
                    onload_function='view_page_loaded()')

    def post(self):
        now = get_utcnow()

        # Several messages here exceed the 80-column limit because django's
        # makemessages script can't handle messages split across lines. :(
        if self.config.use_family_name:
            if not (self.params.first_name and self.params.last_name):
                return self.error(400, _('The Given name and Family name are both required. Please go back and try again.'))
        else:
            if not self.params.first_name:
                return self.error(400, _('Name is required. Please go back and try again.'))
        if not self.params.author_name:
            if self.params.clone:
                return self.error(400, _('The Original author\'s name is required. Please go back and try again.'))
            else:
                return self.error(400, _('Your name is required in the "Source" section. Please go back and try again.'))
        if self.params.add_note:
            if not self.params.text:
                return self.error(400, _('Message is required. Please go back and try again.'))
            if self.params.status == 'is_note_author' and not self.params.found:
                return self.error(400, _('Please check that you have been in contact with the person after the earthquake, or change the "Status of this person" field.'))

        # Validate the manually-entered source date, if one was given.
        source_date = None
        if self.params.source_date:
            try:
                source_date = validate_date(self.params.source_date)
            except ValueError:
                return self.error(400, _('Original posting date is not in YYYY-MM-DD format, or is a nonexistent date. Please go back and try again.'))
            if source_date > now:
                return self.error(400, _('Date cannot be in the future. Please go back and try again.'))

        expiry_date = None
        if self.params.expiry_option and self.params.expiry_option > 0:
            expiry_date = days_to_date(self.params.expiry_option)

        # If nothing was uploaded, just use the photo_url that was provided.
        photo = None
        photo_url = self.params.photo_url

        # If a picture was uploaded, store it and the URL where we serve it.
        photo_obj = self.params.photo
        # The photo validator yields the literal False for an unparseable
        # image; use an identity check so other falsy values (e.g. None
        # for "no upload") are not mistaken for the invalid-image marker.
        if photo_obj is False:
            return self.error(400, _('Photo uploaded is in an unrecognized format. Please go back and try again.'))

        if photo_obj:
            if max(photo_obj.width, photo_obj.height) <= MAX_IMAGE_DIMENSION:
                # No resize needed. Keep the same size but add a
                # transformation so we can change the encoding.
                # BUG FIX: was resize(width, width), which distorted any
                # non-square image.
                photo_obj.resize(photo_obj.width, photo_obj.height)
            elif photo_obj.width > photo_obj.height:
                # BUG FIX: the original computed
                # height * (MAX_IMAGE_DIMENSION / width); under Python 2 the
                # parenthesized ratio is integer division and truncates to 0
                # whenever width > MAX_IMAGE_DIMENSION, collapsing the image
                # to zero height.  Multiply before dividing instead.
                photo_obj.resize(
                    MAX_IMAGE_DIMENSION,
                    photo_obj.height * MAX_IMAGE_DIMENSION // photo_obj.width)
            else:
                photo_obj.resize(
                    photo_obj.width * MAX_IMAGE_DIMENSION // photo_obj.height,
                    MAX_IMAGE_DIMENSION)

            try:
                sanitized_photo = \
                    photo_obj.execute_transforms(output_encoding=images.PNG)
            except RequestTooLargeError:
                return self.error(400, _('The provided image is too large. Please upload a smaller one.'))
            except Exception:
                # There are various images.Error exceptions that can be raised,
                # as well as e.g. IOError if the image is corrupt.
                return self.error(400, _('There was a problem processing the image. Please try a different image.'))
            photo = Photo(bin_data=sanitized_photo)
            photo.put()
            photo_url = photo.get_url(self)

        # Free-form description goes into the PFIF 'other' field, indented.
        other = ''
        if self.params.description:
            indented = '    ' + self.params.description.replace('\n', '\n    ')
            indented = indented.rstrip() + '\n'
            other = 'description:\n' + indented

        # Person records have to have a source_date; if none entered, use now.
        source_date = source_date or now

        # Determine the source name, or fill it in if the record is original
        # (i.e. created for the first time here, not copied from elsewhere).
        source_name = self.params.source_name
        if not self.params.clone:
            source_name = self.env.netloc  # record originated here

        person = Person.create_original(
            self.subdomain,
            entry_date=now,
            expiry_date=expiry_date,
            first_name=self.params.first_name,
            last_name=self.params.last_name,
            sex=self.params.sex,
            date_of_birth=self.params.date_of_birth,
            age=self.params.age,
            home_street=self.params.home_street,
            home_city=self.params.home_city,
            home_state=self.params.home_state,
            home_postal_code=self.params.home_postal_code,
            home_neighborhood=self.params.home_neighborhood,
            home_country=self.params.home_country,
            author_name=self.params.author_name,
            author_phone=self.params.author_phone,
            author_email=self.params.author_email,
            source_url=self.params.source_url,
            source_date=source_date,
            source_name=source_name,
            photo=photo,
            photo_url=photo_url,
            other=other
        )
        person.update_index(['old', 'new'])
        entities_to_put = [person]

        if self.params.add_note:
            note = Note.create_original(
                self.subdomain,
                entry_date=get_utcnow(),
                person_record_id=person.record_id,
                author_name=self.params.author_name,
                author_phone=self.params.author_phone,
                author_email=self.params.author_email,
                source_date=source_date,
                text=self.params.text,
                last_known_location=self.params.last_known_location,
                status=self.params.status,
                found=bool(self.params.found),
                email_of_found_person=self.params.email_of_found_person,
                phone_of_found_person=self.params.phone_of_found_person)
            person.update_from_note(note)
            entities_to_put.append(note)

        # Write one or both entities to the store.
        db.put(entities_to_put)

        if not person.source_url and not self.params.clone:
            # Put again with the URL, now that we have a person_record_id.
            person.source_url = self.get_url('/view', id=person.record_id)
            db.put(person)

        # If user wants to subscribe to updates, redirect to the subscribe page
        if self.params.subscribe:
            return self.redirect('/subscribe', id=person.record_id,
                                 subscribe_email=self.params.author_email)

        self.redirect('/view', id=person.record_id)
# Register the handler with the webapp framework (run comes from utils).
if __name__ == '__main__':
    run(('/create', Create))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'kpy@google.com (Ka-Ping Yee) and many other Googlers'
import cgi
from datetime import datetime, timedelta
import httplib
import logging
import model
import os
import pfif
import re
import time
import traceback
import urllib
import urlparse
from google.appengine.dist import use_library
use_library('django', '1.1')
import django.conf
import django.utils.html
from google.appengine.api import images
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import webapp
import google.appengine.ext.webapp.template
import google.appengine.ext.webapp.util
from recaptcha.client import captcha
import config
if os.environ.get('SERVER_SOFTWARE', '').startswith('Development'):
    # See http://code.google.com/p/googleappengine/issues/detail?id=985
    # Workaround: stub out the Mac OS X proxy lookup, which breaks under
    # the dev_appserver sandbox.
    import urllib
    urllib.getproxies_macosx_sysconf = lambda: {}

# Absolute path of the app directory; used to locate templates and locales.
ROOT = os.path.abspath(os.path.dirname(__file__))
# ==== Localization setup ======================================================

try:
    django.conf.settings.configure()
except:
    # configure() raises if settings are already configured (e.g. on module
    # reload); this is best-effort setup, so the error is ignored.
    # NOTE(review): a bare except also hides unrelated errors -- consider
    # narrowing to except Exception.
    pass
django.conf.settings.LANGUAGE_CODE = 'en'
django.conf.settings.USE_I18N = True
django.conf.settings.LOCALE_PATHS = (os.path.join(ROOT, 'locale'),)
# Languages rendered right-to-left.
django.conf.settings.LANGUAGES_BIDI = ['ar', 'he', 'fa', 'iw', 'ur']

import django.utils.translation
# We use lazy translation in this file because the locale isn't set until the
# Handler is initialized.
from django.utils.translation import gettext_lazy as _
# Mapping from language codes to endonyms for all available languages.
# (An endonym is a language's name for itself; keys are Django language
# codes and must stay in sync with LANGUAGE_EXONYMS below.)
LANGUAGE_ENDONYMS = {
    'ar': u'\u0627\u0644\u0639\u0631\u0628\u064A\u0629',
    'bg': u'\u0431\u044A\u043B\u0433\u0430\u0440\u0441\u043A\u0438',
    'ca': u'Catal\u00E0',
    'cs': u'\u010De\u0161tina',
    'da': u'Dansk',
    'de': u'Deutsch',
    'el': u'\u0395\u03BB\u03BB\u03B7\u03BD\u03B9\u03BA\u03AC',
    'en': u'English',
    'en-GB': u'English (UK)',
    'es': u'espa\u00F1ol',
    'es_419': u'espa\u00F1ol (Latinoam\u00e9rica)',
    'eu': u'Euskara',
    'fa': u'\u0641\u0627\u0631\u0633\u06CC',
    'fi': u'suomi',
    'fil': u'Filipino',
    'fr': u'Fran\u00e7ais',
    'fr-CA': u'Fran\u00e7ais (Canada)',
    'gl': u'Galego',
    'hi': u'\u0939\u093F\u0928\u094D\u0926\u0940',
    'hr': u'Hrvatski',
    'ht': u'Krey\u00f2l',
    'hu': u'magyar',
    'id': u'Bahasa Indonesia',
    'it': u'Italiano',
    'he': u'\u05E2\u05D1\u05E8\u05D9\u05EA',
    'ja': u'\u65E5\u672C\u8A9E',
    'ko': u'\uD55C\uAD6D\uC5B4',
    'lt': u'Lietuvi\u0173',
    'lv': u'Latvie\u0161u valoda',
    'nl': u'Nederlands',
    'no': u'Norsk',
    'pl': u'polski',
    'pt-PT': u'Portugu\u00EAs (Portugal)',
    'pt-BR': u'Portugu\u00EAs (Brasil)',
    'ro': u'Rom\u00E2n\u0103',
    'ru': u'\u0420\u0443\u0441\u0441\u043A\u0438\u0439',
    'sk': u'Sloven\u010Dina',
    'sl': u'Sloven\u0161\u010Dina',
    'sr': u'\u0441\u0440\u043F\u0441\u043A\u0438',
    'sv': u'Svenska',
    'th': u'\u0E44\u0E17\u0E22',
    'tr': u'T\u00FCrk\u00E7e',
    'uk': u'\u0423\u043A\u0440\u0430\u0457\u043D\u0441\u044C\u043A\u0430',
    'ur': u'\u0627\u0631\u062F\u0648',
    'vi': u'Ti\u1EBFng Vi\u1EC7t',
    'zh-TW': u'\u4E2D \u6587 (\u7E41 \u9AD4)',
    'zh-CN': u'\u4E2D \u6587 (\u7B80 \u4F53)',
}
# Mapping from language codes to English names for all available languages.
# Keys must stay in sync with LANGUAGE_ENDONYMS above.
# BUG FIX: 'uk' was misspelled 'Ukranian'; corrected to 'Ukrainian'.
LANGUAGE_EXONYMS = {
    'ar': 'Arabic',
    'bg': 'Bulgarian',
    'ca': 'Catalan',
    'cs': 'Czech',
    'da': 'Danish',
    'de': 'German',
    'el': 'Greek',
    'en': 'English (US)',
    'en-GB': 'English (UK)',
    'es': 'Spanish',
    'es_419': 'Spanish (Latin America)',
    'eu': 'Basque',
    'fa': 'Persian',
    'fi': 'Finnish',
    'fil': 'Filipino',
    'fr': 'French (France)',
    'fr-CA': 'French (Canada)',
    'gl': 'Galician',
    'hi': 'Hindi',
    'hr': 'Croatian',
    'ht': 'Haitian Creole',
    'hu': 'Hungarian',
    'id': 'Indonesian',
    'it': 'Italian',
    'he': 'Hebrew',
    'ja': 'Japanese',
    'ko': 'Korean',
    'lt': 'Lithuanian',
    'lv': 'Latvian',
    'nl': 'Dutch',
    'no': 'Norwegian',
    'pl': 'Polish',
    'pt-PT': 'Portuguese (Portugal)',
    'pt-BR': 'Portuguese (Brazil)',
    'ro': 'Romanian',
    'ru': 'Russian',
    'sk': 'Slovak',
    'sl': 'Slovenian',
    'sr': 'Serbian',
    'sv': 'Swedish',
    'th': 'Thai',
    'tr': 'Turkish',
    'uk': 'Ukrainian',
    'ur': 'Urdu',
    'vi': 'Vietnamese',
    'zh-TW': 'Chinese (Traditional)',
    'zh-CN': 'Chinese (Simplified)',
}
# Mapping from language codes to the names of LayoutCode constants. See:
# http://code.google.com/apis/ajaxlanguage/documentation/referenceKeyboard.html
# Languages not listed here get no virtual keyboard.
VIRTUAL_KEYBOARD_LAYOUTS = {
    'ur': 'URDU'
}
# ==== Field value text ========================================================
# UI text for the sex field when displaying a person.
PERSON_SEX_TEXT = {
    # This dictionary must have an entry for '' that gives the default text.
    '': '',
    'female': _('female'),
    'male': _('male'),
    'other': _('other')
}
# Sanity check: the UI covers exactly the values allowed by the PFIF spec.
assert set(PERSON_SEX_TEXT.keys()) == set(pfif.PERSON_SEX_VALUES)
def get_person_sex_text(person):
    """Returns the UI text for a person's sex field."""
    # An unset field falls back to the '' entry (the default text).
    key = person.sex or ''
    return PERSON_SEX_TEXT.get(key)
# UI text for the expiry field when displaying a person.
# Keys are day counts as strings; '-1' is the 'unspecified' sentinel
# (see validate_expiry).  The _('...') calls must remain literal for
# gettext message extraction.
PERSON_EXPIRY_TEXT = {
    '-1': _('Unspecified'),
    '30': _('About 1 month (30 days) from now'),
    '60': _('About 2 months (60 days) from now'),
    '90': _('About 3 months (90 days) from now'),
    '180': _('About 6 months (180 days) from now'),
    '360': _('About 1 year (360 days) from now'),
}
# UI text for the status field when posting or displaying a note.
# The _('...') calls must remain literal for gettext message extraction.
NOTE_STATUS_TEXT = {
    # This dictionary must have an entry for '' that gives the default text.
    '': _('Unspecified'),
    'information_sought': _('I am seeking information'),
    'is_note_author': _('I am this person'),
    'believed_alive':
        _('I have received information that this person is alive'),
    'believed_missing': _('I have reason to think this person is missing'),
    'believed_dead': _('I have received information that this person is dead'),
}
# The UI table must cover exactly the PFIF-defined note status values.
assert set(NOTE_STATUS_TEXT.keys()) == set(pfif.NOTE_STATUS_VALUES)
def get_note_status_text(note):
    """Returns the UI text for a note's status field."""
    # An unset status falls back to the '' entry (the default text).
    status = note.status or ''
    return NOTE_STATUS_TEXT.get(status)
# UI text for the rolled-up status when displaying a person.
# This is intended for the results page; it's not yet used but the strings
# are in here so we can get the translations started.
# The _('...') calls must remain literal for gettext message extraction.
PERSON_STATUS_TEXT = {
    # This dictionary must have an entry for '' that gives the default text.
    '': _('Unspecified'),
    'information_sought': _('Someone is seeking information about this person'),
    'is_note_author': _('This person has posted a message'),
    'believed_alive':
        _('Someone has received information that this person is alive'),
    'believed_missing': _('Someone has reported that this person is missing'),
    'believed_dead':
        _('Someone has received information that this person is dead'),
}
# The rolled-up person status reuses the note status value set (the
# person's latest_status comes from notes).
assert set(PERSON_STATUS_TEXT.keys()) == set(pfif.NOTE_STATUS_VALUES)
def get_person_status_text(person):
    """Returns the UI text for a person's latest_status."""
    # An unset status falls back to the '' entry (the default text).
    latest = person.latest_status or ''
    return PERSON_STATUS_TEXT.get(latest)
# ==== String formatting =======================================================
def format_utc_datetime(dt):
    """Formats a datetime as an ISO 8601 string with a 'Z' suffix,
    truncated to whole seconds.  Returns '' for None."""
    if dt is None:
        return ''
    whole_seconds = datetime(*dt.timetuple()[:6])
    return whole_seconds.isoformat() + 'Z'
def format_sitemaps_datetime(dt):
    """Formats a datetime for sitemaps: whole seconds, explicit '+00:00'
    UTC offset."""
    whole_seconds = datetime(*dt.timetuple()[:6])
    return whole_seconds.isoformat() + '+00:00'
def to_utf8(string):
    """If Unicode, encode to UTF-8; if 8-bit string, leave unchanged."""
    if isinstance(string, unicode):
        return string.encode('utf-8')
    return string
def urlencode(params):
    """Apply UTF-8 encoding to any Unicode strings in the parameter dict.
    Leave 8-bit strings alone.  (urllib.urlencode doesn't support Unicode.)"""
    # Sort the keys to get canonical ordering; skip non-string values.
    pairs = [(to_utf8(key), to_utf8(params[key]))
             for key in sorted(params.keys())
             if isinstance(params[key], basestring)]
    return urllib.urlencode(pairs)
def set_url_param(url, param, value):
    """This modifies a URL setting the given param to the specified value.  This
    may add the param or override an existing value, or, if the value is None,
    it will remove the param.  Note that value must be a basestring and can't be
    an int, for example."""
    parts = list(urlparse.urlparse(url))
    query = dict(cgi.parse_qsl(parts[4]))
    if value is None:
        query.pop(param, None)
    else:
        query[param] = value
    parts[4] = urlencode(query)
    return urlparse.urlunparse(parts)
def anchor_start(href):
    """Returns the HREF escaped and embedded in an anchor tag."""
    escaped_href = django.utils.html.escape(href)
    return '<a href="%s">' % escaped_href
def anchor(href, body):
    """Returns a string anchor HTML element with the given href and body."""
    escaped_body = django.utils.html.escape(body)
    return anchor_start(href) + escaped_body + '</a>'
# ==== Validators ==============================================================
# These validator functions are used to check and parse query parameters.
# When a query parameter is missing or invalid, the validator returns a
# default value.  For parameter types with a false value, the default is the
# false value.  For types with no false value, the default is None.
def strip(string):
    """Canonicalizes a free-text parameter by trimming whitespace."""
    return string.strip()
def validate_yes(string):
    """Returns 'yes' for an affirmative value, '' otherwise."""
    return 'yes' if string.strip().lower() == 'yes' else ''
def validate_checkbox(string):
    """Returns 'yes' for a checked HTML checkbox value ('on'), '' otherwise."""
    return 'yes' if string.strip().lower() == 'on' else ''
def validate_role(string):
    """Returns the canonical role: 'provide' when given, else 'seek'."""
    return 'provide' if string.strip().lower() == 'provide' else 'seek'
def validate_int(string):
    """Parses an int; a missing (falsy) value is returned unchanged."""
    if not string:
        return string
    return int(string.strip())
def validate_sex(string):
    """Validates the 'sex' parameter, returning a canonical value or ''."""
    if not string:
        return ''
    canonical = string.strip().lower()
    # '' is itself a member of PERSON_SEX_VALUES but is falsy, so the
    # fallback to '' is the same value either way.
    return canonical if canonical in pfif.PERSON_SEX_VALUES and canonical else ''
def validate_expiry(value):
    """Validates that the 'expiry_option' parameter is a positive integer;
    otherwise returns -1 which represents the 'unspecified' status."""
    try:
        value = int(value)
    except (TypeError, ValueError):
        # Was a bare 'except:'; narrowed to the errors int() can raise for
        # missing (None) or non-numeric input.
        return -1
    return value if value > 0 else -1
# Matches YYYY, YYYY-MM, or YYYY-MM-DD.
APPROXIMATE_DATE_RE = re.compile(r'^\d{4}(-\d\d)?(-\d\d)?$')

def validate_approximate_date(string):
    """Returns the trimmed date string if it looks like an (approximate)
    ISO date, otherwise ''."""
    if not string:
        return ''
    candidate = string.strip()
    return candidate if APPROXIMATE_DATE_RE.match(candidate) else ''
# Matches a single age ('30') or an age range ('30-40').
AGE_RE = re.compile(r'^\d+(-\d+)?$')

def validate_age(string):
    """Validates the 'age' parameter, returning a canonical value or ''."""
    if not string:
        return ''
    candidate = string.strip()
    return candidate if AGE_RE.match(candidate) else ''
def validate_status(string):
    """Validates an incoming status parameter, returning one of the canonical
    status strings or ''.  Note that '' is always used as the Python value
    to represent the 'unspecified' status."""
    if not string:
        return ''
    canonical = string.strip().lower()
    # '' is itself in NOTE_STATUS_VALUES but is falsy, so the fallback to ''
    # yields the same value either way.
    return canonical if canonical in pfif.NOTE_STATUS_VALUES and canonical else ''
# Strict ISO-8601 UTC timestamp: YYYY-MM-DDTHH:MM:SSZ.
DATETIME_RE = re.compile(r'^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)Z$')

def validate_datetime(string):
    """Parses a strict UTC timestamp.  A missing value yields None; a
    malformed one raises ValueError."""
    if not string:
        return None  # A missing value is okay.
    match = DATETIME_RE.match(string)
    if not match:
        raise ValueError('Bad datetime: %r' % string)
    return datetime(*[int(group) for group in match.groups()])
def validate_timestamp(string):
    """Parses a POSIX timestamp string into a UTC datetime.  A missing
    (falsy) value is returned unchanged; bad input raises ValueError."""
    try:
        # It's all tz'less once you're in time() land.
        # The key is the roundtrip via TestsBase.set_utcnow in server_tests.py.
        # The invariant is:
        #   dt == datetime.utcfromtimestamp(calendar.timegm(dt.utctimetuple()))
        return string and datetime.utcfromtimestamp(float(string))
    except (TypeError, ValueError, OverflowError):
        # Was a bare 'except:'; narrowed to the errors float() and
        # utcfromtimestamp() can raise.
        raise ValueError('Bad timestamp %s' % string)
def validate_image(bytestring):
    """Validates an uploaded photo: returns an images.Image for decodable
    data, '' when no data was provided, or False when the data is not a
    usable image."""
    try:
        image = ''
        if bytestring:
            image = images.Image(bytestring)
            # Accessing .width forces the image bytes to be parsed; it
            # raises if they are not a decodable image.
            image.width
        return image
    except:
        # Deliberately broad: any failure to parse means 'not an image'.
        # NOTE(review): exact exception types raised by the App Engine
        # images API are not visible here -- confirm before narrowing.
        return False
def validate_version(string):
    """Version, if present, should be in pfif versions."""
    if not string:
        return string
    if string not in pfif.PFIF_VERSIONS:
        raise ValueError('Bad pfif version: %s' % string)
    return string
# ==== Other utilities =========================================================
def optionally_filter_sensitive_fields(records, auth=None):
    """Removes sensitive fields from a list of dictionaries, unless the client
    has full read authorization."""
    has_full_read = auth and auth.full_read_permission
    if not has_full_read:
        filter_sensitive_fields(records)
def filter_sensitive_fields(records):
    """Removes sensitive fields from a list of dictionaries."""
    # Blank each sensitive field in place; keys that are absent stay absent.
    sensitive_keys = ('date_of_birth', 'author_email', 'author_phone',
                      'email_of_found_person', 'phone_of_found_person')
    for record in records:
        for key in sensitive_keys:
            if key in record:
                record[key] = ''
def get_secret(name):
    """Gets a secret from the datastore by name, or returns None if missing."""
    entity = model.Secret.get_by_key_name(name)
    return entity.secret if entity else None
# A datetime.datetime object representing debug time, or None when the
# real clock should be used.
_utcnow_for_test = None

def set_utcnow_for_test(now):
    """Set current time for debug purposes."""
    global _utcnow_for_test
    _utcnow_for_test = now

def get_utcnow():
    """Return current time in utc, or debug value if set."""
    # A truthy debug value overrides the real clock.
    return _utcnow_for_test or datetime.utcnow()
# ==== Base Handler ============================================================
class Struct:
    """A simple bag of attributes built from keyword arguments."""
    def __init__(self, **kwargs):
        for attribute, value in kwargs.items():
            setattr(self, attribute, value)
# Process-local render cache: maps cache key -> rendered response text.
global_cache = {}
# Maps cache key -> time.time() when the entry was inserted (for expiry).
global_cache_insert_time = {}
class Handler(webapp.RequestHandler):
# Handlers that don't use a subdomain configuration can set this to False.
subdomain_required = True
# Handlers that require HTTPS can set this to True.
https_required = False
# Handlers to enable even for deactivated subdomains can set this to True.
ignore_deactivation = False
auto_params = {
'lang': strip,
'query': strip,
'first_name': strip,
'last_name': strip,
'sex': validate_sex,
'date_of_birth': validate_approximate_date,
'age': validate_age,
'home_street': strip,
'home_neighborhood': strip,
'home_city': strip,
'home_state': strip,
'home_postal_code': strip,
'home_country': strip,
'author_name': strip,
'author_phone': strip,
'author_email': strip,
'source_url': strip,
'source_date': strip,
'source_name': strip,
'description': strip,
'expiry_option': validate_expiry,
'dupe_notes': validate_yes,
'id': strip,
'text': strip,
'status': validate_status,
'last_known_location': strip,
'found': validate_yes,
'email_of_found_person': strip,
'phone_of_found_person': strip,
'error': strip,
'role': validate_role,
'clone': validate_yes,
'small': validate_yes,
'style': strip,
'add_note': validate_yes,
'photo_url': strip,
'photo': validate_image,
'max_results': validate_int,
'skip': validate_int,
'min_entry_date': validate_datetime,
'person_record_id': strip,
'omit_notes': validate_yes,
'id1': strip,
'id2': strip,
'id3': strip,
'version': validate_version,
'content_id': strip,
'target': strip,
'signature': strip,
'flush_cache': validate_yes,
'operation': strip,
'confirm': validate_yes,
'key': strip,
'subdomain_new': strip,
'utcnow': validate_timestamp,
'subscribe_email' : strip,
'subscribe' : validate_checkbox,
}
def redirect(self, url, **params):
if re.match('^[a-z]+:', url):
if params:
url += '?' + urlencode(params)
else:
url = self.get_url(url, **params)
return webapp.RequestHandler.redirect(self, url)
def cache_key_for_request(self):
# Use the whole url as the key. We make sure the lang is included or
# the old language may be sticky.
return set_url_param(self.request.url, 'lang', self.params.lang)
def render_from_cache(self, cache_time, key=None):
"""Render from cache if appropriate. Returns true if done."""
if not cache_time:
return False
now = time.time()
key = self.cache_key_for_request()
if cache_time > (now - global_cache_insert_time.get(key, 0)):
self.write(global_cache[key])
logging.debug('Rendering cached response.')
return True
logging.debug('Render cache missing/stale, re-rendering.')
return False
def render(self, name, cache_time=0, **values):
"""Renders the template, optionally caching locally.
The optional cache is local instead of memcache--this is faster but
will be recomputed for every running instance. It also consumes local
memory, but that's not a likely issue for likely amounts of cached data.
Args:
name: name of the file in the template directory.
cache_time: optional time in seconds to cache the response locally.
"""
if self.render_from_cache(cache_time):
return
values['env'] = self.env # pass along application-wide context
values['params'] = self.params # pass along the query parameters
# TODO(kpy): Remove "templates/" from all template names in calls
# to this method, and have this method call render_to_string instead.
response = webapp.template.render(os.path.join(ROOT, name), values)
self.write(response)
if cache_time:
now = time.time()
key = self.cache_key_for_request()
global_cache[key] = response
global_cache_insert_time[key] = now
def render_to_string(self, name, **values):
"""Renders the specified template to a string."""
return webapp.template.render(
os.path.join(ROOT, 'templates', name), values)
def error(self, code, message=''):
self.info(code, message, style='error')
def info(self, code, message='', message_html='', style='info'):
is_error = 400 <= code < 600
if is_error:
webapp.RequestHandler.error(self, code)
else:
self.response.set_status(code)
if not message and not message_html:
message = '%d: %s' % (code, httplib.responses.get(code))
try:
self.render('templates/message.html', cls=style,
message=message, message_html=message_html)
except:
self.response.out.write(message)
self.terminate_response()
def terminate_response(self):
"""Prevents any further output from being written."""
self.response.out.write = lambda *args: None
self.get = lambda *args: None
self.post = lambda *args: None
def write(self, text):
self.response.out.write(text)
def select_locale(self):
"""Detect and activate the appropriate locale. The 'lang' query
parameter has priority, then the django_language cookie, then the
default setting."""
lang = (self.params.lang or
self.request.cookies.get('django_language', None) or
django.conf.settings.LANGUAGE_CODE)
lang = urllib.quote(lang)
self.response.headers.add_header(
'Set-Cookie', 'django_language=%s' % lang)
django.utils.translation.activate(lang)
rtl = django.utils.translation.get_language_bidi()
self.response.headers.add_header('Content-Language', lang)
return lang, rtl
def get_url(self, path, scheme=None, **params):
"""Constructs the absolute URL for a given path and query parameters,
preserving the current 'subdomain', 'small', and 'style' parameters."""
for name in ['subdomain', 'small', 'style']:
if self.request.get(name) and name not in params:
params[name] = self.request.get(name)
if params:
path += ('?' in path and '&' or '?') + urlencode(params)
current_scheme, netloc, _, _, _ = urlparse.urlsplit(self.request.url)
if netloc.split(':')[0] == 'localhost':
scheme = 'http' # HTTPS is not available during testing
return (scheme or current_scheme) + '://' + netloc + path
def get_subdomain(self):
"""Determines the subdomain of the request."""
# The 'subdomain' query parameter always overrides the hostname.
if self.request.get('subdomain'):
return self.request.get('subdomain')
levels = self.request.headers.get('Host', '').split('.')
if levels[-2:] == ['appspot', 'com'] and len(levels) >= 4:
# foo.person-finder.appspot.com -> subdomain 'foo'
# bar.kpy.latest.person-finder.appspot.com -> subdomain 'bar'
return levels[0]
# Use the 'default_subdomain' setting, if present.
return config.get('default_subdomain')
def get_parent_domain(self):
"""Determines the app's domain, not including the subdomain."""
levels = self.request.headers.get('Host', '').split('.')
if levels[-2:] == ['appspot', 'com']:
return '.'.join(levels[-3:])
return '.'.join(levels)
def get_start_url(self, subdomain=None):
"""Constructs the URL to the start page for this subdomain."""
subdomain = subdomain or self.subdomain
levels = self.request.headers.get('Host', '').split('.')
if levels[-2:] == ['appspot', 'com']:
return 'http://' + '.'.join([subdomain] + levels[-3:])
return self.get_url('/', subdomain=subdomain)
def send_mail(self, **params):
"""Sends e-mail using a sender address that's allowed for this app."""
# TODO(kpy): When the outgoing mail queue is added, use it instead
# of sending mail immediately.
app_id = os.environ['APPLICATION_ID']
mail.send_mail(
sender='Do not reply <do-not-reply@%s.appspotmail.com>' % app_id,
**params)
def get_captcha_html(self, error_code=None, use_ssl=False):
"""Generates the necessary HTML to display a CAPTCHA validation box."""
# We use the 'custom_translations' parameter for UI messages, whereas
# the 'lang' parameter controls the language of the challenge itself.
# reCAPTCHA falls back to 'en' if this parameter isn't recognized.
lang = self.env.lang.split('-')[0]
return captcha.get_display_html(
public_key=config.get('captcha_public_key'),
use_ssl=use_ssl, error=error_code, lang=lang,
custom_translations={
# reCAPTCHA doesn't support all languages, so we treat its
# messages as part of this app's usual translation workflow
'instructions_visual': _('Type the two words:'),
'instructions_audio': _('Type what you hear:'),
'play_again': _('Play the sound again'),
'cant_hear_this': _('Download the sound as MP3'),
'visual_challenge': _('Get a visual challenge'),
'audio_challenge': _('Get an audio challenge'),
'refresh_btn': _('Get a new challenge'),
'help_btn': _('Help'),
'incorrect_try_again': _('Incorrect. Try again.')
}
)
def get_captcha_response(self):
"""Returns an object containing the CAPTCHA response information for the
given request's CAPTCHA field information."""
challenge = self.request.get('recaptcha_challenge_field')
response = self.request.get('recaptcha_response_field')
remote_ip = os.environ['REMOTE_ADDR']
return captcha.submit(
challenge, response, config.get('captcha_private_key'), remote_ip)
def handle_exception(self, exception, debug_mode):
logging.error(traceback.format_exc())
self.error(500, _(
'There was an error processing your request. Sorry for the '
'inconvenience. Our administrators will investigate the source '
'of the problem, but please check that the format of your '
'request is correct.'))
def initialize(self, *args):
webapp.RequestHandler.initialize(self, *args)
self.params = Struct()
self.env = Struct()
# Log AppEngine-specific request headers.
for name in self.request.headers.keys():
if name.lower().startswith('x-appengine'):
logging.debug('%s: %s' % (name, self.request.headers[name]))
# Validate query parameters.
for name, validator in self.auto_params.items():
try:
value = self.request.get(name, '')
setattr(self.params, name, validator(value))
except Exception, e:
setattr(self.params, name, validator(None))
return self.error(400, 'Invalid parameter %s: %s' % (name, e))
if self.params.flush_cache:
# Useful for debugging and testing.
memcache.flush_all()
global_cache.clear()
global_cache_insert_time.clear()
# Activate localization.
lang, rtl = self.select_locale()
# Put common non-subdomain-specific template variables in self.env.
self.env.netloc = urlparse.urlparse(self.request.url)[1]
self.env.domain = self.env.netloc.split(':')[0]
self.env.parent_domain = self.get_parent_domain()
self.env.lang = lang
self.env.virtual_keyboard_layout = VIRTUAL_KEYBOARD_LAYOUTS.get(lang)
self.env.rtl = rtl
self.env.back_chevron = rtl and u'\xbb' or u'\xab'
self.env.analytics_id = get_secret('analytics_id')
self.env.maps_api_key = get_secret('maps_api_key')
# Provide the status field values for templates.
self.env.statuses = [Struct(value=value, text=NOTE_STATUS_TEXT[value])
for value in pfif.NOTE_STATUS_VALUES]
# Expiry option field values (durations)
expiry_keys = PERSON_EXPIRY_TEXT.keys().sort()
self.env.expiry_options = [
Struct(value=value, text=PERSON_EXPIRY_TEXT[value])
for value in sorted(PERSON_EXPIRY_TEXT.keys(),
key=int)
]
# Check for SSL (unless running on localhost for development).
if self.https_required and self.env.domain != 'localhost':
scheme = urlparse.urlparse(self.request.url)[0]
if scheme != 'https':
return self.error(403, 'HTTPS is required.')
# Determine the subdomain.
self.subdomain = self.get_subdomain()
# Check for an authorization key.
self.auth = None
if self.subdomain and self.params.key:
self.auth = model.Authorization.get(self.subdomain, self.params.key)
# Handlers that don't need a subdomain configuration can skip it.
if not self.subdomain:
if self.subdomain_required:
return self.error(400, 'No subdomain specified.')
return
# Reject requests for subdomains that haven't been activated.
if not model.Subdomain.get_by_key_name(self.subdomain):
return self.error(404, 'No such domain.')
# Get the subdomain-specific configuration.
self.config = config.Configuration(self.subdomain)
# To preserve the subdomain properly as the user navigates the site:
# (a) For links, always use self.get_url to get the URL for the HREF.
# (b) For forms, use a plain path like "/view" for the ACTION and
# include {{env.subdomain_field_html}} inside the form element.
subdomain_field_html = (
'<input type="hidden" name="subdomain" value="%s">' %
self.request.get('subdomain', ''))
# Put common subdomain-specific template variables in self.env.
self.env.subdomain = self.subdomain
titles = self.config.subdomain_titles or {}
self.env.subdomain_title = titles.get(lang, titles.get('en', '?'))
self.env.keywords = self.config.keywords
self.env.family_name_first = self.config.family_name_first
self.env.use_family_name = self.config.use_family_name
self.env.use_postal_code = self.config.use_postal_code
self.env.map_default_zoom = self.config.map_default_zoom
self.env.map_default_center = self.config.map_default_center
self.env.map_size_pixels = self.config.map_size_pixels
self.env.language_api_key = self.config.language_api_key
self.env.subdomain_field_html = subdomain_field_html
self.env.main_url = self.get_url('/')
self.env.embed_url = self.get_url('/embed')
self.env.main_page_custom_html = self.config.main_page_custom_html
self.env.results_page_custom_html = self.config.results_page_custom_html
self.env.view_page_custom_html = self.config.view_page_custom_html
# Provide the contents of the language menu.
self.env.language_menu = [
{'lang': lang,
'endonym': LANGUAGE_ENDONYMS.get(lang, '?'),
'url': set_url_param(self.request.url, 'lang', lang)}
for lang in self.config.language_menu_options or []
]
# If this subdomain has been deactivated, terminate with a message.
if self.config.deactivated and not self.ignore_deactivation:
self.env.language_menu = []
self.render('templates/message.html', cls='deactivation',
message_html=self.config.deactivation_message_html)
self.terminate_response()
def is_test_mode(self):
"""Returns True if the request is in test mode. Request is considered
to be in test mode if the remote IP address is the localhost and if
the 'test_mode' HTTP parameter exists and is set to 'yes'."""
post_is_test_mode = validate_yes(self.request.get('test_mode', ''))
client_is_localhost = os.environ['REMOTE_ADDR'] == '127.0.0.1'
return post_is_test_mode and client_is_localhost
def run(*mappings, **kwargs):
    """Builds a WSGI application from URL mappings and runs it."""
    application = webapp.WSGIApplication(list(mappings), **kwargs)
    webapp.util.run_wsgi_app(application)
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import simplejson
import sys
from model import *
from utils import *
def encode_date(object):
    """Encodes Python dates as specially marked JavaScript strings."""
    if not isinstance(object, datetime):
        return None
    # JavaScript Date months are zero-based; seconds are deliberately dropped.
    year, month, day, hour, minute = object.timetuple()[:5]
    return '<<new Date(%d,%d,%d,%d,%d)>>' % (year, month - 1, day, hour, minute)
def pack_json(json):
    """Compacts JSON to save bandwidth (currently saves about 40%)."""
    # Remove unnecessary spaces and punctuation.
    for verbose, compact in [('{"c": ', '{c:'), ('{"v": ', '{v:'),
                             ('}, {', '},{')]:
        json = json.replace(verbose, compact)
    # Replace "new Date(...)" with a shorter function call, "D(...)".
    prefix = '(D = function(y,l,d,h,m) {return new Date(y,l,d,h,m);}) && '
    return prefix + json.replace('new Date(', 'D(')
class Dashboard(Handler):
    """Admin dashboard that charts recent counter data for all subdomains."""

    # This dashboard shows information for all subdomains.
    subdomain_required = False

    def get(self):
        # Determine the time range to display. We currently show the last
        # 10 days of data, which encodes to about 100 kb of JSON text.
        max_time = get_utcnow()
        min_time = max_time - timedelta(10)
        # Gather the data into a table, with a column for each subdomain. See:
        # http://code.google.com/apis/visualization/documentation/reference.html#dataparam
        subdomains = sorted([s.key().name() for s in Subdomain.all()])
        data = {}
        for scan_name in ['person', 'note']:
            data[scan_name] = []
            # 'blanks' holds one empty cell per already-processed subdomain,
            # so each subdomain's values land in their own chart column.
            blanks = []
            for subdomain in subdomains:
                query = Counter.all_finished_counters(subdomain, scan_name)
                counters = query.filter('timestamp >', min_time).fetch(1000)
                data[scan_name] += [
                    {'c': [{'v': c.timestamp}] + blanks + [{'v': c.get('all')}]}
                    for c in counters
                ]
                # Move over one column for the next subdomain.
                blanks.append({})
        # Encode the table as JSON.
        json = simplejson.dumps(data, default=encode_date)
        # Convert the specially marked JavaScript strings to JavaScript dates.
        json = json.replace('"<<', '').replace('>>"', '')
        # Render the page with the JSON data in it.
        self.render('templates/admin_dashboard.html',
                    data_js=pack_json(json),
                    subdomains_js=simplejson.dumps(subdomains))
# Run as a standalone CGI script under App Engine.
if __name__ == '__main__':
    run(('/admin/dashboard', Dashboard))
| Python |
import base64
import cgi
# Fail fast with an actionable message if the AES implementation is absent.
# Fixes: 'pycrpyto' typo in the error message, and the bare 'except:' is
# narrowed to ImportError so unrelated errors are not masked.
try:
    from Crypto.Cipher import AES
except ImportError:
    raise Exception(
        "You need the pycrypto library: "
        "http://cheeseshop.python.org/pypi/pycrypto/")

# Base URL of the reCAPTCHA Mailhide service.
MAIL_HIDE_BASE = "http://mailhide.recaptcha.net"
def asurl(email, public_key, private_key):
    """Wraps an email address with reCAPTCHA mailhide and
    returns the url.  public_key is the public key from reCAPTCHA
    (in the base 64 encoded format).  Private key is the AES key, and
    should be 32 hex chars."""
    aes_key = base64.b16decode(private_key, casefold=True)
    cryptmail = _encrypt_string(email, aes_key, '\0' * 16)
    encoded = base64.urlsafe_b64encode(cryptmail)
    return "%s/d?k=%s&c=%s" % (MAIL_HIDE_BASE, public_key, encoded)
def ashtml(email, public_key, private_key):
    """Wraps an email address with reCAPTCHA Mailhide and
    returns html that displays the email"""
    url = asurl(email, public_key, private_key)
    userpart, domainpart = _doterizeemail(email)
    substitutions = {
        'user': cgi.escape(userpart),
        'domain': cgi.escape(domainpart),
        'url': cgi.escape(url),
    }
    return """%(user)s<a href='%(url)s' onclick="window.open('%(url)s', '', 'toolbar=0,scrollbars=0,location=0,statusbar=0,menubar=0,resizable=0,width=500,height=300'); return false;" title="Reveal this e-mail address">...</a>@%(domain)s""" % substitutions
def _pad_string (str, block_size):
numpad = block_size - (len (str) % block_size)
return str + numpad * chr (numpad)
def _encrypt_string(str, aes_key, aes_iv):
    """AES-CBC encrypts the padded string; key and IV must be 16 bytes."""
    if len(aes_key) != 16:
        raise Exception("expecting key of length 16")
    if len(aes_iv) != 16:
        raise Exception("expecting iv of length 16")
    cipher = AES.new(aes_key, AES.MODE_CBC, aes_iv)
    return cipher.encrypt(_pad_string(str, 16))
def _doterizeemail (email):
"""replaces part of the username with dots"""
try:
[user, domain] = email.split ('@')
except:
# handle invalid emails... sorta
user = email
domain = ""
if len(user) <= 4:
user_prefix = user[:1]
elif len(user) <= 6:
user_prefix = user[:3]
else:
user_prefix = user[:4]
return (user_prefix, domain)
| Python |
# This file is originally from recaptcha-client 1.0.5 (obtained from pypi),
# now modified to support custom translations.
import urllib
import urllib2
import simplejson
# Endpoint serving the CAPTCHA challenge over SSL.
API_SSL_SERVER = 'https://api-secure.recaptcha.net'
# Endpoint serving the CAPTCHA challenge over plain HTTP.
API_SERVER = 'http://api.recaptcha.net'
# Host used to verify submitted CAPTCHA solutions.
VERIFY_SERVER = 'api-verify.recaptcha.net'
class RecaptchaResponse(object):
    """Outcome of a reCAPTCHA verification attempt."""
    def __init__(self, is_valid, error_code=None):
        # error_code is a reCAPTCHA error string, or None on success.
        self.error_code = error_code
        self.is_valid = is_valid
def get_display_html(public_key, use_ssl=False, error=None,
                     lang='en', custom_translations=None):
    """Gets the HTML to display for reCAPTCHA

    public_key -- The public api key
    use_ssl -- Should the request be sent over ssl?
    error -- An error message to display (from RecaptchaResponse.error_code)
    lang -- language code for the challenge itself
    custom_translations -- optional mapping of reCAPTCHA UI message names
        to translated text"""
    # None sentinel instead of a mutable {} default argument.
    if custom_translations is None:
        custom_translations = {}
    error_param = ''
    if error:
        error_param = '&error=%s' % error
    server = API_SERVER
    if use_ssl:
        server = API_SSL_SERVER
    # Objects created by _('...') are unpalatable to simplejson.
    custom_translations = dict((key, unicode(str(value), 'utf-8'))
                               for (key, value) in custom_translations.items())
    options = {
        'theme': 'white',
        'lang': lang,
        'custom_translations': custom_translations
    }
    return '''
<script>
  var RecaptchaOptions = %(options)s;
</script>
<script src="%(server)s/challenge?k=%(public_key)s%(error_param)s"></script>
<noscript>
  <iframe src="%(server)s/noscript?k=%(public_key)s%(error_param)s"
      height="300" width="500" frameborder="0"></iframe><br>
  <textarea name="recaptcha_challenge_field" rows="3" cols="40"></textarea>
  <input type="hidden" name="recaptcha_response_field" value="manual_challenge">
</noscript>
''' % {
        'options': simplejson.dumps(options),
        'server': server,
        'public_key': public_key,
        'error_param': error_param,
    }
def submit(recaptcha_challenge_field,
           recaptcha_response_field,
           private_key,
           remoteip):
    """
    Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
    for the request

    recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form
    recaptcha_response_field -- The value of recaptcha_response_field from the form
    private_key -- your reCAPTCHA private key
    remoteip -- the user's ip address
    """
    # Both form fields must be present and non-empty before contacting the
    # verification server.
    if not (recaptcha_response_field and recaptcha_challenge_field):
        return RecaptchaResponse(is_valid=False,
                                 error_code='incorrect-captcha-sol')

    def encode_if_necessary(s):
        # urllib can't handle Unicode; send UTF-8 bytes.
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s

    params = urllib.urlencode({
        'privatekey': encode_if_necessary(private_key),
        'remoteip': encode_if_necessary(remoteip),
        'challenge': encode_if_necessary(recaptcha_challenge_field),
        'response': encode_if_necessary(recaptcha_response_field),
    })
    request = urllib2.Request(
        url="http://%s/verify" % VERIFY_SERVER,
        data=params,
        headers={
            "Content-type": "application/x-www-form-urlencoded",
            "User-agent": "reCAPTCHA Python",
        })
    httpresp = urllib2.urlopen(request)
    # The verifier answers 'true'/'false' on the first line, and an error
    # code on the second line when verification fails.
    return_values = httpresp.read().splitlines()
    httpresp.close()
    if return_values[0] == "true":
        return RecaptchaResponse(is_valid=True)
    return RecaptchaResponse(is_valid=False, error_code=return_values[1])
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'eyalf@google.com (Eyal Fink)'
import unicodedata
import logging
import re
class TextQuery():
"""This class encapsulates the processing we are doing both for indexed
strings like first_name and last_name and for a query string. Currently
the processing includes normalization (see doc below) and splitting to
words. Future stuff we might add: indexing of phone numbers, extracting
of locations for geo-search, synonym support."""
def __init__(self, query):
self.query = query
self.normalized = normalize(query)
# Split out each CJK ideograph as its own word.
# The main CJK ideograph range is from U+4E00 to U+9FFF.
# CJK Extension A is from U+3400 to U+4DFF.
cjk_separated = re.sub(ur'([\u3400-\u9fff])', r' \1 ', self.normalized)
# Separate the query into words.
self.words = cjk_separated.split()
# query_words is redundant now but I'm leaving it since I don't want to
# change the signature of TextQuery yet
self.query_words = self.words
def normalize(string):
    """Normalize a string to all uppercase, remove accents, delete apostrophes,
    and replace non-letters with spaces."""
    string = unicode(string or '').strip().upper()
    # TODO(eyalf): we need a better list of character categories to keep,
    # one that will work for non-Latin languages.
    kept = []
    for ch in unicodedata.normalize('NFD', string):
        category = unicodedata.category(ch)
        if category.startswith('L'):
            kept.append(ch)             # keep letters
        elif category == 'Mn' or ch == "'":
            pass                        # drop accents; treat O'Hearn as OHEARN
        else:
            kept.append(' ')            # everything else becomes a space
    return ''.join(kept)
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import mail
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class EmailSender(webapp.RequestHandler):
    """Simple handler to send email; intended to be called from a taskqueue
    task so email sending can be throttled to stay below app engine quotas."""

    def post(self):
        # Read the message fields from the POST body in the same order the
        # original keyword arguments were evaluated, then hand them off to
        # the App Engine mail API.
        sender = self.request.get('sender')
        subject = self.request.get('subject')
        to = self.request.get('to')
        body = self.request.get('body')
        mail.send_mail(sender=sender, subject=subject, to=to, body=body)
def main():
    """WSGI entry point: routes /admin/send_mail to EmailSender."""
    application = webapp.WSGIApplication([
        ('/admin/send_mail', EmailSender),
    ])
    run_wsgi_app(application)


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import db
from model import Subscription
from utils import *
import reveal
from django.utils.translation import ugettext as _
class Unsubscribe(Handler):
    """Removes an e-mail subscription identified by a signed token."""

    def get(self):
        email = self.request.get('email')
        token = self.request.get('token')
        # The token must be a valid signature of 'unsubscribe:<email>'.
        if not reveal.verify('unsubscribe:%s' % email, token):
            return self.error(200, _('This link is invalid.'))
        subscription = Subscription.get(self.subdomain, self.params.id, email)
        if not subscription:
            return self.error(200, _('You are already unsubscribed.'))
        db.delete(subscription)
        return self.info(200, _('You have successfully unsubscribed.'))
# Script entry point: map the /unsubscribe URL to the handler.
if __name__ == '__main__':
    run(('/unsubscribe', Unsubscribe))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import reveal
import model
import utils
from model import db
from django.utils.translation import ugettext as _
# The number of days an expired record lingers before the DeleteExpired task
# wipes it from the database.  When a user deletes a record through the UI,
# we carry that out by setting the expiry to the current time, so this is also
# the number of days after deletion during which the record can be restored.
EXPIRED_TTL_DAYS = 3
class Delete(utils.Handler):
    """Handles a user request to delete a person record."""

    def get(self):
        """Prompts the user with a Turing test before carrying out deletion."""
        person = model.Person.get(self.subdomain, self.params.id)
        if not person:
            return self.error(400, 'No person with ID: %r' % self.params.id)
        self.render('templates/delete.html',
                    person=person,
                    view_url=self.get_url('/view', id=self.params.id),
                    captcha_html=self.get_captcha_html())

    def post(self):
        """If the user passed the Turing test, delete the record."""
        person = model.Person.get(self.subdomain, self.params.id)
        if not person:
            return self.error(400, 'No person with ID: %r' % self.params.id)
        captcha_response = self.get_captcha_response()
        if self.is_test_mode() or captcha_response.is_valid:
            # Log the user action.
            model.UserActionLog.put_new(
                'delete', person, self.request.get('reason_for_deletion'))
            self.delete_person(person)
            return self.info(200, _('The record has been deleted.'))
        else:
            # Captcha failed: render the page again with a fresh challenge.
            captcha_html = self.get_captcha_html(captcha_response.error_code)
            self.render('templates/delete.html', person=person,
                        view_url=self.get_url('/view', id=self.params.id),
                        captcha_html=captcha_html)

    def get_restore_url(self, person, ttl=EXPIRED_TTL_DAYS*24*3600):
        """Returns a URL to be used for restoring a deleted person record.
        The default TTL is tied to EXPIRED_TTL_DAYS (previously a separate
        hard-coded 3 days) so the restore link stays valid for exactly the
        window during which the record can still be restored."""
        key_name = person.key().name()
        data = 'restore:%s' % key_name
        token = reveal.sign(data, ttl)
        return self.get_url('/restore', token=token, id=key_name)

    def delete_person(self, person):
        """Delete a person record and associated data.  If it's an original
        record, deletion can be undone within EXPIRED_TTL_DAYS days."""
        if person.is_original():
            # For an original record, set the expiry date and send
            # notifications to all the related e-mail addresses offering an
            # undelete link.  (The externally visible result will be as if we
            # overwrote the record with an expiry date and blank fields.)
            # i18n: Subject line of an e-mail message notifying a user
            # i18n: that a person record has been deleted
            subject = _(
                '[Person Finder] Deletion notice for '
                '"%(first_name)s %(last_name)s"'
            ) % {'first_name': person.first_name,
                 'last_name': person.last_name}
            # Send e-mail to all the addresses notifying them of the deletion.
            for email in person.get_associated_emails():
                if email == person.author_email:
                    template_name = 'deletion_email_for_person_author.txt'
                else:
                    template_name = 'deletion_email_for_note_author.txt'
                self.send_mail(
                    subject=subject,
                    to=email,
                    body=self.render_to_string(
                        template_name,
                        first_name=person.first_name,
                        last_name=person.last_name,
                        site_url=self.get_url('/'),
                        days_until_deletion=EXPIRED_TTL_DAYS,
                        restore_url=self.get_restore_url(person)
                    )
                )
            # Set the expiry_date to now, and set is_expired flags to match.
            person.expiry_date = utils.get_utcnow()
            person.put_expiry_flags()
        else:
            # For a clone record, we don't have authority to change the
            # expiry_date, so we just delete the record now.  (The externally
            # visible result will be as if we had never received a copy of it.)
            db.delete([person] + person.get_notes(filter_expired=False))
# Script entry point: map the /delete URL to the handler.
if __name__ == '__main__':
    utils.run(('/delete', Delete))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Atom PFIF 1.2 feed generation."""
__author__ = 'kpy@google.com (Ka-Ping Yee)'
import pfif
from pfif import format_utc_datetime, xml_escape
def write_element(file, tag, contents, indent=''):
    """Writes a single XML element with the given contents, if non-empty."""
    if contents:
        # xml_escape strips forbidden characters and escapes markup; the
        # result is encoded to UTF-8 bytes before being written out.
        escaped = xml_escape(contents).encode('utf-8')
        file.write(indent + '<%s>%s</%s>\n' % (tag, escaped, tag))
class AtomPfifVersion:
    """Serializes person and note records as Atom feeds for one PFIF version.

    Every writer method emits UTF-8 XML text to a file-like object; 'indent'
    is the leading whitespace prepended to each line for pretty-printing.
    """

    def __init__(self, pfif_version):
        # pfif_version supplies the XML namespace (.ns) and the per-record
        # writers (write_person, write_note) used below.
        self.pfif_version = pfif_version

    def write_person_entry(self, file, person, notes, feed_title, indent=''):
        """Writes a PFIF Atom entry, given a person record and a list of its
        note records. 'feed_title' is the title of the containing feed."""
        file.write(indent + '<entry>\n')
        indent += '  '
        self.pfif_version.write_person(file, person, notes, indent)
        write_element(file, 'id', 'pfif:' + person['person_record_id'], indent)
        first_name = person.get('first_name', '')
        last_name = person.get('last_name', '')
        # Join the two names with a space only when both are present.
        separator = first_name and last_name and ' ' or ''
        title = first_name + separator + last_name
        write_element(file, 'title', title, indent)
        file.write(indent + '<author>\n')
        write_element(file, 'name', person.get('author_name'), indent + '  ')
        write_element(file, 'email', person.get('author_email'), indent + '  ')
        file.write(indent + '</author>\n')
        write_element(file, 'updated', person.get('source_date'), indent)
        file.write(indent + '<source>\n')
        write_element(file, 'title', feed_title, indent + '  ')
        file.write(indent + '</source>\n')
        write_element(file, 'content', title, indent)
        indent = indent[2:]
        file.write(indent + '</entry>\n')

    def write_person_feed(self, file, persons, get_notes_for_person,
                          url, title, subtitle, updated):
        """Takes a list of person records and a function that gets the list
        of note records for each person, and writes a PFIF Atom feed to the
        given file."""
        file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        file.write('<feed xmlns="http://www.w3.org/2005/Atom"\n')
        file.write('      xmlns:pfif="%s">\n' % self.pfif_version.ns)
        write_element(file, 'id', url, '  ')
        write_element(file, 'title', title, '  ')
        write_element(file, 'subtitle', subtitle, '  ')
        write_element(file, 'updated', format_utc_datetime(updated), '  ')
        file.write('  <link rel="self">%s</link>\n' % xml_escape(url))
        for person in persons:
            self.write_person_entry(
                file, person, get_notes_for_person(person), title, '  ')
        file.write('</feed>\n')

    def write_note_entry(self, file, note, indent=''):
        """Writes a PFIF Atom entry, given a note record."""
        file.write(indent + '<entry>\n')
        indent += '  '
        self.pfif_version.write_note(file, note, indent)
        write_element(file, 'id', 'pfif:%s' % note['note_record_id'], indent)
        # Use the first 140 characters of the note text as the entry title.
        write_element(file, 'title', note.get('text', '')[:140], indent)
        file.write(indent + '<author>\n')
        write_element(file, 'name', note.get('author_name'), indent + '  ')
        write_element(file, 'email', note.get('author_email'), indent + '  ')
        file.write(indent + '</author>\n')
        write_element(file, 'updated', note.get('entry_date'), indent)
        write_element(file, 'content', note.get('text'), indent)
        indent = indent[2:]
        file.write(indent + '</entry>\n')

    def write_note_feed(self, file, notes, url, title, subtitle, updated):
        """Takes a list of notes and writes a PFIF Atom feed to a file."""
        file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        file.write('<feed xmlns="http://www.w3.org/2005/Atom"\n')
        file.write('      xmlns:pfif="%s">\n' % self.pfif_version.ns)
        write_element(file, 'id', url, '  ')
        write_element(file, 'title', title, '  ')
        write_element(file, 'subtitle', subtitle, '  ')
        write_element(file, 'updated', format_utc_datetime(updated), '  ')
        file.write('  <link rel="self">%s</link>\n' % xml_escape(url))
        for note in notes:
            self.write_note_entry(file, note, '  ')
        file.write('</feed>\n')
# Singleton serializers, one per supported PFIF version.
ATOM_PFIF_1_2 = AtomPfifVersion(pfif.PFIF_1_2)
ATOM_PFIF_1_3 = AtomPfifVersion(pfif.PFIF_1_3)

# Maps a PFIF version string to its Atom serializer.
ATOM_PFIF_VERSIONS = {
    '1.2': ATOM_PFIF_1_2,
    '1.3': ATOM_PFIF_1_3
}
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils import *
from model import *
class Main(Handler):
    """Front page: lists all Person Finder sites when no subdomain is given,
    otherwise renders the chosen site's start page."""

    # This handler must also work without a subdomain so it can show the list.
    subdomain_required = False

    def get(self):
        if not self.subdomain:
            # No subdomain specified: show a link to each site.
            self.write('''
<style>body { font-family: arial; font-size: 13px; }</style>
<p>Select a Person Finder site:<ul>
''')
            for key in Subdomain.all(keys_only=True):
                url = self.get_start_url(key.name())
                self.write('<li><a href="%s">%s</a>' % (url, key.name()))
            self.write('</ul>')
            return
        if self.render_from_cache(cache_time=600):
            return
        # Round off the count so people don't expect it to change every time
        # they add a record.
        person_count = Counter.get_count(self.subdomain, 'person.all')
        if person_count < 100:
            num_people = 0  # No approximate count will be displayed.
        else:
            # 100, 200, 300, etc.
            num_people = int(round(person_count, -2))
        self.render('templates/main.html', cache_time=600,
                    num_people=num_people,
                    seek_url=self.get_url('/query', role='seek'),
                    provide_url=self.get_url('/query', role='provide'))
# Script entry point: map the root URL to the handler.
if __name__ == '__main__':
    run(('/', Main))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model import *
from utils import *
import prefix
import pfif
import reveal
import sys
from django.utils.translation import ugettext as _
# Fields to show for side-by-side comparison.
COMPARE_FIELDS = pfif.PFIF_1_2.fields['person']
class MultiView(Handler):
    """Shows up to three person records side by side, and lets the user post
    notes marking the records as duplicates of each other."""

    def get(self):
        # To handle multiple persons, we create a single object where
        # each property is a list of values, one for each person.
        # This makes page rendering easier.
        person = dict([(prop, []) for prop in COMPARE_FIELDS])
        # For each field, 'any' keeps the first truthy value seen across the
        # records, so the template can tell whether any record filled it in.
        # NOTE(review): the name shadows the builtin any(); unchanged here.
        any = dict([(prop, None) for prop in COMPARE_FIELDS])
        # Get all persons from db.
        # TODO: Can later optimize to use fewer DB calls.
        for i in [1, 2, 3]:
            id = self.request.get('id%d' % i)
            if not id:
                break
            p = Person.get(self.subdomain, id)
            for prop in COMPARE_FIELDS:
                val = getattr(p, prop)
                if prop == 'sex':  # convert enum value to localized text
                    val = get_person_sex_text(p)
                person[prop].append(val)
                any[prop] = any[prop] or val
        # Check if private info should be revealed.
        content_id = 'multiview:' + ','.join(person['person_record_id'])
        reveal_url = reveal.make_reveal_url(self, content_id)
        show_private_info = reveal.verify(content_id, self.params.signature)
        # TODO: Handle no persons found.
        # Add a calculated full name property - used in the title.
        person['full_name'] = [
            fname + ' ' + lname
            for fname, lname in zip(person['first_name'], person['last_name'])]
        standalone = self.request.get('standalone')
        # Note: we're not showing notes and linked persons information
        # here at the moment.
        self.render('templates/multiview.html',
                    person=person, any=any, standalone=standalone,
                    cols=len(person['first_name']) + 1,
                    onload_function='view_page_loaded()', markdup=True,
                    show_private_info=show_private_info, reveal_url=reveal_url)

    def post(self):
        """Creates cross-linking notes between the selected records after
        validating the required form fields."""
        if not self.params.text:
            return self.error(
                200, _('Message is required. Please go back and try again.'))
        if not self.params.author_name:
            return self.error(
                200, _('Your name is required in the "About you" section. Please go back and try again.'))
        # TODO: To reduce possible abuse, we currently limit to 3 person
        # match. We could guard using e.g. an XSRF token, which I don't know how
        # to build in GAE.
        ids = set()
        for i in [1, 2, 3]:
            id = getattr(self.params, 'id%d' % i)
            if not id:
                break
            ids.add(id)
        if len(ids) > 1:
            # Create a symmetric pair of notes for every pair of records.
            notes = []
            for person_id in ids:
                for other_id in ids - set([person_id]):
                    note = Note.create_original(
                        self.subdomain,
                        entry_date=get_utcnow(),
                        person_record_id=person_id,
                        linked_person_record_id=other_id,
                        text=self.params.text,
                        author_name=self.params.author_name,
                        author_phone=self.params.author_phone,
                        author_email=self.params.author_email,
                        source_date=get_utcnow())
                    notes.append(note)
            db.put(notes)
        self.redirect('/view', id=self.params.id1)
# Script entry point: map the /multiview URL to the handler.
if __name__ == '__main__':
    run(('/multiview', MultiView))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils import *
class Embed(Handler):
    """Renders the page explaining how to embed Person Finder elsewhere."""

    def get(self):
        env = self.env
        # Build the gadget URL for the current host and language first; the
        # remaining links are fixed.
        gadget_url = 'http://%s/gadget?lang=%s' % (env.netloc, env.lang)
        self.render(
            'templates/embed.html',
            close_button=self.params.small,
            gadget_link_html=anchor_start(gadget_url),
            apache_link_html=anchor_start(
                'http://www.apache.org/licenses/LICENSE-2.0.html'),
            developers_link_html=anchor_start(
                'http://code.google.com/p/googlepersonfinder'),
            link_end_html='</a>')
# Script entry point: map the /embed URL to the handler.
if __name__ == '__main__':
    run(('/embed', Embed))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import mail
from recaptcha.client import captcha
from model import db
import datetime
import model
import reveal
import utils
from django.utils.translation import ugettext as _
# When a record is restored after undeletion, its new expiry date is this
# length of time into the future.  (Keyword form replaces the opaque
# positional timedelta(60, 0, 0): same value, explicit units.)
RESTORED_RECORD_TTL = datetime.timedelta(days=60)
class RestoreError(Exception):
    """Container for user-facing error messages about the restore operation."""
class Restore(utils.Handler):
    """This handler lets the user restore a record that has expired but hasn't
    been wiped yet.  This can 'undelete' a deleted record, as long as it has
    been less than within delete.EXPIRED_TTL_DAYS days after deletion."""

    def get(self):
        """Prompts a user with a CAPTCHA to restore the specified record.
        There must be a valid token supplied in the 'token' query parameter."""
        try:
            person, token = self.get_person_and_verify_params()
        except RestoreError, e:
            return self.error(400, unicode(e))
        self.render('templates/restore.html',
                    captcha_html=self.get_captcha_html(),
                    token=token, id=self.params.id)

    def post(self):
        """If the Turing test response is valid, restores the record by setting
        its expiry date into the future.  Otherwise, offer another test."""
        try:
            person, token = self.get_person_and_verify_params()
        except RestoreError, err:
            return self.error(400, unicode(err))
        captcha_response = self.get_captcha_response()
        if not captcha_response.is_valid and not self.is_test_mode():
            # Captcha failed: show the form again with a fresh challenge.
            captcha_html = self.get_captcha_html(captcha_response.error_code)
            self.render('templates/restore.html',
                        captcha_html=captcha_html, token=token,
                        id=self.params.id)
            return
        # Log the user action.
        model.UserActionLog.put_new('restore', person)
        # Move the expiry date into the future to cause the record to reappear.
        person.expiry_date = utils.get_utcnow() + RESTORED_RECORD_TTL
        person.put_expiry_flags()
        record_url = self.get_url(
            '/view', id=person.record_id, subdomain=person.subdomain)
        subject = _(
            '[Person Finder] Record restoration notice for '
            '"%(first_name)s %(last_name)s"'
        ) % {
            'first_name': person.first_name,
            'last_name': person.last_name
        }
        # Notify every address associated with the record of the restoration.
        email_addresses = person.get_associated_emails()
        for address in email_addresses:
            self.send_mail(
                subject=subject,
                to=address,
                body=self.render_to_string(
                    'restoration_email.txt',
                    first_name=person.first_name,
                    last_name=person.last_name,
                    record_url=record_url
                )
            )
        self.redirect(record_url)

    def get_person_and_verify_params(self):
        """Checks the request for a valid person id and valid crypto token.
        Returns a tuple containing: (person, token)
        If there is an error we raise a RestoreError, instead of pretending
        we're using C."""
        person = model.Person.get_by_key_name(self.params.id)
        if not person:
            raise RestoreError(
                'The record with the following ID no longer exists: %s' %
                self.params.id.split(':', 1)[1])
        token = self.request.get('token')
        # The token must be a valid signature of 'restore:<id>'.
        data = 'restore:%s' % self.params.id
        if not reveal.verify(data, token):
            raise RestoreError('The token was invalid')
        return (person, token)
# Script entry point: map the /restore URL to the handler.
if __name__ == '__main__':
    utils.run(('/restore', Restore))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for approximate string prefix queries."""
__author__ = 'kpy@google.com (Ka-Ping Yee)'
from google.appengine.ext import db
import unicodedata
def normalize(string):
    """Normalize a string to all uppercase and remove accents."""
    upper = unicode(string or '').strip().upper()
    # NFD decomposition splits accented letters into a base character plus
    # combining marks (category 'Mn'); dropping the marks removes accents.
    kept = []
    for ch in unicodedata.normalize('NFD', upper):
        if unicodedata.category(ch) != 'Mn':
            kept.append(ch)
    return ''.join(kept)
def add_prefix_properties(model_class, *properties):
    """Adds indexable properties to a model class to support prefix queries.

    All properties ending in '_' are extra properties.  The 'properties'
    arguments should be names of existing string properties on the class.
    (Loop variable renamed from 'property', which shadowed the builtin.)"""
    for prop in properties:
        # This property contains a copy of the entire string normalized.
        setattr(model_class, prop + '_n_', db.StringProperty())
        # This property contains just the first character, normalized.
        setattr(model_class, prop + '_n1_', db.StringProperty())
        # This property contains just the first two characters, normalized.
        setattr(model_class, prop + '_n2_', db.StringProperty())
    # Record the prefix properties.
    if not hasattr(model_class, '_prefix_properties'):
        model_class._prefix_properties = []
    model_class._prefix_properties += list(properties)
    # Update the model class.
    # NOTE(review): db._initialize_properties is a private App Engine API;
    # it re-registers the dynamically added properties with the model.
    db._initialize_properties(
        model_class, model_class.__name__, model_class.__bases__,
        model_class.__dict__)
def update_prefix_properties(entity):
    """Finds and updates all prefix-related properties on the given entity.
    (Loop variable renamed from 'property', which shadowed the builtin.)"""
    if hasattr(entity, '_prefix_properties'):
        for prop in entity._prefix_properties:
            value = normalize(getattr(entity, prop))
            setattr(entity, prop + '_n_', value)
            setattr(entity, prop + '_n1_', value[:1])
            setattr(entity, prop + '_n2_', value[:2])
def filter_prefix(query, **kwargs):
    """Approximately filters a query for the given prefix strings.  Each
    keyword argument should specify a desired normalized prefix for a string
    property.  (Loop variable renamed from 'property', which shadowed the
    builtin.)"""
    for prop, prefix in kwargs.items():
        prefix = normalize(prefix)
        # Filter on the longest available indexed prefix (two characters),
        # falling back to one; an empty prefix adds no filter.
        if len(prefix) >= 2:
            query = query.filter(prop + '_n2_ =', prefix[:2])
        elif len(prefix) == 1:
            query = query.filter(prop + '_n1_ =', prefix[:1])
    return query
def get_prefix_matches(query, limit, **kwargs):
    """Scans the results from a given query, yielding only those which actually
    match the given normalized prefixes.  Each keyword argument should specify
    a desired normalized prefix for a string property.  Stops after 'limit'
    matches have been yielded."""
    for entity in query:
        for property, prefix in kwargs.items():
            value = normalize(getattr(entity, property))
            if not value.startswith(normalize(prefix)):
                break
        else:
            # The inner loop completed without a break: every requested
            # property matched its prefix.
            yield entity
            limit -= 1
            if limit == 0:
                return
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for retrieving uploaded photos for display."""
import model
import utils
class Photo(utils.Handler):
    """Serves the stored image data for an uploaded photo."""

    def get(self):
        if not self.params.id:
            return self.error(404, 'No photo id was specified.')
        try:
            photo_id = int(self.params.id)
        except ValueError:
            # A non-numeric id is untrusted user input and can never match a
            # stored photo; previously this raised an uncaught ValueError
            # (HTTP 500) instead of a 404.
            return self.error(404, 'There is no photo for the specified id.')
        photo = model.Photo.get_by_id(photo_id)
        if not photo:
            return self.error(404, 'There is no photo for the specified id.')
        self.response.headers['Content-Type'] = "image/png"
        self.response.out.write(photo.bin_data)
# Script entry point: map the /photo URL to the handler.
if __name__ == '__main__':
    utils.run(('/photo', Photo))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import db
from recaptcha.client import captcha
import model
import reveal
import utils
class FlagNote(utils.Handler):
    """Marks a specified note as hidden (spam)."""

    def get(self):
        """Renders the confirmation page for hiding or unhiding a note."""
        note = model.Note.get(self.subdomain, self.params.id)
        if not note:
            return self.error(400, 'No note with ID: %r' % self.params.id)
        note.status_text = utils.get_note_status_text(note)
        # Only unhiding (restoring an already-flagged note) needs a captcha.
        captcha_html = note.hidden and self.get_captcha_html() or ''
        # Check if private info should be revealed.
        content_id = 'view:' + note.person_record_id
        reveal_url = reveal.make_reveal_url(self, content_id)
        show_private_info = reveal.verify(content_id, self.params.signature)
        self.render('templates/flag_note.html',
                    onload_function='load_language_api()',
                    note=note, captcha_html=captcha_html, reveal_url=reveal_url,
                    flag_note_page=True, show_private_info=show_private_info,
                    signature=self.params.signature)

    def post(self):
        """Toggles the note's hidden flag and logs the user action."""
        note = model.Note.get(self.subdomain, self.params.id)
        if not note:
            return self.error(400, 'No note with ID: %r' % self.params.id)
        # A captcha response is only fetched when unhiding; for a visible
        # note this is False and the first branch below is always taken.
        captcha_response = note.hidden and self.get_captcha_response()
        if not note.hidden or captcha_response.is_valid or self.is_test_mode():
            note.hidden = not note.hidden
            db.put(note)
            model.UserActionLog.put_new(
                (note.hidden and 'hide') or 'unhide',
                note, self.request.get('reason_for_report', ''))
            self.redirect(self.get_url('/view', id=note.person_record_id,
                                       signature=self.params.signature))
        elif not captcha_response.is_valid:
            # Captcha failed: render the page again with a fresh challenge.
            captcha_html = self.get_captcha_html(captcha_response.error_code)
            self.render('templates/flag_note.html',
                        onload_function='load_language_api()',
                        note=note, captcha_html=captcha_html,
                        signature=self.params.signature)
# Script entry point: map the /flag_note URL to the handler.
if __name__ == '__main__':
    utils.run(('/flag_note', FlagNote))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from utils import *
class Query(Handler):
    """Renders the query form page; output is cacheable for 600 seconds."""

    def get(self):
        self.render('templates/query.html', cache_time=600)
# Script entry point: map the /query URL to the handler.
if __name__ == '__main__':
    run(('/query', Query))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PFIF 1.1, 1.2, and 1.3 parsing and serialization (see http://zesty.ca/pfif/).
This module converts between PFIF XML documents (PFIF 1.1, 1.2, or 1.3) and
plain Python dictionaries that have PFIF 1.3 field names as keys (always 1.3)
and Unicode strings as values. Some useful constants are also defined here
according to the PFIF specification. Use parse() to parse PFIF 1.1, 1.2, or
1.3; use PFIF_1_1, PFIF_1_2, or PFIF_1_3 to serialize to the desired version."""
__author__ = 'kpy@google.com (Ka-Ping Yee) and many other Googlers'
import StringIO
import logging
import os
import re
import xml.sax
import xml.sax.handler
# Possible values for the 'sex' field on a person record.
PERSON_SEX_VALUES = [
    '',  # unspecified
    'female',
    'male',
    'other'
]

# Possible values for the 'status' field on a note record.
NOTE_STATUS_VALUES = [
    '',  # unspecified
    'information_sought',
    'is_note_author',
    'believed_alive',
    'believed_missing',
    'believed_dead',
]

# Fields to preserve in a placeholder for an expired record.
PLACEHOLDER_FIELDS = [
    'person_record_id',
    'source_date',
    'entry_date',
    'expiry_date'
]
def xml_escape(s):
    """Strips characters forbidden in XML and escapes markup characters.

    Restores the entity replacements ('&amp;', '&lt;', '&gt;'), which had
    degenerated into no-op replace() calls, producing invalid XML whenever
    a value contained markup characters."""
    # XML may only contain the following characters (even after entity
    # references are expanded).  See: http://www.w3.org/TR/REC-xml/#charsets
    s = re.sub(u'[^\x09\x0a\x0d\x20-\ud7ff\ue000-\ufffd]', '', s)
    # '&' must be escaped first so the entities added for '<' and '>' are
    # not themselves re-escaped.
    return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
class PfifVersion:
def __init__(self, version, ns, fields, mandatory_fields, serializers):
self.version = version
self.ns = ns
# A dict mapping each record type to a list of its fields in order.
self.fields = fields
# A dict mapping each record type to a list of its mandatory fields.
self.mandatory_fields = mandatory_fields
# A dict mapping field names to serializer functions.
self.serializers = serializers
def check_tag(self, (ns, local), parent=None):
"""Given a namespace-qualified tag and its parent, returns the PFIF
type or field name if the tag is valid, or None if the tag is not
recognized."""
if ns == self.ns:
if not parent or local in self.fields[parent]:
return local
def write_fields(self, file, type, record, indent=''):
"""Writes PFIF tags for a record's fields."""
for field in self.fields[type]:
if record.get(field) or field in self.mandatory_fields[type]:
escaped_value = xml_escape(record.get(field, ''))
file.write(indent + '<pfif:%s>%s</pfif:%s>\n' %
(field, escaped_value.encode('utf-8'), field))
def write_person(self, file, person, notes=[], indent=''):
"""Writes PFIF for a person record and a list of its note records."""
file.write(indent + '<pfif:person>\n')
self.write_fields(file, 'person', person, indent + ' ')
for note in notes:
self.write_note(file, note, indent + ' ')
file.write(indent + '</pfif:person>\n')
def write_note(self, file, note, indent=''):
"""Writes PFIF for a note record."""
file.write(indent + '<pfif:note>\n')
self.write_fields(file, 'note', note, indent + ' ')
file.write(indent + '</pfif:note>\n')
def write_file(self, file, persons, get_notes_for_person=lambda p: []):
"""Takes a list of person records and a function that gets the list
of note records for each person, and writes PFIF to the given file
object. Each record is a plain dictionary of strings."""
file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
file.write('<pfif:pfif xmlns:pfif="%s">\n' % self.ns)
for person in persons:
self.write_person(file, person, get_notes_for_person(person), ' ')
file.write('</pfif:pfif>\n')
def entity_to_dict(self, entity, fields):
"""Convert an entity to a Python dictionary of Unicode strings."""
record = {}
for field in fields:
if field == 'home_zip' and not hasattr(entity, field):
# When writing PFIF 1.1, rename home_postal_code to home_zip.
value = getattr(entity, 'home_postal_code', None)
else:
value = getattr(entity, field, None)
if value:
record[field] = SERIALIZERS.get(field, nop)(value)
return record
def person_to_dict(self, entity, expired=False):
    """Converts a person entity to a dictionary of PFIF person fields.

    If 'expired' is true, all fields except those needed for the
    placeholder record are removed.
    """
    # Named 'record' rather than shadowing the builtin 'dict'.
    record = self.entity_to_dict(entity, self.fields['person'])
    if expired:  # Clear all fields except those needed for the placeholder.
        for field in set(record.keys()) - set(PLACEHOLDER_FIELDS):
            del record[field]
    return record
def note_to_dict(self, entity):
    """Converts a note entity to a dictionary of PFIF note fields."""
    note_fields = self.fields['note']
    return self.entity_to_dict(entity, note_fields)
# Serializers that convert Python values to PFIF strings.
def nop(value):
    """Identity serializer: returns the value unchanged."""
    return value
def format_boolean(value):
    """Serializes a truth value to the PFIF string 'true' or 'false'.

    A missing (falsy) value serializes to 'false', as before.
    """
    # Conditional expression instead of the fragile 'cond and a or b' idiom.
    return 'true' if value else 'false'
def format_utc_datetime(dt):
    """Serializes a UTC datetime as ISO 8601 with a 'Z' suffix.

    Returns '' when dt is None (or otherwise falsy).  Fractional seconds
    are discarded.
    """
    # Explicit branch instead of the fragile 'cond and a or b' idiom.
    if not dt:
        return ''
    return dt.replace(microsecond=0).isoformat() + 'Z'
# Maps field names to serializer functions for fields whose Python values
# need conversion to PFIF strings; all other fields pass through unchanged
# via nop() (see PfifVersion.entity_to_dict).
SERIALIZERS = {  # Serialization functions (for fields that need conversion).
    'found': format_boolean,
    'source_date': format_utc_datetime,
    'entry_date': format_utc_datetime,
    'expiry_date': format_utc_datetime
}
# PFIF 1.1: the original schema.  Note the 'home_zip' field, which later
# versions rename to 'home_postal_code' (see entity_to_dict).
PFIF_1_1 = PfifVersion(
    '1.1',
    'http://zesty.ca/pfif/1.1',
    {
        'person': [  # Fields of a <person> element, in PFIF 1.1 standard order.
            'person_record_id',
            'entry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_name',
            'source_date',
            'source_url',
            'first_name',
            'last_name',
            'home_city',
            'home_state',
            'home_neighborhood',
            'home_street',
            'home_zip',
            'photo_url',
            'other',
        ],
        'note': [  # Fields of a <note> element, in PFIF 1.1 standard order.
            'note_record_id',
            'entry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_date',
            'found',
            'email_of_found_person',
            'phone_of_found_person',
            'last_known_location',
            'text',
        ]
    },
    # Mandatory fields: always written even when empty (see write_fields).
    {
        'person': ['person_record_id', 'first_name', 'last_name'],
        'note': ['note_record_id', 'author_name', 'source_date', 'text'],
    },
    SERIALIZERS)

# PFIF 1.2: adds sex/date_of_birth/age, home_country, note 'status' and
# 'linked_person_record_id'; uses 'home_postal_code' instead of 'home_zip'.
PFIF_1_2 = PfifVersion(
    '1.2',
    'http://zesty.ca/pfif/1.2',
    {
        'person': [  # Fields of a <person> element in PFIF 1.2.
            'person_record_id',
            'entry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_name',
            'source_date',
            'source_url',
            'first_name',
            'last_name',
            'sex',
            'date_of_birth',
            'age',
            'home_street',
            'home_neighborhood',
            'home_city',
            'home_state',
            'home_postal_code',
            'home_country',
            'photo_url',
            'other',
        ],
        'note': [  # Fields of a <note> element in PFIF 1.2.
            'note_record_id',
            'person_record_id',
            'linked_person_record_id',
            'entry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_date',
            'found',
            'status',
            'email_of_found_person',
            'phone_of_found_person',
            'last_known_location',
            'text',
        ]
    },
    {
        'person': ['person_record_id', 'first_name', 'last_name'],
        'note': ['note_record_id', 'author_name', 'source_date', 'text'],
    },
    SERIALIZERS)

# PFIF 1.3: adds 'expiry_date' and 'full_name'; 'full_name' replaces
# first_name/last_name in the mandatory person fields.
PFIF_1_3 = PfifVersion(
    '1.3',
    'http://zesty.ca/pfif/1.3',
    {
        'person': [  # Fields of a <person> element in PFIF 1.3.
            'person_record_id',
            'entry_date',
            'expiry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_name',
            'source_date',
            'source_url',
            'full_name',
            'first_name',
            'last_name',
            'sex',
            'date_of_birth',
            'age',
            'home_street',
            'home_neighborhood',
            'home_city',
            'home_state',
            'home_postal_code',
            'home_country',
            'photo_url',
            'other',
        ],
        'note': [  # Fields of a <note> element in PFIF 1.3.
            'note_record_id',
            'person_record_id',
            'linked_person_record_id',
            'entry_date',
            'author_name',
            'author_email',
            'author_phone',
            'source_date',
            'found',
            'status',
            'email_of_found_person',
            'phone_of_found_person',
            'last_known_location',
            'text',
        ]
    },
    {
        'person': ['person_record_id', 'full_name'],
        'note': ['note_record_id', 'author_name', 'source_date', 'text'],
    },
    SERIALIZERS)

# All supported schema versions, keyed by version string.
PFIF_VERSIONS = {
    '1.1': PFIF_1_1,
    '1.2': PFIF_1_2,
    '1.3': PFIF_1_3
}

PFIF_DEFAULT_VERSION = '1.2'

assert PFIF_DEFAULT_VERSION in PFIF_VERSIONS
def check_pfif_tag(name, parent=None):
    """Recognizes a PFIF XML tag from any supported version of PFIF,
    newest version first; returns None if the tag is not recognized."""
    for version in (PFIF_1_3, PFIF_1_2, PFIF_1_1):
        result = version.check_tag(name, parent)
        if result:
            return result
    return None
def split_first_last_name(all_names):
    """Attempt to extract a last name for a person from a multi-first-name.

    Returns (first_names, last_name), or None when no split is possible."""
    # Remove parenthesized text and digits before tokenizing.
    cleaned = re.sub(r'\(.*\)', ' ', all_names)
    cleaned = re.sub(r'\(\S*', ' ', cleaned)
    cleaned = re.sub(r'\d', '', cleaned)
    tokens = cleaned.split()
    if len(tokens) <= 1:
        return None  # Not enough tokens to split off a last name.
    last_name = re.search(
        r' (\S*(-+ | -+|-+)?\S+)\s*$', cleaned).group(1).strip()
    return all_names.replace(last_name, ''), last_name.replace(' ', '')
class Handler(xml.sax.handler.ContentHandler):
    """SAX event handler that accumulates PFIF person and note records."""

    def __init__(self):
        self.tags = []  # stack of currently open element tags
        self.person = {}  # fields of the <person> currently being parsed
        self.note = {}  # fields of the <note> currently being parsed
        self.enclosed_notes = []  # Notes enclosed by the current <person>.
        self.person_records = []  # all parsed person records
        self.note_records = []  # all parsed note records

    def startElementNS(self, tag, qname, attrs):
        self.tags.append(tag)
        kind = check_pfif_tag(tag)
        if kind == 'person':
            self.person = {}
            self.enclosed_notes = []
        elif kind == 'note':
            self.note = {}

    def endElementNS(self, tag, qname):
        assert self.tags.pop() == tag
        kind = check_pfif_tag(tag)
        if kind == 'person':
            self.person_records.append(self.person)
            if 'person_record_id' in self.person:
                # Copy the person's person_record_id to any enclosed notes.
                record_id = self.person['person_record_id']
                for note in self.enclosed_notes:
                    note['person_record_id'] = record_id
        elif kind == 'note':
            # Save all parsed notes (whether or not enclosed in <person>).
            self.note_records.append(self.note)
            self.enclosed_notes.append(self.note)

    def append_to_field(self, record, tag, parent, content):
        field = check_pfif_tag(tag, parent)
        if not field:
            # Unknown tag: ignore it, but log non-whitespace content.
            if content.strip():
                logging.warn('ignored tag %r with content %r', tag, content)
            return
        # SAX may deliver text in several chunks; concatenate them.
        record[field] = record.get(field, u'') + content

    def characters(self, content):
        if not content or len(self.tags) < 2:
            return
        parent, tag = self.tags[-2], self.tags[-1]
        parent_kind = check_pfif_tag(parent)
        if parent_kind == 'person':
            self.append_to_field(self.person, tag, 'person', content)
        elif parent_kind == 'note':
            self.append_to_field(self.note, tag, 'note', content)
def parse_file(pfif_utf8_file):
    """Parses a UTF-8-encoded PFIF file.

    Returns (person_records, note_records); each record is a plain
    dictionary of strings."""
    content_handler = Handler()
    parser = xml.sax.make_parser()
    parser.setContentHandler(content_handler)
    parser.setFeature(xml.sax.handler.feature_namespaces, True)
    parser.parse(pfif_utf8_file)
    return content_handler.person_records, content_handler.note_records
def parse(pfif_text):
    """Takes the text of a PFIF document, as a Unicode string or UTF-8 string,
    and returns a list of person records and a list of note records.  Each
    record is a plain dictionary of strings."""
    if isinstance(pfif_text, unicode):
        # parse_file expects a UTF-8 byte stream, so a Unicode string must
        # be *encoded* here.  The previous decode('utf-8') call implicitly
        # ASCII-encoded the Unicode string first, raising UnicodeEncodeError
        # for any non-ASCII input.
        pfif_text = pfif_text.encode('utf-8')
    return parse_file(StringIO.StringIO(pfif_text))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for Atom PFIF 1.2 person and note feeds."""
__author__ = 'kpy@google.com (Ka-Ping Yee)'
import atom
import datetime
import model
import pfif
import utils
HARD_MAX_RESULTS = 200 # Clients can ask for more, but won't get more.
MAX_SKIP = 800 # App Engine imposes a limit of 1000 on max_results + skip.
def get_latest_entry_date(entities):
    """Returns the most recent entry_date among the entities, or the
    current UTC time when the list is empty."""
    if not entities:
        return utils.get_utcnow()
    return max(entity.entry_date for entity in entities)
def get_pfif_version(params):
    """Get the pfif object for the specified version, or the default."""
    version = params.version or pfif.PFIF_DEFAULT_VERSION
    return pfif.PFIF_VERSIONS.get(version)
class Person(utils.Handler):
    """Serves the Atom feed of PFIF person records (/feeds/person)."""

    https_required = True

    def get(self):
        # Reject the request when the repository requires a read key and
        # the caller did not supply a valid one.
        if self.config.read_auth_key_required and not (
            self.auth and self.auth.read_permission):
            self.response.set_status(403)
            self.write('Missing or invalid authorization key\n')
            return
        pfif_version = get_pfif_version(self.params)
        atom_version = atom.ATOM_PFIF_VERSIONS.get(pfif_version.version)
        # Clamp the page size and offset to the hard limits above.
        max_results = min(self.params.max_results or 10, HARD_MAX_RESULTS)
        skip = min(self.params.skip or 0, MAX_SKIP)
        if self.params.omit_notes:  # Return only the person records.
            get_notes_for_person = lambda person: []
        else:
            def get_notes_for_person(person):
                # Fetch this person's notes and filter out fields the
                # caller is not authorized to see.
                notes = model.Note.get_by_person_record_id(
                    self.subdomain, person['person_record_id'])
                records = map(pfif_version.note_to_dict, notes)
                utils.optionally_filter_sensitive_fields(records, self.auth)
                return records
        query = model.Person.all_in_subdomain(
            self.subdomain, filter_expired=False)
        if self.params.min_entry_date:  # Scan forward.
            query = query.order('entry_date')
            query = query.filter('entry_date >=', self.params.min_entry_date)
        else:  # Show recent entries, scanning backward.
            query = query.order('-entry_date')
        persons = query.fetch(max_results, skip)
        updated = get_latest_entry_date(persons)
        self.response.headers['Content-Type'] = 'application/xml'
        # Expired records are reduced to placeholder fields by person_to_dict.
        records = [pfif_version.person_to_dict(person, person.is_expired)
                   for person in persons]
        utils.optionally_filter_sensitive_fields(records, self.auth)
        atom_version.write_person_feed(
            self.response.out, records, get_notes_for_person,
            self.request.url, self.env.netloc, '', updated)
class Note(utils.Handler):
    """Serves the Atom feed of PFIF note records (/feeds/note)."""

    https_required = True

    def get(self):
        # Reject the request when the repository requires a read key and
        # the caller did not supply a valid one.
        if self.config.read_auth_key_required and not (
            self.auth and self.auth.read_permission):
            self.response.set_status(403)
            self.write('Missing or invalid authorization key\n')
            return
        pfif_version = get_pfif_version(self.params)
        atom_version = atom.ATOM_PFIF_VERSIONS.get(pfif_version.version)
        # Clamp the page size and offset to the hard limits above.
        max_results = min(self.params.max_results or 10, HARD_MAX_RESULTS)
        skip = min(self.params.skip or 0, MAX_SKIP)
        query = model.Note.all_in_subdomain(self.subdomain)
        if self.params.min_entry_date:  # Scan forward.
            query = query.order('entry_date')
            query = query.filter('entry_date >=', self.params.min_entry_date)
        else:  # Show recent entries, scanning backward.
            query = query.order('-entry_date')
        if self.params.person_record_id:  # Show notes for a specific person.
            query = query.filter('person_record_id =',
                                 self.params.person_record_id)
        notes = query.fetch(max_results, skip)
        updated = get_latest_entry_date(notes)
        self.response.headers['Content-Type'] = 'application/xml'
        records = map(pfif_version.note_to_dict, notes)
        utils.optionally_filter_sensitive_fields(records, self.auth)
        atom_version.write_note_feed(
            self.response.out, records, self.request.url,
            self.env.netloc, '', updated)
if __name__ == '__main__':
utils.run(('/feeds/person', Person), ('/feeds/note', Note))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for importing records in batches, with error detection.
This module converts Python dictionaries into datastore entities.
The values of all dictionary fields are Unicode strings."""
__author__ = 'kpy@google.com (Ka-Ping Yee) and many other Googlers'
import datetime
import logging
import prefix
import re
import sys
from google.appengine.api import datastore_errors
from model import *
from utils import validate_sex, validate_status, get_utcnow
from utils import validate_approximate_date, validate_age
DEFAULT_PUT_RETRIES = 3
MAX_PUT_BATCH = 100
def utf8_decoder(dict_reader):
    """Yields each dictionary with its byte-string values decoded as UTF-8.

    Args:
        dict_reader: An iterable that yields dictionaries with string values

    Yields:
        The same dictionary objects, mutated in place so that all string
        values are Unicode.
    """
    for row in dict_reader:
        # items() snapshots the pairs, so in-place reassignment is safe.
        for key, value in row.items():
            if isinstance(value, str):
                row[key] = value.decode('utf-8')
        yield row
def put_batch(batch, retries=DEFAULT_PUT_RETRIES):
    """Writes a batch of entities to the datastore, retrying on failure.

    Returns the number of entities written, or 0 if every attempt failed.
    """
    for attempt in range(retries):
        try:
            db.put(batch)
            logging.info('Imported records: %d' % len(batch))
            return len(batch)
        except Exception:
            # Catch datastore errors of any kind and retry; the previous
            # bare 'except:' also swallowed SystemExit/KeyboardInterrupt
            # and shadowed the builtins 'type' and 'traceback'.
            _, exc_value, _ = sys.exc_info()
            logging.warn('Retrying batch: %s' % exc_value)
    return 0
date_re = re.compile(r'^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)Z$')
def strip(string_or_none):
    """Returns the stripped string, or '' for a missing/empty value."""
    return (string_or_none or '').strip()
def validate_datetime(datetime_or_datestring):
    """Accepts a datetime or a 'YYYY-MM-DDTHH:MM:SSZ' string.

    Returns a datetime, or None for a missing value; raises ValueError
    for a malformed string."""
    if isinstance(datetime_or_datestring, datetime.datetime):
        return datetime_or_datestring
    if not datetime_or_datestring:
        return None  # A missing value is okay.
    match = date_re.match(datetime_or_datestring)
    if not match:
        raise ValueError('Bad datetime: %r' % datetime_or_datestring)
    return datetime.datetime(*[int(part) for part in match.groups()])
def validate_boolean(string):
    """Returns True for 'true'/'1' (case-insensitive), False for any other
    present value, or None for a missing value."""
    if not string:
        return None  # A missing value is okay.
    return bool(isinstance(string, basestring) and
                string.strip().lower() in ('true', '1'))
def create_person(subdomain, fields):
    """Creates a Person entity in the given subdomain's repository with the
    given field values.  If 'fields' contains a 'person_record_id', calling
    put() on the resulting entity will overwrite any existing (original or
    clone) record with the same person_record_id.  Otherwise, a new original
    person record is created in the given subdomain."""
    # Every free-text field is stripped; dates, sex, and age are validated
    # (the validate_* helpers raise ValueError on bad input, which
    # import_records catches and reports per record).
    person_fields = dict(
        entry_date=get_utcnow(),
        expiry_date=validate_datetime(fields.get('expiry_date')),
        author_name=strip(fields.get('author_name')),
        author_email=strip(fields.get('author_email')),
        author_phone=strip(fields.get('author_phone')),
        source_name=strip(fields.get('source_name')),
        source_url=strip(fields.get('source_url')),
        source_date=validate_datetime(fields.get('source_date')),
        full_name=strip(fields.get('full_name')),
        first_name=strip(fields.get('first_name')),
        last_name=strip(fields.get('last_name')),
        sex=validate_sex(fields.get('sex')),
        date_of_birth=validate_approximate_date(fields.get('date_of_birth')),
        age=validate_age(fields.get('age')),
        home_street=strip(fields.get('home_street')),
        home_neighborhood=strip(fields.get('home_neighborhood')),
        home_city=strip(fields.get('home_city')),
        home_state=strip(fields.get('home_state')),
        # Fall back to 'home_zip' for backward compatibility with PFIF 1.1.
        home_postal_code=strip(
            fields.get('home_postal_code', fields.get('home_zip'))),
        home_country=strip(fields.get('home_country')),
        photo_url=strip(fields.get('photo_url')),
        other=fields.get('other')
    )
    record_id = strip(fields.get('person_record_id'))
    if record_id:  # create a record that might overwrite an existing one
        if is_clone(subdomain, record_id):
            return Person.create_clone(subdomain, record_id, **person_fields)
        else:
            return Person.create_original_with_record_id(
                subdomain, record_id, **person_fields)
    else:  # create a new original record
        return Person.create_original(subdomain, **person_fields)
def create_note(subdomain, fields):
    """Creates a Note entity in the given subdomain's repository with the given
    field values.  If 'fields' contains a 'note_record_id', calling put() on
    the resulting entity will overwrite any existing (original or clone) record
    with the same note_record_id.  Otherwise, a new original note record is
    created in the given subdomain.

    Raises:
        AssertionError: if person_record_id or source_date is missing.
    """
    # Validate required fields with explicit raises instead of 'assert'
    # statements, which are silently stripped when Python runs with -O.
    # AssertionError is kept as the exception type because import_records
    # catches it to report a skipped record.
    if not strip(fields.get('person_record_id')):
        raise AssertionError('person_record_id is required')
    if not strip(fields.get('source_date')):
        raise AssertionError('source_date is required')
    note_fields = dict(
        person_record_id=strip(fields['person_record_id']),
        linked_person_record_id=strip(fields.get('linked_person_record_id')),
        author_name=strip(fields.get('author_name')),
        author_email=strip(fields.get('author_email')),
        author_phone=strip(fields.get('author_phone')),
        source_date=validate_datetime(fields.get('source_date')),
        status=validate_status(fields.get('status')),
        found=validate_boolean(fields.get('found')),
        email_of_found_person=strip(fields.get('email_of_found_person')),
        phone_of_found_person=strip(fields.get('phone_of_found_person')),
        last_known_location=strip(fields.get('last_known_location')),
        text=fields.get('text'),
        entry_date=get_utcnow(),
    )
    record_id = strip(fields.get('note_record_id'))
    if record_id:  # create a record that might overwrite an existing one
        if is_clone(subdomain, record_id):
            return Note.create_clone(subdomain, record_id, **note_fields)
        else:
            return Note.create_original_with_record_id(
                subdomain, record_id, **note_fields)
    else:  # create a new original record
        return Note.create_original(subdomain, **note_fields)
def import_records(subdomain, domain, converter, records):
    """Convert and import a list of entries into a subdomain's repository.

    Args:
        subdomain: Identifies the repository in which to store the records.
        domain: Accept only records that have this original domain.  Only one
            original domain may be imported at a time.
        converter: A function to transform a dictionary of fields to a
            datastore entity.  This function may throw an exception if there
            is anything wrong with the input fields and import_records will
            skip the bad record.  The key_name of the resulting datastore
            entity must begin with domain + '/', or the record will be
            skipped.
        records: A list of dictionaries representing the entries.

    Returns:
        The number of passed-in records that were written (not counting other
        Person records that were updated because they have new Notes), a list
        of (error_message, record) pairs for the skipped records, and the
        number of records processed in total.
    """
    if domain == HOME_DOMAIN:  # not allowed, must be a subdomain
        raise ValueError('Cannot import into domain %r' % HOME_DOMAIN)
    persons = {}  # Person entities to write
    notes = {}  # Note entities to write
    skipped = []  # entities skipped due to an error
    total = 0  # total number of entities for which conversion was attempted
    for fields in records:
        total += 1
        try:
            entity = converter(subdomain, fields)
        except (KeyError, ValueError, AssertionError,
                datastore_errors.BadValueError), e:
            # Bad input for this record: remember why it was skipped.
            skipped.append((e.__class__.__name__ + ': ' + str(e), fields))
            continue
        if entity.original_domain != domain:
            skipped.append(
                ('Not in authorized domain: %r' % entity.record_id, fields))
            continue
        if isinstance(entity, Person):
            entity.update_index(['old', 'new'])
            persons[entity.record_id] = entity
        if isinstance(entity, Note):
            notes[entity.record_id] = entity
    # We keep two dictionaries 'persons' and 'extra_persons', with disjoint
    # key sets: Person entities for the records passed in to import_records()
    # go in 'persons', and any other Person entities affected by the import go
    # in 'extra_persons'.  The two dictionaries are kept separate in order to
    # produce a count of records written that only counts 'persons'.
    extra_persons = {}  # updated Persons other than those being imported
    # For each Note, update the latest_* fields on the associated Person.
    # We do these updates in dictionaries keyed by person_record_id so that
    # multiple updates for one person_record_id will mutate the same object.
    for note in notes.values():
        if note.person_record_id in persons:
            # This Note belongs to a Person that is being imported.
            person = persons[note.person_record_id]
        elif note.person_record_id in extra_persons:
            # This Note belongs to some other Person that is not part of this
            # import and is already being updated due to another Note.
            person = extra_persons[note.person_record_id]
        else:
            # This Note belongs to some other Person that is not part of this
            # import and this is the first such Note in this import.
            person = Person.get(subdomain, note.person_record_id)
            if not person:
                continue  # skip notes whose person record doesn't exist
            extra_persons[note.person_record_id] = person
        person.update_from_note(note)
    # TODO(kpy): Don't overwrite existing Persons with newer source_dates.
    # Now store the imported Persons and Notes, and count them.
    entities = persons.values() + notes.values()
    written = 0
    while entities:
        written += put_batch(entities[:MAX_PUT_BATCH])
        entities[:MAX_PUT_BATCH] = []
    # Also store the other updated Persons, but don't count them.
    entities = extra_persons.values()
    while entities:
        put_batch(entities[:MAX_PUT_BATCH])
        entities[:MAX_PUT_BATCH] = []
    return written, skipped, total
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.ext import db
from google.appengine.api import taskqueue
import model
import reveal
from utils import *
from django.utils.html import escape
from django.utils.translation import ugettext as _
# A lenient e-mail shape: local-part@dotted-domain with a 2-6 letter TLD,
# optionally surrounded by whitespace.
EMAIL_PATTERN = re.compile(r'(?:^|\s)[-a-z0-9_.%$+]+@(?:[-a-z0-9]+\.)+'
                           '[a-z]{2,6}(?:\s|$)', re.IGNORECASE)


def is_email_valid(email):
    """Validates an email address: True if well-formed, False if not,
    None for an empty value."""
    # google.appengine.api.mail.is_email_valid() is unhelpful here;
    # it checks only for the empty string, hence the pattern above.
    if not email:
        return None
    return bool(EMAIL_PATTERN.match(email))
def get_unsubscribe_link(handler, person, email, ttl=7*24*3600):
    """Returns a link that will remove the given email address from the list
    of subscribers for the given person.  The default ttl is one week."""
    # The signed token proves the request came from this e-mail's link.
    token = reveal.sign('unsubscribe:%s' % email, ttl)
    return handler.get_url('/unsubscribe', token=token, email=email,
                           id=person.record_id)
def get_sender(handler):
    """Return the default sender of subscribe emails."""
    # App Engine only permits sending from addresses of the form
    # foo@app-id.appspotmail.com, so derive that host from the app's
    # own domain.
    # TODO(kpy) Factor this out of subscribe
    mail_domain = handler.env.parent_domain.replace(
        'appspot.com', 'appspotmail.com')
    return 'Do Not Reply <do-not-reply@%s>' % mail_domain
def send_notifications(person, note, handler):
    """Queues a status-update e-mail for every valid subscriber of person."""
    sender = get_sender(handler)
    # Send one message per subscription, localized to each subscriber's
    # stored language preference.
    for sub in person.get_subscriptions():
        if is_email_valid(sub.email):
            django.utils.translation.activate(sub.language)
            subject = _('[Person Finder] Status update for %(given_name)s '
                        '%(family_name)s') % {
                            'given_name': escape(person.first_name),
                            'family_name': escape(person.last_name)}
            body = handler.render_to_string(
                'person_status_update_email.txt',
                first_name=person.first_name,
                last_name=person.last_name,
                note=note,
                note_status_text=get_note_status_text(note),
                site_url=handler.get_url('/'),
                view_url=handler.get_url('/view', id=person.record_id),
                unsubscribe_link=get_unsubscribe_link(handler, person,
                                                      sub.email))
            # Deliver asynchronously through the send-mail task queue.
            taskqueue.add(queue_name='send-mail', url='/admin/send_mail',
                          params={'sender': sender,
                                  'to': sub.email,
                                  'subject': subject,
                                  'body': body})
    # Restore the language of the current request.
    django.utils.translation.activate(handler.env.lang)
def send_subscription_confirmation(handler, person, email):
    """Queues a confirmation e-mail when a user subscribes to status
    updates for a person."""
    subject = _('[Person Finder] You are subscribed to status updates for '
                '%(given_name)s %(family_name)s') % {
                    'given_name': escape(person.first_name),
                    'family_name': escape(person.last_name)}
    body = handler.render_to_string(
        'subscription_confirmation_email.txt',
        first_name=person.first_name,
        last_name=person.last_name,
        site_url=handler.get_url('/'),
        view_url=handler.get_url('/view', id=person.record_id),
        unsubscribe_link=get_unsubscribe_link(handler, person, email))
    # Deliver asynchronously through the send-mail task queue.
    taskqueue.add(queue_name='send-mail', url='/admin/send_mail',
                  params={'sender': get_sender(handler),
                          'to': email,
                          'subject': subject,
                          'body': body})
class Subscribe(Handler):
    """Handles requests to subscribe to notifications on Person and
    Note record updates."""

    def get(self):
        """Renders the subscription form with a captcha."""
        person = model.Person.get(self.subdomain, self.params.id)
        if not person:
            return self.error(400, 'No person with ID: %r' % self.params.id)
        form_action = self.get_url('/subscribe', id=self.params.id)
        back_url = self.get_url('/view', id=self.params.id)
        self.render('templates/subscribe_captcha.html',
                    person=person,
                    captcha_html=self.get_captcha_html(),
                    subscribe_email=self.params.subscribe_email or '',
                    form_action=form_action,
                    back_url=back_url,
                    first_name=person.first_name,
                    last_name=person.last_name)

    def post(self):
        """Validates the address and captcha, stores the subscription, and
        queues a confirmation e-mail."""
        person = model.Person.get(self.subdomain, self.params.id)
        if not person:
            return self.error(400, 'No person with ID: %r' % self.params.id)
        if not is_email_valid(self.params.subscribe_email):
            # Invalid e-mail address: redisplay the form with a message.
            captcha_html = self.get_captcha_html()
            form_action = self.get_url('/subscribe', id=self.params.id)
            return self.render('templates/subscribe_captcha.html',
                               person=person,
                               subscribe_email=self.params.subscribe_email,
                               message=_(
                                   'Invalid e-mail address. Please try again.'),
                               captcha_html=captcha_html,
                               form_action=form_action)
        existing = model.Subscription.get(self.subdomain, self.params.id,
                                          self.params.subscribe_email)
        if existing and existing.language == self.env.lang:
            # User is already subscribed
            url = self.get_url('/view', id=self.params.id)
            link_text = _('Return to the record for %(given_name)s '
                          '%(family_name)s.') % {
                              'given_name': escape(person.first_name),
                              'family_name': escape(person.last_name)}
            html = '<a href="%s">%s</a>' % (url, link_text)
            # BUG FIX: translate only the literal message.  The previous
            # code called _('You are already subscribed. ' + html), passing
            # a dynamically built string to gettext, which can never match
            # a translation catalog entry and so was untranslatable.
            message_html = _('You are already subscribed.') + ' ' + html
            return self.info(200, message_html=message_html)
        # Check the captcha
        captcha_response = self.get_captcha_response()
        if not captcha_response.is_valid and not self.is_test_mode():
            # Captcha is incorrect: redisplay with a fresh challenge.
            captcha_html = self.get_captcha_html(captcha_response.error_code)
            form_action = self.get_url('/subscribe', id=self.params.id)
            return self.render('templates/subscribe_captcha.html',
                               person=person,
                               subscribe_email=self.params.subscribe_email,
                               captcha_html=captcha_html,
                               form_action=form_action)
        if existing:
            # Same address, different language: update the stored language.
            subscription = existing
            subscription.language = self.env.lang
        else:
            subscription = model.Subscription.create(
                self.subdomain, self.params.id, self.params.subscribe_email,
                self.env.lang)
        db.put(subscription)
        send_subscription_confirmation(self, person,
                                       self.params.subscribe_email)
        url = self.get_url('/view', id=self.params.id)
        link_text = _('Return to the record for %(given_name)s '
                      '%(family_name)s.') % {
                          'given_name': escape(person.first_name),
                          'family_name': escape(person.last_name)}
        html = ' <a href="%s">%s</a>' % (url, link_text)
        message_html = _('You are successfully subscribed.') + html
        return self.info(200, message_html=message_html)
if __name__ == '__main__':
run(('/subscribe', Subscribe))
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
try:
from simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
    """Returns the 1-based (line, column) of character index pos in doc."""
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        # Still on the first line: the column is the raw index.
        return lineno, pos
    return lineno, pos - doc.rindex('\n', 0, pos)
def errmsg(msg, doc, pos, end=None):
    """Formats a decoder error message with line/column position info."""
    # NOTE: this function is also called from the _speedups extension.
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return ('%s: line %d column %d - line %d column %d (char %d - %d)'
            % (msg, lineno, colno, endlineno, endcolno, pos, end))
# Literal tokens accepted beyond strict JSON: NaN and +/-Infinity.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

# Matches a run of ordinary characters followed by the character that
# terminates the chunk: a closing quote, a backslash, or a control char.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Lookup table for single-character backslash escapes.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}

# Encoding assumed for byte-string input when none is specified.
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters.
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # The terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows.
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                raise ValueError(errmsg(msg, s, end))
            else:
                # Non-strict mode: keep the raw control character.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise ValueError(
                errmsg("Unterminated string starting at", s, begin))
        # If not a unicode escape sequence, must be in the lookup table.
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise ValueError(errmsg(msg, s, end))
            end += 1
        else:
            # Unicode escape sequence: \uXXXX.
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise ValueError(errmsg(msg, s, end))
            uni = int(esc, 16)
            # Check for a surrogate pair on UCS-4 systems.
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise ValueError(errmsg(msg, s, end))
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise ValueError(errmsg(msg, s, end))
                uni2 = int(esc2, 16)
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character.
        _append(char)
    return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parses a JSON object from s starting just after the opening '{'.

    Returns (pairs, end): the parsed dict (possibly transformed by
    object_hook) and the index just past the closing '}'."""
    pairs = {}
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            return pairs, end + 1
        elif nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end))
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise ValueError(errmsg("Expecting : delimiter", s, end))
        end += 1
        try:
            # Fast path: skip one or two whitespace characters inline
            # before falling back to the whitespace regex.
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        pairs[key] = value
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise ValueError(errmsg("Expecting property name", s, end - 1))
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from ``s_and_end = (string, index)``, where
    ``index`` points just past the opening ``'['``.

    Returns ``(values, end)``: the decoded ``list`` and the index one past
    the closing ``']'``.  Raises ValueError on malformed input.
    """
    # FIX: the original signature used Python-2-only tuple parameter
    # unpacking (``def JSONArray((s, end), ...)``), removed by PEP 3113.
    # Unpacking in the body keeps call sites unchanged and is portable.
    s, end = s_and_end
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    # Bind the method once; this loop is a decoder hot path.
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise ValueError(errmsg("Expecting object", s, end))
        _append(value)
        # Next delimiter: ',' continues the array, ']' ends it.
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise ValueError(errmsg("Expecting , delimiter", s, end))
        # Skip whitespace after ',' (fast path: at most one space).
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder.

    Default decoding translations:

        JSON object    -> dict
        JSON array     -> list
        JSON string    -> unicode
        JSON number    -> int/long (integers) or float (reals)
        true / false   -> True / False
        null           -> None

    The non-standard constants ``NaN``, ``Infinity`` and ``-Infinity``
    are also accepted and decode to the corresponding ``float`` values.
    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True):
        """Create a decoder.

        ``encoding`` -- encoding used to interpret ``str`` input (utf-8 by
        default); ignored for ``unicode`` input.  Only ASCII-superset
        encodings work; decode other encodings to ``unicode`` first.

        ``object_hook`` -- called with every decoded JSON object (a dict);
        its return value replaces the dict.  Useful for custom
        deserialization such as JSON-RPC class hinting.

        ``parse_float`` / ``parse_int`` -- called with the string form of
        every JSON real / integer; default to ``float`` / ``int``.  Allow
        alternate numeric types (e.g. ``decimal.Decimal``).

        ``parse_constant`` -- called with '-Infinity', 'Infinity' or 'NaN';
        can be used to reject these non-standard values.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        # Fall back to the builtin converters when no override is given
        # (callables are always truthy, so an explicit None test suffices).
        if parse_float is None:
            parse_float = float
        self.parse_float = parse_float
        if parse_int is None:
            parse_int = int
        self.parse_int = parse_int
        if parse_constant is None:
            parse_constant = _CONSTANTS.__getitem__
        self.parse_constant = parse_constant
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Must come last: make_scanner reads the attributes set above.
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or
        ``unicode`` instance containing a single JSON document).

        Raises ValueError if anything other than whitespace follows the
        document.
        """
        start = _w(s, 0).end()
        result, end = self.raw_decode(s, idx=start)
        end = _w(s, end).end()
        if end != len(s):
            raise ValueError(errmsg("Extra data", s, end, len(s)))
        return result

    def raw_decode(self, s, idx=0):
        """Decode the JSON document beginning at ``s[idx]`` and return a
        2-tuple of the Python representation and the index where the
        document ended.  Trailing data after the document is permitted,
        which allows pulling a document out of a longer string.
        """
        try:
            result, end = self.scan_once(s, idx)
        except StopIteration:
            raise ValueError("No JSON object could be decoded")
        return result, end
| Python |
"""JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
    """Build and return a ``_scan_once(string, idx)`` dispatcher for
    ``context`` (a JSONDecoder-like object exposing parse callbacks).

    ``_scan_once`` inspects the character at ``idx`` and delegates to the
    appropriate parser, returning ``(value, end_index)``.  It raises
    StopIteration when no JSON value starts at ``idx``; callers translate
    that into a ValueError.
    """
    # Hoist attribute lookups into locals: _scan_once is the hot path of
    # the whole decoder.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    def _scan_once(string, idx):
        try:
            nextchar = string[idx]
        except IndexError:
            # Past the end of input: no value here.
            raise StopIteration
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        # Not a literal keyword: try a number, then the non-standard
        # constants NaN / Infinity / -Infinity.
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration
    return _scan_once
# Prefer the C scanner from _speedups when it was importable.
make_scanner = c_make_scanner or py_make_scanner
| Python |
"""Implementation of JSONEncoder
"""
import re
try:
from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from simplejson._speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def encode_basestring(s):
    """Return *s* wrapped in double quotes with JSON escapes applied.

    Only the characters matched by ESCAPE are rewritten; everything else
    passes through unchanged (output may be non-ASCII).
    """
    return '"' + ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s) + '"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string
    """
    # A plain str containing high bytes is assumed to be UTF-8 encoded
    # text (Python 2 semantics) and is decoded before escaping.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            # Not one of the named escapes: emit \uXXXX.
            n = ord(s)
            if n < 0x10000:
                #return '\\u{0:04x}'.format(n)
                return '\\u%04x' % (n,)
            else:
                # Non-BMP character: encode as a UTF-16 surrogate pair
                # (\uXXXX\uXXXX), as required by JSON string syntax.
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C implementation when the speedups extension is available.
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.
    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level defaults; instance values may be replaced via the
    # ``separators`` constructor argument.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """Constructor for JSONEncoder, with sensible defaults.
        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.
        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.
        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.
        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.
        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.
        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.
        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.
        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.
        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            # Shadows the ``default`` method on this instance only.
            self.default = default
        self.encoding = encoding
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).
        For example, to support arbitrary iterators, you could
        implement default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError(repr(o) + " is not JSON serializable")
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        # Fast path: a bare string needs no recursive machinery at all.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return ''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        # ``markers`` maps id(container) -> container for circular-reference
        # detection; None disables the check entirely.
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the encoder so plain str input is decoded with the
            # configured encoding before escaping.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)
        def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
            # Check for specials.  Note that this type of test is processor- and/or
            # platform-specific, so do tests which don't depend on the internals.
            if o != o:
                # NaN is the only float that is not equal to itself.
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text
        # The C encoder handles only the no-indent, unsorted case.
        if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        ## (Python 2 only: True/False are assignable names here)
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Build the recursive pure-Python encoder closure.

    Returns ``_iterencode(o, indent_level)``, a generator of output string
    chunks.  ``markers`` (or None) is the circular-reference table shared
    across the nested generators below.
    """
    def _iterencode_list(lst, _current_indent_level):
        # Yield the JSON array encoding of ``lst`` chunk by chunk.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first element the pending prefix becomes the
                # separator; it is emitted together with the next value.
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                # Container or custom object: flush the prefix, then
                # delegate to the appropriate sub-generator.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(dct, _current_indent_level):
        # Yield the JSON object encoding of ``dct`` chunk by chunk.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            # Python 2 idiom: materialize and sort in place.
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them.  Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        # Top-level dispatch over all supported value types.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            # Unknown type: ask _default for a serializable substitute,
            # guarding against _default returning a cycle.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.9'
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONEncoder',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decoder import JSONDecoder
from encoder import JSONEncoder
# Module-level encoder shared by dump()/dumps() when they are called with
# all-default arguments; avoids constructing a JSONEncoder on every call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    ``skipkeys`` -- skip (rather than TypeError on) dict keys that are not
    of a basic type (str, unicode, int, long, float, bool, None).
    ``ensure_ascii`` -- when false, some chunks written to ``fp`` may be
    ``unicode`` instances; ``fp.write()`` must cope with that (e.g. via
    ``codecs.getwriter()``).
    ``check_circular`` -- when false, the circular-reference check is
    skipped and a cycle causes ``OverflowError`` (or worse).
    ``allow_nan`` -- when false, serializing out-of-range floats (nan,
    inf, -inf) raises ValueError instead of emitting the JavaScript
    equivalents (NaN, Infinity, -Infinity).
    ``indent`` -- non-negative integer for pretty-printing; 0 inserts only
    newlines; None is the most compact representation.
    ``separators`` -- optional ``(item_separator, dict_separator)`` tuple;
    default ``(', ', ': ')``; use ``(',', ':')`` for the most compact form.
    ``encoding`` -- character encoding for str instances (default UTF-8).
    ``default(obj)`` -- called for otherwise unserializable objects; must
    return a serializable version or raise TypeError.
    ``cls`` -- a custom JSONEncoder subclass to use instead of the default.
    """
    # When every option is at its default, reuse the shared module-level
    # encoder instead of building a new one.
    all_defaults = (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw)
    if all_defaults:
        chunks = _default_encoder.iterencode(obj)
    else:
        encoder_cls = cls or JSONEncoder
        chunks = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in chunks:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    ``skipkeys`` -- skip (rather than TypeError on) dict keys that are not
    of a basic type (str, unicode, int, long, float, bool, None).
    ``ensure_ascii`` -- when false, the result is a ``unicode`` instance
    instead of an ASCII-escaped ``str``.
    ``check_circular`` -- when false, the circular-reference check is
    skipped and a cycle causes ``OverflowError`` (or worse).
    ``allow_nan`` -- when false, serializing out-of-range floats (nan,
    inf, -inf) raises ValueError instead of emitting the JavaScript
    equivalents (NaN, Infinity, -Infinity).
    ``indent`` -- non-negative integer for pretty-printing; 0 inserts only
    newlines; None is the most compact representation.
    ``separators`` -- optional ``(item_separator, dict_separator)`` tuple;
    default ``(', ', ': ')``; use ``(',', ':')`` for the most compact form.
    ``encoding`` -- character encoding for str instances (default UTF-8).
    ``default(obj)`` -- called for otherwise unserializable objects; must
    return a serializable version or raise TypeError.
    ``cls`` -- a custom JSONEncoder subclass to use instead of the default.
    """
    # When every option is at its default, the shared module-level encoder
    # can answer directly.
    all_defaults = (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not kw)
    if all_defaults:
        return _default_encoder.encode(obj)
    encoder_cls = cls or JSONEncoder
    return encoder_cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        **kw).encode(obj)
# Shared decoder reused by loads() calls that pass only default options.
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing a JSON document) to a Python object.

    ``encoding`` names the ASCII-based encoding of the stream when it is
    not utf-8 (e.g. latin-1).  Non-ASCII-based encodings (such as UCS-2)
    are not supported: wrap the stream with
    ``codecs.getreader(fp)(encoding)`` or decode to ``unicode`` and call
    ``loads()`` directly.

    This reads the whole stream and delegates to ``loads()``; see
    ``loads()`` for the meaning of ``object_hook``, the ``parse_*``
    callbacks, and the ``cls`` keyword.
    """
    document = fp.read()
    return loads(document,
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a
    JSON document) to a Python object.

    ``encoding`` names the ASCII-based encoding of a ``str`` input when it
    is not utf-8 (e.g. latin-1).  Non-ASCII-based encodings (such as
    UCS-2) are not supported; decode to ``unicode`` first.
    ``object_hook`` -- called with every decoded JSON object (a dict); its
    return value replaces the dict.  Useful for custom decoders such as
    JSON-RPC class hinting.
    ``parse_float`` / ``parse_int`` -- called with the string form of
    every JSON real / integer; default to ``float`` / ``int``.  Allow
    alternate numeric types (e.g. ``decimal.Decimal``).
    ``parse_constant`` -- called with one of '-Infinity', 'Infinity',
    'NaN', 'null', 'true', 'false'; can be used to reject invalid JSON
    numbers.
    ``cls`` -- a custom JSONDecoder subclass to use instead of the
    default.
    """
    # All-default calls short-circuit through the shared decoder.
    all_defaults = (cls is None and encoding is None and object_hook is None
        and parse_int is None and parse_float is None
        and parse_constant is None and not kw)
    if all_defaults:
        return _default_decoder.decode(s)
    decoder_cls = cls or JSONDecoder
    # Forward only the hooks that were actually supplied so the decoder's
    # own defaults remain in effect for the rest.
    supplied = (('object_hook', object_hook),
                ('parse_float', parse_float),
                ('parse_int', parse_int),
                ('parse_constant', parse_constant))
    for name, hook in supplied:
        if hook is not None:
            kw[name] = hook
    return decoder_cls(encoding=encoding, **kw).decode(s)
| Python |
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import simplejson
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit(sys.argv[0] + " [infile [outfile]]")
try:
obj = simplejson.load(infile)
except ValueError, e:
raise SystemExit(e)
simplejson.dump(obj, outfile, sort_keys=True, indent=4)
outfile.write('\n')
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from google.appengine.api import datastore_errors
from model import *
from utils import *
import prefix
import reveal
import subscribe
from django.utils.translation import ugettext as _
class View(Handler):
def get(self):
# Check the request parameters.
if not self.params.id:
return self.error(404, 'No person id was specified.')
try:
person = Person.get(self.subdomain, self.params.id)
except ValueError:
return self.error(404, 'There is no record for the specified id.')
if not person:
return self.error(404, 'There is no record for the specified id.')
standalone = self.request.get('standalone')
# Check if private info should be revealed.
content_id = 'view:' + self.params.id
reveal_url = reveal.make_reveal_url(self, content_id)
show_private_info = reveal.verify(content_id, self.params.signature)
# Get the notes and duplicate links.
try:
notes = person.get_notes()
except datastore_errors.NeedIndexError:
notes = []
person.sex_text = get_person_sex_text(person)
for note in notes:
note.status_text = get_note_status_text(note)
note.linked_person_url = \
self.get_url('/view', id=note.linked_person_record_id)
note.flag_spam_url = \
self.get_url('/flag_note', id=note.note_record_id,
hide=(not note.hidden) and 'yes' or 'no',
signature=self.params.signature)
try:
linked_persons = person.get_linked_persons()
except datastore_errors.NeedIndexError:
linked_persons = []
linked_person_info = [
dict(id=p.record_id,
name="%s %s" % (p.first_name, p.last_name),
view_url=self.get_url('/view', id=p.record_id))
for p in linked_persons]
# Render the page.
dupe_notes_url = self.get_url(
'/view', id=self.params.id, dupe_notes='yes')
results_url = self.get_url(
'/results',
role=self.params.role,
query=self.params.query,
first_name=self.params.first_name,
last_name=self.params.last_name)
feed_url = self.get_url(
'/feeds/note',
person_record_id=self.params.id,
subdomain=self.subdomain)
subscribe_url = self.get_url('/subscribe', id=self.params.id)
self.render('templates/view.html',
person=person,
notes=notes,
linked_person_info=linked_person_info,
standalone=standalone,
onload_function='view_page_loaded()',
show_private_info=show_private_info,
admin=users.is_current_user_admin(),
dupe_notes_url=dupe_notes_url,
results_url=results_url,
reveal_url=reveal_url,
feed_url=feed_url,
subscribe_url=subscribe_url)
def post(self):
    """Creates a Note about the person from the posted form data.

    Validates required fields, creates the Note, updates the linked
    Person (if it still exists) and notifies subscribers, then redirects
    back to the view page (or on to the subscribe page).
    """
    if not self.params.text:
        return self.error(
            200, _('Message is required. Please go back and try again.'))

    if not self.params.author_name:
        return self.error(
            200, _('Your name is required in the "About you" section. '
                   'Please go back and try again.'))

    if self.params.status == 'is_note_author' and not self.params.found:
        return self.error(
            200, _('Please check that you have been in contact with '
                   'the person after the earthquake, or change the '
                   '"Status of this person" field.'))

    note = Note.create_original(
        self.subdomain,
        entry_date=get_utcnow(),
        person_record_id=self.params.id,
        author_name=self.params.author_name,
        author_email=self.params.author_email,
        author_phone=self.params.author_phone,
        source_date=get_utcnow(),
        found=bool(self.params.found),
        status=self.params.status,
        email_of_found_person=self.params.email_of_found_person,
        phone_of_found_person=self.params.phone_of_found_person,
        last_known_location=self.params.last_known_location,
        text=self.params.text)
    entities_to_put = [note]

    # Update the Person based on the Note.  The person can be None if the
    # record was deleted between the page load and this POST.
    person = Person.get(self.subdomain, self.params.id)
    if person:
        person.update_from_note(note)
        # Send notification to all people
        # who subscribed to updates on this person
        subscribe.send_notifications(person, note, self)
        entities_to_put.append(person)

    # Write one or both entities to the store.
    db.put(entities_to_put)

    # If user wants to subscribe to updates, redirect to the subscribe page.
    # Use self.params.id, not person.record_id: person may be None here,
    # and the original code would have crashed with an AttributeError.
    if self.params.subscribe:
        return self.redirect('/subscribe', id=self.params.id,
                             subscribe_email=self.params.author_email)

    # Redirect to this page so the browser's back button works properly.
    self.redirect('/view', id=self.params.id, query=self.params.query)
# App Engine CGI entry point: route /view requests to the View handler.
if __name__ == '__main__':
    run(('/view', View))
| Python |
#!/usr/bin/python2.4
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for a Turing test page, and utility functions for other pages,
to guard the display of sensitive information."""
__author__ = 'kpy@google.com (Ka-Ping Yee)'
import cgi
import hashlib
import os
import pickle
import random
import sha
import time

from google.appengine.api import users
from recaptcha.client import captcha

from model import Secret
from utils import *
# ==== Key management ======================================================
def generate_random_key():
    """Generates a random 20-byte key.

    Uses os.urandom rather than the random module: this key signs
    'reveal' authorizations for private data, so it must come from a
    cryptographically secure source, and random.randrange (a Mersenne
    Twister PRNG) is predictable to an attacker.
    """
    return os.urandom(20)
def get_reveal_key():
    """Gets the secret key for authorizing reveal operations, creating and
    persisting a fresh random key on first use."""
    secret = Secret.get_by_key_name('reveal')
    if secret is None:
        # No key stored yet: generate one and save it for future requests.
        secret = Secret(key_name='reveal', secret=generate_random_key())
        secret.put()
    return secret.secret
# ==== Signature generation and verification ===============================
def sha1_hash(string):
    """Computes the SHA-1 hash of the given string.

    Uses hashlib (Python 2.5+) instead of the deprecated 'sha' module,
    which was removed in Python 3.
    """
    return hashlib.sha1(string).digest()
def xor(string, byte):
    """Exclusive-ors each character in a string with the given byte."""
    return ''.join(chr(ord(ch) ^ byte) for ch in string)
def hmac(key, data, hash=sha1_hash):
    """Produces an HMAC for the given data.

    Standard HMAC construction: hash(key^opad + hash(key^ipad + message)),
    where the message is the pickled data.
    """
    inner = hash(xor(key, 0x36) + pickle.dumps(data))
    return hash(xor(key, 0x5c) + inner)
def sign(data, lifetime=600):
    """Produces a limited-time signature for the given data.

    The result is '<hex mac>.<expiry timestamp>'; the expiry is included
    in the MAC so it cannot be tampered with.
    """
    expiry = int(time.time() + lifetime)
    mac = hmac(get_reveal_key(), (expiry, data))
    return '%s.%d' % (mac.encode('hex'), expiry)
def verify(data, signature):
    """Checks that a signature matches the given data and hasn't yet expired."""
    try:
        mac_hex, expiry_text = signature.split('.', 1)
        mac = mac_hex.decode('hex')
        expiry = int(expiry_text)
    except (TypeError, ValueError):
        # Malformed signature: bad hex, bad integer, or wrong structure.
        return False
    if time.time() >= expiry:
        return False
    return hmac(get_reveal_key(), (expiry, data)) == mac
def make_reveal_url(handler, content_id):
    """Produces a link to this reveal handler that, on success, redirects back
    to the given 'target' URL with a signature for the given 'content_id'."""
    return handler.get_url('/reveal',
                           target=handler.request.url,
                           content_id=content_id)
# ==== The reveal page, which authorizes revelation ========================
# To use this facility, handlers that want to optionally show sensitive
# information should do the following:
#
# 1. Construct a 'content_id' string that canonically identifies what is
# being requested (e.g. the name of the page and any parameters that
# control what is shown on the page).
#
# 2. Call reveal.verify(content_id, self.params.signature) to find out
# whether the sensitive information should be shown.
#
# 3. If reveal.verify() returns False, then replace the sensitive information
# with a link to make_reveal_url(self, content_id).
class Reveal(Handler):
    """Shows a captcha / sign-in page; on success, signs the requested
    content_id and redirects back to the target page."""

    def get(self):
        # For now, signing in is sufficient to reveal information.
        # We could put a Turing test here instead.
        self.render('templates/reveal.html',
                    user=users.get_current_user(),
                    captcha_html=self.get_captcha_html())

    def post(self):
        captcha_response = self.get_captcha_response()
        if not (captcha_response.is_valid or self.is_test_mode()):
            # Captcha failed: show the challenge page again.
            self.render(
                'templates/reveal.html', user=users.get_current_user(),
                captcha_html=self.get_captcha_html(),
                content_id=self.params.content_id)
            return
        # Authorized: sign the content id and send the user back to the
        # target page with the signature attached.
        self.redirect(
            set_url_param(self.params.target, 'signature',
                          sign(self.params.content_id)))
# App Engine CGI entry point: route /reveal requests to the Reveal handler.
if __name__ == '__main__':
    run(('/reveal', Reveal))
| Python |
#!/usr/bin/python2.5
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from utils import Handler, get_utcnow, set_utcnow_for_test, run
from datetime import datetime
class SetUtcnow(Handler):
    """Sets or clears the utcnow_for_test value, FOR TESTING ONLY.

    To unset utcnow_for_test:
    http://localhost:8080/admin/set_utcnow_for_test?test_mode=yes
    To set utcnow_for_test:
    http://localhost:8080/admin/set_utcnow_for_test?utcnow=1295662977&test_mode=yes

    The timestamp should be in time.time() format. One way to get this value
    would be to create a datetime object and call time.mktime(dt.utctimetuple()).
    Time objects lack timezone info, so make sure the input value is UTC.
    """
    subdomain_required = False  # Run at the root domain, not a subdomain.

    def get(self):
        # Hide this endpoint entirely outside of test mode.
        if not self.is_test_mode():
            return self.error(404, 'page not found')
        utcnow_before_change = get_utcnow()
        utcnow = self.params.utcnow
        try:
            # Only the parse/set call can raise on a bad timestamp; keep the
            # try body minimal so render errors aren't misreported as 400s.
            logging.info('Setting utcnow to %r' % utcnow)
            set_utcnow_for_test(utcnow)
        except Exception as e:
            # Bad 'utcnow' parameter.  ('as' syntax replaces the
            # Python-2-only 'except Exception, e' form; valid from 2.6.)
            return self.error(400, 'bad timestamp %s, e=%s' % (utcnow, e))
        self.render('templates/set_utcnow.html', utcnow=get_utcnow(),
                    utcbefore=utcnow_before_change)
# App Engine CGI entry point: route the admin URL to the SetUtcnow handler.
if __name__ == '__main__':
    run(('/admin/set_utcnow_for_test', SetUtcnow))
| Python |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import sys
import time
from google.appengine.api import quota
from google.appengine.api import taskqueue
from google.appengine.ext import db
import delete
import model
import utils
# Stop a scan before it exceeds this much CPU, to stay under App Engine's
# per-request limit.
CPU_MEGACYCLES_PER_REQUEST = 1000
# How long an expired record is kept before its contents are wiped.
EXPIRED_TTL = datetime.timedelta(delete.EXPIRED_TTL_DAYS, 0, 0)
# Maximum number of entities fetched per datastore query batch.
FETCH_LIMIT = 100
class DeleteExpired(utils.Handler):
    """Scans the Person table looking for expired records to delete, updating
    the is_expired flag on all records whose expiry_date has passed. Records
    that expired more than EXPIRED_TTL in the past will also have their data
    fields, notes, and photos permanently deleted."""
    URL = '/tasks/delete_expired'
    subdomain_required = False

    def get(self):
        for person in model.Person.past_due_records():
            if quota.get_request_cpu_usage() > CPU_MEGACYCLES_PER_REQUEST:
                # Stop before running into the hard limit on CPU time per
                # request, to avoid aborting in the middle of an operation.
                # TODO(kpy): Figure out whether to queue another task here.
                # Is it safe for two tasks to run in parallel over the same
                # set of records returned by the query?
                break
            person.put_expiry_flags()
            expiry_date = person.expiry_date
            if expiry_date and utils.get_utcnow() - expiry_date > EXPIRED_TTL:
                # Long expired: permanently wipe the record's contents.
                person.wipe_contents()
def run_count(make_query, update_counter, counter):
    """Scans the entities matching a query for a limited amount of CPU time."""
    while quota.get_request_cpu_usage() < CPU_MEGACYCLES_PER_REQUEST:
        # Fetch the next batch, resuming just after the last key processed.
        query = make_query()
        if counter.last_key:
            query = query.filter('__key__ >', db.Key(counter.last_key))
        batch = query.order('__key__').fetch(FETCH_LIMIT)
        if not batch:
            # Scan finished: clear the checkpoint.
            counter.last_key = ''
            break
        # Pass the entities to the counting function.
        for entity in batch:
            update_counter(counter, entity)
        # Remember where we left off.
        counter.last_key = str(batch[-1].key())
class CountBase(utils.Handler):
    """A base handler for counting tasks.  Making a request to this handler
    without a subdomain will start tasks for all subdomains in parallel.
    Each subclass of this class handles one scan through the datastore."""
    subdomain_required = False  # Run at the root domain, not a subdomain.

    SCAN_NAME = ''  # Each subclass should choose a unique scan_name.
    URL = ''  # Each subclass should set the URL path that it handles.

    def get(self):
        if not self.subdomain:
            # No subdomain given: fan out one counting task per subdomain.
            for subdomain in model.Subdomain.list():
                self.add_task(subdomain)
            return
        # Count a slice of this subdomain's entities, then reschedule
        # ourselves if the scan hasn't reached the end of the table yet.
        counter = model.Counter.get_unfinished_or_create(
            self.subdomain, self.SCAN_NAME)
        run_count(self.make_query, self.update_counter, counter)
        counter.put()
        if counter.last_key:
            self.add_task(self.subdomain)

    def add_task(self, subdomain):
        """Queues up a task for an individual subdomain."""
        timestamp = utils.get_utcnow().strftime('%Y%m%d-%H%M%S')
        taskqueue.add(name='%s-%s-%s' % (subdomain, self.SCAN_NAME, timestamp),
                      method='GET', url=self.URL,
                      params={'subdomain': subdomain})

    def make_query(self):
        """Subclasses should implement this. This will be called to get the
        datastore query; it should always return the same query."""

    def update_counter(self, counter, entity):
        """Subclasses should implement this. This will be called once for
        each entity that matches the query; it should call increment() on
        the counter object for whatever accumulators it wants to increment."""
class CountPerson(CountBase):
    """Counts Person records, bucketed by several attributes."""
    SCAN_NAME = 'person'
    URL = '/tasks/count/person'

    def make_query(self):
        return model.Person.all().filter('subdomain =', self.subdomain)

    def update_counter(self, counter, person):
        # latest_found is tri-state: None (unknown), True, or False.
        if person.latest_found is None:
            found = ''
        elif person.latest_found:
            found = 'TRUE'
        else:
            found = 'FALSE'
        counter.increment('all')
        for accumulator in [
                'original_domain=' + (person.original_domain or ''),
                'source_name=' + (person.source_name or ''),
                'sex=' + (person.sex or ''),
                'home_country=' + (person.home_country or ''),
                'photo=' + (person.photo_url and 'present' or ''),
                'num_notes=%d' % len(person.get_notes()),
                'status=' + (person.latest_status or ''),
                'found=' + found]:
            counter.increment(accumulator)
class CountNote(CountBase):
    """Counts Note records, bucketed by status/found/location presence."""
    SCAN_NAME = 'note'
    URL = '/tasks/count/note'

    def make_query(self):
        return model.Note.all().filter('subdomain =', self.subdomain)

    def update_counter(self, counter, note):
        # note.found is tri-state: None (unknown), True, or False.
        if note.found is None:
            found = ''
        elif note.found:
            found = 'TRUE'
        else:
            found = 'FALSE'
        counter.increment('all')
        counter.increment('status=' + (note.status or ''))
        counter.increment('found=' + found)
        counter.increment(
            'location=' + (note.last_known_location and 'present' or ''))
# App Engine CGI entry point: route each task URL to its handler.
if __name__ == '__main__':
    utils.run((CountPerson.URL, CountPerson),
              (CountNote.URL, CountNote),
              (DeleteExpired.URL, DeleteExpired))
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.