code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/env python
# vim: set filetype=python expandtab tabstop=2 shiftwidth=2 autoindent smartindent:
# -*- coding: utf-8 -*-
#
# test json rpc call
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from django.utils import simplejson
class RPCHandler(webapp.RequestHandler):
  """Dispatches JSON-RPC style calls onto an RPCMethods instance.

  GET passes the query parameters (minus 'action') as a dict argument;
  POST expects a JSON object body whose 'action' key names the method.
  """

  def __init__(self):
    webapp.RequestHandler.__init__(self)
    self.methods = RPCMethods()

  def get(self):
    func = None
    action = self.request.get('action')
    if action:
      # Leading underscore marks a private method: never expose it.
      if action[0] == '_':
        self.error(403)  # access denied
        return
      func = getattr(self.methods, action, None)
    if not func:
      self.error(404)  # file not found
      return
    # Everything except 'action' becomes an argument for the method.
    call_args = {}
    for key in self.request.arguments():
      if key != 'action':
        call_args[key] = self.request.get(key)
    outcome = func(call_args)
    self.response.out.write(simplejson.dumps(outcome))

  def post(self):
    call_args = simplejson.loads(self.request.body)
    method_name = call_args['action']
    del call_args['action']
    # Same privacy rule as GET: refuse "_"-prefixed names.
    if method_name[0] == '_':
      self.error(403)  # access denied
      return
    func = getattr(self.methods, method_name, None)
    if not func:
      self.error(404)  # file not found
      return
    outcome = func(call_args)
    self.response.out.write(simplejson.dumps(outcome))
class RPCMethods:
  """Defines the methods that can be RPCed.

  NOTE: Do not allow remote callers access to private/protected "_*" methods.
  """

  def Add(self, *args):
    # The JSON encoding may have delivered integers as strings, so every
    # value is coerced to int before accumulating.
    params = args[0]
    total = 0
    for value in params.values():
      total += int(value)
    return '{"sum": "%d"}' % total

  def Sub(self, *args):
    # Placeholder: currently only unpacks its argument.
    params = args[0]
def main():
  """WSGI entry point: route /rpc to the JSON-RPC handler."""
  routes = [('/rpc', RPCHandler)]
  run_wsgi_app(webapp.WSGIApplication(routes, debug=True))


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
# vim: set filetype=python expandtab tabstop=2 shiftwidth=2 autoindent smartindent:
# -*- coding: utf-8 -*-
#
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
# test fetch url
class Fetch(webapp.RequestHandler):
  """Renders a small form asking the user for a URL to fetch."""

  def get(self):
    # Static form only; submission is handled by the /fetchme POST handler.
    self.response.out.write("""
<html>
<head>Fetch A Url</head>
<body>
<form action="/fetchme" enctype="multipart/form-data" method="post">
<div><label>Plese input a valid url(begin with http):</label></div>
<div><input type="text" name="url"/></div>
<div><input type="submit" value="Fetch me!"></div>
</body>
</html>""")
class Fetchme(webapp.RequestHandler):
  """Fetches the submitted URL and echoes the body (headers on failure)."""

  def post(self):
    target = self.request.get("url")
    result = urlfetch.fetch(target)
    if result.status_code != 200:
      # Show the response headers so the user can see what went wrong.
      self.response.out.write(str(result.headers))
    else:
      self.response.out.write(result.content)
| Python |
#!/usr/bin/env python
# vim: set filetype=python expandtab tabstop=2 shiftwidth=2 autoindent smartindent:
# -*- coding: utf-8 -*-
#
# test json rpc call
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from django.utils import simplejson
class RPCHandler(webapp.RequestHandler):
  """Dispatches JSON-RPC style calls onto an RPCMethods instance.

  GET passes the query parameters (minus 'action') as a dict argument;
  POST expects a JSON object body whose 'action' key names the method.
  """

  def __init__(self):
    webapp.RequestHandler.__init__(self)
    self.methods = RPCMethods()

  def get(self):
    func = None
    action = self.request.get('action')
    if action:
      # Leading underscore marks a private method: never expose it.
      if action[0] == '_':
        self.error(403)  # access denied
        return
      func = getattr(self.methods, action, None)
    if not func:
      self.error(404)  # file not found
      return
    # Everything except 'action' becomes an argument for the method.
    call_args = {}
    for key in self.request.arguments():
      if key != 'action':
        call_args[key] = self.request.get(key)
    outcome = func(call_args)
    self.response.out.write(simplejson.dumps(outcome))

  def post(self):
    call_args = simplejson.loads(self.request.body)
    method_name = call_args['action']
    del call_args['action']
    # Same privacy rule as GET: refuse "_"-prefixed names.
    if method_name[0] == '_':
      self.error(403)  # access denied
      return
    func = getattr(self.methods, method_name, None)
    if not func:
      self.error(404)  # file not found
      return
    outcome = func(call_args)
    self.response.out.write(simplejson.dumps(outcome))
class RPCMethods:
  """Defines the methods that can be RPCed.

  NOTE: Do not allow remote callers access to private/protected "_*" methods.
  """

  def Add(self, *args):
    # The JSON encoding may have delivered integers as strings, so every
    # value is coerced to int before accumulating.
    params = args[0]
    total = 0
    for value in params.values():
      total += int(value)
    return '{"sum": "%d"}' % total

  def Sub(self, *args):
    # Placeholder: currently only unpacks its argument.
    params = args[0]
def main():
  """WSGI entry point: route /rpc to the JSON-RPC handler."""
  routes = [('/rpc', RPCHandler)]
  run_wsgi_app(webapp.WSGIApplication(routes, debug=True))


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
# vim: set filetype=python expandtab tabstop=2 shiftwidth=2 autoindent smartindent:
# -*- coding: utf-8 -*-
#
from google.appengine.ext import webapp
from google.appengine.ext import db
import os
class Point(db.Expando):
  """A geographic point plus a free-form comment."""
  # Coordinates are stored as strings exactly as received from the client.
  lat = db.StringProperty(required=True)
  lng = db.StringProperty(required=True)
  comment = db.StringProperty(required=True)
# test print envs
class SavePoint(webapp.RequestHandler):
  """Persists a Point built from query parameters and echoes its key.

  NOTE(review): performing a datastore write on GET is unconventional;
  consider accepting POST instead.
  """

  def get(self):
    lat = self.request.get('lat')
    lng = self.request.get('lng')
    comment = self.request.get('comment')
    point = Point(lat=lat, lng=lng, comment=comment)
    key = point.put()
    # BUG FIX: Key.id_or_name() returns either a numeric id or a string
    # name; '%d' raised TypeError for named keys, so format with '%s'.
    self.response.out.write('Point (%s,%s [%s]) Saved with key (%s) Succ!' %
                            (lat, lng, comment, key.id_or_name()))
| Python |
#!/usr/bin/env python
# vim: set filetype=python expandtab tabstop=2 shiftwidth=2 autoindent smartindent:
# -*- coding: utf-8 -*-
#
import os
import wsgiref.handlers
#import cgi
import datetime
import logging
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.api import images
from google.appengine.ext import db
#from google.appengine.ext.db import djangoforms
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import run_wsgi_app
from django.utils import simplejson
from printenv import PrintEnvironment
from savePoint import SavePoint
from guestbook import *
from fetchurl import *
from jsonrpc import *
# Verbose logging for the whole app.
logging.getLogger().setLevel(logging.DEBUG)

# URL routing table: one handler class per path.
application = webapp.WSGIApplication([
    ('/', MainPage),
    ('/img', Image),
    ('/sign', Guestbook),
    ('/printenv', PrintEnvironment),
    ('/savepoint', SavePoint),
    ('/fetch', Fetch),
    ('/fetchme', Fetchme),
    ('/rpc', RPCHandler),
], debug=True)


def main():
  """Serve the routed application."""
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
# vim: set filetype=python expandtab tabstop=2 shiftwidth=2 autoindent smartindent:
# -*- coding: utf-8 -*-
#
import cgi
import datetime
import logging
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import images
#logging.getLogger().setLevel(logging.DEBUG)
class Greeting(db.Model):
  """One guestbook entry: optional author, message text, avatar blob."""
  author = db.UserProperty()
  content = db.StringProperty(multiline=True)
  avatar = db.BlobProperty()
  # Timestamp assigned automatically when the entity is first stored.
  date = db.DateTimeProperty(auto_now_add=True)
class MainPage(webapp.RequestHandler):
  """Shows the ten newest greetings followed by the signing form."""

  def get(self):
    self.response.out.write('<html><body>')
    query_str = "SELECT * FROM Greeting ORDER BY date DESC LIMIT 10"
    for entry in db.GqlQuery(query_str):
      if entry.author:
        self.response.out.write('<b>%s</b> wrote:' % entry.author.nickname())
      else:
        self.response.out.write('An anonymous person wrote:')
      # The avatar is served by the /img handler, keyed by the entity key.
      self.response.out.write("<div><img src='img?img_id=%s'></img>" %
                              entry.key())
      self.response.out.write(' %s</div>' % cgi.escape(entry.content))
    self.response.out.write("""
<form action="/sign" enctype="multipart/form-data" method="post">
<div><label>Message:</label></div>
<div><textarea name="content" rows="3" cols="60"></textarea></div>
<div><label>Avatar:</label></div>
<div><input type="file" name="img"/></div>
<div><input type="submit" value="Sign Guestbook"></div>
</form>
</body>
</html>""")
class Image(webapp.RequestHandler):
  """Serves a greeting's avatar blob as a PNG image."""

  def get(self):
    entry = db.get(self.request.get("img_id"))
    if not entry.avatar:
      self.response.out.write("No image")
    else:
      self.response.headers['Content-Type'] = "image/png"
      self.response.out.write(entry.avatar)
class Guestbook(webapp.RequestHandler):
  """Stores a new Greeting from the signing form and redirects home."""

  def post(self):
    greeting = Greeting()
    if users.get_current_user():
      greeting.author = users.get_current_user()
    greeting.content = self.request.get("content")
    img_data = self.request.get("img")
    # ROBUSTNESS FIX: only resize/attach an avatar when a file was actually
    # uploaded; resizing an empty upload raised and aborted the whole post.
    if img_data:
      avatar = images.resize(img_data, 32, 32)
      greeting.avatar = db.Blob(avatar)
    greeting.put()
    self.redirect('/')
| Python |
# Run from the commandline:
#
# python server.py
# POST audio to http://localhost:9000
# GET audio from http://localhost:9000
#
# A simple server to collect audio using python. To be more secure,
# you might want to check the file names and place size restrictions
# on the incoming data.
import cgi
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class WamiHandler(BaseHTTPRequestHandler):
    """Serves one wav file: GET streams it back, POST overwrites it."""

    # Directory that uploaded/served files live in.
    dirname = "/tmp/"

    def do_GET(self):
        f = open(self.get_name())
        try:
            self.send_response(200)
            self.send_header('content-type', 'audio/x-wav')
            self.end_headers()
            self.wfile.write(f.read())
        finally:
            # Always release the file handle, even if the write fails.
            f.close()

    def do_POST(self):
        # Python's HTTPServer doesn't support chunked transfer, so a
        # content-length header is required.
        length = int(self.headers.getheader('content-length'))
        print("POST of length " + str(length))
        f = open(self.get_name(), "wb")
        try:
            f.write(self.rfile.read(length))
        finally:
            f.close()
        # BUG FIX: the original never sent a response, leaving the client
        # waiting until the connection dropped; acknowledge the upload.
        self.send_response(200)
        self.end_headers()

    def get_name(self):
        """Return the target path; '?name=...' selects the file name."""
        filename = 'output.wav'
        qs = self.path.split('?', 1)
        if len(qs) == 2:
            params = cgi.parse_qs(qs[1])
            # BUG FIX: indexing params['name'] raised KeyError when a query
            # string was present without a 'name' parameter.
            if params.get('name'):
                filename = params['name'][0]
        return WamiHandler.dirname + filename
def main():
try:
server = HTTPServer(('', 9000), WamiHandler)
print 'Started server...'
server.serve_forever()
except KeyboardInterrupt:
print 'Stopping server'
server.socket.close()
if __name__ == '__main__':
main()
| Python |
import struct
import os
import os.path
import sys
import string
import time
import urllib
from HTMLParser import HTMLParser
import json
import shutil
import datetime
from datetime import timedelta
import ConfigParser
class WebFetcher:
    """Downloads a page and transcodes it from gbk to utf8."""

    def get_web_data(self, url):
        """Return the utf8-encoded body of *url* (source sites are gbk)."""
        page = urllib.urlopen(url)
        raw = page.read()
        return unicode(raw, 'gbk').encode('utf8')
class FundHTMLParser(HTMLParser):
    """Collects the text found inside the fund pages' table cells.

    Only data nested at exactly table/tr/td or table/tr/td/table/tr/td
    is kept; bare newlines are discarded.
    """

    # Tags that participate in the nesting path being tracked.
    selected = ('table', 'tr', 'td')

    def __init__(self):
        HTMLParser.__init__(self)

    def read(self, data):
        """Parse *data* and return the list of extracted text chunks."""
        self._lines = []
        self.reset()
        self.feed(data)
        return self._lines

    def reset(self):
        HTMLParser.reset(self)
        self._level_stack = []

    def handle_starttag(self, tag, attrs):
        if tag in FundHTMLParser.selected:
            self._level_stack.append(tag)

    def handle_endtag(self, tag):
        stack = self._level_stack
        if stack and tag in FundHTMLParser.selected and tag == stack[-1]:
            stack.pop()

    def handle_data(self, data):
        path = "/".join(self._level_stack)
        if path in ('table/tr/td/table/tr/td', 'table/tr/td') and data != '\n':
            self._lines.append(data)
class MySQLAdapter:
    """Builds SQL insert statements and reads records from MySQL."""

    def __init__(self, config):
        self.host = config.get('mysql', 'host')
        self.user = config.get('mysql', 'user')
        self.password = config.get('mysql', 'password')
        self.db_name = config.get('mysql', 'db_name')

    def _quoted_values(self, data):
        """Return the items of *data* stripped, quoted and comma-joined."""
        # NOTE(review): values are string-interpolated, not parameterized;
        # acceptable for trusted scraped data, unsafe for untrusted input.
        return ','.join("'" + item.strip() + "'" for item in data)

    def save_base_info(self, data, sql_file):
        """Append an insert statement for the FUD_BASE table to sql_file."""
        sql_file.write('insert into FUD_BASE values(' +
                       self._quoted_values(data) + ');\n')

    def save_history_value(self, data, sql_file):
        """Append an insert statement for FUD_HISVALUE to sql_file."""
        sql_file.write('insert into FUD_HISVALUE values (' +
                       self._quoted_values(data) + ');\n')

    def get_records(self, sql):
        """Run *sql* and return all rows, closing the connection afterwards."""
        # NOTE(review): MySQLdb is not imported by this module; it is
        # resolved at runtime from elsewhere — confirm.
        conn = MySQLdb.connect(host=self.host, user=self.user,
                               passwd=self.password, db=self.db_name)
        try:
            # BUG FIX: the original leaked the connection on every call.
            cursor = conn.cursor()
            cursor.execute(sql)
            return cursor.fetchall()
        finally:
            conn.close()

    def run(self, sql_file):
        print(sql_file)
class MyUtils:
    """Date helpers and a simple list partitioner."""

    def getYesterday(self):
        """Return yesterday's date formatted as 'YYYY-MM-DD'."""
        yesterday = datetime.datetime.now() + datetime.timedelta(days=-1)
        return yesterday.strftime('%Y-%m-%d')

    def getToday(self):
        """Return today's date formatted as 'YYYY-MM-DD'."""
        return datetime.datetime.now().strftime('%Y-%m-%d')

    def divide_array(self, items, group_count):
        """Split *items* into group_count groups of near-equal size.

        The first len(items) % group_count groups each receive one extra
        element, appended at the end of the group.
        """
        record_count = len(items)
        # BUG FIX (portability): '/' relied on Python 2 integer division;
        # '//' states the intent explicitly and is correct on Python 3 too.
        array_size = record_count // group_count
        result = [[items[m * array_size + n] for n in range(array_size)]
                  for m in range(group_count)]
        handled_total = group_count * array_size
        # Distribute any remainder, one element per leading group.
        for i in range(record_count - handled_total):
            result[i].append(items[handled_total + i])
        return result
| Python |
import urllib
import struct
import os
import os.path
import sys
import string
import time
from HTMLParser import HTMLParser
import json
import shutil
import datetime
from datetime import timedelta
from utils import *
import threading
import MySQLdb
import ConfigParser
class AppConfig:
    """Loads scraper settings from config.ini in the working directory."""

    def __init__(self):
        self.config = {}

    def load_config(self):
        """Read config.ini and return the RawConfigParser instance."""
        parser = ConfigParser.RawConfigParser()
        parser.read('config.ini')
        return parser
class DailyFetcher:
    """Fetches the list of fund codes that traded on a given date."""

    def __init__(self, config, execution_date, web_fetcher):
        self.config = config
        self.sql_path = config.get('path', 'sql_path')
        self.execution_date = execution_date
        self.web_fetcher = web_fetcher

    def run(self):
        """Return the fund codes reported for self.execution_date."""
        url = self.config.get('url', 'fund_url_daily')
        url = url + 'date=' + self.execution_date
        # Only fund type 1 is queried for now; the loop is kept so more
        # types can be added later.
        i = 1
        result = []
        while i <= 1:
            url = url + '&type=' + str(i)
            i = i + 1
            # CLEANUP: removed an unused MyUtils() local and a dead counter.
            payload = json.loads(self.web_fetcher.get_web_data(url))
            for item in payload['data']:
                result.append(item['fundCode'])
        return result
class BaseInfoFetcher:
    """Fetches per-fund base info pages and emits SQL inserts."""

    def __init__(self, config, sql_file, web_fetcher, adapter):
        self.config = config
        self.web_fetcher = web_fetcher
        self.adapter = adapter
        self.sql_file = sql_file

    def run(self, items):
        """Fetch, archive and persist base info for each fund code."""
        for fund_code in items:
            # BUG FIX: the original read the module-global 'config'; use the
            # configuration this instance was constructed with.
            url = self.config.get('url', 'fund_url_base') + 'fundcode=' + fund_code
            print(url)
            html = self.web_fetcher.get_web_data(url)
            path = self.config.get('path', 'init_path')
            # Keep a raw copy of the page for later inspection.
            raw = open(path + 'base_info_' + fund_code, 'a')
            try:
                raw.write(html)
            finally:
                raw.close()
            parser = FundHTMLParser()
            all_data = parser.read(html)
            self.adapter.save_base_info(self.get_base_info(all_data),
                                        self.sql_file)

    def get_base_info(self, data):
        """Return the odd-indexed entries among the first 16 items.

        The scraped table alternates label/value cells; values sit at the
        odd indexes.
        """
        return [data[i] for i in range(16) if i % 2 == 1]
class HistoryFetcher:
    """Fetches per-fund history values and emits SQL inserts."""

    def __init__(self, config, is_initial, sql_file, web_fetcher, adapter):
        self.config = config
        self.is_initial = is_initial
        self.web_fetcher = web_fetcher
        self.adapter = adapter
        self.sql_file = sql_file

    def run(self, items):
        """Fetch history pages for *items*, writing value rows in blocks of 8."""
        for fund_code in items:
            # BUG FIX: use self.config instead of the module-global 'config'.
            url = self.config.get('url', 'fund_url_history') + 'fundcode=' + fund_code
            print(url)
            html = self.web_fetcher.get_web_data(url)
            parser = FundHTMLParser()
            rows = self.get_base_info(parser.read(html))
            # Every 8 consecutive cells form one history record.
            # NOTE(review): a trailing partial block (<8 cells) is never
            # flushed — preserved from the original, confirm intended.
            i = 0
            sql_value = []
            for item in rows:
                if i % 8 == 0 and sql_value != []:
                    self.adapter.save_history_value(sql_value, self.sql_file)
                    sql_value = []
                sql_value.append(item)
                i = i + 1

    def get_base_info(self, data):
        """Select the history cells from the parsed page."""
        if self.is_initial:
            # Initial load: everything from index 7 onward.
            return [item for i, item in enumerate(data) if i >= 7]
        # Daily update: indexes 8..16 inclusive.
        # NOTE(review): the increment happens before the append, so index 7
        # is skipped and index 16 is read — looks like an off-by-one;
        # preserved as-is, confirm against the page layout.
        result = []
        m = 7
        while m <= 15:
            m = m + 1
            result.append(data[m])
        return result

    def get_not_exist_funds(self, fund_items):
        """Return the fund codes not yet present in the fud_base table."""
        db_records = self.adapter.get_records('select fund_code from fud_base')
        result = []
        for item in fund_items:
            # BUG FIX: the original used the undefined names 'false'/'true';
            # Python's booleans are False/True.
            exist = False
            for record in db_records:
                if item == record:
                    exist = True
                    break
            if not exist:
                result.append(item)
        return result
class FundInfo:
    """Top-level driver: splits the fund list across worker threads."""

    def __init__(self, config, is_initial):
        # BUG FIX: the parameter was misspelled 'is_inital' while the body
        # read 'is_initial', raising NameError on construction.
        self.config = config
        self.sql_path = config.get('path', 'sql_path')
        self.is_initial = is_initial
        self.thread_count = int(config.get('threads', 'thread_count'))

    def run(self):
        """Fetch yesterday's fund list and fan the work out over threads."""
        utils = MyUtils()
        execution_date = utils.getYesterday()
        web_fetcher = WebFetcher()
        daily = DailyFetcher(self.config, execution_date, web_fetcher)
        fund_items = daily.run()
        divided_items = utils.divide_array(fund_items, int(self.thread_count))
        thread_pool = []
        # BUG FIX: the original loop stopped at thread_count-1, silently
        # dropping the last group of funds.
        index = 0
        while index < self.thread_count:
            th = threading.Thread(
                target=self.work,
                args=(self.config, self.is_initial, divided_items[index],
                      index, execution_date))
            thread_pool.append(th)
            index = index + 1
        for t in thread_pool:
            t.start()

    def work(self, config, is_initial, fund_items, thread_index, execution_date):
        """Worker body: fetch one slice of funds and emit per-thread SQL files."""
        sql_base_info_file = (self.sql_path + 'sql_base_info_' +
                              execution_date + '_' + str(thread_index))
        sql_history_value_file = (self.sql_path + 'sql_history_value_' +
                                  execution_date + '_' + str(thread_index))
        f_sql_base_info = open(sql_base_info_file, 'a')
        f_sql_history_value = open(sql_history_value_file, 'a')
        web_fetcher = WebFetcher()
        adapter = MySQLAdapter(config)
        print('thread_index:' + str(thread_index))
        print(fund_items)
        if self.is_initial:
            baseInfoFetcher = BaseInfoFetcher(config, f_sql_base_info,
                                              web_fetcher, adapter)
            historyFetcher = HistoryFetcher(config, is_initial,
                                            f_sql_history_value,
                                            web_fetcher, adapter)
            baseInfoFetcher.run(fund_items)
            historyFetcher.run(fund_items)
        else:
            historyFetcher = HistoryFetcher(config, is_initial,
                                            f_sql_history_value,
                                            web_fetcher, adapter)
            not_exist_fund_items = historyFetcher.get_not_exist_funds(fund_items)
            # BUG FIX: the original referenced the undefined name
            # 'not_exist_funds'; use the list computed above.
            if not_exist_fund_items != []:
                baseInfoFetcher = BaseInfoFetcher(config, f_sql_base_info,
                                                  web_fetcher, adapter)
                baseInfoFetcher.run(not_exist_fund_items)
            historyFetcher.run(fund_items)
        f_sql_base_info.close()
        f_sql_history_value.close()
# main entry.
if __name__ == "__main__":
    # 'config' stays module-global: other classes may read it directly.
    config = AppConfig().load_config()
    # NOTE(review): ConfigParser.get returns a string, so any non-empty
    # value (even "0"/"false") is truthy downstream — confirm intended.
    is_initial = config.get('initial', 'is_initial')
    fetcher = FundInfo(config, is_initial)
    fetcher.run()
| Python |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
# Source directories to scan, taken from the command line.
SOURCE = sys.argv[1:]

# Matches /* ... */ comment blocks (non-greedy, across lines).
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# Heuristics deciding whether a comment block is a license notice.
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)

# File types that never carry license comments worth scanning.
EXCLUDE_TYPES = [
    "application/xml",
    "image/png",
]

# Known licenses keyed by whitespace-stripped, lowercased text so files
# sharing the same license collapse into a single entry.
KNOWN_LICENSES = {}
class License:
    """One unique license text and the files it applies to."""

    def __init__(self, license_text):
        self.license_text = license_text
        self.filenames = []

    def add_file(self, filename):
        """Record *filename* as covered by this license (deduplicated)."""
        if filename not in self.filenames:
            self.filenames.append(filename)
# Strips every non-word character; used to canonicalize license text
# into a dictionary key.
LICENSE_KEY = re.compile(r"[^\w]")


def find_license(license_text):
    """Return the License entry for *license_text*, creating it if new.

    TODO(alice): a lot of these licenses are almost identical Apache
    licenses differing only in origin/modifications; consider combining
    similar ones.
    """
    key = LICENSE_KEY.sub("", license_text).lower()
    if key not in KNOWN_LICENSES:
        KNOWN_LICENSES[key] = License(license_text)
    return KNOWN_LICENSES[key]
def discover_license(exact_path, filename):
    """Scan one file for license text and record it in KNOWN_LICENSES."""
    # A file named "...LICENSE" is assumed to apply to the file named by
    # the prefix (e.g. "foo.LICENSE" covers "foo").
    if filename.endswith("LICENSE"):
        with open(exact_path) as file:
            license_text = file.read()
        target_filename = filename[:-len("LICENSE")]
        if target_filename.endswith("."):
            target_filename = target_filename[:-1]
        find_license(license_text).add_file(target_filename)
        return None
    # BUG FIX: mimetypes.guess_type() returns a (type, encoding) tuple, so
    # the original "tuple in EXCLUDE_TYPES" test never matched; compare the
    # type component only.
    mimetype, _ = mimetypes.guess_type(filename)
    if mimetype in EXCLUDE_TYPES:
        return None
    with open(exact_path) as file:
        raw_file = file.read()
    # Keep only comment blocks mentioning both "license" and "copyright".
    for match in COMMENT_BLOCK.finditer(raw_file):
        comment = match.group(1)
        if COMMENT_LICENSE.search(comment) is None:
            continue
        if COMMENT_COPYRIGHT.search(comment) is None:
            continue
        find_license(comment).add_file(filename)
for source in SOURCE:
for root, dirs, files in os.walk(source):
for name in files:
discover_license(os.path.join(root, name), name)
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
print "<h3>Notices for files:</h3><ul>"
filenames = license.filenames
filenames.sort()
for filename in filenames:
print "<li>%s</li>" % (filename)
print "</ul>"
print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| Python |
import MySQLdb
import sys
import time
# Smoke-test the connection and a single insert.
try:
    conn = MySQLdb.connect(host='localhost', user='root',
                           passwd='gemj820812', db='shumi')
except MySQLdb.Error:
    # BUG FIX: a bare 'except' also swallowed SystemExit/KeyboardInterrupt;
    # catch only database errors.
    sys.exit()
cursor = conn.cursor()
now = time.strftime("%Y-%m-%d")
# NOTE(review): credentials are hard-coded and values are interpolated into
# the SQL string; acceptable only for a throwaway local test.
sql = "insert into novel_catch(title, classname, url, author, uptime, state) values ('%s','%s', '%s', '%s', '%s', %d)" % ('test', 'shuji', 'aaaaaaa', 'big', now, 1)
print(sql)
try:
    cursor.execute(sql)
    # BUG FIX: without an explicit commit the insert is rolled back on
    # disconnect for transactional tables.
    conn.commit()
    print('111')
except MySQLdb.Error:
    sys.exit()
print('222')
cursor.close()
conn.close()
| Python |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
# Source directories to scan, taken from the command line.
SOURCE = sys.argv[1:]

# Matches /* ... */ comment blocks (non-greedy, across lines).
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# Heuristics deciding whether a comment block is a license notice.
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)

# File types that never carry license comments worth scanning.
EXCLUDE_TYPES = [
    "application/xml",
    "image/png",
]

# Known licenses keyed by whitespace-stripped, lowercased text so files
# sharing the same license collapse into a single entry.
KNOWN_LICENSES = {}
class License:
    """One unique license text and the files it applies to."""

    def __init__(self, license_text):
        self.license_text = license_text
        self.filenames = []

    def add_file(self, filename):
        """Record *filename* as covered by this license (deduplicated)."""
        if filename not in self.filenames:
            self.filenames.append(filename)
# Strips every non-word character; used to canonicalize license text
# into a dictionary key.
LICENSE_KEY = re.compile(r"[^\w]")


def find_license(license_text):
    """Return the License entry for *license_text*, creating it if new.

    TODO(alice): a lot of these licenses are almost identical Apache
    licenses differing only in origin/modifications; consider combining
    similar ones.
    """
    key = LICENSE_KEY.sub("", license_text).lower()
    if key not in KNOWN_LICENSES:
        KNOWN_LICENSES[key] = License(license_text)
    return KNOWN_LICENSES[key]
def discover_license(exact_path, filename):
    """Scan one file for license text and record it in KNOWN_LICENSES."""
    # A file named "...LICENSE" is assumed to apply to the file named by
    # the prefix (e.g. "foo.LICENSE" covers "foo").
    if filename.endswith("LICENSE"):
        with open(exact_path) as file:
            license_text = file.read()
        target_filename = filename[:-len("LICENSE")]
        if target_filename.endswith("."):
            target_filename = target_filename[:-1]
        find_license(license_text).add_file(target_filename)
        return None
    # BUG FIX: mimetypes.guess_type() returns a (type, encoding) tuple, so
    # the original "tuple in EXCLUDE_TYPES" test never matched; compare the
    # type component only.
    mimetype, _ = mimetypes.guess_type(filename)
    if mimetype in EXCLUDE_TYPES:
        return None
    with open(exact_path) as file:
        raw_file = file.read()
    # Keep only comment blocks mentioning both "license" and "copyright".
    for match in COMMENT_BLOCK.finditer(raw_file):
        comment = match.group(1)
        if COMMENT_LICENSE.search(comment) is None:
            continue
        if COMMENT_COPYRIGHT.search(comment) is None:
            continue
        find_license(comment).add_file(filename)
for source in SOURCE:
for root, dirs, files in os.walk(source):
for name in files:
discover_license(os.path.join(root, name), name)
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
print "<h3>Notices for files:</h3><ul>"
filenames = license.filenames
filenames.sort()
for filename in filenames:
print "<li>%s</li>" % (filename)
print "</ul>"
print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| Python |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handle playfoursquare.com requests, for testing.

    Every known API path is answered with a canned capture file; unknown
    paths get a 404.
    """

    # Map of request path -> capture file served in response.
    _CAPTURES = {
        '/v1/venue': '../captures/api/v1/venue.xml',
        '/v1/addvenue': '../captures/api/v1/venue.xml',
        '/v1/venues': '../captures/api/v1/venues.xml',
        '/v1/user': '../captures/api/v1/user.xml',
        '/v1/checkcity': '../captures/api/v1/checkcity.xml',
        '/v1/checkins': '../captures/api/v1/checkins.xml',
        '/v1/cities': '../captures/api/v1/cities.xml',
        '/v1/switchcity': '../captures/api/v1/switchcity.xml',
        '/v1/tips': '../captures/api/v1/tips.xml',
        '/v1/checkin': '../captures/api/v1/checkin.xml',
        '/history/12345.rss': '../captures/api/v1/feed.xml',
    }

    def do_GET(self):
        logging.warn('do_GET: %s, %s', self.command, self.path)
        url = urlparse.urlparse(self.path)
        logging.warn('do_GET: %s', url)
        # CLEANUP: removed dead query-string parsing (results were unused).
        response = self.handle_url(url)
        if response is not None:
            self.send_200()
            # BUG FIX: close the capture file once its contents are sent;
            # the original leaked the handle on every request.
            try:
                shutil.copyfileobj(response, self.wfile)
            finally:
                response.close()
            self.wfile.close()

    # POSTs are served exactly like GETs.
    do_POST = do_GET

    def handle_url(self, url):
        """Return an open capture file for *url*, or None after a 404."""
        path = RequestHandler._CAPTURES.get(url.path)
        if path is None:
            self.send_error(404)
            return None
        logging.warn('Using: %s' % path)
        return open(path)

    def send_200(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/xml')
        self.end_headers()
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 8080
server_address = ('0.0.0.0', port)
httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
# Java source template for one generated Foursquare XML parser class.
# Filled via %-interpolation: type_name / top_node_name / timestamp, with
# %(stanzas)s expanding to the "} else if" chain built from the templates
# below.
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
# Stanza for boolean-typed elements (parsed via Boolean.valueOf).
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
# Stanza for group elements, delegating to a GroupParser wrapping the
# element's own sub-parser.
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
# Stanza for complex (nested object) elements with a dedicated parser.
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
# Default stanza: plain text element.
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
    """Generate a parser class from the type description in argv[1]."""
    type_name, top_node_name, attributes = common.WalkNodesForAttributes(
        sys.argv[1])
    GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
    """Print the generated parser class to stdout.

    type_name: the type of object the parser returns.
    top_node_name: the name of the object the parser returns, per
        common.WalkNodesForAttributes.
    """
    stanzas = []
    for name in sorted(attributes):
        typ, children = attributes[name]
        replacements = Replacements(top_node_name, name, typ, children)
        # Pick the stanza template matching the attribute's kind.
        if typ == common.BOOLEAN:
            template = BOOLEAN_STANZA
        elif typ == common.GROUP:
            template = GROUP_STANZA
        elif typ in common.COMPLEX:
            template = COMPLEX_STANZA
        else:
            template = STANZA
        stanzas.append(template % replacements)
    if stanzas:
        # Drop the extraneous leading "} else " from the first stanza.
        stanzas[0] = stanzas[0].replace('} else ', '', 1)
    replacements = Replacements(top_node_name, name, typ, [None])
    replacements['stanzas'] = '\n'.join(stanzas).strip()
    print(PARSER % replacements)
def Replacements(top_node_name, name, typ, children):
    """Build the template-substitution dict for one attribute stanza."""
    # CamelCaseClassName derived from the top node, e.g. top_node -> TopNode.
    type_name = ''.join(word.capitalize() for word in top_node_name.split('_'))
    # CamelCase version of the attribute name.
    camel_name = ''.join(word.capitalize() for word in name.split('_'))
    # Capitalized-only local name, e.g. SomeName -> Somename.
    attribute_name = camel_name.lower().capitalize()
    # Java member-field name, e.g. mSomeName.
    field_name = 'm' + camel_name
    if children[0]:
        sub_parser_camel_case = children[0] + 'Parser'
    else:
        # No child type given: guess the parser from the singular name.
        sub_parser_camel_case = camel_name[:-1] + 'Parser'
    return {
        'type_name': type_name,
        'name': name,
        'top_node_name': top_node_name,
        'camel_name': camel_name,
        'parser_name': typ + 'Parser',
        'attribute_name': attribute_name,
        'field_name': field_name,
        'typ': typ,
        'timestamp': datetime.datetime.now(),
        'sub_parser_camel_case': sub_parser_camel_case,
        'sub_type': children[0],
    }
# Script entry point.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
# Foursquare API server (host:port) used for all requests.
SERVER = 'api.foursquare.com:80'
# All POST bodies are sent form-encoded.
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
# HMAC-SHA1 request signing, as used by the Foursquare oauth endpoint.
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
# Endpoint that exchanges a username/password for an oauth token.
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
    """Extract (token, secret) from an authexchange XML response."""
    token = re.search('<oauth_token>(.*)</oauth_token>',
                      auth_response).groups()[0]
    secret = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
                       auth_response).groups()[0]
    return (token, secret)
def create_signed_oauth_request(username, password, consumer):
    """Build and HMAC-sign the password-exchange POST request."""
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
        parameters={'fs_username': username, 'fs_password': password})
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, None)
    return oauth_request
def main():
    """Fetch the oAuth-protected URL given as argv[1] and print the response.

    Reads credentials from ~/.oget. If no cached access token is present
    (4-line file), performs the authexchange flow and appends the token to
    the file (making it a 6-line file for subsequent runs).
    """
    url = urlparse.urlparse(sys.argv[1])
    # Nevermind that the query can have repeated keys.
    parameters = dict(urlparse.parse_qsl(url.query))
    password_file = open(os.path.join(user.home, '.oget'))
    lines = [line.strip() for line in password_file.readlines()]
    if len(lines) == 4:
        # No cached access token yet; do the authexchange dance below.
        cons_key, cons_key_secret, username, password = lines
        access_token = None
    else:
        cons_key, cons_key_secret, username, password, token, secret = lines
        access_token = oauth.OAuthToken(token, secret)
    consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
    if not access_token:
        # Exchange username/password for an access token via foursquare's
        # non-standard authexchange endpoint, then cache it back into ~/.oget.
        oauth_request = create_signed_oauth_request(username, password, consumer)
        connection = httplib.HTTPConnection(SERVER)
        headers = {'Content-Type' :'application/x-www-form-urlencoded'}
        connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
            body=oauth_request.to_postdata(), headers=headers)
        auth_response = connection.getresponse().read()
        token = parse_auth_response(auth_response)
        access_token = oauth.OAuthToken(*token)
        # NOTE(review): rewrites ~/.oget in place; presumably open('w')
        # truncates the existing file and so keeps its 600 mode — confirm.
        open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
            cons_key, cons_key_secret, username, password, token[0], token[1])))
    # Sign and send the actual request for the protected page.
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
        access_token, http_method='POST', http_url=url.geturl(),
        parameters=parameters)
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
    connection = httplib.HTTPConnection(SERVER)
    connection.request(oauth_request.http_method, oauth_request.to_url(),
        body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
    print connection.getresponse().read()
    #print minidom.parse(connection.getresponse()).toprettyxml(indent='  ')
# Script entry point: only run main() when executed directly, not on import.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import os
import subprocess
import sys
# Destination java source tree and the directory of capture XML files that
# drive code generation.
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'

# Capture files to process: explicit command-line arguments, or every capture
# on disk when none are given.
captures = sys.argv[1:]
if not captures:
    captures = os.listdir(TYPESDIR)
for f in captures:
    # 'venue_detail.xml' -> basename 'venue_detail' -> java name 'VenueDetail'.
    basename = f.split('.')[0]
    javaname = ''.join([c.capitalize() for c in basename.split('_')])
    fullpath = os.path.join(TYPESDIR, f)
    typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
    parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
    # Generate the java type class, then its matching parser, by shelling out
    # to the sibling generator scripts (shell=True for the '>' redirection).
    cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
    cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
| Python |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# Java type names emitted by the generators.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"

# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']

# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}

DEFAULT_CLASS_IMPORTS = [
]

CLASS_IMPORTS = {
    # 'Checkin': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Venue': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Tip': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
}

# XML stanza types that map to generated java classes of their own; their
# children are not walked for attributes (see WalkNodesForAttributes).
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]

# Every recognized type; anything else is treated as a String.
TYPES = COMPLEX + ['boolean']
def WalkNodesForAttributes(path):
    """Parse the xml file getting all attributes.

    <venue>
      <attribute>value</attribute>
    </venue>

    Returns:
      type_name - The java-style name the top node will have. "Venue"
      top_node_name - unadultured name of the xml stanza, probably the type of
          java class we're creating. "venue"
      attributes - {'attribute': 'value'}
    """
    doc = pulldom.parse(path)
    type_name = None
    top_node_name = None
    attributes = {}
    # level > 0 means we are inside a COMPLEX element whose subtree we skip.
    level = 0
    for event, node in doc:
        # For skipping parts of a tree.
        if level > 0:
            if event == pulldom.END_ELEMENT:
                level-=1
                logging.warn('(%s) Skip end: %s' % (str(level), node))
                continue
            elif event == pulldom.START_ELEMENT:
                logging.warn('(%s) Skipping: %s' % (str(level), node))
                level+=1
                continue
        if event == pulldom.START_ELEMENT:
            logging.warn('Parsing: ' + node.tagName)
            # Get the type name to use.
            if type_name is None:
                # First START_ELEMENT is the top node: 'venue_detail' -> 'VenueDetail'.
                type_name = ''.join([word.capitalize()
                                     for word in node.tagName.split('_')])
                top_node_name = node.tagName
                logging.warn('Found Top Node Name: ' + top_node_name)
                continue
            typ = node.getAttribute('type')
            child = node.getAttribute('child')
            # We don't want to walk complex types.
            if typ in COMPLEX:
                logging.warn('Found Complex: ' + node.tagName)
                # Record the complex attribute itself, but skip its subtree.
                level = 1
            elif typ not in TYPES:
                # Unknown types degrade to String.
                logging.warn('Found String: ' + typ)
                typ = STRING
            else:
                logging.warn('Found Type: ' + typ)
            logging.warn('Adding: ' + str((node, typ)))
            # setdefault: first occurrence of a tag wins; duplicates ignored.
            attributes.setdefault(node.tagName, (typ, [child]))
    logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to support users unsubscribing from notifications."""
__author__ = 'John Orr (jorr@google.com)'
import os
import urllib
import urlparse
import appengine_config
from common import crypto
from controllers import utils
from models import custom_modules
from models import entities
from models import services
from google.appengine.api import users
from google.appengine.ext import db
# Location of this module's Jinja templates (unsubscribe/resubscribe pages).
TEMPLATES_DIR = os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules', 'unsubscribe', 'templates')
def get_unsubscribe_url(handler, email):
    """Create an individualized unsubscribe link for a user.

    Args:
        handler: controllers.utils.ApplicationHandler. The current request
            handler.
        email: string. The email address of the users for whom the unsubscribe
            link is being generated.

    Returns:
        string. A URL for the users to unsubscribe from notifications.
    """
    base = urlparse.urljoin(
        handler.get_base_href(handler), UnsubscribeHandler.URL[1:])
    # Sign the email so the link only works for the user it was issued to.
    query_string = urllib.urlencode({
        'email': email,
        's': _get_signature(handler, email)})
    return '%s?%s' % (base, query_string)
def get_resubscribe_url(handler, email):
    """Create an individualized resubscribe link for a user.

    Args:
        handler: controllers.utils.ApplicationHandler. The current request
            handler.
        email: string. The email address of the users for whom the resubscribe
            link is being generated.

    Returns:
        string. A URL for the users to resubscribe to notifications.
    """
    base = urlparse.urljoin(
        handler.get_base_href(handler), UnsubscribeHandler.URL[1:])
    # Same signed link as unsubscribe, plus the explicit resubscribe action.
    query_string = urllib.urlencode({
        'email': email,
        's': _get_signature(handler, email),
        'action': UnsubscribeHandler.RESUBSCRIBE_ACTION})
    return '%s?%s' % (base, query_string)
def has_unsubscribed(email):
    """Check whether the user has requested to be unsubscribed.

    Args:
        email: string. The email address of the user.

    Returns:
        bool. True if the user has requested to be unsubscribed.
    """
    entity = SubscriptionStateEntity.get_by_key_name(email)
    if entity is None:
        # No record means the user never opted out.
        return False
    return not entity.is_subscribed
def set_subscribed(email, is_subscribed):
    """Set the subscription state of a given user.

    Args:
        email: string. The email address of the user.
        is_subscribed: bool. The state to set. True means that the user is
            subscribed and should continue to receive emails; False means
            that they should not.

    Returns:
        None.
    """
    # Reuse the existing record when present; entities are always truthy.
    entity = (SubscriptionStateEntity.get_by_key_name(email)
              or SubscriptionStateEntity(key_name=email))
    entity.is_subscribed = is_subscribed
    entity.put()
class UnsubscribeHandler(utils.BaseHandler):
    """Receive an unsubscribe request and process it."""

    URL = '/modules/unsubscribe'
    RESUBSCRIBE_ACTION = 'resubscribe'

    def get(self):
        """Unsubscribe (or resubscribe) the user named by the request."""
        email = self.request.get('email')
        if not email:
            # If no email and signature is provided, unsubscribe will prompt
            # for login. NOTE: This is only intended to support access by users
            # who are known to have already registered with Course Builder. In
            # general subscription management should use the encoded email and
            # signature as this places the minimum burden on the user when
            # unsubscribing (ie no need for Google account, no need for login).
            user = self.get_user()
            if user is None:
                self.redirect(users.create_login_url(self.request.uri))
                return
            email = user.email()
        elif self.request.get('s') != _get_signature(self, email):
            # Reject links whose signature does not match the email.
            self.error(401)
            return

        if self.request.get('action') == self.RESUBSCRIBE_ACTION:
            set_subscribed(email, True)
            template_file = 'resubscribe.html'
        else:
            set_subscribed(email, False)
            template_file = 'unsubscribe.html'

        self.template_value[
            'resubscribe_url'] = get_resubscribe_url(self, email)
        self.template_value['navbar'] = {}
        self.template_value['email'] = email
        rendered = self.get_template(
            template_file, [TEMPLATES_DIR]).render(self.template_value)
        self.response.out.write(rendered)
def _get_signature(handler, email):
    """Compute the hex HMAC binding an email to this course's namespace."""
    components = [email, handler.app_context.get_namespace_name()]
    digest = crypto.EncryptionManager.hmac(components)
    return digest.encode('hex')
class SubscriptionStateEntity(entities.BaseEntity):
    """Entity which holds the subscription state of a user.

    This entity must be given a key_name equal to the email address of the user
    whose subscription state is being set.
    """

    # False when the user has opted out of notification emails.
    is_subscribed = db.BooleanProperty(indexed=False)

    def __init__(self, *args, **kwargs):
        # Enforce the email-as-key_name contract at construction time.
        if not ('key' in kwargs or 'key_name' in kwargs):
            raise db.BadValueError('key_name must be email address')
        super(SubscriptionStateEntity, self).__init__(*args, **kwargs)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Return a key with the (PII) email name run through transform_fn."""
        transformed_name = transform_fn(db_key.name())
        return db.Key(cls.kind(), transformed_name)
custom_module = None


def register_module():
    """Registers this module in the registry."""

    class Service(services.Unsubscribe):
        """Adapter exposing this module through the services registry."""

        def enabled(self):
            return custom_module.enabled

        def get_unsubscribe_url(self, handler, email):
            return get_unsubscribe_url(handler, email)

        def has_unsubscribed(self, email):
            return has_unsubscribed(email)

        def set_subscribed(self, email, is_subscribed):
            return set_subscribed(email, is_subscribed)

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Unsubscribe Module',
        'A module to enable unsubscription from emails.',
        [], [(UnsubscribeHandler.URL, UnsubscribeHandler)])
    services.unsubscribe = Service()
    return custom_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module providing handlers for URLs related to map/reduce and pipelines."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import datetime
import re
import urllib
from mapreduce import main as mapreduce_main
from mapreduce import parameters as mapreduce_parameters
from mapreduce.lib.pipeline import models as pipeline_models
from mapreduce.lib.pipeline import pipeline
from common import safe_dom
from common.utils import Namespace
from controllers import sites
from controllers import utils
from models import custom_modules
from models import data_sources
from models import jobs
from models import roles
from models import transforms
from models.config import ConfigProperty
from google.appengine.api import files
from google.appengine.api import users
from google.appengine.ext import db
# Module registration
custom_module = None
MODULE_NAME = 'Map/Reduce'
XSRF_ACTION_NAME = 'view-mapreduce-ui'
MAX_MAPREDUCE_METADATA_RETENTION_DAYS = 3
GCB_ENABLE_MAPREDUCE_DETAIL_ACCESS = ConfigProperty(
'gcb_enable_mapreduce_detail_access', bool,
safe_dom.NodeList().append(
safe_dom.Element('p').add_text("""
Enables access to status pages showing details of progress for individual
map/reduce jobs as they run. These pages can be used to cancel jobs or
sub-jobs. This is a benefit if you have launched a huge job that is
consuming too many resources, but a hazard for naive users.""")
).append(
safe_dom.Element('p').add_child(
safe_dom.A('/mapreduce/ui/pipeline/list', target='_blank').add_text("""
See an example page (with this control enabled)"""))
), False, multiline=False, validator=None)
def authorization_wrapper(self, *args, **kwargs):
    """Dispatch wrapper admitting only requests made by the task queue.

    developers.google.com/appengine/docs/python/taskqueue/overview-push
    promises that the X-AppEngine-TaskName header cannot be set by external
    callers, so its presence proves the request is internal and from the
    task queue worker.  (This is belt-and-suspenders with the admin
    restriction on /mapreduce/worker*)
    """
    if 'X-AppEngine-TaskName' in self.request.headers:
        self.real_dispatch(*args, **kwargs)
        return
    self.response.out.write('Forbidden')
    self.response.set_status(403)
def ui_access_wrapper(self, *args, **kwargs):
    """Dispatch wrapper gating the map/reduce UI and patching its responses.

    Access is granted when the detail-access config flag is on AND the caller
    is either fetching static assets, holds a valid XSRF token, or is an
    App Engine admin.  After dispatch, several of the pipeline UI's responses
    are rewritten so that namespace/xsrf parameters survive its Ajax calls.
    """
    content_is_static = (
        self.request.path.startswith('/mapreduce/ui/') and
        (self.request.path.endswith('.css') or
         self.request.path.endswith('.js')))
    xsrf_token = self.request.get('xsrf_token')
    user_is_course_admin = utils.XsrfTokenManager.is_xsrf_token_valid(
        xsrf_token, XSRF_ACTION_NAME)
    ui_enabled = GCB_ENABLE_MAPREDUCE_DETAIL_ACCESS.value

    if ui_enabled and (content_is_static or
                       user_is_course_admin or
                       users.is_current_user_admin()):
        # Run the wrapped handler in the course's datastore namespace.
        namespace = self.request.get('namespace')
        with Namespace(namespace):
            self.real_dispatch(*args, **kwargs)

        # Some places in the pipeline UI are good about passing the
        # URL's search string along to RPC calls back to Ajax RPCs,
        # which automatically picks up our extra namespace and xsrf
        # tokens.  However, some do not, and so we patch it
        # here, rather than trying to keep up-to-date with the library.
        params = {}
        if namespace:
            params['namespace'] = namespace
        if xsrf_token:
            params['xsrf_token'] = xsrf_token
        extra_url_params = urllib.urlencode(params)
        if self.request.path == '/mapreduce/ui/pipeline/status.js':
            # Make the status page's RPC inherit the current query string.
            self.response.body = self.response.body.replace(
                'rpc/tree?',
                'rpc/tree\' + window.location.search + \'&')
        elif self.request.path == '/mapreduce/ui/pipeline/rpc/tree':
            # Redirect detail links through the UI (guarded) path.
            self.response.body = self.response.body.replace(
                '/mapreduce/worker/detail?',
                '/mapreduce/ui/detail?' + extra_url_params + '&')
        elif self.request.path == '/mapreduce/ui/detail':
            self.response.body = self.response.body.replace(
                'src="status.js"',
                'src="status.js?%s"' % extra_url_params)
        elif self.request.path == '/mapreduce/ui/status.js':
            # Inject namespace/xsrf_token into the Ajax parameter dict.
            replacement = (
                '\'namespace\': \'%s\', '
                '\'xsrf_token\': \'%s\', '
                '\'mapreduce_id\':' % (
                    namespace if namespace else '',
                    xsrf_token if xsrf_token else ''))
            self.response.charset = 'utf8'
            self.response.text = self.response.body.replace(
                '\'mapreduce_id\':', replacement)
    else:
        self.response.out.write('Forbidden')
        self.response.set_status(403)
class CronMapreduceCleanupHandler(utils.BaseHandler):
    """Cron target that removes stale map/reduce bookkeeping data."""

    def get(self):
        """Clean up intermediate data items for completed or failed M/R jobs.

        Map/reduce runs leave around a large number of rows in several
        tables.  This data is useful to have around for a while:
        - it helps diagnose any problems with jobs that may be occurring
        - it shows where resource usage is occurring
        However, after a few days, this information is less relevant, and
        should be cleaned up.

        The algorithm here is: for each namespace, find all the expired
        map/reduce jobs and clean them up.  If this happens to be touching
        the M/R job that a MapReduceJob instance is pointing at, buff up
        the description of that job to reflect the cleanup.  However, since
        DurableJobBase-derived things don't keep track of all runs, we
        cannot simply use the data_sources.Registry to list MapReduceJobs
        and iterate that way; we must iterate over the actual elements
        listed in the database.
        """
        # Belt and suspenders.  The app.yaml settings should ensure that
        # only admins can use this URL, but check anyhow.
        if not roles.Roles.is_direct_super_admin():
            self.error(400)
            return
        self._clean_mapreduce(
            datetime.timedelta(days=MAX_MAPREDUCE_METADATA_RETENTION_DAYS))

    @classmethod
    def _collect_blobstore_paths(cls, root_key):
        """Find /blobstore/ paths referenced by a pipeline's datastore rows."""
        paths = set()
        # pylint: disable=protected-access
        for model, field_name in ((pipeline_models._SlotRecord, 'value'),
                                  (pipeline_models._PipelineRecord, 'params')):
            prev_cursor = None
            any_records = True
            while any_records:
                any_records = False
                query = (model
                         .all()
                         .filter('root_pipeline =', root_key)
                         .with_cursor(prev_cursor))
                for record in query.run():
                    any_records = True
                    # The data parameters in SlotRecord and PipelineRecord
                    # vary widely, but all are provided via this interface as
                    # some combination of Python scalar, list, tuple, and
                    # dict.  Rather than depend on specifics of the map/reduce
                    # internals, crush the object to a string and parse that.
                    try:
                        data_object = getattr(record, field_name)
                    except TypeError:
                        data_object = None
                    if data_object:
                        text = transforms.dumps(data_object)
                        for path in re.findall(r'"(/blobstore/[^"]+)"', text):
                            paths.add(path)
                prev_cursor = query.cursor()
        return paths

    @classmethod
    def _clean_mapreduce(cls, max_age):
        """Separated as internal function to permit tests to pass max_age."""
        num_cleaned = 0

        # If job has a start time before this, it has been running too long.
        min_start_time_datetime = datetime.datetime.utcnow() - max_age
        min_start_time_millis = int(
            (min_start_time_datetime - datetime.datetime(1970, 1, 1))
            .total_seconds() * 1000)

        # Iterate over all namespaces in the installation
        for course_context in sites.get_all_courses():
            with Namespace(course_context.get_namespace_name()):
                # Index map/reduce jobs in this namespace by pipeline ID.
                jobs_by_pipeline_id = {}
                for job_class in data_sources.Registry.get_generator_classes():
                    if issubclass(job_class, jobs.MapReduceJob):
                        job = job_class(course_context)
                        pipe_id = jobs.MapReduceJob.get_root_pipeline_id(
                            job.load())
                        jobs_by_pipeline_id[pipe_id] = job

                # Clean up pipelines
                for state in pipeline.get_root_list()['pipelines']:
                    pipeline_id = state['pipelineId']
                    job_definitely_terminated = (
                        state['status'] == 'done' or
                        state['status'] == 'aborted' or
                        state['currentAttempt'] > state['maxAttempts'])
                    have_start_time = 'startTimeMs' in state
                    job_started_too_long_ago = (
                        have_start_time and
                        state['startTimeMs'] < min_start_time_millis)

                    if (job_started_too_long_ago or
                        (not have_start_time and job_definitely_terminated)):
                        # At this point, the map/reduce pipeline is
                        # either in a terminal state, or has taken so long
                        # that there's no realistic possibility that there
                        # might be a race condition between this and the
                        # job actually completing.
                        if pipeline_id in jobs_by_pipeline_id:
                            jobs_by_pipeline_id[pipeline_id].mark_cleaned_up()
                        p = pipeline.Pipeline.from_id(pipeline_id)
                        if p:
                            # Pipeline cleanup, oddly, does not go clean up
                            # relevant blobstore items.  They have a TODO,
                            # but it has not been addressed as of Sep 2014.
                            # pylint: disable=protected-access
                            root_key = db.Key.from_path(
                                pipeline_models._PipelineRecord.kind(),
                                pipeline_id)
                            for path in cls._collect_blobstore_paths(root_key):
                                files.delete(path)

                            # This only enqueues a deferred cleanup item, so
                            # transactionality with marking the job cleaned is
                            # not terribly important.
                            p.cleanup()
                            num_cleaned += 1
        return num_cleaned
def register_module():
    """Registers this module in the registry.

    Re-homes the map/reduce and pipeline library handlers under
    /mapreduce/ui/* (config-gated UI) and /mapreduce/worker/* (task-queue
    only), wrapping each handler's dispatch with the appropriate guard.
    """
    global_handlers = [
        ('/cron/mapreduce/cleanup', CronMapreduceCleanupHandler),
    ]
    for path, handler_class in mapreduce_main.create_handlers_map():
        # The mapreduce and pipeline libraries are pretty casual about
        # mixing up their UI support in with their functional paths.
        # Here, we separate things and give them different prefixes
        # so that the only-admin-access patterns we define in app.yaml
        # can be reasonably clean.
        if path.startswith('.*/pipeline'):
            if 'pipeline/rpc/' in path or path == '.*/pipeline(/.+)':
                path = path.replace('.*/pipeline', '/mapreduce/ui/pipeline')
            else:
                path = path.replace('.*/pipeline', '/mapreduce/worker/pipeline')
        else:
            if '_callback' in path:
                path = path.replace('.*', '/mapreduce/worker', 1)
            elif '/list_configs' in path:
                # This needs mapreduce.yaml, which we don't distribute.  Not
                # having this prevents part of the mapreduce UI front page
                # from loading, but we don't care, because we don't want
                # people using the M/R front page to relaunch jobs anyhow.
                continue
            else:
                path = path.replace('.*', '/mapreduce/ui', 1)

        # The UI needs to be guarded by a config so that casual users aren't
        # exposed to the internals, but advanced users can investigate issues.
        if '/ui/' in path or path.endswith('/ui'):
            # Only wrap once: real_dispatch records the original dispatch.
            if (hasattr(handler_class, 'dispatch') and
                not hasattr(handler_class, 'real_dispatch')):
                handler_class.real_dispatch = handler_class.dispatch
                handler_class.dispatch = ui_access_wrapper
            global_handlers.append((path, handler_class))

        # Wrap worker handlers with check that request really is coming
        # from task queue.
        else:
            if (hasattr(handler_class, 'dispatch') and
                not hasattr(handler_class, 'real_dispatch')):
                handler_class.real_dispatch = handler_class.dispatch
                handler_class.dispatch = authorization_wrapper
            global_handlers.append((path, handler_class))

    # Tell map/reduce internals that this is now the base path to use.
    mapreduce_parameters.config.BASE_PATH = '/mapreduce/worker'

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        MODULE_NAME,
        'Provides support for analysis jobs based on map/reduce',
        global_handlers, [])
    return custom_module
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Banner to obtain consent for usage reporting."""
__author__ = [
'John Orr (jorr@google.com)',
]
import jinja2
import os
import appengine_config
from controllers import utils
from models import roles
from models import transforms
from modules.admin import admin
from modules.dashboard import dashboard
from modules.usage_reporting import config
from modules.usage_reporting import messaging
# Location of this module's Jinja templates (consent banner markup).
TEMPLATES_DIR = os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules', 'usage_reporting', 'templates')
def _make_consent_banner(handler):
    """Return banner markup asking for usage-reporting consent, or None.

    The banner is suppressed once a consent decision has been recorded, or
    when usage reporting is disabled outright.
    """
    if config.is_consent_set() or messaging.is_disabled():
        return None
    context = {
        'is_super_admin': roles.Roles.is_super_admin(),
        'xsrf_token': handler.create_xsrf_token(
            ConsentBannerRestHandler.XSRF_TOKEN),
    }
    template = handler.get_template('consent_banner.html', [TEMPLATES_DIR])
    return jinja2.Markup(template.render(context))
class ConsentBannerRestHandler(utils.BaseRESTHandler):
    """Handle REST requests to set report consent from banner."""

    URL = '/rest/modules/usage_reporting/consent'
    XSRF_TOKEN = 'usage_reporting_consent_banner'

    def post(self):
        """Record the admin's consent choice and report it upstream."""
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(request, self.XSRF_TOKEN, {}):
            return
        if not roles.Roles.is_super_admin():
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return
        is_allowed = transforms.loads(request.get('payload'))['is_allowed']
        config.set_report_allowed(is_allowed)
        # Tell the central collector about the consent decision itself.
        messaging.Message.send_instance_message(
            messaging.Message.METRIC_REPORT_ALLOWED, is_allowed,
            source=messaging.Message.BANNER_SOURCE)
        transforms.send_json_response(self, 200, 'OK')
def notify_module_enabled():
    """Hook the consent banner into the dashboard and global-admin pages."""
    dashboard.DashboardHandler.PAGE_HEADER_HOOKS.append(_make_consent_banner)
    admin.GlobalAdminHandler.PAGE_HEADER_HOOKS.append(_make_consent_banner)
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reporting of anonymized CourseBuilder usage statistics: send messages."""
__author__ = [
'Michael Gainer (mgainer@google.com)',
]
import logging
import os
import time
import urllib
import uuid
import appengine_config
from common import utils as common_utils
from controllers import sites
from models import config
from models import courses
from models import transforms
from google.appengine.api import namespace_manager
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.ext import deferred
_INSTALLATION_IDENTIFIER = config.ConfigProperty(
'gcb_report_usage_identifier', str, (
'Randomized string used to identify this installation of '
'CourseBuilder when reporting usage statistics. This value '
'has no intrinsic meaning, and no relation to any data or '
'course setting; it is just used to correlate the weekly '
'reports from this installation.'),
'A random value will be picked when the first report is sent.')
# Name of the item in the course settings dictionary which contains the
# randomly-generated identifier for the course. (This name needs to be
# defined here to prevent circular inclusion problems with this module
# versus 'config')
USAGE_REPORTING_FIELD_ID = 'usage_reporting_id'
# Usage reporting is turned off on dev, but this flag overrides that, to
# enable testing of messaging and UI.
ENABLED_IN_DEV_FOR_TESTING = False
def is_disabled():
    """Whether usage reporting is suppressed in this environment.

    Reporting is off outside production unless the dev testing override
    flag is set.
    """
    if appengine_config.PRODUCTION_MODE:
        return False
    return not ENABLED_IN_DEV_FOR_TESTING
class Sender(object):
    """Namespace to permit replacement of messaging functions for testing."""

    # We want to be able to re-point the statistics reporting at some later
    # time.  To do that, we fetch an enablement flag and a destination URL
    # from a JSON document hosted at this URL.
    _REPORT_SETTINGS_INFO_URL = (
        'https://www.google.com/edu/coursebuilder/stats/config.json')

    # Field in control document indicating whether stats reporting is enabled.
    _REPORT_ENABLED = 'enabled'

    # Field in control document giving target URL to which to POST reports.
    _REPORT_TARGET = 'target'

    # Field in control document naming form field to use in POST.
    _REPORT_FORM_FIELD = 'form_field'

    # If we need to make a report, and the target URL is older than this,
    # re-fetch the control page so we are current on the latest state of
    # the 'enable' and 'target' parameters.
    _REPORT_SETTINGS_MAX_AGE = 3600

    # Latest values of report settings as loaded from _REPORT_SETTINGS_INFO_URL
    _report_settings = {
        _REPORT_ENABLED: False,
        _REPORT_TARGET: ''
    }
    # Epoch seconds when _report_settings was last fetched; 0 forces a fetch.
    _report_settings_timestamp = 0

    # Config options for task retries.
    _RETRY_OPT_NUM_TRIES = 10
    _RETRY_OPT_AGE_LIMIT_SECONDS = 60 * 60 * 10
    _RETRY_OPT_MIN_BACKOFF_SECONDS = 60
    _RETRY_OPT_MAX_DOUBLINGS = 6
    _RETRY_OPT_MAX_BACKOFF_SECONDS = (
        _RETRY_OPT_MIN_BACKOFF_SECONDS * (2 ** (_RETRY_OPT_MAX_DOUBLINGS - 1)))

    @classmethod
    def _refresh_report_settings(cls):
        """Ensure report settings are up-to-date, or raise an exception."""
        max_age = cls._report_settings_timestamp + cls._REPORT_SETTINGS_MAX_AGE
        if time.time() > max_age:
            response = urlfetch.fetch(
                cls._REPORT_SETTINGS_INFO_URL, method='GET',
                follow_redirects=True)
            if response.status_code != 200:
                raise RuntimeError(
                    'Failed to load statistics reporting settings from "%s"' %
                    cls._REPORT_SETTINGS_INFO_URL)
            cls._report_settings = transforms.loads(response.content)
            cls._report_settings_timestamp = int(time.time())

    @classmethod
    def _emit_message(cls, message):
        """Emit message if allowed, not if not, or raise exception."""
        cls._refresh_report_settings()
        if cls._report_settings[cls._REPORT_ENABLED] and not is_disabled():
            try:
                payload = urllib.urlencode(
                    {cls._report_settings[cls._REPORT_FORM_FIELD]: message})
                response = urlfetch.fetch(
                    cls._report_settings[cls._REPORT_TARGET], method='POST',
                    follow_redirects=True, payload=payload)
            except urlfetch.Error:
                # If something went so wrong we got an exception (as opposed
                # to simply getting a 500 server error from the target),
                # reset the timer so we re-fetch configs; presumably humans
                # will notice the problem and fix the configs "soon".
                cls._report_settings_timestamp = 0
                raise
            if response.status_code != 200:
                raise RuntimeError(
                    'Failed to send statistics report "%s" to "%s"' % (
                        message, cls._report_settings[cls._REPORT_TARGET]))

    @classmethod
    def send_message(cls, the_dict):
        """Serialize the_dict and deliver it, falling back to deferred retry."""
        message = transforms.dumps(the_dict)
        try:
            # One attempt to get the message out synchronously.
            cls._emit_message(message)
        except Exception, ex:  # pylint: disable=broad-except
            # Anything goes wrong, it goes on the deferred queue for retries.
            logging.critical('Problem trying to report statistics: %s', ex)
            common_utils.log_exception_origin()
            options = taskqueue.TaskRetryOptions(
                task_retry_limit=cls._RETRY_OPT_NUM_TRIES,
                task_age_limit=cls._RETRY_OPT_AGE_LIMIT_SECONDS,
                min_backoff_seconds=cls._RETRY_OPT_MIN_BACKOFF_SECONDS,
                max_backoff_seconds=cls._RETRY_OPT_MAX_BACKOFF_SECONDS,
                max_doublings=cls._RETRY_OPT_MAX_DOUBLINGS)
            deferred.defer(cls._emit_message, message, _retry_options=options)
class Message(object):
"""Namespace to permit replacement of messaging functions for testing."""
# Each message sent to the form is a JSON dict containing these fields.
_TIMESTAMP = 'timestamp'
_VERSION = 'version' # CourseBuilder version
_INSTALLATION = 'installation' # Randomly-chosen install ID
_COURSE = 'course' # Randomly-chosen course ID. Optional.
_METRIC = 'metric' # A name from the set below.
_VALUE = 'value' # Integer or boolean value.
_SOURCE = 'source' # String name of system component.
# Values to be used for the _SOURCE field
ADMIN_SOURCE = 'ADMIN_SETTINGS'
BANNER_SOURCE = 'CONSENT_BANNER'
WELCOME_SOURCE = 'WELCOME_PAGE'
# Allowed values that can be used for the 'metric' parameter in
# send_course_message() and send_instance_message().
METRIC_REPORT_ALLOWED = 'report_allowed' # True/False
METRIC_STUDENT_COUNT = 'student_count' # Num students in course.
METRIC_ENROLLED = 'enrolled' # Num students enrolled in 1-hour block.
METRIC_UNENROLLED = 'unenrolled' # Num students unenrolled in 1-hour block.
METRIC_COURSE_CREATED = 'course_created' # Always 1 when course created.
_ALLOWED_METRICS = [
METRIC_REPORT_ALLOWED,
METRIC_STUDENT_COUNT,
METRIC_ENROLLED,
METRIC_UNENROLLED,
METRIC_COURSE_CREATED,
]
@classmethod
def _get_random_course_id(cls, course):
"""If not yet chosen, randomly select an identifier for this course."""
all_settings = course.get_environ(course.app_context)
course_settings = all_settings[courses.Course.SCHEMA_SECTION_COURSE]
reporting_id = course_settings.get(USAGE_REPORTING_FIELD_ID)
if not reporting_id or reporting_id == 'None':
reporting_id = str(uuid.uuid4())
course_settings[USAGE_REPORTING_FIELD_ID] = reporting_id
course.save_settings(all_settings)
return reporting_id
@classmethod
def _get_random_installation_id(cls):
"""If not yet chosen, pick a random identifier for the installation."""
cfg = _INSTALLATION_IDENTIFIER
if not cfg.value or cfg.value == cfg.default_value:
with common_utils.Namespace(
appengine_config.DEFAULT_NAMESPACE_NAME):
entity = config.ConfigPropertyEntity.get_by_key_name(cfg.name)
if not entity:
entity = config.ConfigPropertyEntity(key_name=cfg.name)
ret = str(uuid.uuid4())
entity.value = ret
entity.is_draft = False
entity.put()
else:
ret = cfg.value
return ret
@classmethod
def _get_time(cls):
    """Current wall-clock time, truncated to whole seconds since the epoch."""
    now = time.time()
    return int(now)
@classmethod
def _add_course_field(cls, message):
    """Fill in the per-course reporting ID, if not already present.

    Resolves the current course from the active datastore namespace.
    """
    if cls._COURSE in message:
        return
    namespace = namespace_manager.get_namespace()
    app_context = sites.get_course_index().get_app_context_for_namespace(
        namespace)
    course = courses.Course(None, app_context=app_context)
    message[cls._COURSE] = cls._get_random_course_id(course)
@classmethod
def _build_message(cls, metric, value, source, timestamp):
    """Assemble the common report payload.

    Raises:
        ValueError: if metric is not one of the _ALLOWED_METRICS names.
    """
    if metric not in cls._ALLOWED_METRICS:
        raise ValueError('Metric name "%s" not in %s' % (
            metric, ' '.join(cls._ALLOWED_METRICS)))
    message = {
        cls._METRIC: metric,
        cls._VALUE: value,
        cls._VERSION: os.environ['GCB_PRODUCT_VERSION'],
        cls._INSTALLATION: cls._get_random_installation_id(),
    }
    if source is not None:
        message[cls._SOURCE] = source
    # Callers may pin a timestamp (e.g. hour buckets); default to "now".
    message[cls._TIMESTAMP] = timestamp if timestamp else cls._get_time()
    return message
@classmethod
def send_course_message(cls, metric, value, source=None, timestamp=None):
    """Report a metric scoped to the current course."""
    payload = cls._build_message(metric, value, source, timestamp)
    cls._add_course_field(payload)
    Sender.send_message(payload)
@classmethod
def send_instance_message(cls, metric, value, source=None, timestamp=None):
    """Report a metric scoped to the whole installation (no course ID)."""
    payload = cls._build_message(metric, value, source, timestamp)
    Sender.send_message(payload)
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reporting of anonymized CourseBuilder usage statistics: configuration."""
__author__ = [
'Michael Gainer (mgainer@google.com)',
]
import appengine_config
from common import schema_fields
from common import utils as common_utils
from models import config
from models import courses
from modules.usage_reporting import messaging
def _on_change_report_allowed(config_property, unused_old_value):
    """Callback to report externally when value of REPORT_ALLOWED changes."""
    message_cls = messaging.Message
    message_cls.send_instance_message(
        message_cls.METRIC_REPORT_ALLOWED, config_property.value,
        source=message_cls.ADMIN_SOURCE)
# Master consent switch.  When True, the weekly cron job sends anonymized
# usage reports; flipping the value also triggers _on_change_report_allowed,
# which immediately reports the new consent state itself.
REPORT_ALLOWED = config.ConfigProperty(
    'gcb_report_usage_permitted', bool,
    'Whether anonymized per-course usage statistics should be sent to the '
    'CourseBuilder team. The report contains randomly chosen identifiers '
    'for the installation and course (to correlate reports) '
    'hour-by-hour data on enrollments/unenrollments, and current number of '
    'students. This report is sent once a week.',
    default_value=False, after_change=_on_change_report_allowed)
def set_report_allowed(value):
    """Persist the usage-reporting consent flag in the default namespace."""
    with common_utils.Namespace(appengine_config.DEFAULT_NAMESPACE_NAME):
        key_name = REPORT_ALLOWED.name
        entity = config.ConfigPropertyEntity.get_by_key_name(key_name)
        if not entity:
            entity = config.ConfigPropertyEntity(key_name=key_name)
        entity.value = str(value)
        entity.is_draft = False
        entity.put()
def is_consent_set():
    """Whether an admin has ever explicitly answered the consent question."""
    with common_utils.Namespace(appengine_config.DEFAULT_NAMESPACE_NAME):
        entity = config.ConfigPropertyEntity.get_by_key_name(
            REPORT_ALLOWED.name)
    return entity is not None
def notify_module_enabled():
    """Register the hidden per-course reporting-ID field in course settings."""
    field_name = (
        courses.Course.SCHEMA_SECTION_COURSE + ':' +
        messaging.USAGE_REPORTING_FIELD_ID)
    reporting_id_field = schema_fields.SchemaField(
        field_name,
        'Usage Reporting ID', 'string',
        optional=True, editable=False, i18n=False, hidden=True,
        description='When usage reporting for CourseBuilder is enabled, this '
        'string is used to identify data from this course. The value is '
        'randomly selected when the first report is sent.')
    # Schema providers are callables taking the course; ours is constant.
    courses.Course.OPTIONS_SCHEMA_PROVIDERS[
        courses.Course.SCHEMA_SECTION_COURSE] += (
            lambda unused_course: reporting_id_field,)
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reporting of anonymized CourseBuilder usage statistics: welcome page."""
__author__ = [
'Michael Gainer (mgainer@google.com)',
]
from common import safe_dom
from modules.admin import admin
from modules.usage_reporting import config
from modules.usage_reporting import messaging
# Form-field name/value of the consent checkbox on the Welcome page; the
# value appears in the POST only when the box was checked.
USAGE_REPORTING_CONSENT_CHECKBOX_NAME = 'usage_reporting_consent'
USAGE_REPORTING_CONSENT_CHECKBOX_VALUE = 'accepted'
def _welcome_form_submitted(app_context, handler):
    """Note value of reporting consent checkbox submitted with Welcome form."""
    submitted = handler.request.get(USAGE_REPORTING_CONSENT_CHECKBOX_NAME)
    allowed = submitted == USAGE_REPORTING_CONSENT_CHECKBOX_VALUE
    config.set_report_allowed(allowed)
    messaging.Message.send_instance_message(
        messaging.Message.METRIC_REPORT_ALLOWED, allowed,
        source=messaging.Message.WELCOME_SOURCE)
def _make_welcome_form_content():
    """Add content to welcome page to get user's consent for stat collection.

    Returns:
        A safe_dom.Element containing the consent checkbox plus explanatory
        text, or None when usage-reporting messaging is disabled entirely.
    """
    if messaging.is_disabled():
        return None

    checkbox = safe_dom.Element('input')
    checkbox.set_attribute('type', 'checkbox')
    checkbox.set_attribute('name', USAGE_REPORTING_CONSENT_CHECKBOX_NAME)
    checkbox.set_attribute('value', USAGE_REPORTING_CONSENT_CHECKBOX_VALUE)
    # Default to checked unless the admin has previously and explicitly
    # opted out.
    if config.REPORT_ALLOWED.value or not config.is_consent_set():
        checkbox.set_attribute('checked', 'checked')

    checkbox_cell = safe_dom.Element('div')
    checkbox_cell.set_attribute('style', 'float: left; width: 10%; ')
    checkbox_cell.add_child(checkbox)

    text_cell = safe_dom.Element('div')
    text_cell.set_attribute(
        'style', 'float: left; width: 90%; text-align: left')
    text_cell.add_text(
        'I agree that Google may collect information about this '
        'deployment of Course Builder to help improve Google\'s '
        'products and services and for research purposes. '
        # Typo fix: 'acccordance' -> 'accordance' in user-facing text.
        'Google will maintain this data in accordance with '
    ).add_child(
        safe_dom.A(
            'http://www.google.com/policies/privacy/'
        ).add_text(
            'Google\'s privacy policy'
        )
    ).add_text(
        ' and will not associate the data it collects with '
        'this course or a user. Your response to this question '
        'will be sent to Google.'
    )

    # Empty element to clear the two floated cells above.
    clearer = safe_dom.Element('div')
    clearer.set_attribute('style', 'clear: both; ')

    container = safe_dom.Element('div')
    container.set_attribute('style', 'width: 60%; margin: 0 auto; ')
    container.append(checkbox_cell)
    container.append(text_cell)
    container.append(clearer)
    return container
def notify_module_enabled():
    """Hook the consent UI and its POST handling into the Welcome page."""
    welcome = admin.WelcomeHandler
    welcome.WELCOME_FORM_HOOKS.append(_make_welcome_form_content)
    welcome.POST_HOOKS.append(_welcome_form_submitted)
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reporting of anonymized CourseBuilder usage statistics: enrollment counts.
To avoid sending a message to the Google Forms instance on every single
student enroll/unenroll event, we internally store enroll/unenroll events in
the DB. The weekly reporting cron notification will kick off a map/reduce job
to count the number of enroll/unenroll events per hour; these will be
separately posted to the Google Form. After a suitable amount of time, the
older entries will be purged. Here, "suitable" means that we wait long
enough that we are certain that the usage has been reported, even after some
retries.
Deduplication of reports is handled separately. Our data reporting mechanism
is simply POSTs to a Google Forms document, which forwards results to a
spreadsheet, whence the data can be downloaded as CSV, which will be run
through a simple Python script to do deduplication and any other sanitization
steps required.
"""
__author__ = [
'Michael Gainer (mgainer@google.com)',
]
import time
from mapreduce import context
from models import jobs
from models import models
from models import transforms
from modules.usage_reporting import messaging
from google.appengine.ext import db
SECONDS_PER_HOUR = 60 * 60
class StudentEnrollmentEventEntity(models.BaseEntity):
    """Each record represents one enroll/unenroll event. Contains no PII."""

    # JSON-encoded payload; allowed keys are policed by
    # StudentEnrollmentEventDTO (metric, timestamp only).
    data = db.TextProperty(indexed=False)
class StudentEnrollmentEventDTO(object):
    """Convenience accessors for the 'data' JSON dict held by the entity.

    The constructor rejects any key other than METRIC and TIMESTAMP, as a
    guard against PII creeping into a record that is exempt from wipeout.
    """

    METRIC = 'metric'
    TIMESTAMP = 'timestamp'

    def __init__(self, the_id, the_dict):
        permitted_keys = (self.METRIC, self.TIMESTAMP)
        for key in the_dict:
            if key in permitted_keys:
                continue
            raise ValueError(
                'Unexpected field present in StudentEnrollmentEventEntity. '
                'Please consider whether this field might ever contain '
                'personally identifiable information, and if so, take '
                'appropriate measures to ensure that this information is '
                'subject to wipeout restrictions: list the field in '
                'StudentEnrollmentEventEntity._PROPERTY_EXPORT_BLACKLIST, '
                'or implement safe_key() (for the key field), or '
                'for_export() for non-key fields. See '
                'models.models.Student for example code.')
        self.id = the_id
        self.dict = the_dict

    @property
    def metric(self):
        # One of messaging.Message.METRIC_ENROLLED / METRIC_UNENROLLED.
        return self.dict[self.METRIC]

    @metric.setter
    def metric(self, metric):
        self.dict[self.METRIC] = metric

    @property
    def timestamp(self):
        # Integer seconds since the epoch when the event occurred.
        return self.dict[self.TIMESTAMP]

    @timestamp.setter
    def timestamp(self, timestamp):
        self.dict[self.TIMESTAMP] = timestamp
class StudentEnrollmentEventDAO(models.BaseJsonDao):
    """Manager/policy-definition object for StudentEnrollmentEventEntity."""

    DTO = StudentEnrollmentEventDTO
    ENTITY = StudentEnrollmentEventEntity
    ENTITY_KEY_TYPE = models.BaseJsonDao.EntityKeyTypeId

    @classmethod
    def insert(cls, metric):
        """Record one enroll/unenroll event, stamped with the current time.

        Args:
            metric: one of the messaging.Message METRIC_* names.
        """
        event = StudentEnrollmentEventDTO(None, {})
        event.timestamp = int(time.time())
        event.metric = metric
        cls.save(event)
def _student_add_post_hook(unused_student):
    """Record an enrollment event whenever a student is added or re-added."""
    metric = messaging.Message.METRIC_ENROLLED
    StudentEnrollmentEventDAO.insert(metric)
def _student_update_post_hook(
        profile, student, user_id, email, legal_name=None, nick_name=None,
        date_of_birth=None, is_enrolled=None, final_grade=None,
        course_info=None, labels=None, profile_only=False):
    """Hook called back when student properties are changed."""
    # Only report if 'is_enrolled' status is changing.
    if is_enrolled is None:
        return
    if is_enrolled:
        metric = messaging.Message.METRIC_ENROLLED
    else:
        metric = messaging.Message.METRIC_UNENROLLED
    StudentEnrollmentEventDAO.insert(metric)
class StudentEnrollmentEventCounter(jobs.AbstractCountingMapReduceJob):
    """M/R job to aggregate, report enroll/unenroll counts bucketed by hour."""

    MAX_AGE = SECONDS_PER_HOUR * 24 * 7 * 4  # 4 weeks
    MIN_TIMESTAMP = 'min_timestamp'  # Key for cutoff in mapper params.

    @staticmethod
    def get_description():
        return 'Count enroll/unenroll events, grouped by hour. Clean old items.'

    def entity_class(self):
        return StudentEnrollmentEventEntity

    def build_additional_mapper_params(self, app_context):
        # Snap "now" back to the previous even-hour boundary so that a run
        # straddling an hour boundary uses a single consistent definition
        # of "too old" and reports consistent data.
        now = int(time.time())
        hour_start = now - (now % SECONDS_PER_HOUR)
        return {self.MIN_TIMESTAMP: hour_start - self.MAX_AGE}

    @staticmethod
    def form_key(event):
        """Generate a map key: hour-bucket timestamp, then metric name."""
        bucket = event.timestamp - (event.timestamp % SECONDS_PER_HOUR)
        return '%d_%s' % (bucket, event.metric)

    @staticmethod
    def parse_key(key_string):
        """Split a map key string into component timestamp and metric name."""
        # Split on the first '_' only; metric names may contain underscores.
        timestamp_text, metric = key_string.split('_', 1)
        return int(timestamp_text), metric

    @staticmethod
    def map(event):
        """For each event, either discard or send to reducer for aggregation."""
        dto = StudentEnrollmentEventDTO(
            event.key().id(), transforms.loads(event.data))
        mapper_params = context.get().mapreduce_spec.mapper.params
        cutoff = mapper_params[StudentEnrollmentEventCounter.MIN_TIMESTAMP]
        if dto.timestamp < cutoff:
            # Old enough that it has certainly been reported; purge it.
            StudentEnrollmentEventDAO.delete(dto)
        else:
            yield StudentEnrollmentEventCounter.form_key(dto), 1

    @staticmethod
    def combine(unused_key, values, previously_combined_outputs=None):
        subtotal = sum(int(v) for v in values)
        if previously_combined_outputs is not None:
            subtotal += sum(int(v) for v in previously_combined_outputs)
        yield subtotal

    @staticmethod
    def reduce(key, values):
        """Sum count of events, and send report to Google Form."""
        timestamp, metric = StudentEnrollmentEventCounter.parse_key(key)
        total = sum(int(v) for v in values)
        messaging.Message.send_course_message(
            metric, total, timestamp=timestamp)
def notify_module_enabled():
    """Attach enrollment-event recording to student add/update operations."""
    dao = models.StudentProfileDAO
    dao.UPDATE_POST_HOOKS.append(_student_update_post_hook)
    dao.ADD_STUDENT_POST_HOOKS.append(_student_add_post_hook)
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reporting of anonymized CourseBuilder usage statistics: count students."""
__author__ = [
'Michael Gainer (mgainer@google.com)',
]
from models import jobs
from models import models
from modules.usage_reporting import messaging
class StudentCounter(jobs.MapReduceJob):
    """M/R job to count students in the course."""

    @staticmethod
    def get_description():
        return 'Count number of students in course. Used for usage reporting.'

    def entity_class(self):
        return models.Student

    @staticmethod
    def map(student):
        # Every student contributes 1 under a single shared key, so the
        # reducer's sum is the course-wide student count.
        # TODO - count: registered, unregistered, completed, certificated
        yield messaging.Message.METRIC_STUDENT_COUNT, 1

    @staticmethod
    def combine(unused_key, values, previously_combined_outputs=None):
        partial = sum(int(v) for v in values)
        if previously_combined_outputs is not None:
            partial += sum(int(v) for v in previously_combined_outputs)
        yield partial

    @staticmethod
    def reduce(key, values):
        grand_total = sum(int(v) for v in values)
        messaging.Message.send_course_message(key, grand_total)
        yield key, grand_total
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enable reporting of anonymized usage statistics to CourseBuilder team."""
__author__ = [
'Michael Gainer (mgainer@google.com)',
]
from controllers import sites
from controllers import utils
from models import custom_modules
from modules.usage_reporting import config
from modules.usage_reporting import consent_banner
from modules.usage_reporting import course_creation
from modules.usage_reporting import enrollment
from modules.usage_reporting import students
custom_module = None
class StartReportingJobs(utils.BaseHandler):
    """Handle callback from cron; launch map/reduce jobs which report stats."""

    URL = '/cron/usage_reporting/report_usage'

    def get(self):
        """Cron entry point: kick off per-course usage-reporting jobs.

        Authenticates the request as coming from App Engine cron BEFORE any
        other handling (previously the REPORT_ALLOWED check ran first, so
        arbitrary external callers received a 200 'Disabled.' response),
        then exits quietly when reporting consent is not granted.
        """
        # The runtime strips X-AppEngine-* headers from external requests,
        # so presence of this header proves the request came from cron.
        if 'X-AppEngine-Cron' not in self.request.headers:
            self.response.write('Forbidden.')
            self.response.set_status(403)
            return
        if not config.REPORT_ALLOWED.value:
            self.response.write('Disabled.')
            self.response.set_status(200)
            return
        self._submit_jobs()
        self.response.write('OK.')
        self.response.set_status(200)

    @classmethod
    def _submit_jobs(cls):
        """(Re)start the counting jobs for every course."""
        for course_context in sites.get_all_courses():
            per_course_jobs = [
                students.StudentCounter(course_context),
                enrollment.StudentEnrollmentEventCounter(course_context),
            ]
            for job in per_course_jobs:
                # Cancel a still-running job from a previous cycle so this
                # cycle's submission is not refused.
                if job.is_active():
                    job.cancel()
                job.submit()
def _notify_module_enabled():
    """Fan the enabled-notification out to each submodule that needs it."""
    for submodule in (config, consent_banner, course_creation, enrollment):
        submodule.notify_module_enabled()
def register_module():
    """Register the usage-reporting module and its global URL routes."""
    global custom_module  # pylint: disable=global-statement
    global_handlers = [
        (StartReportingJobs.URL, StartReportingJobs),
        (consent_banner.ConsentBannerRestHandler.URL,
         consent_banner.ConsentBannerRestHandler),
    ]
    custom_module = custom_modules.Module(
        'Usage Reporting',
        'Sends anonymized usage statistics to CourseBuilder team.',
        global_handlers, [],
        notify_module_enabled=_notify_module_enabled)
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Student HTML file submission upload module."""
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
import os
import jinja2
from common import jinja_utils
from common import schema_fields
from common import tags
from controllers import utils
from models import custom_modules
from models import models
from models import student_work
from google.appengine.ext import db
# String. Url fragment after the namespace we POST user payloads to.
# (Registered as a namespaced route in register_module.)
_POST_ACTION_SUFFIX = '/upload'
# String. Course Builder root-relative path where resources for this module are.
_RESOURCES_PATH = os.path.join(os.path.sep, 'modules', 'upload', 'resources')
# String. Post form XSRF token name, shared by the tag (issuer) and the
# upload handler (validator).
_XSRF_TOKEN_NAME = 'user-upload-form-xsrf'
class TextFileUploadHandler(utils.BaseHandler):
    """Accepts student text-file submissions POSTed by TextFileUploadTag."""

    def get_template(self, template_file, additional_dirs=None, prefs=None):
        """Resolve templates against this module's templates/ dir as well.

        Copies additional_dirs before extending it so the caller's list is
        not mutated (the original appended to the caller's list in place).
        """
        dirs = list(additional_dirs) if additional_dirs else []
        dirs.append(os.path.join(os.path.dirname(__file__), 'templates'))
        return super(TextFileUploadHandler, self).get_template(
            template_file, additional_dirs=dirs, prefs=prefs)

    def post(self):
        """Creates or updates a student submission.

        Responds 400 on a bad XSRF token, empty contents, or a failed write;
        403 when the requester is not an enrolled student.
        """
        token = self.request.get('form_xsrf_token')
        if not utils.XsrfTokenManager.is_xsrf_token_valid(
                token, _XSRF_TOKEN_NAME):
            self.error(400)
            return
        student = self.personalize_page_and_get_enrolled()
        if not student:
            self.error(403)
            return
        success = False
        unit_id = self.request.get('unit_id')
        contents = self.request.get('contents')
        if not contents:
            self.error(400)
        else:
            try:
                success = bool(student_work.Submission.write(
                    unit_id, student.get_key(), contents))
            # All write errors are treated equivalently.
            # pylint: disable=broad-except
            except Exception as e:
                self.error(400)
                # logging.warn is a deprecated alias; use logging.warning.
                logging.warning(
                    'Unable to save student submission; error was: "%s"', e)
        self.template_value['navbar'] = {'course': True}
        self.template_value['success'] = success
        self.template_value['unit_id'] = unit_id
        self.render('result.html')
class TextFileUploadTag(tags.BaseTag):
    """Renders a form for uploading a text file."""

    binding_name = 'text-file-upload-tag'

    @classmethod
    def name(cls):
        return 'Student Text File Upload'

    @classmethod
    def vendor(cls):
        return 'gcb'

    def _get_action(self, slug):
        # Collapse the double slash produced when the slug ends in '/'.
        return (slug + _POST_ACTION_SUFFIX).replace('//', '/')

    def get_icon_url(self):
        return os.path.join(_RESOURCES_PATH, 'script_add.png')

    def get_schema(self, unused_handler):
        """Gets the tag's schema."""
        registry = schema_fields.FieldRegistry(TextFileUploadTag.name())
        display_length_field = schema_fields.SchemaField(
            'display_length', 'Display Length', 'integer',
            description=(
                'Number of characters in the filename display (supported '
                'browsers only).'),
            extra_schema_dict_values={'value': 100},
        )
        registry.add_property(display_length_field)
        return registry

    def render(self, node, handler):
        """Renders the custom tag."""
        student = handler.personalize_page_and_get_enrolled(
            supports_transient_student=True)
        template = jinja_utils.get_template(
            'templates/form.html', os.path.dirname(__file__))
        already_submitted = False
        if not isinstance(student, models.TransientStudent):
            submission_key = student_work.Submission.get_key(
                handler.unit_id, student.get_key())
            already_submitted = bool(db.get(submission_key))
        values = handler.template_value
        values['action'] = self._get_action(handler.app_context.get_slug())
        values['already_submitted'] = already_submitted
        values['display_length'] = node.attrib.get('display_length')
        values['form_xsrf_token'] = utils.XsrfTokenManager.create_xsrf_token(
            _XSRF_TOKEN_NAME)
        values['unit_id'] = handler.unit_id
        return tags.html_string_to_element_tree(
            jinja2.utils.Markup(template.render(handler.template_value)))
custom_module = None
def register_module():
    """Registers this module for use."""

    def on_module_enable():
        tags.Registry.add_tag_binding(
            TextFileUploadTag.binding_name, TextFileUploadTag)
        # Hide this tag from editors where student uploads make no sense.
        for scope in (tags.EditorBlacklists.COURSE_SCOPE,
                      tags.EditorBlacklists.DESCRIPTIVE_SCOPE):
            tags.EditorBlacklists.register(
                TextFileUploadTag.binding_name, scope)

    def on_module_disable():
        tags.Registry.remove_tag_binding(TextFileUploadTag.binding_name)
        for scope in (tags.EditorBlacklists.COURSE_SCOPE,
                      tags.EditorBlacklists.DESCRIPTIVE_SCOPE):
            tags.EditorBlacklists.unregister(
                TextFileUploadTag.binding_name, scope)

    global_routes = [
        (os.path.join(_RESOURCES_PATH, '.*'), tags.ResourcesHandler),
    ]
    namespaced_routes = [
        (_POST_ACTION_SUFFIX, TextFileUploadHandler),
    ]
    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Student Text File Submission Upload',
        'Adds a custom tag for students to upload text files <= 1MB in size.',
        global_routes, namespaced_routes,
        notify_module_disabled=on_module_disable,
        notify_module_enabled=on_module_enable,
    )
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic object editor view that uses REST services."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
import urllib
import jinja2
import webapp2
import appengine_config
from common import jinja_utils
from common import schema_fields
from common import tags
from controllers import utils
from models import custom_modules
from models import transforms
from models.config import ConfigProperty
# a set of YUI and inputex modules required by the editor
COMMON_REQUIRED_MODULES = [
    'inputex-group', 'inputex-form', 'inputex-jsonschema']

# Superset of optional modules; used when a caller does not pass an explicit
# required_modules list to ObjectEditor.get_html_for.
ALL_MODULES = [
    'querystring-stringify-simple', 'inputex-select', 'inputex-string',
    'inputex-radio', 'inputex-date', 'inputex-datepicker', 'inputex-checkbox',
    'inputex-list', 'inputex-color', 'gcb-rte', 'inputex-textarea',
    'inputex-url', 'inputex-uneditable', 'inputex-integer', 'inputex-hidden',
    'inputex-file', 'io-upload-iframe']

# Root-relative URL under which this module's static assets are served.
RESOURCES_PATH = '/modules/oeditor/resources'

# Global code syntax highlighter controls.
CAN_HIGHLIGHT_CODE = ConfigProperty(
    'gcb_can_highlight_code', bool, (
        'Whether or not to highlight code syntax '
        'in Dashboard editors and displays.'),
    True)
class ObjectEditor(object):
    """Generic object editor powered by jsonschema."""

    # Modules can add extra script tags to the oeditor page by registering a
    # callback function here. The callback function will receive the app_context
    # as an argument, and should return an iterable of strings, each of which is
    # the URL of a script library.
    # NOTE(review): get_html_for below invokes each callback with NO
    # arguments, which contradicts the sentence above -- confirm which is
    # stale before relying on this contract.
    EXTRA_SCRIPT_TAG_URLS = []

    @classmethod
    def get_html_for(
            cls, handler, schema_json, annotations, object_key,
            rest_url, exit_url,
            extra_args=None,
            save_method='put',
            delete_url=None, delete_message=None, delete_method='post',
            auto_return=False, read_only=False,
            required_modules=None,
            extra_css_files=None,
            extra_js_files=None,
            additional_dirs=None,
            delete_button_caption='Delete',
            save_button_caption='Save',
            exit_button_caption='Close'):
        """Creates an HTML code needed to embed and operate this form.

        This method creates an HTML, JS and CSS required to embed JSON
        schema-based object editor into a view.

        Args:
            handler: a BaseHandler class, which will host this HTML, JS and CSS
            schema_json: a text of JSON schema for the object being edited
            annotations: schema annotations dictionary
            object_key: a key of an object being edited
            rest_url: a REST endpoint for object GET/PUT operation
            exit_url: a URL to go to after the editor form is dismissed
            extra_args: extra request params passed back in GET and POST
            save_method: how the data should be saved to the server (put|upload)
            delete_url: optional URL for delete operation
            delete_message: string. Optional custom delete confirmation message
            delete_method: optional HTTP method for delete operation
            auto_return: whether to return to the exit_url on successful save
            read_only: optional flag; if set, removes Save and Delete operations
            required_modules: list of inputex modules required for this editor
            extra_css_files: list of extra CSS files to be included
            extra_js_files: list of extra JS files to be included
            additional_dirs: list of extra directories to look for
                Jinja template files, e.g., JS or CSS files included by modules.
            delete_button_caption: string. A caption for the 'Delete' button
            save_button_caption: a caption for the 'Save' button
            exit_button_caption: a caption for the 'Close' button

        Returns:
            The HTML, JS and CSS text that will instantiate an object editor.
        """
        required_modules = required_modules or ALL_MODULES

        # Derive a human-readable kind for the default delete confirmation
        # prompt from the schema's 'description' field.
        if not delete_message:
            kind = transforms.loads(schema_json).get('description')
            if not kind:
                kind = 'Generic Object'
            delete_message = 'Are you sure you want to delete this %s?' % kind

        # construct parameters
        get_url = rest_url
        get_args = {'key': object_key}
        post_url = rest_url
        post_args = {'key': object_key}

        if extra_args:
            get_args.update(extra_args)
            post_args.update(extra_args)

        if read_only:
            # An empty save URL disables the Save operation client-side.
            post_url = ''
            post_args = ''

        # Icon metadata for every registered custom RTE tag, so the rich
        # text editor can render toolbar buttons for them.
        custom_rte_tag_icons = []
        for tag, tag_class in tags.get_tag_bindings().items():
            custom_rte_tag_icons.append({
                'name': tag,
                'iconUrl': tag_class().get_icon_url()})

        extra_script_tag_urls = []
        for callback in cls.EXTRA_SCRIPT_TAG_URLS:
            for url in callback():
                extra_script_tag_urls.append(url)

        template_values = {
            'enabled': custom_module.enabled,
            'schema': schema_json,
            'get_url': '%s?%s' % (get_url, urllib.urlencode(get_args, True)),
            'save_url': post_url,
            'save_args': transforms.dumps(post_args),
            'exit_button_caption': exit_button_caption,
            'exit_url': exit_url,
            'required_modules': COMMON_REQUIRED_MODULES + required_modules,
            'extra_css_files': extra_css_files or [],
            'extra_js_files': extra_js_files or [],
            'schema_annotations': [
                (item[0], transforms.dumps(item[1])) for item in annotations],
            'save_method': save_method,
            'auto_return': auto_return,
            'delete_button_caption': delete_button_caption,
            'save_button_caption': save_button_caption,
            'custom_rte_tag_icons': transforms.dumps(custom_rte_tag_icons),
            'delete_message': delete_message,
            'can_highlight_code': CAN_HIGHLIGHT_CODE.value,
            'extra_script_tag_urls': extra_script_tag_urls,
        }

        if delete_url and not read_only:
            template_values['delete_url'] = delete_url
        if delete_method:
            template_values['delete_method'] = delete_method

        if appengine_config.BUNDLE_LIB_FILES:
            template_values['bundle_lib_files'] = True

        return jinja2.utils.Markup(handler.get_template('oeditor.html', (
            [os.path.dirname(__file__)] + (additional_dirs or [])
        )).render(template_values))
class PopupHandler(webapp2.RequestHandler, utils.ReflectiveRequestHandler):
    """A handler to serve the content of the popup subeditor."""

    # NOTE(review): 'custom_tag' is not listed in get_actions below --
    # confirm the reflective handler's fallback behavior for the default.
    default_action = 'custom_tag'
    get_actions = ['edit_custom_tag', 'add_custom_tag']
    post_actions = []

    def get_template(self, template_name, dirs):
        """Sets up an environment and Gets jinja template."""
        return jinja_utils.get_template(
            template_name, dirs + [os.path.dirname(__file__)])

    def _validate_schema(self, tag, schema):
        """Replace unsupported tag schemas with an 'unavailable' stub.

        The popup editor only supports flat schemas containing at most one
        field of type 'text'.
        """
        if schema.has_subregistries():
            return tag.unavailable_schema(
                'This tag has an invalid schema and cannot be edited. '
                'Only simple field types are allowed.')
        text_field_count = 0
        index = schema_fields.FieldRegistryIndex(schema)
        index.rebuild()
        for name in index.names_in_order:
            if index.find(name).type == 'text':
                text_field_count += 1
        if text_field_count > 1:
            return tag.unavailable_schema(
                'This tag has an invalid schema and cannot be edited. '
                'Only one field of type "text" is allowed.')
        return schema

    def get_edit_custom_tag(self):
        """Return the page used to edit a custom HTML tag in a popup."""
        tag_name = self.request.get('tag_name')
        tag_bindings = tags.get_tag_bindings()
        tag_class = tag_bindings[tag_name]
        tag = tag_class()
        schema = tag.get_schema(self)
        schema = self._validate_schema(tag, schema)

        template_values = {}
        template_values['form_html'] = ObjectEditor.get_html_for(
            self, schema.get_json_schema(), schema.get_schema_dict(), None,
            None, None,
            required_modules=tag_class.required_modules(),
            extra_js_files=tag_class.extra_js_files(),
            extra_css_files=tag_class.extra_css_files(),
            additional_dirs=tag_class.additional_dirs())
        self.response.out.write(
            self.get_template('popup.html', []).render(template_values))

    def get_add_custom_tag(self):
        """Return the page for the popup used to add a custom HTML tag."""
        tag_name = self.request.get('tag_name')
        excluded_tags = self.request.get_all('excluded_tags')

        tag_bindings = tags.get_tag_bindings()

        # Build the (name, "vendor: title") choices for the type selector,
        # sorted by display label.
        select_data = []
        for name in tag_bindings.keys():
            if name not in excluded_tags:
                clazz = tag_bindings[name]
                select_data.append((name, '%s: %s' % (
                    clazz.vendor(), clazz.name())))
        select_data = sorted(select_data, key=lambda pair: pair[1])

        # Default to the first tag alphabetically when none was requested.
        if tag_name:
            tag_class = tag_bindings[tag_name]
        else:
            tag_class = tag_bindings[select_data[0][0]]
        tag = tag_class()
        tag_schema = tag.get_schema(self)
        tag_schema = self._validate_schema(tag, tag_schema)

        # Wrap the tag's own schema under a type-selector registry.
        schema = schema_fields.FieldRegistry('Add a Component')
        type_select = schema.add_sub_registry('type', 'Component Type')
        type_select.add_property(schema_fields.SchemaField(
            'tag', 'Name', 'string', select_data=select_data))
        schema.add_sub_registry('attributes', registry=tag_schema)

        template_values = {}
        template_values['form_html'] = ObjectEditor.get_html_for(
            self, schema.get_json_schema(), schema.get_schema_dict(), None,
            None, None,
            required_modules=tag_class.required_modules(),
            extra_js_files=['add_custom_tag.js'] + tag_class.extra_js_files(),
            extra_css_files=tag_class.extra_css_files(),
            additional_dirs=tag_class.additional_dirs())
        self.response.out.write(
            self.get_template('popup.html', []).render(template_values))
def create_bool_select_annotation(
    keys_list, label, true_label, false_label, class_name=None,
    description=None):
    """Creates inputex annotation to display bool type as a select.

    Args:
        keys_list: schema key path the annotation applies to.
        label: display label for the select widget.
        true_label: label shown for the True choice.
        false_label: label shown for the False choice.
        class_name: optional CSS class name for the widget.
        description: optional help text for the widget.

    Returns:
        A 2-tuple of (keys_list, inputex spec dict).
    """
    choices = [
        {'value': True, 'label': true_label},
        {'value': False, 'label': false_label},
    ]
    properties = dict(label=label, choices=choices)
    if class_name:
        properties['className'] = class_name
    if description:
        properties['description'] = description
    return (keys_list, {'type': 'select', '_inputex': properties})
custom_module = None
def register_module():
    """Registers this module in the registry."""
    from controllers import sites

    def zipped(url_pattern, bundle_file):
        # Serve the contents of a bundled zip file at the URL pattern.
        return (url_pattern, sites.make_zip_handler(os.path.join(
            appengine_config.BUNDLE_ROOT, bundle_file)))

    def css_combo(url_pattern, bundle_file, base_path):
        # Serve combined CSS resources out of a bundled zip file.
        return (url_pattern, sites.make_css_combo_zip_handler(
            os.path.join(appengine_config.BUNDLE_ROOT, bundle_file),
            base_path))

    yui_handlers = [
        zipped('/static/inputex-3.1.0/(.*)', 'lib/inputex-3.1.0.zip'),
        zipped('/static/yui_3.6.0/(.*)', 'lib/yui_3.6.0.zip'),
        zipped('/static/2in3/(.*)', 'lib/yui_2in3-2.9.0.zip')]
    codemirror_handler = [
        zipped('/static/codemirror/(.*)', 'lib/codemirror-4.5.0.zip')]
    if appengine_config.BUNDLE_LIB_FILES:
        yui_handlers += [
            css_combo('/static/combo/inputex', 'lib/inputex-3.1.0.zip',
                      '/static/inputex-3.1.0/'),
            css_combo('/static/combo/yui', 'lib/yui_3.6.0.zip', '/yui/'),
            css_combo('/static/combo/2in3', 'lib/yui_2in3-2.9.0.zip',
                      '/static/2in3/')]
    global_routes = yui_handlers + codemirror_handler + [
        (os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler)]
    oeditor_handlers = [('/oeditorpopup', PopupHandler)]
    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Object Editor',
        'A visual editor for editing various types of objects.',
        global_routes, oeditor_handlers)
    return custom_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for analytics on course dashboard pages."""
__author__ = ['Michael Gainer (mgainer@google.com)']
from models import analytics
from models import custom_modules
from models import data_sources
from modules.analytics import answers_aggregator
from modules.analytics import clustering
from modules.analytics import location_aggregator
from modules.analytics import page_event_aggregator
from modules.analytics import student_aggregate
from modules.analytics import user_agent_aggregator
from modules.analytics import youtube_event_aggregator
from modules.dashboard import tabs
from modules.dashboard.dashboard import DashboardHandler
custom_module = None
def register_tabs():
    """Builds the clustering visualizations and puts them on a dashboard tab."""
    visualizations = [
        analytics.Visualization(
            'clusters',
            'Cluster Manager',
            'clustering.html',
            data_source_classes=[clustering.ClusterDataSource]),
        analytics.Visualization(
            'student_vectors',
            'Student Vectors',
            'student_vectors.html',
            data_source_classes=[
                clustering.TentpoleStudentVectorDataSource]),
        analytics.Visualization(
            'clustering_stats',
            'Clustering Statistics',
            'cluster_stats.html',
            data_source_classes=[clustering.ClusterStatisticsDataSource]),
    ]
    tabs.Registry.register(
        'analytics', 'clustering', 'Clustering', visualizations)
def add_actions():
    """Registers dashboard GET actions for adding and editing clusters."""

    def _render_cluster_editor(dashboard_instance):
        # "add" and "edit" share the same REST-backed form; an empty 'key'
        # request parameter means "create a new cluster".
        key = dashboard_instance.request.get('key')
        template_values = {
            'page_title': dashboard_instance.format_title('Edit Cluster'),
            'main_content': dashboard_instance.get_form(
                clustering.ClusterRESTHandler, key,
                '/dashboard?action=analytics&tab=clustering',
                auto_return=True,
                app_context=dashboard_instance.app_context),
        }
        dashboard_instance.render_page(template_values, 'clusters')

    for action in ('add_cluster', 'edit_cluster'):
        DashboardHandler.add_custom_get_action(action, _render_cluster_editor)
def get_namespaced_handlers():
    """Returns (URL, handler) pairs served within each course namespace."""
    handler_class = clustering.ClusterRESTHandler
    return [(handler_class.URI, handler_class)]
def register_module():
    """Registers this module in the registry."""

    def on_module_enabled():
        page_event_aggregator.register_base_course_matchers()
        # Register every per-student aggregation component, in the same
        # order they were previously registered individually.
        registry = student_aggregate.StudentAggregateComponentRegistry
        for component in (
                location_aggregator.LocationAggregator,
                location_aggregator.LocaleAggregator,
                user_agent_aggregator.UserAgentAggregator,
                answers_aggregator.AnswersAggregator,
                page_event_aggregator.PageEventAggregator,
                youtube_event_aggregator.YouTubeEventAggregator):
            registry.register_component(component)
        data_sources.Registry.register(registry)
        data_sources.Registry.register(clustering.ClusterDataSource)
        data_sources.Registry.register(clustering.ClusterStatisticsDataSource)
        data_sources.Registry.register(
            clustering.TentpoleStudentVectorDataSource)
        register_tabs()
        add_actions()

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Analytics', 'Data sources and dashboard analytics pages',
        [], get_namespaced_handlers(),
        notify_module_enabled=on_module_enabled)
    return custom_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Student aggregate collection of page enter/exit events."""
__author__ = ['Michael Gainer (mgainer@google.com)']
import collections
import datetime
import urlparse
from common import schema_fields
from models import courses
from models import transforms
from modules.analytics import student_aggregate
from tools import verify
UNIX_EPOCH = datetime.datetime(year=1970, month=1, day=1)
class AbstractPageEventMatcher(object):
    """Interface for classifying page enter/exit event URLs.

    An implementation maps the path and query parameters of an event's
    location URL onto a (page-type name, item_id) 2-tuple.  Instances (or
    classes with classmethod implementations) are registered with
    PageEventAggregator.register_matcher().
    """

    def get_name(self):
        """Return a short string identifying this matcher."""
        raise NotImplementedError()

    def get_path_match(self):
        """Provide the exact portion of a location URL to match.

        E.g., "/unit", "/course", etc.
        """
        raise NotImplementedError()

    def match(self, static_params, query_params):
        """Perform matching on the given query parameters.

        This function is only called if the URL matches on the path
        component.  Return a 2-tuple of name and item_id (as described
        above in PageEvent) if the given params match, or None if they
        do not.

        Args:
            static_params: the value returned from build_static_params(),
                or None.
            query_params: a dict of URL parameters.
        Returns:
            A 2-tuple of name, item_id or None.
        """
        raise NotImplementedError()

    @classmethod
    def build_static_params(cls, unused_app_context):
        """Build any expensive-to-calculate items at course level.

        If this class needs to pre-calculate any facts that would be
        expensive to regenerate on each call to process_event(), those
        facts can be returned as a single object from this method.  If no
        such facts are required, return None.  This function is called
        once when each map/reduce job starts.  Any type of object may be
        returned.

        Args:
            unused_app_context: A standard CB application context object.
        Returns:
            Any.
        """
        # Default: no precomputed facts.
        return None
class PathOnlyMatcher(AbstractPageEventMatcher):
    """Matcher that identifies a page purely by its URL path.

    Used for pages such as /course or /forum that carry no identifying
    query parameters; once the path has matched, the match always succeeds
    and there is no per-page item id.
    """

    def __init__(self, name, path_match):
        # name: short page-type name reported in aggregates (e.g. 'forum').
        # path_match: exact URL path component to match (e.g. '/forum').
        self._name = name
        self._path_match = path_match

    def get_name(self):
        # Bug fix: previously returned self._path_match, which made
        # name-keyed registration/unregistration and static-params lookups
        # use the path instead of the page-type name.
        return self._name

    def get_path_match(self):
        return self._path_match

    def match(self, static_params, query_params):
        # Path-only pages have no item id.
        return (self._name, None)
class AssessmentMatcher(AbstractPageEventMatcher):
    """Matches standalone assessment pages, identified by a 'name' param."""

    @classmethod
    def get_name(cls):
        return 'assessment'

    @classmethod
    def get_path_match(cls):
        return '/assessment'

    @classmethod
    def match(cls, static_params, query_params):
        if 'name' not in query_params:
            return None
        # parse_qs yields a list per parameter; the first value is the id.
        return ('assessment', query_params['name'][0])
class UnitMatcher(AbstractPageEventMatcher):
    """Matches unit pages and the lessons/assessments displayed in them."""

    @classmethod
    def get_name(cls):
        return 'unit'

    @classmethod
    def get_path_match(cls):
        return '/unit'

    @classmethod
    def match(cls, static_params, query_params):
        """Identify which page a /unit URL actually displayed.

        Args:
            static_params: the map built by build_static_params(): string
                unit ID -> (name, item_id) 2-tuple, plus 'first_unit_id'.
            query_params: dict of URL parameters as produced by
                urlparse.parse_qs (values are lists of strings).
        Returns:
            A 2-tuple of name, item_id, or None if no match.
        """
        if 'lesson' in query_params:
            return 'lesson', query_params['lesson'][0]
        if 'assessment' in query_params:
            # Occurs for pre/post assessment in unit.
            return 'assessment', query_params['assessment'][0]
        # With only a unit ID (or no parameters at all), the page shown
        # depends on the unit's configuration: the whole unit on one page,
        # its pre-assessment, or its first lesson.  That mapping was
        # precomputed in build_static_params().  NOTE(review): this assumes
        # the course arrangement has not changed since the event was
        # emitted; reasonable, but not guaranteed.
        if 'unit' in query_params:
            unit_id = query_params['unit'][0]
        else:
            unit_id = static_params['first_unit_id']
        if unit_id in static_params:
            return static_params[unit_id]
        return None

    @classmethod
    def build_static_params(cls, app_context):
        """Provide map of unit ID to result to report for partial unit URLs.

        The result returned by this function is passed in to the map/reduce
        job aggregating event data on a per-Student basis.  When a URL
        referencing a unit is not fully specified, the first item in the
        unit is shown; this function pre-computes the result that match()
        should report when it has only the unit ID.

        Args:
            app_context: Standard CB application context object.
        Returns:
            A map from unit ID (string) to a 2-tuple of page-type name and
            ID, plus a 'first_unit_id' entry naming the course's first unit.
        """
        ret = {}
        course = courses.Course(None, app_context=app_context)
        for unit in course.get_units_of_type(verify.UNIT_TYPE_UNIT):
            # Bug fix: keys must be strings -- match() looks units up with
            # string IDs taken from URL query parameters, and the
            # 'first_unit_id' value was already stored as a string.
            unit_id = str(unit.unit_id)
            if 'first_unit_id' not in ret:
                ret['first_unit_id'] = unit_id
            lessons = course.get_lessons(unit.unit_id)
            if unit.show_contents_on_one_page:
                ret[unit_id] = ('unit', unit_id)
            elif unit.pre_assessment:
                ret[unit_id] = ('assessment', str(unit.pre_assessment))
            elif lessons:
                ret[unit_id] = ('lesson', str(lessons[0].lesson_id))
            elif unit.post_assessment:
                ret[unit_id] = ('assessment', str(unit.post_assessment))
            else:
                ret[unit_id] = ('unit', unit_id)
        return ret
class PageEventAggregator(
        student_aggregate.AbstractStudentAggregationComponent):
    """Aggregates page enter/exit events into per-student page-view groups."""

    # Registered matchers, indexed by short name (for uniqueness checks and
    # static-params lookup) and by URL path (for dispatch in process_event).
    _matchers_by_name = {}
    _matchers_by_path = collections.defaultdict(list)

    @classmethod
    def get_name(cls):
        return 'page_event'

    @classmethod
    def get_event_sources_wanted(cls):
        return ['enter-page', 'exit-page', 'tag-youtube-event']

    @classmethod
    def build_static_params(cls, app_context):
        """Collect per-course facts: the course slug plus per-matcher params."""
        ret = {}
        slug = app_context.get_slug()
        if not slug or slug == '/':
            slug = ''
        ret['slug'] = slug
        for name, matcher in cls._matchers_by_name.iteritems():
            value = matcher.build_static_params(app_context)
            if value:
                ret[name] = value
        return ret

    @classmethod
    def process_event(cls, event, static_params):
        """Map one event to a list of [name, item_id, timestamp, source]."""
        ret = []
        data = transforms.loads(event.data)
        url_parts = urlparse.urlparse(data.get('location', ''))
        query_params = urlparse.parse_qs(url_parts.query)
        # Strip the course slug so matcher paths are course-independent.
        path = url_parts.path.replace(static_params['slug'], '')
        for matcher in cls._matchers_by_path.get(path, []):
            matcher_params = static_params.get(matcher.get_name())
            value = matcher.match(matcher_params, query_params)
            if value:
                name, item_id = value
                # Whole seconds since the Unix epoch.
                timestamp = int(
                    (event.recorded_on - UNIX_EPOCH).total_seconds())
                ret.append([name, item_id, timestamp, event.source])
        return ret

    @classmethod
    def produce_aggregate(cls, course, student, static_value, event_items):
        """Group one student's matched events into page views."""
        # Separate events by location (page-type name + item id).
        location_events = collections.defaultdict(list)
        for sub_list in event_items:
            for name, item_id, timestamp, source in sub_list:
                location_events[(name, item_id)].append((timestamp, source))
        # Sort events in each location by timestamp.
        for events_list in location_events.itervalues():
            events_list.sort()
        # Cluster events into groups delimited by enter-page and exit-page.
        current_view = None
        page_views = []
        for location, events in location_events.iteritems():
            name, item_id = location
            for timestamp, source in events:
                activity = {
                    'action': source,
                    'timestamp': timestamp,
                }
                if not current_view or source == 'enter-page':
                    current_view = {
                        'name': name,
                        'item_id': item_id,
                        'start': timestamp,
                        'activities': [activity]
                    }
                    page_views.append(current_view)
                else:
                    current_view['activities'].append(activity)
                if source == 'exit-page':
                    current_view['end'] = timestamp
                    current_view = None
        # Present views to consumers in chronological order.
        page_views.sort(key=lambda v: v['start'])
        return {'page_views': page_views}

    @classmethod
    def get_schema(cls):
        """Describe the shape of the 'page_views' aggregate member."""
        activity = schema_fields.FieldRegistry('activity')
        activity.add_property(schema_fields.SchemaField(
            'action', 'Action', 'string',
            description='A short string indicating the nature of the event, '
            'such as "enter", "exit", "submit_assessment", "check_answer"'))
        activity.add_property(schema_fields.SchemaField(
            'timestamp', 'Timestamp', 'timestamp',
            description='Timestamp when the event occurred'))
        page_view = schema_fields.FieldRegistry('page_view')
        page_view.add_property(schema_fields.SchemaField(
            'name', 'Name', 'string',
            description='Name of the kind of page being shown. This is a '
            'short string, such as "unit", "lesson", "enroll", "unenroll", '
            'etc. The full list of these can be found in '
            'coursebuilder/modules/analytics/student_events.py.'))
        page_view.add_property(schema_fields.SchemaField(
            'item_id', 'Item ID', 'string', optional=True,
            description='Identity of the kind of page in question, if '
            'the page may have more than one instance. E.g., units and '
            'lessons have IDs; the forum, enroll and unenroll pages do not.'))
        page_view.add_property(schema_fields.SchemaField(
            'start', 'Start', 'timestamp',
            description='Timestamp when the page was entered.'))
        page_view.add_property(schema_fields.SchemaField(
            'end', 'End', 'timestamp', optional=True,
            description='Timestamp when the page was exited. '
            'Note that this field may be blank if we are missing '
            'the exit event. Also note that this field may be '
            'extremely misleading - users may leave the page open while '
            'doing other things. You should arrange to clip this value '
            'at some reasonable maximum, and impute either the average '
            'or the median value when this field is blank.'))
        page_view.add_property(schema_fields.FieldArray(
            'activities', 'Activities', item_type=activity))
        page_views = schema_fields.FieldArray(
            'page_views', 'Page Views', item_type=page_view,
            description='User activity events for this student, grouped by '
            'page enter/exit.')
        return page_views

    @classmethod
    def register_matcher(cls, matcher):
        """Add a matcher; raises ValueError if its name is already taken."""
        name = matcher.get_name()
        if name in cls._matchers_by_name:
            raise ValueError(
                'Page event matcher named "%s" already registered.' % name)
        cls._matchers_by_name[name] = matcher
        cls._matchers_by_path[matcher.get_path_match()].append(matcher)

    @classmethod
    def unregister_matcher(cls, matcher):
        """Remove a previously-registered matcher; unknown names are ignored."""
        name = matcher.get_name()
        if name in cls._matchers_by_name:
            matcher = cls._matchers_by_name[name]
            del cls._matchers_by_name[name]
            # Bug fix: the attribute was misspelled "_matcher_by_path"
            # (AttributeError at runtime).  Also guard remove() so path
            # lists that do not contain this matcher do not raise
            # ValueError.
            for matcher_list in cls._matchers_by_path.itervalues():
                if matcher in matcher_list:
                    matcher_list.remove(matcher)
def register_base_course_matchers():
    """Register page matchers for URLs present in every course."""
    PageEventAggregator.register_matcher(UnitMatcher)
    PageEventAggregator.register_matcher(AssessmentMatcher)
    # Bug fix: the 'enroll' matcher previously matched '/register_matcher',
    # which is not a course URL; student registration is served at
    # '/register'.
    for name, path in (
            ('course', '/course'),
            ('enroll', '/register'),
            ('announcements', '/announcements'),
            ('forum', '/forum'),
            ('preview', '/preview'),
            ('answer', '/answer'),
            ('unenroll', '/student/unenroll')):
        PageEventAggregator.register_matcher(PathOnlyMatcher(name, path))
def unregister_base_course_matchers():
    """Remove the matchers added by register_base_course_matchers()."""
    PageEventAggregator.unregister_matcher(UnitMatcher)
    PageEventAggregator.unregister_matcher(AssessmentMatcher)
    # Paths mirror register_base_course_matchers(); fixed the 'enroll'
    # entry from '/register_matcher' to '/register' to match it.
    # (unregister_matcher looks matchers up by name, so fresh
    # PathOnlyMatcher instances are sufficient here.)
    for name, path in (
            ('course', '/course'),
            ('enroll', '/register'),
            ('announcements', '/announcements'),
            ('forum', '/forum'),
            ('preview', '/preview'),
            ('answer', '/answer'),
            ('unenroll', '/student/unenroll')):
        PageEventAggregator.unregister_matcher(PathOnlyMatcher(name, path))
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for analytics on course dashboard pages."""
__author__ = ['Michael Gainer (mgainer@google.com)']
import collections
from common import schema_fields
from models import transforms
from modules.analytics import student_aggregate
class LocationAggregator(student_aggregate.AbstractStudentAggregationComponent):
    """Summarizes the geographic locations seen in a student's page events."""

    @classmethod
    def get_name(cls):
        return 'location'

    @classmethod
    def get_event_sources_wanted(cls):
        return ['enter-page', 'exit-page']

    @classmethod
    def build_static_params(cls, app_context):
        # No per-course precomputation is needed.
        return None

    @classmethod
    def process_event(cls, event, static_params):
        payload = transforms.loads(event.data)
        if 'loc' not in payload:
            return None
        loc = payload['loc']
        return loc.get('country'), loc.get('region'), loc.get('city')

    @classmethod
    def produce_aggregate(cls, course, student, static_params, event_items):
        counts = collections.defaultdict(int)
        for location in event_items:
            counts[tuple(location)] += 1
        total = len(event_items)
        ret = []
        for (country, region, city), count in counts.iteritems():
            item = {'frequency': float(count) / total}
            # Only emit the location fields that are actually present.
            for field, value in (
                    ('country', country), ('region', region), ('city', city)):
                if value:
                    item[field] = value
            ret.append(item)
        return {'location_frequencies': ret}

    @classmethod
    def get_schema(cls):
        location_frequency = schema_fields.FieldRegistry('location_frequency')
        location_frequency.add_property(schema_fields.SchemaField(
            'country', 'Country', 'string', optional=True,
            description='An ISO-3166-1 two-character country code.'))
        location_frequency.add_property(schema_fields.SchemaField(
            'region', 'Region', 'string', optional=True,
            description='A string describing a region within a country. '
            'The format and content of this string may vary widely depending '
            'on the specific country\'s customs, but this will generally '
            'correspond to a top-level political division within the country.'))
        location_frequency.add_property(schema_fields.SchemaField(
            'city', 'City', 'string', optional=True,
            description='A string describing a town or city. As with region, '
            'local usage and custom will dictate the values here. This is '
            'not necessarily the lowest-level political division - e.g., '
            'this would be "New York", rather than "The Bronx"'))
        location_frequency.add_property(schema_fields.SchemaField(
            'frequency', 'Frequency', 'number',
            description='A floating point number greater than zero and less '
            'than or equal to 1.0. Indicates the relative frequency of the '
            'location in responses from this user. The sum of all the '
            'frequency values should add up to 1.0. The most-frequent '
            'location is listed first in the array.'))
        return schema_fields.FieldArray(
            'location_frequencies', 'Location Frequencies',
            item_type=location_frequency,
            description='List of all locations seen for this user, in '
            'descending order by proportion of responses.')
class LocaleAggregator(student_aggregate.AbstractStudentAggregationComponent):
    """Summarizes the locales (languages) seen in a student's page events."""

    @classmethod
    def get_name(cls):
        return 'locale'

    @classmethod
    def get_event_sources_wanted(cls):
        return ['enter-page', 'exit-page']

    @classmethod
    def build_static_params(cls, app_context):
        # No per-course precomputation is needed.
        return None

    @classmethod
    def process_event(cls, event, static_params):
        payload = transforms.loads(event.data)
        if 'loc' not in payload:
            return None
        loc = payload['loc']
        # Prefer the explicit locale fields; fall back to the first entry
        # of the Accept-Language style 'language' value.
        return (loc.get('locale') or
                loc.get('page_locale') or
                loc.get('language', 'UNKNOWN').split(',')[0])

    @classmethod
    def produce_aggregate(cls, course, student, static_params, event_items):
        counts = collections.defaultdict(int)
        for locale in event_items:
            counts[locale] += 1
        total = len(event_items)
        ret = [
            {'locale': locale, 'frequency': float(count) / total}
            for locale, count in counts.iteritems()]
        return {'locale_frequencies': ret}

    @classmethod
    def get_schema(cls):
        """Provide schema; override default schema generated from DB type."""
        locale_frequency = schema_fields.FieldRegistry('locale_frequency')
        locale_frequency.add_property(schema_fields.SchemaField(
            'locale', 'Locale', 'string',
            description='A string indicating language and possibly regional '
            'variation. Always starts with an ISO-639-1 two-character '
            'lanaguage code. If the language is used in multiple countries, '
            'this is followed with an underscore ("_") character, and then '
            'an ISO-3166-1 two-character country code. E.g., "en_US"'))
        locale_frequency.add_property(schema_fields.SchemaField(
            'frequency', 'Frequency', 'number',
            description='A floating point number greater than zero and less '
            'than or equal to 1.0. Indicates the relative frequency of the '
            'locale in responses from this user. The sum of all the '
            'frequency values should add up to 1.0. The most-frequent '
            'locale is listed first in the array.'))
        return schema_fields.FieldArray(
            'locale_frequencies', 'Locale Frequencies',
            item_type=locale_frequency,
            description='List of all locales seen for this user, in '
            'descending order by proportion of responses.')
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for analytics on course dashboard pages."""
__author__ = ['Michael Gainer (mgainer@google.com)']
import collections
from common import schema_fields
from models import transforms
from modules.analytics import student_aggregate
class UserAgentAggregator(
        student_aggregate.AbstractStudentAggregationComponent):
    """Summarizes the browser User-Agent strings seen for a student."""

    @classmethod
    def get_name(cls):
        return 'user_agent'

    @classmethod
    def get_event_sources_wanted(cls):
        return ['enter-page', 'exit-page']

    @classmethod
    def build_static_params(cls, app_context):
        # No per-course precomputation is needed.
        return None

    @classmethod
    def process_event(cls, event, static_params):
        return transforms.loads(event.data).get('user_agent')

    @classmethod
    def produce_aggregate(cls, course, student, static_params, event_items):
        counts = collections.Counter(event_items)
        total = len(event_items)
        ret = [
            {'user_agent': agent, 'frequency': float(count) / total}
            for agent, count in counts.iteritems()]
        return {'user_agent_frequencies': ret}

    @classmethod
    def get_schema(cls):
        user_agent_frequency = schema_fields.FieldRegistry(
            'user_agent_frequency')
        user_agent_frequency.add_property(schema_fields.SchemaField(
            'user_agent', 'User Agent', 'string',
            description='User-Agent string as reported by a browser.'))
        user_agent_frequency.add_property(schema_fields.SchemaField(
            'frequency', 'Frequency', 'number',
            description='A floating point number greater than zero and less '
            'than or equal to 1.0. Indicates the relative frequency of the '
            'user_agent in responses from this user. The sum of all the '
            'frequency values should add up to 1.0. The most-frequent '
            'user_agent is listed first in the array.'))
        return schema_fields.FieldArray(
            'user_agent_frequencies', 'User Agent Frequencies',
            item_type=user_agent_frequency,
            description='List of all User-Agents for this user, in '
            'descending order by proportion of responses.')
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DB Entity and classes to manage the creation and modification of clusters.
"""
__author__ = 'Milagro Teruel (milit@google.com)'
import appengine_config
import collections
import json
import math
import os
import urllib
import zlib
from mapreduce import context
from common import schema_fields
from controllers import utils
from models import courses
from models import jobs
from models import models
from models import progress
from models import transforms
from models import data_sources
from models.entities import BaseEntity
from modules.analytics import student_aggregate
from modules.dashboard import student_answers_analytics
from modules.dashboard import dto_editor
from google.appengine.ext import db
# Codes identifying the kind of course element a dimension refers to;
# stored under the DIM_TYPE key of a dimension dict.
DIM_TYPE_UNIT = 'u'
DIM_TYPE_LESSON = 'l'
DIM_TYPE_QUESTION = 'q'
DIM_TYPE_UNIT_VISIT = 'uv'
DIM_TYPE_UNIT_PROGRESS = 'up'
DIM_TYPE_LESSON_PROGRESS = 'lp'
# All of the possible fields that can be in a dimension
DIM_TYPE = 'type'
DIM_ID = 'id'
DIM_HIGH = 'high' # The upper bound. Optional
DIM_LOW = 'low' # The lower bound. Optional
DIM_EXTRA_INFO = 'extra-info' # Optional
DIM_VALUE = 'value' # For students vectors. Optional
class ClusterEntity(BaseEntity):
    """Representation of a cluster used for clasification of students.

    A cluster is defined by a set of dimensions and a range of numeric values
    for each dimension. For dimensions with boolean values, they must be
    converted to a numeric representation. The identifier for a dimension is
    the type (unit, lesson, question...) plus the id of this type.

    The attribute data contains a json dictionary with the following
    structure:
    {
        'name': 'string with name of cluster',
        'description': 'string with description of the cluster',
        'vector': [{dictionary dimension 1}, {dictionary dimension 2}, ... ]
    }
    The value of 'vector' is a list with one dictionary for each dimension.
    Example of dimension:
    {
        clustering.DIM_TYPE: clustering.DIM_TYPE_UNIT,
        clustering.DIM_ID: 1,
        clustering.DIM_LOW: 0,
        clustering.DIM_HIGH: 50,
        clustering.DIM_EXTRA_INFO: ''
    }
    Dimension-extra-info is a field for any information needed to
    calculate the value of the dimension. It is also a json dictionary.

    The same question can be used several times inserting it into different
    units or lessons. We distinguish this different uses, and consequently
    the id of a question dimension is constructed also with the ids of the
    unit and lesson in wich the question was found. To get this id, use the
    function pack_question_dimid. The inverse function is
    unpack_question_dimid.

    A question can also appear several times in the same unit and lesson. In
    that case, we consider all usages as a single question dimension for
    compatibility with the information in StudentAggregateEntity.
    """
    # TODO(milit): Add an active/inactive property to exclude the cluster
    # from calculations and visualizations without having to delete it.

    # JSON blob with the structure described in the class docstring.
    data = db.TextProperty(indexed=False)
def pack_question_dimid(unit_id, lesson_id, question_id):
    """Constructs the dimension id for a question using unit and lesson id.

    Args:
        unit_id: a number or string indicating the unit id.
        lesson_id: a number, string or None indicating the lesson id.
        question_id: a number or string indicating the question id.

    Returns:
        A string of the form 'unit_id:lesson_id:question_id'.
    """
    parts = [str(unit_id), str(lesson_id), str(question_id)]
    return ':'.join(parts)
def unpack_question_dimid(dimension_id):
    """Decompose the dimension id into unit, lesson and question id.

    Inverse of pack_question_dimid.

    Returns:
        A tuple unit_id, lesson_id, question_id.
        unit_id and question_id are strings. lesson_id can be a string or
        None.
    """
    unit_id, lesson_id, question_id = dimension_id.split(':')
    # A packed lesson id of None round-trips through the string 'None'.
    return unit_id, None if lesson_id == 'None' else lesson_id, question_id
class ClusterDTO(object):
    """Data transfer object for ClusterEntity."""

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    def _field(self, key, default):
        # All public accessors read from the underlying JSON dict.
        return self.dict.get(key, default)

    @property
    def name(self):
        """Cluster display name; empty string when unset."""
        return self._field('name', '')

    @property
    def description(self):
        """Free-form cluster description; empty string when unset."""
        return self._field('description', '')

    @property
    def vector(self):
        """List of dimension dicts defining the cluster; [] when unset."""
        return self._field('vector', [])
class ClusterDAO(models.BaseJsonDao):
    """Data access object mapping ClusterEntity rows to ClusterDTO objects."""
    DTO = ClusterDTO
    ENTITY = ClusterEntity
    # Entities are looked up by their numeric datastore id.
    ENTITY_KEY_TYPE = models.BaseJsonDao.EntityKeyTypeId
class ClusterDataSource(data_sources.SynchronousQuery):
    """Gets the information of the available clusters in the course.

    Renders the jinja template clustering.html.
    """

    @staticmethod
    def fill_values(app_context, template_values):
        """Sets values into the dict used to fill out the Jinja template."""
        clusters = ClusterDAO.get_all()
        template_values['clusters'] = clusters
        # One dashboard edit URL per cluster, in the same order.
        template_values['edit_urls'] = [
            'dashboard?{}'.format(urllib.urlencode(
                {'action': 'edit_cluster', 'key': cluster.id}))
            for cluster in clusters]
def _has_right_side(dim):
    """Returns True if the value of dim[DIM_HIGH] is not None or ''."""
    # Fetch the value once, and use identity comparison with None (PEP 8)
    # instead of '!= None'.
    value = dim.get(DIM_HIGH)
    return value is not None and value != ''
def _has_left_side(dim):
    """Returns True if the value of dim[DIM_LOW] is not None or ''."""
    # Fetch the value once, and use identity comparison with None (PEP 8)
    # instead of '!= None'.
    value = dim.get(DIM_LOW)
    return value is not None and value != ''
def _add_unit_visits(unit, result):
    """Appends a 'visits to this unit' dimension dict to result, in place."""
    result.append({
        DIM_TYPE: DIM_TYPE_UNIT_VISIT,
        DIM_ID: unit.unit_id,
        'name': unit.title + ' (visits)',
    })
def _add_unit_and_content(unit, result):
    """Adds the score dimensions for units and its lessons and questions.

    Appends one dimension for the unit itself, one per tallied (graded)
    lesson or included assessment, and one per question. The number of
    scored lessons is recorded afterwards in the unit dimension's
    DIM_EXTRA_INFO field.

    Args:
        unit: dictionary with 'unit_id', 'title' and 'contents' keys.
        result: list of dimension dictionaries, extended in place.
    """
    # The content of an assessment is indicated by a lesson_id of None.
    # Inside that lesson we can find all the questions added directly
    # to the assessment.
    unit_dict = {
        DIM_TYPE: DIM_TYPE_UNIT,  # Unit or assessment
        DIM_ID: unit['unit_id'],
        'name': unit['title']}  # Name won't be saved in ClusterEntity
    result.append(unit_dict)
    unit_scored_lessons = 0
    for item in unit['contents']:
        lesson_id = item.get('lesson_id')
        # A unit may have a pre or post assessment, in that case the item
        # has unit_id, not a lesson_id.
        included_assessment_id = item.get('unit_id')
        lesson_title = item.get('title')
        if lesson_title and lesson_id and item.get('tallied'):
            result.append({
                DIM_TYPE: DIM_TYPE_LESSON,
                DIM_ID: lesson_id,
                'name': lesson_title})
            unit_scored_lessons += 1
        elif included_assessment_id and lesson_title:
            result.append({
                DIM_TYPE: DIM_TYPE_UNIT,
                DIM_ID: included_assessment_id,
                'name': lesson_title})
            unit_scored_lessons += 1
        # If lesson is not tallied (graded) is not considered a dimension
        for question in item['questions']:
            if included_assessment_id:
                question_id = pack_question_dimid(
                    included_assessment_id, None, question['id'])
            else:
                question_id = pack_question_dimid(
                    unit['unit_id'], lesson_id, question['id'])
            result.append({
                DIM_TYPE: DIM_TYPE_QUESTION,
                DIM_ID: question_id,
                'name': question['description']})
    # unit_dict was already appended to result; mutating it here updates
    # the appended entry as well (intentional aliasing).
    unit_dict[DIM_EXTRA_INFO] = transforms.dumps(
        {'unit_scored_lessons': unit_scored_lessons})
def _add_unit_and_lesson_progress(unit, course, result):
    """Adds the dimensions for the progress of units and lessons.

    The progress is obtained from the StudentPropertyEntity.

    Args:
        unit: a course unit object (provides unit_id and title).
        course: the course the unit belongs to.
        result: list of dimension dictionaries, extended in place.
    """
    result.append({
        DIM_TYPE: DIM_TYPE_UNIT_PROGRESS,
        DIM_ID: unit.unit_id,
        'name': unit.title + ' (progress)'
    })
    # TODO(milit): Add better order or indications of the structure of
    # content.
    for lesson in course.get_lessons(unit.unit_id):
        result.append({
            DIM_TYPE: DIM_TYPE_LESSON_PROGRESS,
            DIM_ID: lesson.lesson_id,
            'name': lesson.title + ' (progress)',
            # The owning unit id is needed later to look up the student's
            # lesson progress (see _get_lesson_progress).
            DIM_EXTRA_INFO: transforms.dumps({'unit_id': unit.unit_id})
        })
def get_possible_dimensions(app_context):
    """Returns a list of dictionaries with all possible dimensions.

    Any scored unit, lessons, assessment or question can be a dimension. If a
    question is used in different units and lessons, then a dimension will
    be created for each use of the question. However, if the question is used
    twice or more in the same unit and lesson, then only one dimension will
    be created for this question, unit and lesson.

    Additionally, units and assessments have a dimension for the number of
    visits to the page.

    For more details in the structure of dimensions see ClusterEntity
    documentation.
    """
    datasource = student_answers_analytics.OrderedQuestionsDataSource()
    template_values = {}
    # This has extra information but it was already implemented.
    # Also, the OrderedQuestionsDataSource takes care of the case
    # where assessments are used as pre- or post- items in Units, so
    # we don't have to code for that case here.
    datasource.fill_values(app_context, template_values)
    units_with_content = {u['unit_id']: u for u in template_values['units']}
    result = []
    course = courses.Course(None, app_context)
    for unit in course.get_units():
        _add_unit_visits(unit, result)
        if not unit.is_assessment():
            _add_unit_and_lesson_progress(unit, course, result)
        if unit.unit_id in units_with_content:
            # Adding the lessons and questions of the unit
            _add_unit_and_content(units_with_content[unit.unit_id], result)
    return result
class ClusterRESTHandler(dto_editor.BaseDatastoreRestHandler):
    """REST Handler for ClusterEntity model."""

    URI = '/rest/cluster'
    XSRF_TOKEN = 'cluster-edit'
    DAO = ClusterDAO
    SCHEMA_VERSIONS = ['1.0']
    REQUIRED_MODULES = []
    EXTRA_JS_FILES = ['cluster_rest.js']
    EXTRA_CSS_FILES = []
    ADDITIONAL_DIRS = [os.path.join(
        appengine_config.BUNDLE_ROOT, 'modules', 'analytics')]

    TYPES_INFO = {  # The js script file depends on this dictionary.
        DIM_TYPE_QUESTION: 'question',
        DIM_TYPE_LESSON: 'lesson',
        DIM_TYPE_UNIT: 'unit',
        DIM_TYPE_UNIT_VISIT: 'unit_visit',
        DIM_TYPE_UNIT_PROGRESS: 'unit_progress',
        DIM_TYPE_LESSON_PROGRESS: 'lesson_progress',
    }

    @staticmethod
    def pack_id(dim_id, dim_type):
        """Concatenates the id and type of the dimension."""
        return '{}---{}'.format(dim_id, dim_type)

    @staticmethod
    def unpack_id(packed_id):
        """Unpacks the id and type of the dimension."""
        return packed_id.split('---')

    @classmethod
    def get_schema(cls, app_context=None):
        """Builds the schema used to render and validate the cluster editor.

        When app_context is given, the dimension select field is populated
        with every dimension from get_possible_dimensions.
        """
        cluster_schema = schema_fields.FieldRegistry(
            'Cluster Definition',
            description='cluster definition',
            extra_schema_dict_values={'className': 'cluster-container'})
        cluster_schema.add_property(schema_fields.SchemaField(
            'version', '', 'string', optional=True, hidden=True))
        cluster_schema.add_property(schema_fields.SchemaField(
            'name', 'Name', 'string', optional=False,
            extra_schema_dict_values={'className': 'cluster-name'}))
        cluster_schema.add_property(schema_fields.SchemaField(
            'description', 'Description', 'string', optional=True,
            extra_schema_dict_values={'className': 'cluster-description'}))
        dimension = schema_fields.FieldRegistry('Dimension',
            extra_schema_dict_values={'className': 'cluster-dim'})
        to_select = []
        dim_types = {}
        if app_context:
            dimensions = get_possible_dimensions(app_context)
            for dim in dimensions:
                select_id = cls.pack_id(dim[DIM_ID], dim[DIM_TYPE])
                to_select.append((select_id, dim['name']))
                dim_types[select_id] = dim[DIM_TYPE]
        dimension.add_property(schema_fields.SchemaField(
            DIM_ID, 'Dimension Name', 'string', i18n=False,
            extra_schema_dict_values={'className': 'dim-name'},
            select_data=to_select))
        # Only description for the first dimension. All the descriptions
        # are in the cluster_rest.js file.
        dimension.add_property(schema_fields.SchemaField(
            DIM_LOW, 'Minimum number of visits to the page', 'string',
            i18n=False, optional=True,
            extra_schema_dict_values={'className': 'dim-range-low'}))
        dimension.add_property(schema_fields.SchemaField(
            DIM_HIGH, 'Maximum number of visits to the page', 'string',
            i18n=False, optional=True,
            extra_schema_dict_values={'className': 'dim-range-high'}))
        dimension_array = schema_fields.FieldArray(
            'vector', '', item_type=dimension,
            # Fixed user-visible typo: "acomplish" -> "accomplish".
            description='Dimensions of the cluster. Add a new dimension '
                'for each criteria the student has to accomplish to be '
                'included in the cluster',
            extra_schema_dict_values={
                'className': 'cluster-dim-container',
                'listAddLabel': 'Add a dimension',
                'listRemoveLabel': 'Delete dimension',
                'dim_types': dim_types,
                'types_info': cls.TYPES_INFO})
        cluster_schema.add_property(dimension_array)
        return cluster_schema

    def get_default_content(self):
        """Initial payload for a newly created cluster."""
        return {
            'version': self.SCHEMA_VERSIONS[0],
            'name': '',
            'description': '',
            'vector': []}

    def transform_for_editor_hook(self, item_dict):
        """Packs the id and type for the select field in the html."""
        for dim in item_dict['vector']:
            dim[DIM_ID] = ClusterRESTHandler.pack_id(dim[DIM_ID],
                                                     dim[DIM_TYPE])
        return item_dict

    def validate(self, item_dict, key, schema_version, errors):
        """Validates the user input.

        The cluster must:
            - Have a name
            - Have numeric values for the fields low and high of all
            dimensions.
            - Have a smaller value in the low field than in the high field.

        This function completes the low and high ranges with None values. Also
        divides the id from the select into id and type.
        """
        if not item_dict['name']:
            errors.append('Empty name.')
        error_str = ('Non numeric value in dimension '
                     'range (dimension number {}).')
        # Convert to float and complete the missing ranges with None.
        for index, dim in enumerate(item_dict['vector']):
            if _has_right_side(dim):
                try:
                    dim[DIM_HIGH] = float(dim[DIM_HIGH])
                except ValueError:
                    errors.append(error_str.format(index))
            else:
                dim[DIM_HIGH] = None
            if _has_left_side(dim):
                try:
                    dim[DIM_LOW] = float(dim[DIM_LOW])
                except ValueError:
                    errors.append(error_str.format(index))
            else:
                dim[DIM_LOW] = None
            if (_has_left_side(dim) and _has_right_side(dim)
                and dim[DIM_HIGH] < dim[DIM_LOW]):
                # Bug fix: the two adjacent literals used to concatenate to
                # "...dimensionnumber {}" (missing space).
                errors.append('Wrong range interval in dimension '
                              'number {}'.format(index))
            # Unpack the select id.
            dim[DIM_ID], dim[DIM_TYPE] = ClusterRESTHandler.unpack_id(
                dim[DIM_ID])

    def pre_save_hook(self, dto):
        """Filter out dimensions with missing start- and end- range."""
        dto.dict['vector'] = [dim for dim in dto.dict['vector']
                              if _has_left_side(dim) or _has_right_side(dim)]
class StudentVector(BaseEntity):
    """Representation of a single student based on a fixed set of dimensions.

    The attribute vector stores the value of the student for each possible
    dimension. This value must be a number, and it is generated by the job
    StudentVectorGenerator. The information is organized in a dictionary,
    for example:
        {
            DIM_TYPE: clustering.DIM_TYPE_QUESTION,
            DIM_ID: 3,
            DIM_VALUE: 60
        }
    """

    # JSON-serialized list of dimension dictionaries (see class docstring).
    vector = db.TextProperty(indexed=False)

    # TODO(milit): add a data source type so that all entities of this type
    # can be exported via data pump for external analysis.

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # Rebuilds the key with the (possibly anonymizing) key transform.
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))

    @staticmethod
    def get_dimension_value(vector, dim_id, dim_type):
        """Returns the value of the dimension with the given id and type.

        Return None if there is no matching dimension.

        Args:
            vector: A list of dictionaries. Corresponds to the StudentVector
            vector attribute unpacked.
        """
        # Ids are compared as strings because they may be stored either as
        # ints or strings depending on the source.
        candidates = [dim[DIM_VALUE] for dim in vector
                      if str(dim[DIM_ID]) == str(dim_id) and
                      dim[DIM_TYPE] == dim_type]
        if candidates:
            return candidates[0]
class StudentClusters(BaseEntity):
    """Representation of the relation between StudentVector and ClusterEntity.

    There is a StudentClusters entity for each StudentVector, created by the
    ClusteringGenerator job. The key name corresponds to the key_name of the
    StudentVector entity.

    The attribute clusters is a dictionary mapping ClusterEntity ids to
    distance values for a given distance type (Hamming as default). These
    distances are calculated using the job ClusteringGenerator. For example:
        {'1': 3, '2': 0, ... }
    """

    # JSON-serialized mapping of cluster id (string) -> distance.
    clusters = db.TextProperty(indexed=False)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # Rebuilds the key with the (possibly anonymizing) key transform.
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class StudentVectorGenerator(jobs.MapReduceJob):
    """A map reduce job to create StudentVector.

    The data comes from StudentAggregateEntity and StudentPropertyEntity.
    This job updates the vector field in the associated StudentVector, or
    creates a new one if there is none. This vector has a value for each
    score dimension type, calculated from the submissions field of
    StudentAggregateEntity as follows:
        Questions: The last weighted score of the question.
        Lessons: The last weighted score of the lesson.
        Units or Assessments: The average of all the scored lessons in
        the unit or assessment. If no lessons, then the unit has a score
        by itself.
    The unit visit dimension is the number of 'enter-page' events registered
    for the unit in the page_views field of StudentAggregateEntity.
    The unit and lesson progress dimensions are the same as the student
    progress in StudentPropertyEntity.

    NOTE: StudentAggregateEntity is created by the job
    StudentAggregateGenerator, so they have to run one after the other.
    """

    # This dictionary maps each dimension type to a function that extracts
    # its value from a StudentAggregateEntity data field. The function
    # receives two arguments, the data relevant to the dimension as list of
    # dictionaries and the dimension dictionary. The data is the output of
    # the function _inverse_submission_data.
    # To define a new dimension type you must define the function and include
    # it here. That way we avoid changing the map function.
    DIMENSION_FUNCTIONS = {
        DIM_TYPE_QUESTION: '_get_question_score',
        DIM_TYPE_LESSON: '_get_lesson_score',
        DIM_TYPE_UNIT: '_get_unit_score',
        DIM_TYPE_UNIT_VISIT: '_get_unit_visits',
        DIM_TYPE_UNIT_PROGRESS: '_get_unit_progress',
        DIM_TYPE_LESSON_PROGRESS: '_get_lesson_progress',
    }

    @classmethod
    def get_function_for_dimension(cls, dimension_type):
        """Returns the function to calculate the score of a dimension type.

        The mapping between dimension types and function names is in the
        class attribute DIMENSION_FUNCTIONS."""
        # Falls back to a constant-zero function when the named attribute
        # does not exist on the class.
        return getattr(cls, cls.DIMENSION_FUNCTIONS[dimension_type],
            lambda x, y: 0)

    @staticmethod
    def get_description():
        # Human-readable job name shown in the admin UI.
        return 'StudentVector generation'

    @classmethod
    def entity_class(cls):
        # The entity type this map reduce iterates over.
        return student_aggregate.StudentAggregateEntity

    def build_additional_mapper_params(self, app_context):
        # Dimensions are computed once here and shipped to every mapper
        # shard via the mapreduce params.
        return {'possible_dimensions': get_possible_dimensions(app_context)}

    @staticmethod
    def map(item):
        """Updates the values in vector.

        Creates a new StudentVector using the id of the item, a
        StudentAggregateEntity. Calculates the value for every dimension
        from the assessment data in item.
        """
        mapper_params = context.get().mapreduce_spec.mapper.params
        raw_data = transforms.loads(zlib.decompress(item.data))
        raw_assessments = raw_data.get('assessments', [])
        sub_data = StudentVectorGenerator._inverse_submission_data(
            mapper_params['possible_dimensions'], raw_assessments)
        raw_page_views = raw_data.get('page_views', [])
        view_data = StudentVectorGenerator._inverse_page_view_data(
            raw_page_views)
        progress_data = None
        user_id = item.key().name()
        student = models.Student.get_student_by_user_id(user_id)
        if student:
            raw_data = models.StudentPropertyEntity.get(
                student, progress.UnitLessonCompletionTracker.PROPERTY_KEY)
            if hasattr(raw_data, 'value') and raw_data.value:
                progress_data = transforms.loads(raw_data.value)
        # No data of any kind for this student: emit nothing.
        if not (sub_data or view_data or progress_data):
            return
        vector = []
        for dim in mapper_params['possible_dimensions']:
            type_ = dim[DIM_TYPE]
            if type_ == DIM_TYPE_UNIT_VISIT:
                # view_data/sub_data are defaultdicts: a missing key yields
                # an empty list instead of raising KeyError.
                data_for_dimension = view_data[type_, str(dim[DIM_ID])]
            elif type_ in [DIM_TYPE_UNIT_PROGRESS, DIM_TYPE_LESSON_PROGRESS]:
                data_for_dimension = progress_data
            else:
                data_for_dimension = sub_data[type_, str(dim[DIM_ID])]
            value = StudentVectorGenerator.get_function_for_dimension(
                dim[DIM_TYPE])(data_for_dimension, dim)
            new_dim = {
                DIM_TYPE: dim[DIM_TYPE],
                DIM_ID: dim[DIM_ID],
                DIM_VALUE: value}
            vector.append(new_dim)
        StudentVector(key_name=str(item.key().name()),
            vector=transforms.dumps(vector)).put()

    @staticmethod
    def reduce(item_id, values):
        """Empty function, there is nothing to reduce."""
        pass

    @staticmethod
    def _inverse_submission_data(dimensions, raw_data):
        """Build a dictionary with the information from raw_data by dimension.

        For each dimension builds an entry in the result. The value is a list
        with all the submissions relevant to that dimension. The concept of
        relevant is different for each type of dimension. For example, for a
        unit the relevant data are the submissions of all lessons for that
        unit.

        Returns:
            An instance of defaultdict with default empty list."""
        # NOTE(review): the dimensions argument is not read by this
        # implementation -- confirm whether it can be dropped from callers.
        result = collections.defaultdict(lambda: [])
        for activity in raw_data:
            activity_lesson = activity.get('lesson_id')
            activity_unit = activity.get('unit_id')
            # This creates aliasing but it's fine because it is read only.
            # It only adds a copy of the timestamp for the questions.
            result[DIM_TYPE_UNIT, activity_unit].append(activity)
            result[DIM_TYPE_LESSON, activity_lesson].append(activity)
            for submission in activity.get('submissions', []):
                for answer in submission.get('answers', []):
                    question_id = answer.get('question_id')
                    answer['timestamp'] = submission['timestamp']
                    dim_id = pack_question_dimid(activity_unit,
                        activity_lesson, question_id)
                    result[DIM_TYPE_QUESTION, dim_id].append(answer)
        return result

    @staticmethod
    def _inverse_page_view_data(raw_data):
        """Build a dictionary with the information from raw_data by dimension.

        For each dimension builds an entry in the result. The value is a list
        with all the submissions relevant to that dimension. In the case
        of DIM_TYPE_UNIT_VISIT the relevant submissions are those
        with name 'unit' or 'assessment'

        Returns:
            An instance of defaultdict with default empty list."""
        result = collections.defaultdict(lambda: [])
        for page_view in raw_data:
            name = page_view.get('name')
            if name not in ['unit', 'assessment']:
                continue
            item_id = page_view.get('item_id')
            result[DIM_TYPE_UNIT_VISIT, item_id].append(page_view)
        return result

    @staticmethod
    def _get_question_score(data, unused_dimension):
        """The score of a question is the last weighted score obtained.

        If a question in present multiple times in the same submission, then
        the score is the average weighted score of the question in that
        submission. If there is no submission for the question the score is 0.

        Args:
            data: a list of dictionaries.
        """
        if not data:
            return 0
        last_scores = []
        last_timestamp = 0
        for answer in data:
            # Could be more than one question with the same timestamp
            score = answer.get('weighted_score')
            if score and answer['timestamp'] > last_timestamp:
                # Newer submission found: restart the accumulator.
                last_scores = [score]
                last_timestamp = answer['timestamp']
            elif score and answer['timestamp'] == last_timestamp:
                last_scores.append(score)
        if last_scores:
            # Average of equal-timestamp scores; fsum for float accuracy.
            return math.fsum(last_scores) / len(last_scores)
        return 0

    @staticmethod
    def _get_lesson_score(data, dimension):
        """The score of a lesson is its last score."""
        if not data:
            return 0
        for submission in data:
            if ('lesson_id' in submission and 'last_score' in submission
                and submission['lesson_id'] == str(dimension[DIM_ID])):
                return submission['last_score']
        return 0

    @staticmethod
    def _get_unit_score(data, dimension):
        """The score of a unit is the average score of its scored lessons.

        If the unit has no lessons (assessment), the unit will have its
        own score.
        """
        if not data:
            return 0
        if not DIM_EXTRA_INFO in dimension:
            scored_lessons = 1
        else:
            # NOTE(review): DIM_EXTRA_INFO is written with transforms.dumps
            # elsewhere in this file but read with json.loads here -- confirm
            # the two are interchangeable and that json is imported.
            extra_info = json.loads(dimension[DIM_EXTRA_INFO])
            if not 'unit_scored_lessons' in extra_info:
                scored_lessons = 1
            else:
                # Guard against dividing by zero below.
                scored_lessons = max(extra_info['unit_scored_lessons'], 1)
        score = 0
        for submission in data:
            if ('unit_id' in submission and 'last_score' in submission
                and submission['unit_id'] == str(dimension[DIM_ID])):
                score += submission['last_score']
        return score/float(scored_lessons)

    @staticmethod
    def _get_unit_visits(data, dimension):
        """Counts the 'enter-page' activities among the unit's page views."""
        if not data:
            return 0
        result = 0
        for page_view in data:
            activities = page_view.get('activities')
            if not activities:
                continue
            for activity in activities:
                if activity.get('action') == 'enter-page':
                    result += 1
        return result

    @staticmethod
    def _get_unit_progress(data, dimension):
        """Reads the progress from the value of StudentPropertyEntity."""
        # This value is obtained directly from the JSON dictionary
        # in value because we can't pass the UnitLessonCompletionTracker
        # object as a parameter of the map reduce to use the proper accessors.
        if not data:
            return 0
        return data.get('u.{}'.format(dimension[DIM_ID]), 0)

    @staticmethod
    def _get_lesson_progress(data, dimension):
        """Reads the progress from the value of StudentPropertyEntity.

        The dimension has to have a field DIM_EXTRA_INFO with the unit id."""
        if not data:
            return 0
        extra_info = dimension.get(DIM_EXTRA_INFO)
        if not extra_info:
            return 0
        extra_info = transforms.loads(extra_info)
        if not extra_info:
            return 0
        unit_id = extra_info.get('unit_id')
        if unit_id:
            return data.get('u.{}.l.{}'.format(unit_id, dimension[DIM_ID]), 0)
        return 0
def hamming_distance(vector, student_vector):
    """Return the hamming distance between a ClusterEntity and a StudentVector.

    The distance is the number of cluster dimensions whose range does not
    contain the student's value. If a dimension is not present in the
    student vector, we assume its value is 0. If a range bound is missing
    in the cluster dimension, any value matches that side.

    Params:
        vector: the vector field of a ClusterEntity instance.
        student_vector: the vector field of a StudentVector instance.
    """
    # TODO(milit): As we are discarding all distances greater than
    # ClusteringGenerator.MAX_DISTANCE, add it as a parameter so we stop
    # calculating the distance once this limit is reached.
    def _outside_range(dim, value):
        # True when value falls below DIM_LOW or above DIM_HIGH (a side
        # with no bound never fails).
        below = _has_left_side(dim) and value < dim[DIM_LOW]
        above = _has_right_side(dim) and value > dim[DIM_HIGH]
        return below or above

    mismatches = 0
    for dim in vector:
        value = StudentVector.get_dimension_value(
            student_vector, dim[DIM_ID], dim[DIM_TYPE])
        if not value:
            value = 0
        if _outside_range(dim, value):
            mismatches += 1
    return mismatches
class ClusteringGenerator(jobs.MapReduceJob):
    """A map reduce job to calculate which students belong to each cluster.

    This job calculates the distance between each StudentVector and each
    ClusterEntity using the Hamming distance. The value of the distance is
    going to be stored in the StudentVector attribute clusters. This
    attribute is a json dictionary where the keys are the ids (as strings)
    of the clusters and the values are the distances. All previous distances
    are discarded.

    All distances greater than MAX_DISTANCE are ignored and not stored in
    the StudentVector entity.

    In the reduce step it returns calculated two statistics: the number of
    students in each cluster and the intersection of pairs of clusters.
    """

    MAX_DISTANCE = 2
    # TODO(milit): Add settings to disable heavy statistics.

    @staticmethod
    def get_description():
        # Human-readable job name shown in the admin UI.
        return 'StudentVector clusterization'

    @classmethod
    def entity_class(cls):
        # The entity type this map reduce iterates over.
        return models.Student

    def build_additional_mapper_params(self, app_context):
        """Ships the current cluster definitions to every mapper shard."""
        clusters = [{'id': cluster.id, 'vector': cluster.vector}
                    for cluster in ClusterDAO.get_all()]
        return {
            'clusters': clusters,
            'max_distance': getattr(self, 'MAX_DISTANCE', 2)
        }

    @staticmethod
    def map(item):
        """Calculates the distance from the StudentVector to ClusterEntites.

        Stores this distances in the clusters attribute of item. Ignores
        distances greater than MAX_DISTANCE.

        Yields:
            Pairs (key, value). There are three types of keys:
                1. A cluster id: the value is a tuple (student_id, distance).
                2. A pair of clusters ids: the value is a 3-uple
                (student_id, distance1, distance2)
                distance1 is the distance from the student vector to the
                cluster with the first id of the tuple and distance2 is
                the distance to the second cluster in the tuple.
                3. A string 'student_count' with value 1.
            One result is yielded for every cluster id and pair of clusters
            ids. If (cluster1_id, cluster2_id) is yielded, then
            (cluster2_id, cluster1_id) won't be yielded.
        """
        student = StudentVector.get_by_key_name(item.user_id)
        if student:
            mapper_params = context.get().mapreduce_spec.mapper.params
            max_distance = mapper_params['max_distance']
            clusters = {}
            item_vector = transforms.loads(student.vector)
            for cluster in mapper_params['clusters']:
                distance = hamming_distance(cluster['vector'], item_vector)
                if distance > max_distance:
                    continue
                # Pair this cluster with every previously accepted one so
                # intersections are yielded exactly once per pair.
                for cluster2_id, distance2 in clusters.items():
                    key = transforms.dumps((cluster2_id, cluster['id']))
                    value = (item.user_id, distance, distance2)
                    yield (key, transforms.dumps(value))
                clusters[cluster['id']] = distance
                to_yield = (item.user_id, distance)
                yield(cluster['id'], transforms.dumps(to_yield))
            clusters = transforms.dumps(clusters)
            StudentClusters(key_name=item.user_id, clusters=clusters).put()
            yield ('student_count', 1)

    @staticmethod
    def combine(key, values, previously_combined_outputs=None):
        """Combiner function called before the reducer.

        Params:
            key: the value of the key from the map output.
            values: the values for that key from the map output.
            previously_combined_outputs: a list or a RepeatedScalarContainer
                that holds the combined output for other instances for the
                same key. May be None.
        """
        if key != 'student_count':
            for value in values:
                yield value
            # Bug fix: previously_combined_outputs defaults to None and the
            # original iterated it unguarded, raising TypeError on the
            # first combiner call for non-count keys.
            if previously_combined_outputs is not None:
                for value in previously_combined_outputs:
                    yield value
        else:
            total = sum(int(value) for value in values)
            if previously_combined_outputs is not None:
                total += sum(int(value) for value in
                             previously_combined_outputs)
            yield total

    @staticmethod
    def reduce(item_id, values):
        """Aggregates per-cluster counts and pairwise intersections.

        This function can take two types of item_id (as json string).
            A number: the values are 2-uples (student_id, distance) and is
            used to calculate a count statistic.
            A list: the item_id holds the IDs of two clusters and the value
            corresponds to 3-uple (student_id, distance1, distance2). The
            value is used to calculate an intersection stats.
            A string 'student_count': The values is going to be a list of
            partial sums of numbers.

        Yields:
            A json string representing a tuple ('stat_name', (item_id,
            distances)). For count stats, the i-th number in the distances
            list corresponds to the number of students with distance equal to
            i to the vector. For intersection, the i-th number in the distance
            list corresponds to the students with distance less or equal than
            i to both clusters. item_id is the same item_id received as
            a parameter, but converted from the json string.
            For the stat student_count the value is a single number
            representing the total number of StudentVector
        """
        if item_id == 'student_count':
            yield (item_id, sum(int(value) for value in values))
        else:
            item_id = transforms.loads(item_id)
            distances = collections.defaultdict(lambda: 0)
            if isinstance(item_id, list):
                stat_name = 'intersection'
                for value in values:
                    value = transforms.loads(value)
                    # If a student vector has a distance 1 to cluster A
                    # and distance 3 to cluster B, then it has a
                    # distance of 3 (the greater) to the intersection
                    intersection_distance = max(value[1], value[2])
                    distances[intersection_distance] += 1
                item_id = tuple(item_id)
            else:
                stat_name = 'count'
                for value in values:
                    value = transforms.loads(value)
                    distances[value[1]] += 1
            distances = dict(distances)
            # Densify the sparse distance histogram into a plain list.
            list_distances = [0] * (max([int(k) for k in distances]) + 1)
            for distance, count in distances.items():
                list_distances[int(distance)] = count
            if stat_name == 'intersection':
                # Accumulate the distances.
                for index in range(1, len(list_distances)):
                    list_distances[index] += list_distances[index - 1]
            yield transforms.dumps((stat_name, (item_id, list_distances)))
class TentpoleStudentVectorDataSource(data_sources.SynchronousQuery):
    """This datasource does not retrieve elements.

    This datasource exists to put a button in the Visualization html that
    allows the user to run the job StudentVectorGenerator and to create the
    StudentVector entities. Also gives information about the state of the
    StudentAggregateGenerator job, which is a requisite to run
    StudentVectorGenerator.

    However, it is NOT expected to retrieve the StudentVector entities for
    display.
    """

    @staticmethod
    def required_generators():
        return [StudentVectorGenerator]

    @staticmethod
    def fill_values(app_context, template_values, unused_gen):
        """Check if the StudentAggregateGenerator has run.

        Sets template_values['message'] either to a "never run" notice or
        to the timestamp of the last aggregation.
        """
        job = student_aggregate.StudentAggregateGenerator(app_context).load()
        last_update = getattr(job, 'updated_on', None)
        # Bug fix: the original set the "never run" message for a missing
        # job and then fell through and redundantly set it again via the
        # last_update check; a single combined guard covers both cases.
        if not job or not last_update:
            template_values['message'] = ('The student aggregated job has '
                                          'never run.')
        else:
            # Grammar fix in user-facing text: "where" -> "were".
            template_values['message'] = (
                'The student aggregated values were '
                'last calculated on {}.'.format(job.updated_on.strftime(
                    utils.HUMAN_READABLE_DATETIME_FORMAT)))
class ClusterStatisticsDataSource(data_sources.AbstractSmallRestDataSource):
    """Returns the values obtained by ClusteringGenerator."""

    @staticmethod
    def required_generators():
        return [ClusteringGenerator]

    @classmethod
    def get_name(cls):
        return 'cluster_statistics'

    @classmethod
    def get_title(cls):
        return ''  # Not used.

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
            unused_source_context):
        # Without schema the fetch_values function won't be called.
        return 'List with dummy objects'.split()

    @staticmethod
    def _process_job_result(results):
        """Shapes the raw reducer output into the structures fetch_values
        documents.

        Args:
            results: list of (stat_name, value) tuples produced by
                ClusteringGenerator.reduce.
        """
        def add_zeros(iterable, length):
            # Right-pads a list with zeros up to the requested length.
            return iterable + [0] * (length - len(iterable))
        # NOTE: the helpers below read max_distance, count, id_mapping,
        # inter and student_count from the enclosing scope; those names are
        # assigned further down, before the helpers are called.
        def process_count(value, count):
            if value[0] not in count:
                return
            count[value[0]][1:] = add_zeros(value[1], max_distance + 1)
        def process_intersection(value, count, inter):
            cluster1, cluster2 = value[0]
            if not (cluster2 in count and cluster1 in count):
                return
            map1 = id_mapping.index(cluster1)
            map2 = id_mapping.index(cluster2)
            for dist in range(max_distance + 1):  # Include the last one
                c1_count = sum(count[cluster1][1:dist + 2])
                c2_count = sum(count[cluster2][1:dist + 2])
                if dist >= len(value[1]):  # Complete missing values
                    int_count = value[1][-1]
                else:
                    int_count = value[1][dist]  # We know is not empty
                inter[dist]['count'][map1][map2] = int_count
                percentage = round(int_count*100/float(student_count), 2)
                inter[dist]['percentage'][map1][map2] = percentage
                # P(c2 | c1) = count(c1 and c2) / count(c1)
                probability = 0
                if c1_count:
                    probability = round(int_count/float(c1_count), 2)
                inter[dist]['probability'][map1][map2] = probability
                # P(c1 | c2) = count(c1 and c2) / count(c2)
                probability = 0
                if c2_count:
                    probability = round(int_count/float(c2_count), 2)
                inter[dist]['probability'][map2][map1] = probability
        max_distance = ClusteringGenerator.MAX_DISTANCE
        student_count = 1
        count = {}
        dimension_count = {}
        for cluster in ClusterDAO.get_all():
            # Row format: [cluster_name, count@dist0, ..., count@distN].
            count[cluster.id] = [cluster.name] + [0] * (max_distance + 1)
            dimension_count[cluster.id] = len(cluster.vector)
        id_mapping = count.keys()
        name_mapping = [count[cid][0] for cid in id_mapping]
        # Recursive lambda: an arbitrarily nested autovivifying defaultdict.
        l = lambda: collections.defaultdict(l)
        inter = [{'count': l(), 'percentage': l(), 'probability': l()}
                 for _ in range(max_distance + 1)]
        # Process all counts first
        for result in results:
            stat, value = result
            if stat == 'count':
                process_count(value, count)
            elif stat == 'student_count':
                student_count = value
        # Once counting is complete, process the intersections
        for result in results:
            stat, value = result
            if stat != 'intersection':
                continue
            process_intersection(value, count, inter)
        # Reprocess counts to eliminate non relevant information
        for cluster_id in count:
            dimension = dimension_count[cluster_id]
            if dimension <= max_distance:
                count[cluster_id] = add_zeros(
                    count[cluster_id][:dimension + 1], max_distance + 2)
            other = student_count - sum(count[cluster_id][1:])
            count[cluster_id].append(other)
        extra_info = {'max_distance': max_distance}
        return [count.values(), inter, name_mapping, extra_info]

    @classmethod
    def fetch_values(cls, unused_app_context, unused_source_context,
            unused_schema, unused_catch_and_log, unused_page_number,
            clustering_generator_job):
        """Returns the statistics calculated by clustering_generator_job.

        The information extracted from the intersection data can be of three
        types:
            1. 'count' is the number of students in the intersection
            2. 'percentage' is the percentage of students in the intersection
            over the total of StudentVector entities in the db.
            3. 'probability' of the cluster B given the cluster A is the count
            of students in the intersection divided by the number of
            students in A.

        Returns:
            A list of dictionaries and the page number, always 0. The list
            has four elements:
                1. The results of the count statistic: A matrix with the
                format
                    [cluster_name, distance0, distance1, ... distanceN]
                where distanceX is the number of students at distance X of
                the cluster.
                2. The results of the intersection statistics: A list of
                dictionaries. The dictionary in position i contains the
                information of students at distance less or equal than i.
                The keys of the dictionaries are the types of data in the
                values: 'count', 'percentage' or 'probability'.
                The values are two level dictionary with the numbers of pairs
                of clusters. The clusters are mapped with sequential numbers.
                For example:
                    {'count': {0: {1: 1},
                               1: {2: 1},
                               3: {2: 0}},
                     'percentage': {0: {1: 16.67},
                                    1: {2: 16.67},
                                    3: {2: 0.00}},
                     'probability': {0: {1: 1.0}, 1: {0: 0.5, 2: 0.5},
                                     2: {1: 0.5, 3: 0.0},
                                     3: {2: 0.0}}}
                Not all pairs are included in this intersection. If
                an entry pair [a][b] is missing is safe to assume that the
                intersection is in the entry [b][a] or is 0.
                3. The mapping from cluster number to cluster name. A list where
                the index indicate the number of the cluster in that position.
                4. A dictionary with extra information. It has a key max_distance
                and a numeric value.
        """
        # This function is long and complicated, but it is so to send the data
        # as much processed as possible to the javascript in the page.
        # The information is adjusted to fit the graphics easily.
        results = list(jobs.MapReduceJob.get_results(clustering_generator_job))
        # data, page_number
        return ClusterStatisticsDataSource._process_job_result(results), 0
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Student aggregate collection for detailed YouTube interaction logs."""
__author__ = ['Michael Gainer (mgainer@google.com)']
import datetime
from common import schema_fields
from models import transforms
from modules.analytics import student_aggregate
# Reference point for converting event datetimes into integer second offsets.
UNIX_EPOCH = datetime.datetime(year=1970, month=1, day=1)
# This is YouTube's mapping from their 'data' field in YT events to meaning.
# Codes not present here are passed through as str(code) by
# YouTubeEventAggregator.produce_aggregate().
ACTION_ID_TO_NAME = {
    -1: 'unstarted',
    0: 'ended',
    1: 'playing',
    2: 'paused',
    3: 'buffering',
    5: 'video cued'
}
class YouTubeEventAggregator(
    student_aggregate.AbstractStudentAggregationComponent):
    """Aggregates a student's tag-youtube-event events into interactions."""

    @classmethod
    def get_name(cls):
        """Short, unique name identifying this component in the registry."""
        return 'youtube_event'

    @classmethod
    def get_event_sources_wanted(cls):
        """EventEntity 'source' values this component consumes."""
        return ['tag-youtube-event']

    @classmethod
    def build_static_params(cls, unused_app_context):
        """No course-level precomputed data is needed for YouTube events."""
        return None

    @classmethod
    def process_event(cls, event, static_params):
        """Extract the interesting fields from one YouTube tag event.

        Args:
            event: an EventEntity whose JSON 'data' payload carries
                'video_id', 'position' and 'data' (YouTube action code).
            static_params: unused; present to satisfy the component API.
        Returns:
            A (video_id, position, action, timestamp) tuple, where
            timestamp is whole seconds since the UNIX epoch.
        """
        data = transforms.loads(event.data)
        video_id = data['video_id']
        position = data['position']
        action = data['data']
        # Fix: the original contained a redundant duplicated assignment
        # ("timestamp = timestamp = int(...)"); one assignment suffices.
        timestamp = int((event.recorded_on - UNIX_EPOCH).total_seconds())
        return (video_id, position, action, timestamp)

    @classmethod
    def produce_aggregate(cls, course, student, static_params, event_items):
        """Group one student's YouTube events into per-video interactions.

        Args:
            course: unused; present to satisfy the component API.
            student: unused; present to satisfy the component API.
            static_params: unused; always None for this component.
            event_items: list of tuples produced by process_event().
        Returns:
            A dict {'youtube': [...]} conforming to get_schema().
        """
        # Sort by timestamp, then video ID, then position.
        event_items.sort(key=lambda item: (item[3], item[0], item[1]))
        youtube_interactions = []
        prev_video_id = None
        prev_position = 0
        current_interaction = None
        for event_item in event_items:
            video_id, position, action, timestamp = event_item

            # If we are seeing events either for a different video ID than
            # last time, or we are on the same ID, but have rewound to 0,
            # then call that a new interaction.
            if video_id != prev_video_id or prev_position > 0 and position == 0:
                current_interaction = {
                    'video_id': video_id,
                    'events': [],
                }
                youtube_interactions.append(current_interaction)
            prev_video_id = video_id
            prev_position = position

            # And build the detail event, adding it to the current interaction.
            event = {
                'position': position,
                'timestamp': timestamp,
            }
            if action in ACTION_ID_TO_NAME:
                event['action'] = ACTION_ID_TO_NAME[action]
            else:
                # Unknown codes are reported verbatim as strings; see schema.
                event['action'] = str(action)
            current_interaction['events'].append(event)
        return {'youtube': youtube_interactions}

    @classmethod
    def get_schema(cls):
        """Schema fragment describing the 'youtube' member of the aggregate."""
        youtube_event = schema_fields.FieldRegistry('event')
        youtube_event.add_property(schema_fields.SchemaField(
            'position', 'Position', 'integer',
            description='Offset from start of video, in seconds.'))
        youtube_event.add_property(schema_fields.SchemaField(
            'action', 'Action', 'string',
            description='Type of event that has occurred. The types that '
            'are known are: unstarted, ended, playing, paused, buffering, '
            'and video cued. If YouTube adds more types of events than '
            'these, they will be reported as a string version of the '
            'integer event code supplied by YouTube. Please see YouTube '
            'documentation for interpretation of unknown codes.'))
        youtube_event.add_property(schema_fields.SchemaField(
            'timestamp', 'Timestamp', 'timestamp',
            description='Moment when event occurred.'))

        youtube_interaction = schema_fields.FieldRegistry('interaction')
        youtube_interaction.add_property(schema_fields.SchemaField(
            'video_id', 'Video ID', 'string',
            description='The ID of the YouTube video. E.g., Kdg2drcUjYI '))
        youtube_interaction.add_property(schema_fields.FieldArray(
            'events', 'YouTube Events', item_type=youtube_event,
            description='A list of events describing an interaction with '
            'a video. Note that these are grouped sequentially by '
            'video ID from the raw stream. It is technically possible, '
            'though unlikely, to get confusing results if multiple '
            'videos are viewed simultaneously by one student.'))

        youtube_interactions = schema_fields.FieldArray(
            'youtube', 'YouTube Interactions', item_type=youtube_interaction,
            description='A list of interactions with individual YouTube '
            'video. These are ordered by the first interaction with a '
            'given video ID, and group together multiple actions '
            'within the same interaction.')
        return youtube_interactions
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collect answers to questions from Event and provide to student aggregator."""
__author__ = ['Michael Gainer (mgainer@google.com)']
import datetime
import logging
import sys
from common import schema_fields
from models import event_transforms
from models import transforms
from modules.analytics import student_aggregate
class AnswersAggregator(student_aggregate.AbstractStudentAggregationComponent):
    """Plug-in to student aggregate for collecting answers to questions.
    This class collects all answers to all questions in a course, both on
    assessments and graded lessons as well as check-answers responses on
    un-graded lessons.
    """
    @classmethod
    def get_name(cls):
        """Short, unique name identifying this component in the registry."""
        return 'answers'
    @classmethod
    def get_event_sources_wanted(cls):
        """EventEntity 'source' values this component consumes."""
        return ['submit-assessment', 'attempt-lesson', 'tag-assessment']
    @classmethod
    def build_static_params(cls, app_context):
        """Precompute course-level question/weight lookup tables once."""
        return {
            'questions_by_usage_id': (
                event_transforms.get_questions_by_usage_id(app_context)),
            'valid_question_ids': (
                event_transforms.get_valid_question_ids()),
            'group_to_questions': (
                event_transforms.get_group_to_questions()),
            'assessment_weights':
                event_transforms.get_assessment_weights(app_context),
            'unscored_lesson_ids':
                event_transforms.get_unscored_lesson_ids(app_context),
        }
    @classmethod
    def process_event(cls, event, static_params):
        """Unpack one event into an 'assessment' dict with one submission.

        Events with an unrecognized payload version or no unpackable
        answers yield None and are dropped from the aggregate.

        Args:
            event: an EventEntity from one of the wanted sources.
            static_params: the lookup tables from build_static_params().
        Returns:
            A dict with 'unit_id', 'lesson_id' and a single-element
            'submissions' list, or None when nothing can be extracted.
        """
        questions_info = static_params['questions_by_usage_id']
        valid_question_ids = static_params['valid_question_ids']
        group_to_questions = static_params['group_to_questions']
        assessment_weights = static_params['assessment_weights']
        # Seconds since the UNIX epoch for when the event was recorded.
        timestamp = int(
            (event.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
        content = transforms.loads(event.data)
        answers = None
        if event.source == 'submit-assessment':
            answer_data = content.get('values', {})
            # TODO(mgainer): handle assessment-as-form submissions. Current
            # implementation only understands Question and QuestionGroup;
            # forms are simply submitted as lists of fields.
            # TODO(mgainer): Handle peer-review scoring
            if not isinstance(answer_data, dict):
                return
            version = answer_data.get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, answer_data, timestamp)
            else:
                logging.warning('Unexpected version %s in submit-assessment '
                                'event handling', version)
        elif event.source == 'attempt-lesson':
            # Very odd that the version should be in the answers map....
            version = content.get('answers', {}).get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, content, timestamp)
            else:
                logging.warning('Unexpected version %s in attempt-lesson '
                                'event handling', version)
        elif event.source == 'tag-assessment':
            answers = event_transforms.unpack_check_answers(
                content, questions_info, valid_question_ids, assessment_weights,
                group_to_questions, timestamp)
        if not answers:
            return None
        answer_dicts = []
        total_weighted_score = 0.0
        for answer in answers:
            # Responses are stringified so the export schema stays uniform
            # regardless of the original answer type.
            if not isinstance(answer.answers, (tuple, list)):
                stringified_answers = [unicode(answer.answers)]
            else:
                stringified_answers = [unicode(a) for a in answer.answers]
            answer_dict = {
                'question_id': answer.question_id,
                'responses': stringified_answers,
            }
            answer_dict['score'] = float(answer.score)
            answer_dict['weighted_score'] = float(answer.weighted_score)
            total_weighted_score += answer.weighted_score
            answer_dicts.append(answer_dict)
        submission = {
            'timestamp': answers[0].timestamp,
            'answers': answer_dicts,
        }
        submission['weighted_score'] = total_weighted_score
        assessment = {
            'unit_id': str(answers[0].unit_id),
            'lesson_id': str(answers[0].lesson_id),
            'submissions': [submission],
        }
        return assessment
    @classmethod
    def produce_aggregate(cls, course, student, static_params, event_items):
        """Merge per-event assessments and compute first/last/min/max scores.

        Args:
            course: unused; present to satisfy the component API.
            student: unused; present to satisfy the component API.
            static_params: the lookup tables from build_static_params().
            event_items: list of dicts produced by process_event().
        Returns:
            A dict {'assessments': [...]} conforming to get_schema().
        """
        unscored_lesson_ids = [
            str(x) for x in static_params['unscored_lesson_ids']]
        assessments = []
        # Merge items for the same (unit, lesson) into one assessment entry.
        lookup = {}
        for item in event_items:
            key = (item['unit_id'], item['lesson_id'])
            if key not in lookup:
                assessments.append(item)
                lookup[key] = item
            else:
                lookup[key]['submissions'].extend(item['submissions'])
        # Note: need to do this the long way, since lessons may change
        # back and forth from scored to unscored. Thus, submissions
        # will not necessarily all have scores or all not have scores.
        for assessment in assessments:
            assessment['submissions'].sort(key=lambda s: s['timestamp'])
            # Unscored lessons do not submit all questions on the page
            # all-at-once; the individual questions are submitted one-by-one
            # if/when a student clicks the "check answer" button. This being
            # the case, there's no good meaning for min/max/etc. score.
            # (Theoretically, we could time-box and deduplicate submissions
            # that were submitted close together in time, but that work is not
            # economical.)
            if assessment['lesson_id'] not in unscored_lesson_ids:
                first_score = None
                last_score = None
                min_score = sys.maxint
                max_score = None
                for submission in assessment['submissions']:
                    if 'weighted_score' in submission:
                        score = submission['weighted_score']
                        if first_score is None:
                            first_score = score
                        last_score = score
                        min_score = min(min_score, score)
                        max_score = max(max_score, score)
                if first_score is not None:
                    assessment['first_score'] = first_score
                if last_score is not None:
                    assessment['last_score'] = last_score
                if max_score is not None:
                    # min_score was necessarily set whenever max_score was.
                    assessment['max_score'] = max_score
                    assessment['min_score'] = min_score
        return {'assessments': assessments}
    @classmethod
    def get_schema(cls):
        """Schema fragment describing the 'assessments' aggregate member."""
        answer = schema_fields.FieldRegistry('answer')
        answer.add_property(schema_fields.SchemaField(
            'question_id', 'Question ID', 'string'))
        answer.add_property(schema_fields.SchemaField(
            'score', 'Score', 'number', optional=True,
            description='Raw score value for this question'))
        answer.add_property(schema_fields.SchemaField(
            'weighted_score', 'Weighted Score', 'number', optional=True,
            description='Score for this question with all weights for '
            'question instance, question group, and assessment applied.'))
        answer.add_property(schema_fields.FieldArray(
            'responses', 'Responses',
            description='Responses to the question. There may be multiple '
            'responses on questions permitting them',
            item_type=schema_fields.SchemaField('response', 'Response',
                                                'string')))
        submission = schema_fields.FieldRegistry('sumbission')
        submission.add_property(schema_fields.SchemaField(
            'timestamp', 'Timestamp', 'timestamp'))
        submission.add_property(schema_fields.FieldArray(
            'answers', 'Answers', item_type=answer))
        submission.add_property(schema_fields.SchemaField(
            'weighted_score', 'Weighted Score', 'number', optional=True,
            description='Score for this assessment with all weights for '
            'question instance, question group, and assessment applied. '
            'This field will be blank for answers to questions on '
            'non-scored lessons.'))
        assessment = schema_fields.FieldRegistry('assessment')
        assessment.add_property(schema_fields.SchemaField(
            'unit_id', 'Unit ID', 'string'))
        assessment.add_property(schema_fields.SchemaField(
            'lesson_id', 'Lesson ID', 'string', optional=True))
        assessment.add_property(schema_fields.FieldArray(
            'submissions', 'Submissions', item_type=submission,
            description='Each submission of an assessment. Assessments '
            'and graded lessons will have the same list of questions in '
            'the "answers" field in each submission. In non-graded '
            'lessons, each question is checked individually, so '
            'submissions for such lessons will have only one response in '
            'each submission.'))
        assessment.add_property(schema_fields.SchemaField(
            'min_score', 'Min Score', 'number', optional=True))
        assessment.add_property(schema_fields.SchemaField(
            'max_score', 'Max Score', 'number', optional=True))
        assessment.add_property(schema_fields.SchemaField(
            'first_score', 'First Score', 'number', optional=True))
        assessment.add_property(schema_fields.SchemaField(
            'last_score', 'Last Score', 'number', optional=True))
        assessments = schema_fields.FieldArray(
            'assessments', 'Assessments', item_type=assessment,
            description='Every submission of every assessment and lesson '
            'from this student.')
        return assessments
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for analytics on course dashboard pages."""
__author__ = ['Michael Gainer (mgainer@google.com)']
import collections
import logging
import zlib
from mapreduce import context
from common import schema_fields
from common import utils as common_utils
from controllers import sites
from models import courses
from models import data_sources
from models import entities
from models import jobs
from models import models
from models import transforms
from google.appengine.api import datastore_types
from google.appengine.ext import db
class AbstractStudentAggregationComponent(object):
    """Interface for plug-ins contributing to the per-Student aggregate.

    Modules that record EventEntity data about students can surface that
    data through the data pump to BigQuery either as a standalone data
    source or by adding a section to the per-Student aggregate record.

    Contributing to the aggregate is mildly preferred: course admins then
    have a single data source to push instead of several, no cross-table
    joins need writing in BigQuery SQL, and the EventEntity table is
    scanned fewer times overall.

    Every method below may be supplied either as an instance method or as
    a @classmethod -- but when using @classmethods, all of them must be
    overridden to satisfy Python.
    """

    def get_name(self):
        """Return a short, unique name for this component.

        A name derived from __name__ would also guarantee uniqueness
        across registered components, but requiring an explicit
        get_name() keeps this interface implementable by plain instances
        as well as by classes.
        """
        raise NotImplementedError()

    def get_event_sources_wanted(self):
        """Return the EventEntity 'source' strings this component handles.

        E.g, "enter-page", "attempt-lesson" and so on.

        Returns:
            List of source-name strings; empty by default.
        """
        return []

    # pylint: disable=unused-argument
    def build_static_params(self, app_context):
        """Precompute expensive course-wide facts before the job runs.

        Invoked a single time when the map/reduce job starts, so that
        implementations can avoid regenerating costly values on every
        process_event() call. Return None when nothing needs caching.

        Args:
            app_context: A standard CB application context object.
        Returns:
            Any object, or None.
        """
        return None

    # pylint: disable=unused-argument
    def process_event(self, event, static_params):
        """Digest one EventEntity; runs in the map phase of the M/R job.

        Invoked once per event whose "source" field matches a value from
        get_event_sources_wanted(). Everything returned here for a given
        Student is later delivered, as a list, to produce_aggregate().

        Args:
            event: an EventEntity.
            static_params: whatever build_static_params() returned.
        Returns:
            Any transforms.dumps()-serializable object, or None to skip.
        """
        return None

    def produce_aggregate(self, course, student, static_params, event_items):
        """Combine mapped items for one Student; runs in the reduce phase.

        Called once for each Student in the course that generated any
        EventEntity at all. Since even registering emits events, every
        registered student is seen here -- including students for which
        this component's process_event() produced no output.

        Implementations must return a dict matching get_schema(), or None.

        Args:
            course: the Course containing the student and the events.
            student: the Student whose events were processed.
            static_params: whatever build_static_params() returned.
            event_items: list of all process_event() results for student.
        Returns:
            A dict conforming to the declared schema.
        """
        raise NotImplementedError()

    def get_schema(self):
        """Return this component's piece of the aggregate's schema.

        May be a SchemaField, FieldArray or FieldRegistry; it becomes a
        top-level member of the aggregate data source's master schema.
        """
        raise NotImplementedError()
class StudentAggregateEntity(entities.BaseEntity):
    """Holds data aggregated from Event entities for a single Student.
    As we run the registered sub-aggregators for the various event types,
    the reduce step of our master map/reduce job will be presented with
    summarized data for all events pertaining to a single Student. Rather
    than write this large volume of data out to, say, BlobStore, we instead
    prefer to write each Student's aggregated data to one record in the DB.
    Doing this permits us to use existing paginated-rest-data-source logic
    to provide the aggregated student data as a feed to the data pump."""
    # Zlib-compressed JSON dict of all components' output for one student;
    # written by StudentAggregateGenerator.reduce(), keyed by user ID.
    data = db.BlobProperty()
    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # The key name is the user ID; obfuscate it via transform_fn so the
        # entity can be exported without exposing real IDs.
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class StudentAggregateGenerator(jobs.MapReduceJob):
    """M/R job to aggregate data by student using registered plug-ins.

    This class coordinates the work of plugin classes registered with
    StudentAggregateComponentRegistry and combines their work into a single
    StudentAggregateEntity record in the datastore. Plugin classes are
    insulated from one another, and are permitted to fail individually
    without compromising the results contributed for a Student by other
    plugins.
    """

    @staticmethod
    def get_description():
        return 'student_aggregate'

    @staticmethod
    def entity_class():
        return models.EventEntity

    def build_additional_mapper_params(self, app_context):
        """Collect per-component static params and schemas for the mappers.

        Returns:
            Dict containing the course namespace, each component's JSON
            schema and top-level schema member name (keyed by component
            name), plus each component's static params, if any, keyed
            directly by component name.
        """
        schemas = {}
        schema_names = {}
        ret = {
            'course_namespace': app_context.get_namespace_name(),
            'schemas': schemas,
            'schema_names': schema_names,
        }
        for component in StudentAggregateComponentRegistry.get_components():
            component_name = component.get_name()
            static_value = component.build_static_params(app_context)
            if static_value:
                ret[component_name] = static_value
            schema = component.get_schema()
            # FieldRegistry exposes 'title'; SchemaField/FieldArray 'name'.
            if hasattr(schema, 'title'):
                schema_name = schema.title
            else:
                schema_name = schema.name
            schema_names[component_name] = schema_name
            schemas[component_name] = schema.get_json_schema_dict()
        return ret

    @staticmethod
    def map(event):
        """Map phase: fan each event out to every interested component."""
        for component in (StudentAggregateComponentRegistry.
                          get_components_for_event_source(event.source)):
            component_name = component.get_name()
            params = context.get().mapreduce_spec.mapper.params
            static_data = params.get(component_name)
            value = None
            try:
                value = component.process_event(event, static_data)
            # pylint: disable=broad-except
            except Exception as ex:
                common_utils.log_exception_origin()
                logging.critical('Student aggregation map function '
                                 'component handler %s failed: %s',
                                 component_name, str(ex))
            if value:
                # Prefix with the component name (colons are forbidden in
                # names) so reduce() can route the payload back correctly.
                value_str = '%s:%s' % (component_name, transforms.dumps(value))
                yield event.user_id, value_str

    @staticmethod
    def reduce(user_id, values):
        """Reduce phase: merge component aggregates into one DB record."""
        # Convenience for collections: Pre-load Student and Course objects.
        student = None
        try:
            student = models.Student.get_student_by_user_id(user_id)
        # pylint: disable=broad-except
        except Exception:
            common_utils.log_exception_origin()
        if not student:
            logging.warning(
                'Student for student aggregation with user ID %s '
                'was not loaded. Ignoring records for this student.', user_id)
            return
        params = context.get().mapreduce_spec.mapper.params
        ns = params['course_namespace']
        app_context = sites.get_course_index().get_app_context_for_namespace(ns)
        course = courses.Course(None, app_context=app_context)

        # Bundle items together into lists by collection name
        event_items = collections.defaultdict(list)
        for value in values:
            component_name, payload = value.split(':', 1)
            event_items[component_name].append(transforms.loads(payload))

        # Build up per-Student aggregate by calling each component. Note that
        # we call each component whether or not its mapper produced any
        # output.
        aggregate = {}
        for component in StudentAggregateComponentRegistry.get_components():
            component_name = component.get_name()
            static_value = params.get(component_name)
            value = {}
            try:
                value = component.produce_aggregate(
                    course, student, static_value,
                    event_items.get(component_name, []))
                if not value:
                    continue
            # pylint: disable=broad-except
            except Exception as ex:
                common_utils.log_exception_origin()
                logging.critical('Student aggregation reduce function '
                                 'component handler %s failed: %s',
                                 component_name, str(ex))
                continue
            schema_name = params['schema_names'][component_name]
            if schema_name not in value:
                logging.critical(
                    'Student aggregation reduce handler %s produced '
                    'a dict which does not contain the top-level '
                    'name (%s) from its registered schema.',
                    component_name, schema_name)
                continue
            variances = transforms.validate_object_matches_json_schema(
                value[schema_name], params['schemas'][component_name])
            if variances:
                logging.critical(
                    'Student aggregation reduce handler %s produced '
                    'a value which does not match its schema: %s',
                    component_name, ' '.join(variances))
                continue
            aggregate.update(value)

        # Overwrite any previous value.
        # TODO(mgainer): Consider putting records into blobstore. Some
        # light activity manually producing test data is about 10K unzipped
        # and 1K zipped. Unlikely that we'd see 1000x this amount of
        # activity, but possible eventually.
        data = zlib.compress(transforms.dumps(aggregate))
        # pylint: disable=protected-access
        if len(data) > datastore_types._MAX_RAW_PROPERTY_BYTES:
            # TODO(mgainer): Add injection and collection of counters to
            # map/reduce job. Have overridable method to verify no issues
            # occurred when job completes. If critical issues, mark job
            # as failed, even though M/R completed.
            # Bug fix: the '%d' placeholder previously had no matching
            # argument, so this log call itself raised a formatting error.
            logging.critical(
                'Aggregated compressed student data is over %d bytes; '
                'cannot store this in one field; ignoring this record!',
                datastore_types._MAX_RAW_PROPERTY_BYTES)
        else:
            StudentAggregateEntity(key_name=user_id, data=data).put()
class StudentAggregateComponentRegistry(
    data_sources.AbstractDbTableRestDataSource):
    """Registry of aggregation plug-ins; also serves as their data source."""
    # Parallel registration indices, all maintained by register_component().
    _components = []
    _components_by_name = {}
    _components_by_schema = {}
    _components_for_event_source = collections.defaultdict(list)
    @classmethod
    def get_name(cls):
        return 'student_aggregate'
    @classmethod
    def get_title(cls):
        return 'Student Aggregate'
    @classmethod
    def get_entity_class(cls):
        return StudentAggregateEntity
    @classmethod
    def required_generators(cls):
        return [StudentAggregateGenerator]
    @classmethod
    def exportable(cls):
        return True
    @classmethod
    def get_default_chunk_size(cls):
        return 100
    @classmethod
    def get_schema(cls, app_context, log, data_source_context):
        """Combine all registered components' schemas plus a user_id field."""
        ret = schema_fields.FieldRegistry('student_aggregation')
        for component in cls._components:
            ret.add_property(component.get_schema())
        if data_source_context.send_uncensored_pii_data:
            obfuscation = 'Un-Obfuscated'
        else:
            obfuscation = 'Obfuscated'
        description = (obfuscation + ' version of user ID. Usable to join '
                       'to other tables also keyed on obfuscated user ID.')
        ret.add_property(schema_fields.SchemaField(
            'user_id', 'User ID', 'string', description=description))
        return ret.get_json_schema_dict()['properties']
    @classmethod
    def _postprocess_rows(cls, app_context, data_source_context, schema,
                          log, page_number, rows):
        """Decompress stored aggregates and attach the (obfuscated) user ID."""
        if data_source_context.send_uncensored_pii_data:
            transform_fn = lambda x: x
        else:
            transform_fn = cls._build_transform_fn(data_source_context)
        ret = []
        for row in rows:
            # Rows are zlib-compressed JSON written by the generator's
            # reduce step; the datastore key name is the user ID.
            item = transforms.loads(zlib.decompress(row.data))
            item['user_id'] = transform_fn(row.key().id_or_name())
            ret.append(item)
        return ret
    @classmethod
    def get_schema_name(cls, component):
        """Return the top-level schema member name for a component.

        NOTE(review): this prefers schema.name over schema.title, while
        StudentAggregateGenerator.build_additional_mapper_params prefers
        title over name. For a schema object exposing both attributes the
        two would disagree -- confirm no schema type exposes both.
        """
        schema = component.get_schema()
        if hasattr(schema, 'name'):
            return schema.name
        return schema.title
    @classmethod
    def register_component(cls, component):
        """Register a component; rejects duplicates and colons in names."""
        component_name = component.get_name()
        if ':' in component_name:
            # Colons separate the component name from its payload in the
            # map() output format, so they cannot appear in names.
            raise ValueError('Component names may not contain colons.')
        if component_name in cls._components_by_name:
            raise ValueError(
                'There is already a student aggregation component '
                'named "%s" registered. ' % component_name)
        schema_name = cls.get_schema_name(component)
        if schema_name in cls._components_by_schema:
            raise ValueError(
                'There is already a student aggregation component schema '
                'member named "%s" registered by %s.' % (
                    schema_name,
                    cls._components_by_schema[schema_name].get_name()))
        cls._components.append(component)
        cls._components_by_name[component_name] = component
        cls._components_by_schema[schema_name] = component
        for event_source in component.get_event_sources_wanted():
            cls._components_for_event_source[event_source].append(component)
    @classmethod
    def get_components_for_event_source(cls, source):
        return cls._components_for_event_source.get(source, [])
    @classmethod
    def get_components(cls):
        return cls._components
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide a widget to record user satisfaction with course content."""
__author__ = 'John Orr (jorr@google.com)'
import os
import urlparse
import jinja2
import appengine_config
from common import schema_fields
from common import tags
from controllers import lessons
from controllers import utils
from models import courses
from models import custom_modules
from models import data_sources
from models import models
from models import transforms
from google.appengine.api import users
# URL prefix under which this module's static resources are served.
RESOURCES_PATH = '/modules/rating/resources'
# The token to namespace the XSRF token to this module
XSRF_TOKEN_NAME = 'rating'
# The "source" field to identify events recorded by this module
EVENT_SRC = 'rating-event'
# Filesystem location of this module's Jinja templates (e.g. widget.html).
TEMPLATES_DIR = os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules', 'rating', 'templates')
# Module-level handle; presumably assigned during register_module() --
# TODO confirm (assignment not visible in this chunk).
rating_module = None
class StudentRatingProperty(models.StudentPropertyEntity):
    """Entity holding one student's current rating for each content key.

    The 'value' field stores a JSON object mapping content keys to their
    most recent rating.
    """
    PROPERTY_NAME = 'student-rating-property'

    @classmethod
    def load_or_create(cls, student):
        """Fetch the student's rating record, creating an empty one if absent."""
        record = cls.get(student, cls.PROPERTY_NAME)
        if record is not None:
            return record
        record = cls.create(student, cls.PROPERTY_NAME)
        record.value = '{}'
        record.put()
        return record

    def get_rating(self, key):
        """Return the stored rating for 'key', or None if never rated."""
        return transforms.loads(self.value).get(key)

    def set_rating(self, key, value):
        """Record 'value' as the rating for 'key' (in memory; caller puts)."""
        ratings = transforms.loads(self.value)
        ratings[key] = value
        self.value = transforms.dumps(ratings)
class StudentRatingEvent(models.EventEntity):
    """Rating event that exposes only whitelisted data fields on export."""

    def for_export(self, transform_fn):
        """Strip the exported copy's data down to key/rating/comments."""
        exported = super(StudentRatingEvent, self).for_export(transform_fn)
        fields = transforms.loads(exported.data)
        exported.data = transforms.dumps({
            'key': fields['key'],
            'rating': fields['rating'],
            'additional_comments': fields['additional_comments']})
        return exported
class RatingHandler(utils.BaseRESTHandler):
    """REST handler for recording and displaying rating scores."""
    URL = '/rest/modules/rating'
    def _get_payload_and_student(self):
        """Validate the request and resolve the calling student.

        Checks, in order: the ratings feature is enabled for this course,
        the XSRF token is valid, a user is signed in, and that user is an
        enrolled student. On any failure an error response has already
        been written before returning.

        Returns:
            (payload_dict, Student) on success; (None, None) on failure.
        """
        # I18N: Message displayed to non-logged in user
        access_denied_msg = self.gettext('Access denied.')
        if not _rating__is_enabled_in_course_settings(self.app_context):
            transforms.send_json_response(self, 401, access_denied_msg, {})
            return (None, None)
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(request, XSRF_TOKEN_NAME, {}):
            # assert_xsrf_token_or_fail has already written the response.
            return (None, None)
        user = users.get_current_user()
        if user is None:
            transforms.send_json_response(self, 401, access_denied_msg, {})
            return (None, None)
        student = models.Student.get_enrolled_student_by_email(user.email())
        if student is None:
            transforms.send_json_response(self, 401, access_denied_msg, {})
            return (None, None)
        return (transforms.loads(request.get('payload')), student)
    def get(self):
        """Return the student's stored rating for the requested key."""
        payload, student = self._get_payload_and_student()
        if payload is None and student is None:
            return
        key = payload.get('key')
        prop = StudentRatingProperty.load_or_create(student)
        rating = prop.get_rating(key)
        payload_dict = {
            'key': key,
            'rating': rating
        }
        transforms.send_json_response(
            self, 200, None, payload_dict=payload_dict)
    def post(self):
        """Store a rating (if one was given) and record a rating event."""
        payload, student = self._get_payload_and_student()
        if payload is None and student is None:
            return
        key = payload.get('key')
        rating = payload.get('rating')
        additional_comments = payload.get('additional_comments')
        # Only persist the per-student rating when one was supplied; the
        # event below is recorded either way (e.g. comments-only posts).
        if rating is not None:
            prop = StudentRatingProperty.load_or_create(student)
            prop.set_rating(key, rating)
            prop.put()
        StudentRatingEvent.record(EVENT_SRC, self.get_user(), transforms.dumps({
            'key': key,
            'rating': rating,
            'additional_comments': additional_comments
        }))
        # I18N: Message displayed when user submits written comments
        thank_you_msg = self.gettext('Thank you for your feedback.')
        transforms.send_json_response(self, 200, thank_you_msg, {})
class RatingEventDataSource(data_sources.AbstractDbTableRestDataSource):
    """Data source to export all rating responses."""
    @classmethod
    def get_name(cls):
        return 'rating_events'
    @classmethod
    def get_title(cls):
        return 'Rating Events'
    @classmethod
    def get_entity_class(cls):
        return StudentRatingEvent
    @classmethod
    def exportable(cls):
        return True
    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        """Describe the exported row shape built by _postprocess_rows()."""
        reg = schema_fields.FieldRegistry('Rating Responses',
            description='Student satisfaction ratings of content')
        reg.add_property(schema_fields.SchemaField(
            'user_id', 'User ID', 'string',
            description='Student ID encrypted with a session-specific key'))
        reg.add_property(schema_fields.SchemaField(
            'recorded_on', 'Recorded On', 'datetime',
            description='Timestamp of the rating'))
        reg.add_property(schema_fields.SchemaField(
            'content_url', 'Content URL', 'string',
            description='The URL for the content being rated'))
        reg.add_property(schema_fields.SchemaField(
            'rating', 'Rating', 'string',
            description='The rating of the content'))
        reg.add_property(schema_fields.SchemaField(
            'unit_id', 'Unit ID', 'string', optional=True,
            description='The unit the content belongs to'))
        reg.add_property(schema_fields.SchemaField(
            'lesson_id', 'Lesson ID', 'string', optional=True,
            description='The lesson the content belongs to'))
        reg.add_property(schema_fields.SchemaField(
            'additional_comments', 'Additional Comments', 'string',
            optional=True,
            description='Optional extra comments provided by the student.'))
        return reg.get_json_schema_dict()['properties']
    @classmethod
    def _parse_content_url(cls, content_url):
        """Extract (unit_id, lesson_id) from a rated page's URL query string.

        An 'assessment' query parameter is treated as a unit ID. Either
        returned value may be None when absent from the URL.
        """
        unit_id = None
        lesson_id = None
        url = urlparse.urlparse(content_url)
        query = urlparse.parse_qs(url.query)
        if 'unit' in query:
            unit_id = query['unit'][0]
        elif 'assessment' in query:
            # Rating is not currently shown in assessments, but may as well be
            # future-proof
            unit_id = query['assessment'][0]
        if 'lesson' in query:
            lesson_id = query['lesson'][0]
        return unit_id, lesson_id
    @classmethod
    def _postprocess_rows(cls, unused_app_context, source_context,
                          unused_schema, unused_log, unused_page_number, rows):
        """Convert StudentRatingEvent rows into exportable dicts."""
        transform_fn = cls._build_transform_fn(source_context)
        if source_context.send_uncensored_pii_data:
            entities = [row.for_export_unsafe() for row in rows]
        else:
            entities = [row.for_export(transform_fn) for row in rows]
        data_list = []
        for entity in entities:
            entity_dict = transforms.loads(entity.data)
            # The event 'key' is the URL of the page that was rated.
            content_url = entity_dict.get('key')
            unit_id, lesson_id = cls._parse_content_url(content_url)
            data_list.append({
                'user_id': entity.user_id,
                'recorded_on': entity.recorded_on.strftime(
                    transforms.ISO_8601_DATETIME_FORMAT),
                'content_url': content_url,
                'unit_id': unit_id,
                'lesson_id': lesson_id,
                'rating': str(entity_dict.get('rating')),
                'additional_comments': entity_dict.get('additional_comments'),
            })
        return data_list
def _rating__is_enabled_in_course_settings(app_context):
    """Returns the course-level 'ratings enabled' setting (may be None)."""
    settings = app_context.get_environ()
    ratings_settings = settings.get('unit', {}).get('ratings_module', {})
    return ratings_settings.get('enabled')
def extra_content(app_context):
    """Renders the rating widget HTML, or None if it should not be shown."""
    # The widget is shown only when the course has ratings turned on.
    if not _rating__is_enabled_in_course_settings(app_context):
        return None
    # Only logged-in, enrolled students may rate content.
    user = users.get_current_user()
    if user is None:
        return None
    if models.Student.get_enrolled_student_by_email(user.email()) is None:
        return None
    template_environ = app_context.get_template_environ(
        app_context.get_current_locale(), [TEMPLATES_DIR])
    widget_template = template_environ.get_template('widget.html')
    return jinja2.Markup(widget_template.render({
        'xsrf_token': utils.XsrfTokenManager.create_xsrf_token(XSRF_TOKEN_NAME)
    }))
def get_course_settings_fields(unused_course):
    """Returns the schema field for the course-level ratings toggle."""
    description = (
        'Whether to show user rating widget at the bottom of '
        'each unit and lesson.')
    return schema_fields.SchemaField(
        'unit:ratings_module:enabled', 'Ratings widget', 'boolean',
        description=description)
def register_module():
    """Registers the student rating module with Course Builder."""

    def on_module_enabled():
        # Hook course settings, the lesson footer, and analytics export.
        courses.Course.OPTIONS_SCHEMA_PROVIDERS[
            courses.Course.SCHEMA_SECTION_UNITS_AND_LESSONS
        ].append(get_course_settings_fields)
        lessons.UnitHandler.EXTRA_CONTENT.append(extra_content)
        data_sources.Registry.register(RatingEventDataSource)

    global rating_module  # pylint: disable=global-statement
    rating_module = custom_modules.Module(
        'Student rating widget',
        'Provide a widget to record user satisfaction with course content.',
        [
            # Static assets served for the widget.
            (os.path.join(RESOURCES_PATH, 'js', '.*'), tags.JQueryHandler),
            (os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler),
        ],
        [(RatingHandler.URL, RatingHandler)],
        notify_module_enabled=on_module_enabled)
    return rating_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oauth2 module implementation.
In order to use this module with your app you must enable it in main.py by
changing
modules.oauth2.oauth2.register_module()
to
modules.oauth2.oauth2.register_module().enable()
Additionally, you must:
1. Visit https://code.google.com/apis/console. Click on API Access and create a
client id for your web app with redirect URI set to:
https://<appid>.appspot|googleplex.com/<callback_uri>
and optionally include
http://localhost:<port>/<callback_uri>
where <appid> is your app id, <callback_uri> is the oauth2 callback URI you'd
like to use, and <port> is the port you'd like to use for localhost. You can
set <port> and <callback_uri> to basically whatever you want as long as they
are unique.
2. Once you've created the client id, click Download JSON. Take the file you get
and overwrite client_secrets.json in this directory.
3. In https://code.google.com/apis/console, click on Services and enable the
services your app requires. For these demos, you'll need to enable Drive API
and Google+.
Whenever you change scopes you'll need to revoke your access tokens. You can do
this at https://accounts.google.com/b/0/IssuedAuthSubTokens.
You can find a list of the available APIs at
http://api-python-client-doc.appspot.com/.
Finally, a note about dependencies. Oauth2 requires google-api-python-client,
which you can find at https://code.google.com/p/google-api-python-client/. We
bundle version 1.1 with Course Builder. It requires httplib2, which you can find
at https://code.google.com/p/httplib2/. We bundle version 0.8 with Course
Builder.
It also requires python-gflags from https://code.google.com/p/python-gflags/. We
bundle 2.0 with Course Builder, and we've repackaged the downloadable .tar.gz as
a .zip so Python can load its contents directly from sys.path.
Good luck!
"""
__author__ = [
'johncox@google.com (John Cox)',
]
import os
import traceback
from apiclient import discovery
from oauth2client import appengine
import webapp2
from common import jinja_utils
from common import safe_dom
from models import custom_modules
# In real life we'd check in a blank file and set up the code to error with a
# message pointing people to https://code.google.com/apis/console.
# Path to the OAuth2 client secrets file downloaded from the APIs console.
_CLIENTSECRETS_JSON_PATH = os.path.join(
    os.path.dirname(__file__), 'client_secrets.json')
# Directory containing this module's Jinja templates.
_TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')
class _ErrorDecorator(object):
    """Decorator used when a real decorator cannot be created.

    Most often this is because there is no valid client_secrets.json. This
    decorator replaces the wrapped method with one that either is a no-op, or,
    if an error was given, displays the error.
    """

    def __init__(self, **kwargs):
        self.callback_path = 'not_enabled'
        self.error = kwargs.pop('error', '')

    def callback_handler(self):
        """Stub for API compatibility."""
        pass

    def oauth_required(self, unused_method):
        """Replaces the wrapped handler with one that 500s with self.error."""

        def print_error_and_return_500(
                request_handler, *unused_args, **unused_kwargs):
            # Render the error message as sanitized HTML.
            body = safe_dom.NodeList()
            body.append(
                safe_dom.Element('h1').add_text('500 internal server error'))
            body.append(safe_dom.Element('pre').add_text(self.error))
            request_handler.response.write(body.sanitized)
            request_handler.response.status = 500

        return print_error_and_return_500
# In real life we'd want to make one decorator per service because we wouldn't
# want users to have to give so many permissions.
def _build_decorator():
    """Builds a decorator for using oauth2 with webapp2.RequestHandlers.

    Returns:
        The oauth2client App Engine decorator if client_secrets.json loads
        successfully; otherwise an _ErrorDecorator that surfaces the load
        failure to the user on every decorated request.
    """
    try:
        return appengine.oauth2decorator_from_clientsecrets(
            _CLIENTSECRETS_JSON_PATH,
            scope=[
                'https://www.googleapis.com/auth/drive.readonly',
                'https://www.googleapis.com/auth/plus.login',
                'https://www.googleapis.com/auth/userinfo.email',
                'https://www.googleapis.com/auth/userinfo.profile',
            ],
            message='client_secrets.json missing')
    # Deliberately catch everything. pylint: disable=broad-except
    except Exception:
        # Bug fix: traceback.format_exc() takes an optional int *limit*, not
        # the exception instance. Passing the exception as the limit could
        # garble or suppress the formatted traceback; called with no
        # arguments it formats the exception currently being handled.
        display_error = (
            'oauth2 module enabled, but unable to load client_secrets.json. '
            'See docs in modules/oauth2.py. Original exception was:\n\n%s') % (
                traceback.format_exc())
        return _ErrorDecorator(error=display_error)
# Built once at import time; an _ErrorDecorator if client_secrets.json is bad.
_DECORATOR = _build_decorator()
class ServiceHandler(webapp2.RequestHandler):
    """Base handler that can build authorized Google API service clients."""

    def build_service(self, oauth2_decorator, name, version):
        """Returns a discovery-built client authorized via the decorator."""
        authorized_http = oauth2_decorator.credentials.authorize(
            oauth2_decorator.http())
        return discovery.build(name, version, http=authorized_http)
class _ExampleHandler(ServiceHandler):
    """Shared result-page rendering for the oauth2 demo handlers."""

    def _write_result(self, service_name, result):
        # Render the demo result template with the service name and value.
        template = jinja_utils.get_template('result.html', [_TEMPLATES_DIR])
        context = {
            'service_name': service_name,
            'result': result,
        }
        self.response.out.write(template.render(context))
class GoogleDriveHandler(_ExampleHandler):
    """Demo: shows the current user's display name via the Drive API."""

    @_DECORATOR.oauth_required
    def get(self):
        drive = self.build_service(_DECORATOR, 'drive', 'v2')
        about_info = drive.about().get().execute()
        self._write_result('Drive', about_info['user']['displayName'])
class GoogleOauth2Handler(_ExampleHandler):
    """Demo: shows the current user's name via the Oauth2 userinfo API."""

    @_DECORATOR.oauth_required
    def get(self):
        service = self.build_service(_DECORATOR, 'oauth2', 'v2')
        info = service.userinfo().get().execute()
        self._write_result('Oauth2', info['name'])
class GooglePlusHandler(_ExampleHandler):
    """Demo: shows the current user's display name via the Google+ API."""

    @_DECORATOR.oauth_required
    def get(self):
        plus = self.build_service(_DECORATOR, 'plus', 'v1')
        # This call will barf if you're logged in as @google.com because your
        # profile will not be fetchable. Log in as @gmail.com and you'll be
        # fine.
        profile = plus.people().get(userId='me').execute()
        self._write_result('Plus', profile['displayName'])
# None or custom_modules.Module. Placeholder for the module created by
# register_module; populated on first call.
module = None
def register_module():
    """Adds this module to the registry."""
    global module  # pylint: disable=global-statement
    module = custom_modules.Module(
        'Oauth2', 'Oauth2 pages',
        [
            ('/oauth2_google_drive', GoogleDriveHandler),
            ('/oauth2_google_oauth2', GoogleOauth2Handler),
            ('/oauth2_google_plus', GooglePlusHandler),
            # The decorator's callback route completes the oauth2 handshake.
            (_DECORATOR.callback_path, _DECORATOR.callback_handler()),
        ],
        [])
    return module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Notification subsystem background jobs."""
__author__ = [
'johncox@google.com (John Cox)',
]
import datetime
import logging
from common import utils as common_utils
from controllers import sites
from controllers import utils as controllers_utils
from models import utils as model_utils
from modules.notifications import notifications
from google.appengine.ext import db
from google.appengine.ext import deferred
# Module-scoped logger; basicConfig is a no-op if logging is already set up.
_LOG = logging.getLogger('modules.notifications.cron')
logging.basicConfig()
@db.transactional(xg=True)
def process_notification(notification, now, stats):
    """Re-enqueues, finalizes, or expires one in-flight notification.

    Runs in a cross-group (xg) transaction so the notification and its
    payload are mutated atomically. Every action taken is counted on *stats*.

    Args:
        notification: notifications.Notification. The item to process.
        now: datetime. The time treated as "now" for staleness checks.
        stats: _Stats. Mutable per-namespace counters, updated in place.
    """
    notification_key = notification.key()
    policy = None
    stats.started += 1
    # Treat as module-protected. pylint: disable=protected-access
    if notification._done_date:
        _LOG.info(
            'Skipping offline processing of notification with key %s; already '
            'done at %s', notification_key, notification._done_date
        )
        stats.skipped_already_done += 1
        return
    if notifications.Manager._is_still_enqueued(notification, now):
        # The deferred task may still deliver it; don't double-enqueue.
        _LOG.info(
            'Skipping offline processing of notification with key %s; still on '
            'queue (last enqueued: %s)', notification_key,
            notification._last_enqueue_date)
        stats.skipped_still_enqueued += 1
        return
    payload_key = db.Key.from_path(
        notifications.Payload.kind(),
        notifications.Payload.key_name(
            notification.to, notification.intent, notification.enqueue_date)
    )
    payload = db.get(payload_key)
    if not payload:
        _LOG.error(
            'Could not process notification with key %s; associated payload '
            'with key %s not found', notification_key, payload_key
        )
        stats.missing_payload += 1
        return
    if notifications.Manager._is_too_old_to_reenqueue(
            notification.enqueue_date, now):
        # Marking the failure permanent routes the item into the
        # finalization branch below instead of retrying forever.
        stats.too_old += 1
        exception = notifications.NotificationTooOldError((
            'Notification %s with enqueue_date %s too old to re-enqueue at %s; '
            'limit is %s days') % (
                notification_key, notification.enqueue_date, now,
                notifications._MAX_RETRY_DAYS,
            ))
        notifications.Manager._mark_failed(
            notification, now, exception, permanent=True)
    if notification._fail_date or notification._send_date:
        # Terminal state: apply the retention policy (if found) and mark done.
        policy = notifications._RETENTION_POLICIES.get(
            notification._retention_policy)
        notifications.Manager._mark_done(notification, now)
        if policy:
            policy.run(notification, payload)
            stats.policy_run += 1
        else:
            _LOG.warning(
                'Cannot apply retention policy %s to notification %s and '
                'payload %s; policy not found. Existing policies are: %s',
                notification._retention_policy, notification_key, payload_key,
                ', '.join(sorted(notifications._RETENTION_POLICIES.keys()))
            )
            stats.missing_policy += 1
        db.put([notification, payload])
    else:
        # Still pending: stamp a fresh enqueue time and put it back on the
        # deferred queue for another delivery attempt.
        notifications.Manager._mark_enqueued(notification, now)
        db.put(notification)
        deferred.defer(
            notifications.Manager._transactional_send_mail_task,
            notification_key, payload_key,
            _retry_options=notifications.Manager._get_retry_options()
        )
        stats.reenqueued += 1
class _Stats(object):
    """Per-namespace counters for one cron pass over pending notifications."""

    def __init__(self, namespace):
        self.namespace = namespace
        self.missing_payload = 0
        self.missing_policy = 0
        self.policy_run = 0
        self.reenqueued = 0
        self.skipped_already_done = 0
        self.skipped_still_enqueued = 0
        self.started = 0
        self.too_old = 0

    def __str__(self):
        template = (
            'Stats for namespace "%(namespace)s":'
            '\n\tmissing_payload: %(missing_payload)s'
            '\n\tmissing_policy: %(missing_policy)s'
            '\n\tpolicy_run: %(policy_run)s'
            '\n\tre-enqueued: %(reenqueued)s'
            '\n\tskipped_already_done: %(skipped_already_done)s'
            '\n\tskipped_still_enqueued: %(skipped_still_enqueued)s'
            '\n\tstarted: %(started)s'
            '\n\ttoo_old: %(too_old)s')
        return template % vars(self)
class ProcessPendingNotificationsHandler(controllers_utils.BaseHandler):
    """Iterates through all courses, re-enqueueing or expiring pending items.

    Only one of these jobs runs at any given time. This is enforced by App
    Engine's 10 minute limit plus scheduling this to run daily.

    However, write operations here must still be atomic because admins could
    manually visit the handler at any time.
    """

    def get(self):
        """Processes pending notifications for every course namespace."""
        self.response.headers['Content-Type'] = 'text/plain'
        now = datetime.datetime.utcnow()
        namespaces = [
            course.get_namespace_name() for course in sites.get_all_courses()]
        quoted_names = ', '.join(["'%s'" % name for name in namespaces])
        _LOG.info(
            'Begin process_pending_notifications cron; found namespaces %s at '
            '%s', quoted_names, now)
        for namespace in namespaces:
            stats = _Stats(namespace)
            _LOG.info("Begin processing notifications for namespace '%s'",
                      namespace)
            self._process_records(namespace, now, stats)
            _LOG.info('Done processing. %s', stats)

    def _process_records(self, namespace, now, stats):
        """Maps process_notification over in-flight items in one namespace."""
        with common_utils.Namespace(namespace):
            # Treating as module-protected. pylint: disable=protected-access
            pending_query = (
                notifications.Manager._get_in_process_notifications_query())
            model_utils.QueryMapper(pending_query).run(
                process_notification, now, stats)
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Notification module.
Provides Manager.send_async, which sends notifications; and Manager.query, which
queries the current status of notifications.
Notifications are transported by email. Every message you send consumes email
quota. A message is a single payload delivered to a single user. We do not
provide the entire interface email does (no CC, BCC, attachments, or HTML
bodies). Note that messages are not sent when you call Manager.send_async(), but
instead enqueued and sent later -- usually within a minute.
This module has several advantages over using App Engine's mail.send_mail()
directly.
First, we queue and retry sending messages. This happens on two levels: first,
send_async() adds items to a task queue, which retries if there are transient
failures (like the datastore being slow, or you enqueueing more messages than
App Engine's mail service can send in a minute). Second, we provide a cron that
retries message delivery for several days, so if you exhaust your daily mail
quota today we'll try again tomorrow.
The second major advantage is that we keep a record of messages sent, so you can
do analytics on them. We provide a base set of dashboards in the admin UI
showing both overall and recent notification state.
For users who are sending mail occasionally, this module smoothes away some of
the gotchas of App Engine's mail service. However, App Engine is not optimized
to be a bulk mail delivery service, so if you need to send amounts of mail in
excess of App Engine's max daily quota (1.7M messages) or minute-by-minute quota
(5k messages), you should consider using a third-party mail delivery service.
We provide a second module that allows your users to opt out of receiving email.
We strongly encourage use of that module so you don't spam people. See
modules/unsubscribe/unsubscribe.py. The general pattern for using these modules
is:
from modules.notifications import notifications
from modules.unsubscribe import unsubscribe
from google.appengine.api import users
user = users.get_current_user()
if user and not unsubscribe.has_unsubscribed(user.email):
notifications.Manager.send_async(
user.email, 'sender@example.com', 'intent', 'subject', 'body'
)
"""
__author__ = [
'johncox@google.com (John Cox)'
]
import datetime
import logging
from models import counters
from models import custom_modules
from models import entities
from models import services
from models import transforms
from models import utils
from modules import dashboard
from google.appengine.api import mail
from google.appengine.api import mail_errors
from google.appengine.api import taskqueue
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from google.appengine.ext import deferred
_LOG = logging.getLogger('modules.notifications.notifications')
logging.basicConfig()
# Mail API exceptions that retrying will never fix.
_APP_ENGINE_MAIL_FATAL_ERRORS = frozenset([
    mail_errors.BadRequestError, mail_errors.InvalidSenderError,
])
# NOTE(review): presumably widens the "still enqueued" window as a safety
# margin; the consuming code (_is_still_enqueued) is outside this excerpt.
_ENQUEUED_BUFFER_MULTIPLIER = 1.5
# NOTE(review): presumably joins components of composite datastore key
# names (e.g. Payload.key_name); confirm against the model definitions.
_KEY_DELIMITER = ':'
# Hours a notification may sit on the queue before being considered stale.
_MAX_ENQUEUED_HOURS = 3
# Days after which a notification is too old to re-enqueue (see the cron's
# use of _is_too_old_to_reenqueue / NotificationTooOldError).
_MAX_RETRY_DAYS = 3
# Number of times past which recoverable failure of send_mail() calls becomes
# hard failure. Used as a brake on runaway queues. Should be larger than the
# expected cap on the number of retries imposed by taskqueue.
_RECOVERABLE_FAILURE_CAP = 20
_SECONDS_PER_HOUR = 60 * 60
_SECONDS_PER_DAY = 24 * _SECONDS_PER_HOUR
_USECS_PER_SECOND = 10 ** 6
# Performance counters instrumenting the notification pipeline; each
# counter's second argument documents what it measures.
COUNTER_RETENTION_POLICY_RUN = counters.PerfCounter(
    'gcb-notifications-retention-policy-run',
    'number of times a retention policy was run'
)
COUNTER_SEND_ASYNC_FAILED_BAD_ARGUMENTS = counters.PerfCounter(
    'gcb-notifications-send-async-failed-bad-arguments',
    'number of times send_async failed because arguments were bad'
)
COUNTER_SEND_ASYNC_FAILED_DATASTORE_ERROR = counters.PerfCounter(
    'gcb-notifications-send-async-failed-datastore-error',
    'number of times send_async failed because of datastore error'
)
COUNTER_SEND_ASYNC_START = counters.PerfCounter(
    'gcb-notifications-send-async-called',
    'number of times send_async has been called'
)
COUNTER_SEND_ASYNC_SUCCESS = counters.PerfCounter(
    'gcb-notifications-send-async-success',
    'number of times send_async succeeded'
)
COUNTER_SEND_MAIL_TASK_FAILED = counters.PerfCounter(
    'gcb-notifications-send-mail-task-failed',
    'number of times the send mail task failed, but could be retried'
)
COUNTER_SEND_MAIL_TASK_FAILED_PERMANENTLY = counters.PerfCounter(
    'gcb-notifications-send-mail-task-failed-permanently',
    'number of times the send mail task failed permanently'
)
COUNTER_SEND_MAIL_TASK_FAILURE_CAP_EXCEEDED = counters.PerfCounter(
    'gcb-notifications-send-mail-task-recoverable-failure-cap-exceeded',
    'number of times the recoverable failure cap was exceeded'
)
COUNTER_SEND_MAIL_TASK_RECORD_FAILURE_CALLED = counters.PerfCounter(
    'gcb-notifications-send-mail-task-record-failure-called',
    'number of times _record_failure was called in the send mail task'
)
COUNTER_SEND_MAIL_TASK_RECORD_FAILURE_FAILED = counters.PerfCounter(
    'gcb-notifications-send-mail-task-record-failure-failed',
    'number of times _record_failure failed in the send mail task'
)
COUNTER_SEND_MAIL_TASK_RECORD_FAILURE_SUCCESS = counters.PerfCounter(
    'gcb-notifications-send-mail-task-record-failure-success',
    'number of times _record_failure succeeded in the send mail task'
)
COUNTER_SEND_MAIL_TASK_SENT = counters.PerfCounter(
    'gcb-notifications-send-mail-task-sent',
    'number of times the send mail task called send_mail successfully'
)
COUNTER_SEND_MAIL_TASK_SKIPPED = counters.PerfCounter(
    'gcb-notifications-send-mail-task-skipped',
    'number of times send mail task skipped sending mail'
)
COUNTER_SEND_MAIL_TASK_STARTED = counters.PerfCounter(
    'gcb-notifications-send-mail-task-started',
    'number of times the send mail task was dequeued and started')
COUNTER_SEND_MAIL_TASK_SUCCESS = counters.PerfCounter(
    'gcb-notifications-send-mail-task-success',
    'number of times send mail task completed successfully'
)
# TODO(johncox): remove suppression once stubs are implemented.
# pylint: disable=unused-argument
def _dt_to_epoch_usec(dt):
    """Converts datetime (assumed UTC) to integer epoch microseconds.

    Uses exact integer arithmetic on the timedelta components rather than
    int(total_seconds() * 10**6): total_seconds() returns a float whose
    rounding, combined with int()'s truncation, can be off by one
    microsecond.

    Args:
        dt: datetime. Naive datetime interpreted as UTC.

    Returns:
        int. Microseconds elapsed since 1970-01-01T00:00:00Z.
    """
    delta = dt - datetime.datetime.utcfromtimestamp(0)
    # 86400 seconds per day; 10**6 microseconds per second.
    return (delta.days * 86400 + delta.seconds) * 10 ** 6 + delta.microseconds
def _epoch_usec_to_dt(usec):
    """Converts microseconds since epoch int to datetime (UTC, no tzinfo)."""
    epoch = datetime.datetime.utcfromtimestamp(0)
    return epoch + datetime.timedelta(microseconds=usec)
class Error(Exception):
    """Base class for all errors raised by the notifications module."""
class NotificationTooOldError(Error):
    """Recorded on a notification by cron when it's too old to re-enqueue.

    See _MAX_RETRY_DAYS for the age limit.
    """
class RetentionPolicy(object):
    """Retention policy for notification data.

    Notification data is spread between the Notification and Payload
    objects (of which see below). Two parts of this data may be large:
    Notification.audit_trail, and Payload.body.

    We allow clients to specify a retention policy when calling
    Manager.send_async(). This retention policy is a bundle of logic
    applied after we know a notification has been sent. How and when
    the retention policy is run is up to the implementation; we make
    no guarantees except that once the notification is sent we will
    attempt run() at least once, and if it mutates its input we will
    attempt to apply those mutations at least once.

    Practically, it can be used to prevent retention of data in the
    datastore that is of no use to the client, even for audit
    purposes.

    Note that 'retention' here has nothing to do with broader user
    data privacy and retention concerns -- this is purely about
    responsible resource usage.
    """

    # String. Name used to identify the retention policy (in the datastore,
    # for example). Subclasses must override this.
    NAME = None

    @classmethod
    def run(cls, notification, payload):
        """Runs the policy, transforming notification and payload in place.

        run does not apply mutations to the backing datastore entities; it
        merely returns versions of those entities that we will later attempt to
        persist. Your transforms must not touch protected fields on
        notification or payload; those are used by the subsystem, and changing
        them can violate constraints and cause unpredictable behavior and data
        corruption.

        Args:
            notification: Notification. The notification to process.
            payload: Payload. The payload to process.
        """
        pass
class RetainAll(RetentionPolicy):
    """Policy that retains all data (inherits the no-op run())."""

    NAME = 'all'
class RetainAuditTrail(RetentionPolicy):
    """Policy that blanks Payload.body but not Notification.audit_trail."""

    NAME = 'audit_trail'

    @classmethod
    def run(cls, unused_notification, payload):
        # Drop the (potentially large) message body; the notification's
        # audit_trail is left untouched.
        payload.body = None
# Dict of string -> RetentionPolicy where key is the policy's NAME. All
# available retention policies. Policies are looked up here by the NAME
# stored on each notification when it is finalized.
_RETENTION_POLICIES = {
    RetainAll.NAME: RetainAll,
    RetainAuditTrail.NAME: RetainAuditTrail,
}
class Status(object):
    """DTO describing the delivery state of one notification."""

    FAILED = 'failed'
    PENDING = 'pending'
    SUCCEEDED = 'succeeded'

    _STATES = frozenset((FAILED, PENDING, SUCCEEDED))

    def __init__(self, to, sender, intent, enqueue_date, state):
        """Constructs a Status; state must be one of _STATES."""
        assert state in self._STATES
        self.enqueue_date = enqueue_date
        self.intent = intent
        self.sender = sender
        self.state = state
        self.to = to

    @classmethod
    def from_notification(cls, notification):
        """Derives a Status from a Notification's internal date markers."""
        state = cls.PENDING
        # Treating as module-protected. pylint: disable=protected-access
        if notification._fail_date:
            state = cls.FAILED
        elif notification._done_date:
            state = cls.SUCCEEDED
        return cls(
            notification.to, notification.sender, notification.intent,
            notification.enqueue_date, state
        )

    def __eq__(self, other):
        return (
            self.enqueue_date == other.enqueue_date and
            self.intent == other.intent and
            self.sender == other.sender and
            self.state == other.state and
            self.to == other.to
        )

    def __ne__(self, other):
        # Bug fix: Python 2 does not derive __ne__ from __eq__, so without
        # this method `a != b` fell back to identity comparison and returned
        # True even for equal Status objects.
        return not self.__eq__(other)

    def __str__(self):
        return (
            'Status - to: %(to)s, from: %(sender)s, intent: %(intent)s, '
            'enqueued: %(enqueue_date)s, state: %(state)s' % {
                'enqueue_date': self.enqueue_date,
                'intent': self.intent,
                'sender': self.sender,
                'state': self.state,
                'to': self.to,
            })
def _accumulate_statuses(notification, results):
    """Appends the notification's Status to results, keyed by recipient."""
    results.setdefault(notification.to, []).append(
        Status.from_notification(notification))
class Manager(object):
"""Manages state and operation of the notifications subsystem."""
# Treating access as module-protected. pylint: disable=protected-access
@classmethod
def query(cls, to, intent):
"""Gets the Status of notifications queued previously via send_async().
Serially performs one datastore query per user in the to list.
Args:
to: list of string. The recipients of the notification.
intent: string. Short string identifier of the intent of the
notification (for example, 'invitation' or 'reminder').
Returns:
Dict of to string -> [Status, sorted by descending enqueue date].
"""
results = {}
for address in to:
mapper = utils.QueryMapper(cls._get_query_query(address, intent))
mapper.run(_accumulate_statuses, results)
return results
    @classmethod
    def send_async(
            cls, to, sender, intent, body, subject, audit_trail=None,
            retention_policy=None):
        """Asynchronously sends a notification via email.

        Args:
            to: string. Recipient email address. Must have a valid form, but we
                cannot know that the address can actually be delivered to.
            sender: string. Email address of the sender of the
                notification. Must be a valid sender for the App Engine
                deployment at the time the deferred send_mail() call
                actually executes (meaning it cannot be the email address of
                the user currently in session, because the user will not be
                in session at call time). See
                https://developers.google.com/appengine/docs/python/mail/emailmessagefields.
            intent: string. Short string identifier of the intent of the
                notification (for example, 'invitation' or 'reminder'). Each
                kind of notification you are sending should have its own
                intent. Used when creating keys in the index; values that
                cause the resulting key to be >500B will fail. May not
                contain a colon.
            body: string. The data payload of the notification. Must fit in a
                datastore entity.
            subject: string. Subject line for the notification.
            audit_trail: JSON-serializable object. An optional audit trail that,
                when used with the default retention policy, will be
                retained even after the body is scrubbed from the datastore.
            retention_policy: RetentionPolicy. The retention policy to use for
                data after a Notification has been sent. By default, we
                retain the audit_trail but not the body.

        Returns:
            (notification_key, payload_key). A 2-tuple of datastore keys for the
            created notification and payload.

        Raises:
            Exception: if values delegated to model initializers are invalid.
            ValueError: if to or sender are malformed according to App Engine
                (note that well-formed values do not guarantee success).
        """
        COUNTER_SEND_ASYNC_START.inc()
        enqueue_date = datetime.datetime.utcnow()
        # Default policy keeps the audit trail but scrubs the body post-send.
        retention_policy = (
            retention_policy if retention_policy else RetainAuditTrail)
        for email in (to, sender):
            if not mail.is_email_valid(email):
                COUNTER_SEND_ASYNC_FAILED_BAD_ARGUMENTS.inc()
                raise ValueError('Malformed email address: "%s"' % email)
        if retention_policy.NAME not in _RETENTION_POLICIES:
            COUNTER_SEND_ASYNC_FAILED_BAD_ARGUMENTS.inc()
            raise ValueError('Invalid retention policy: ' +
                             str(retention_policy))
        try:
            # pylint: disable=unbalanced-tuple-unpacking,unpacking-non-sequence
            notification, payload = cls._make_unsaved_models(
                audit_trail, body, enqueue_date, intent, retention_policy.NAME,
                sender, subject, to,
            )
        except Exception, e:
            # Model validation failures count as bad arguments.
            COUNTER_SEND_ASYNC_FAILED_BAD_ARGUMENTS.inc()
            raise e
        cls._mark_enqueued(notification, enqueue_date)
        try:
            # pylint: disable=unbalanced-tuple-unpacking,unpacking-non-sequence
            notification_key, payload_key = cls._save_notification_and_payload(
                notification, payload,
            )
        except Exception, e:
            COUNTER_SEND_ASYNC_FAILED_DATASTORE_ERROR.inc()
            raise e
        # Actual delivery happens later on the task queue.
        deferred.defer(
            cls._transactional_send_mail_task, notification_key, payload_key,
            _retry_options=cls._get_retry_options())
        COUNTER_SEND_ASYNC_SUCCESS.inc()
        return notification_key, payload_key
@classmethod
def _make_unsaved_models(
cls, audit_trail, body, enqueue_date, intent, retention_policy, sender,
subject, to):
notification = Notification(
audit_trail=audit_trail, enqueue_date=enqueue_date, intent=intent,
_retention_policy=retention_policy, sender=sender, subject=subject,
to=to,
)
payload = Payload(
body=body, enqueue_date=enqueue_date, intent=intent, to=to,
_retention_policy=retention_policy,
)
return notification, payload
    @classmethod
    @db.transactional(xg=True)
    def _save_notification_and_payload(cls, notification, payload):
        # xg=True lets the put span entity groups so the pair is saved
        # atomically.
        return db.put([notification, payload])
    @classmethod
    def _send_mail_task(
            cls, notification_key, payload_key, test_send_mail_fn=None):
        """Task-queue worker that attempts delivery of one notification.

        Loads the notification/payload pair, sends the email, and records the
        outcome. Raising a plain exception re-queues the task for retry;
        raising deferred.PermanentTaskFailure stops retries.

        Args:
            notification_key: db.Key. Key of the Notification to deliver.
            payload_key: db.Key. Key of the matching Payload.
            test_send_mail_fn: function. Test-only replacement for
                mail.send_mail.
        """
        exception = None
        failed_permanently = False
        now = datetime.datetime.utcnow()
        # pylint: disable=unbalanced-tuple-unpacking,unpacking-non-sequence
        notification, payload = db.get([notification_key, payload_key])
        # pylint: enable=unbalanced-tuple-unpacking,unpacking-non-sequence
        send_mail_fn = (
            test_send_mail_fn if test_send_mail_fn else mail.send_mail)
        sent = False
        COUNTER_SEND_MAIL_TASK_STARTED.inc()
        if not notification:
            COUNTER_SEND_MAIL_TASK_FAILED_PERMANENTLY.inc()
            raise deferred.PermanentTaskFailure(
                'Notification missing: ' + str(notification_key)
            )
        if not payload:
            COUNTER_SEND_MAIL_TASK_FAILED_PERMANENTLY.inc()
            raise deferred.PermanentTaskFailure(
                'Payload missing: ' + str(payload_key)
            )
        policy = _RETENTION_POLICIES.get(notification._retention_policy)
        if not policy:
            COUNTER_SEND_MAIL_TASK_FAILED_PERMANENTLY.inc()
            raise deferred.PermanentTaskFailure(
                'Unknown retention policy: ' + notification._retention_policy
            )
        if (cls._done(notification) or cls._failed(notification) or
                cls._sent(notification)):
            # Already finalized by an earlier attempt; nothing to do.
            COUNTER_SEND_MAIL_TASK_SKIPPED.inc()
            COUNTER_SEND_MAIL_TASK_SUCCESS.inc()
            return
        if notification._recoverable_failure_count > _RECOVERABLE_FAILURE_CAP:
            # Brake on runaway queues: convert repeated recoverable failures
            # into a single permanent failure.
            message = (
                'Recoverable failure cap (%s) exceeded for notification with '
                'key %s'
            ) % (_RECOVERABLE_FAILURE_CAP, str(notification.key()))
            _LOG.error(message)
            permanent_failure = deferred.PermanentTaskFailure(message)
            try:
                COUNTER_SEND_MAIL_TASK_RECORD_FAILURE_CALLED.inc()
                cls._record_failure(
                    notification, payload, permanent_failure, dt=now,
                    permanent=True, policy=policy
                )
                COUNTER_SEND_MAIL_TASK_RECORD_FAILURE_SUCCESS.inc()
            # Must be vague. pylint: disable=broad-except
            except Exception, e:
                _LOG.error(
                    cls._get_record_failure_error_message(
                        notification, payload, e)
                )
                COUNTER_SEND_MAIL_TASK_RECORD_FAILURE_FAILED.inc()
            COUNTER_SEND_MAIL_TASK_FAILED_PERMANENTLY.inc()
            COUNTER_SEND_MAIL_TASK_FAILURE_CAP_EXCEEDED.inc()
            raise permanent_failure
        try:
            send_mail_fn(
                notification.sender, notification.to, notification.subject,
                payload.body
            )
            sent = True
        # Must be vague. pylint: disable=broad-except
        except Exception, exception:
            failed_permanently = cls._is_send_mail_error_permanent(exception)
            if not failed_permanently:
                # Record the recoverable failure, then re-raise so taskqueue
                # retries delivery later.
                try:
                    COUNTER_SEND_MAIL_TASK_RECORD_FAILURE_CALLED.inc()
                    cls._record_failure(notification, payload, exception)
                    COUNTER_SEND_MAIL_TASK_RECORD_FAILURE_SUCCESS.inc()
                # Must be vague. pylint: disable=broad-except
                except Exception, e:
                    _LOG.error(
                        cls._get_record_failure_error_message(
                            notification, payload, exception
                        )
                    )
                    COUNTER_SEND_MAIL_TASK_RECORD_FAILURE_FAILED.inc()
                _LOG.error(
                    ('Recoverable error encountered when processing '
                     'notification task; will retry. Error was: ' +
                     str(exception))
                )
                COUNTER_SEND_MAIL_TASK_FAILED.inc()
                # Set by except: clause above. pylint: disable=raising-bad-type
                raise exception
        if sent:
            cls._mark_sent(notification, now)
        if failed_permanently:
            cls._mark_failed(notification, now, exception, permanent=True)
        if sent or failed_permanently:
            # Terminal outcome either way: apply the retention policy, mark
            # the item done, and persist both entities.
            policy.run(notification, payload)
            cls._mark_done(notification, now)
            db.put([notification, payload])
            COUNTER_RETENTION_POLICY_RUN.inc()
            if sent:
                COUNTER_SEND_MAIL_TASK_SENT.inc()
            elif failed_permanently:
                COUNTER_SEND_MAIL_TASK_FAILED_PERMANENTLY.inc()
            COUNTER_SEND_MAIL_TASK_SUCCESS.inc()
@classmethod
@db.transactional(
    propagation=datastore_rpc.TransactionOptions.INDEPENDENT, xg=True)
def _record_failure(
    cls, notification, payload, exception, dt=None, permanent=False,
    policy=None):
    """Marks failure data on entities in an external transaction.

    IMPORTANT: because we're using
    datastore_rpc.TransactionOptions.INDEPENDENT, mutations on notification
    and payload here are *not* transactionally consistent in the
    caller. Consequently, callers must not read or mutate them after calling
    this method.

    The upside is that this allows us to record failure data on entities
    inside a transaction, and that transaction can throw without rolling
    back these mutations.

    Args:
        notification: Notification. The notification to mutate.
        payload: Payload. The payload to mutate.
        exception: Exception. The exception that prompted the mutation.
        dt: datetime. notification_fail_time and notification._done_time
            to record if permanent is True.
        permanent: boolean. If True, the notification will be marked done
            and the retention policy will be run.
        policy: RetentionPolicy. The retention policy to apply if permanent
            was True.

    Returns:
        (notification_key, payload_key) 2-tuple.
    """
    # Count every recorded failure, including the one that turns out to be
    # permanent.
    notification._recoverable_failure_count += 1
    cls._mark_failed(notification, dt, exception, permanent=permanent)
    if permanent:
        # A permanent failure is terminal: the caller must supply the done
        # timestamp and the retention policy to apply right now.
        assert dt and policy
        cls._mark_done(notification, dt)
        policy.run(notification, payload)
        COUNTER_RETENTION_POLICY_RUN.inc()
    return db.put([notification, payload])
@classmethod
def _get_record_failure_error_message(
    cls, notification, payload, exception):
    """Builds the log message used when recording a failure itself fails."""
    template = (
        'Unable to record failure for notification with key %s and payload '
        'with key %s; encountered %s error with text: "%s"')
    return template % (
        str(notification.key()), str(payload.key()),
        exception.__class__.__name__, str(exception))
@classmethod
def _transactional_send_mail_task(cls, notification_key, payload_key):
    """Runs _send_mail_task inside a cross-group datastore transaction."""
    # Can't use the @db.transactional decorator because of taskqueue
    # serialization.
    options = db.create_transaction_options(xg=True)
    db.run_in_transaction_options(
        options, cls._send_mail_task, notification_key, payload_key)
@classmethod
def _done(cls, notification):
    """Whether processing of the notification has fully finished."""
    # _done_date is a datetime (always truthy) or None, so an identity
    # test against None is equivalent to the truthiness check.
    return notification._done_date is not None
@classmethod
def _failed(cls, notification):
    """Whether the notification has failed permanently."""
    return notification._fail_date is not None
@classmethod
def _get_in_process_notifications_query(cls):
    """Query for notifications not yet done, most recently enqueued first."""
    query = Notification.all()
    query.filter('%s =' % Notification._done_date.name, None)
    query.order('-' + Notification.enqueue_date.name)
    return query
@classmethod
def _get_query_query(cls, to, intent):
    """Query for one recipient's notifications of one intent, newest first."""
    query = Notification.all()
    query.filter(Notification.to.name, to)
    query.filter(Notification.intent.name, intent)
    query.order('-' + Notification.enqueue_date.name)
    return query
@classmethod
def _get_last_exception_value(cls, exception):
    """Serializes an exception into the dict stored in _last_exception."""
    exception_class = exception.__class__
    qualified_name = '%s.%s' % (
        exception_class.__module__, exception_class.__name__)
    return {
        'type': qualified_name,
        'string': str(exception),
    }
@classmethod
def _get_retry_options(cls):
    """Builds retry options for the deferred send-mail task.

    Retry up to once every hour with exponential backoff; limit tasks to
    three hours; cron will re-enqueue them for days. This is because the
    purpose of the queue is retrying in case of transient errors
    (datastore or send_mail burbles), and the purpose of the cron is
    retrying in case of longer errors (quota exhaustion).
    """
    return taskqueue.TaskRetryOptions(
        task_age_limit=cls._get_task_age_limit_seconds(),
        min_backoff_seconds=1,
        max_backoff_seconds=_SECONDS_PER_HOUR,
        # Overflow the task age limit; we don't want underflow.
        max_doublings=12,
    )
@classmethod
def _get_task_age_limit_seconds(cls):
    """Maximum number of seconds a task may live on the deferred queue."""
    return _SECONDS_PER_HOUR * _MAX_ENQUEUED_HOURS
@classmethod
def _is_too_old_to_reenqueue(cls, dt, now):
    """True when dt is more than _MAX_RETRY_DAYS before now."""
    age = now - dt
    return age > datetime.timedelta(days=_MAX_RETRY_DAYS)
@classmethod
def _is_send_mail_error_permanent(cls, exception):
    # Exact type membership, not isinstance(): presumably only the listed
    # App Engine mail errors (and not their subclasses) are meant to be
    # fatal — confirm before changing this to isinstance.
    return type(exception) in _APP_ENGINE_MAIL_FATAL_ERRORS
@classmethod
def _is_still_enqueued(cls, notification, dt):
    """Whether or not an item is still on the deferred queue.

    This isn't exact -- we can't query the queue. We can know how long items
    can be on the queue, so we can make a guess. Our guess has false
    positives: there is clock skew between datastore and taskqueue, and
    false negatives are terrible because they cause multiple messages to get
    sent. Consequently, we consider items that were last enqueued slightly
    too long ago to still be on the queue. This can cause re-enqueueing of
    some items to get delayed by one cron interval. We ameliorate this a bit
    by checking for side-effects of the dequeue (_done|fail|send_date set).

    Args:
        notification: Notification. The notification to check status of.
        dt: datetime, assumed UTC. The datetime to check enqueued status at.

    Returns:
        Boolean. False if the item has never been enqueued, or was enqueued
        long enough ago we're sure it's no longer on the queue, or has
        already been processed (indicating it's been enqueued and
        dequeued). True otherwise.
    """
    # Side-effects of a dequeue, or never having been enqueued at all,
    # mean the item cannot still be on the queue.
    if (notification._done_date or notification._fail_date or
            notification._send_date) or not notification._last_enqueue_date:
        return False
    # The buffer multiplier must *widen* the window (favoring false
    # positives, per the docstring above), so it scales the task age
    # limit, not the elapsed time. Scaling the elapsed time would shrink
    # the effective window to limit / multiplier and invite false
    # negatives, which cause duplicate sends.
    elapsed_seconds = (
        dt - notification._last_enqueue_date).total_seconds()
    return elapsed_seconds < (
        cls._get_task_age_limit_seconds() * _ENQUEUED_BUFFER_MULTIPLIER)
@classmethod
def _mark_done(cls, notification, dt):
    # Records that processing fully finished (success or permanent
    # failure). Mutates only; does not save the entity.
    notification._done_date = dt
@classmethod
def _mark_enqueued(cls, notification, dt):
    # Records when the notification was last put on the deferred queue.
    # Mutates only; does not save the entity.
    notification._last_enqueue_date = dt
@classmethod
def _mark_failed(cls, notification, dt, exception, permanent=False):
    # Always record the most recent exception; only stamp the terminal
    # failure date when the failure is permanent. Mutates only; does not
    # save the entity.
    notification._last_exception = cls._get_last_exception_value(exception)
    if permanent:
        notification._fail_date = dt
@classmethod
def _mark_sent(cls, notification, dt):
    # Records when a send_mail call completed. Mutates only; does not save
    # the entity.
    notification._send_date = dt
@classmethod
def _sent(cls, notification):
    """Whether a send_mail call has completed for the notification."""
    return notification._send_date is not None
class _IntentProperty(db.StringProperty):
    """Property that holds intent strings."""

    @classmethod
    def _get_message(cls, value):
        return 'Intent "%s" cannot contain "%s"' % (value, _KEY_DELIMITER)

    @classmethod
    def check(cls, value):
        """Raises ValueError if value contains the key delimiter."""
        if _KEY_DELIMITER in value:
            raise ValueError(cls._get_message(value))

    def validate(self, value):
        """StringProperty validation plus the intent delimiter rule."""
        value = super(_IntentProperty, self).validate(value)
        # Same rule check() enforces, surfaced as the datastore's
        # validation error type.
        if _KEY_DELIMITER in value:
            raise db.BadValueError(self._get_message(value))
        return value
class _SerializedProperty(db.Property):
    """Custom property that stores JSON-serialized data."""

    def __init__(self, *args, **kwargs):
        # Force indexed=False. If indexed is unset it defaults to True;
        # indexed values are capped at 500 bytes and longer values throw
        # during db.put(). We want to support larger values rather than
        # searching, and we do not want this to be a TextProperty because
        # the underlying type is not db.Text.
        if kwargs.get('indexed'):
            raise ValueError('_SerializedProperty does not support indexing')
        kwargs['indexed'] = False
        super(_SerializedProperty, self).__init__(*args, **kwargs)

    def get_value_for_datastore(self, model_instance):
        """JSON-encodes the value on its way into the datastore."""
        raw_value = super(
            _SerializedProperty, self
        ).get_value_for_datastore(model_instance)
        return transforms.dumps(raw_value)

    def make_value_from_datastore(self, value):
        """JSON-decodes the stored string on its way out of the datastore."""
        return transforms.loads(value)

    def validate(self, value):
        """Rejects values that cannot be JSON-serialized."""
        value = super(_SerializedProperty, self).validate(value)
        try:
            transforms.dumps(value)
        except TypeError as e:
            raise db.BadValueError(
                '%s is not JSON-serializable; error was "%s"' % (value, e))
        return value
class _Model(entities.BaseEntity):
    """Abstract base model that handles key calculation."""

    # String. Template used in key generation.
    _KEY_TEMPLATE = (
        '(%(kind)s%(delim)s%(to)s%(delim)s%(intent)s%(delim)s%(enqueue_date)s)'
    )

    # When the record was enqueued in client code.
    enqueue_date = db.DateTimeProperty(required=True)
    # String indicating the intent of the notification. Intents are used to
    # group and index notifications. Used in key formation; may not contain a
    # colon.
    intent = _IntentProperty(required=True)
    # Email address used to compose the To:. May house only one value. Subject
    # to the restrictions of the underlying App Engine mail library; see the to
    # field in
    # https://developers.google.com/appengine/docs/python/mail/emailmessagefields.
    to = db.StringProperty(required=True)
    # When the record was last changed.
    _change_date = db.DateTimeProperty(auto_now=True, required=True)
    # RetentionPolicy.NAME string. Identifier for the retention policy for the
    # Payload.
    _retention_policy = db.StringProperty(
        required=True, choices=_RETENTION_POLICIES.keys())

    def __init__(self, *args, **kwargs):
        # Identity is derived entirely from (to, intent, enqueue_date), so
        # a caller-supplied key_name could contradict the derived one.
        assert 'key_name' not in kwargs, (
            'Setting key_name manually not supported')
        kwargs['key_name'] = self.key_name(
            self._require_kwarg('to', kwargs),
            self._require_kwarg('intent', kwargs),
            self._require_kwarg('enqueue_date', kwargs))
        super(_Model, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, to, intent, enqueue_date):
        """Composes the deterministic key name for (to, intent, enqueue_date)."""
        _IntentProperty.check(intent)
        return cls._KEY_TEMPLATE % {
            'delim': _KEY_DELIMITER,
            'enqueue_date': _dt_to_epoch_usec(enqueue_date),
            'intent': intent,
            'kind': cls.kind().lower(),
            'to': to,
        }

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Rebuilds db_key with the 'to' component run through transform_fn."""
        # First split component is the kind, which key_name re-derives.
        _, unsafe_to, intent, serialized_dt = cls._split_key_name(db_key.name())
        return db.Key.from_path(
            cls.kind(), cls.key_name(
                transform_fn(unsafe_to), intent,
                _epoch_usec_to_dt(int(serialized_dt))))

    @classmethod
    def _split_key_name(cls, key_name):
        # Strip the wrapping parens added by _KEY_TEMPLATE, then split on
        # the delimiter.
        return key_name[1:-1].split(_KEY_DELIMITER)

    def _require_kwarg(self, name, kwargs):
        """Gets kwarg with given name or dies."""
        value = kwargs.get(name)
        assert value, 'Missing required property: ' + name
        return value
class Notification(_Model):
    """A single outbound email notification and its delivery lifecycle."""

    # Audit trail of JSON-serializable data. By default Payload.body is deleted
    # when it is no longer needed. If you need information for audit purposes,
    # pass it here, and the default retention policy will keep it.
    audit_trail = _SerializedProperty()
    # Email address used to compose the From:. Subject to the sender
    # restrictions of the underlying App Engine mail library; see the sender
    # field in
    # https://developers.google.com/appengine/docs/python/mail/emailmessagefields.
    sender = db.StringProperty(required=True)
    # Subject line of the notification.
    subject = db.TextProperty(required=True)
    # When processing the record fully finished, meaning that the record will
    # never be processed by the notification subsystem again. None if the record
    # is still in flight. Indicates that the record has either succeeded or
    # failed and its retention policy has been applied.
    _done_date = db.DateTimeProperty()
    # When processing of the record failed and will no longer be retried. None
    # if this has not happened. Does not indicate the retention policy has been
    # applied; see _done_date.
    _fail_date = db.DateTimeProperty()
    # When the notification was last placed on the deferred queue.
    _last_enqueue_date = db.DateTimeProperty()
    # JSON representation of the last recordable exception encountered while
    # processing the notification. Format is
    # {'type': type_str, 'string': str(exception)}.
    _last_exception = _SerializedProperty()
    # Number of recoverable failures we've had for this notification.
    _recoverable_failure_count = db.IntegerProperty(required=True, default=0)
    # When a send_mail call finished for the record and we recorded it in the
    # datastore. May be None if this has not yet happened. Does not indicate
    # the retention policy has been applied; see _done_date.
    _send_date = db.DateTimeProperty()

    # Properties withheld from data exports — presumably consumed by
    # entities.BaseEntity's export machinery; confirm against that class.
    _PROPERTY_EXPORT_BLACKLIST = [audit_trail, _last_exception, subject]

    def for_export(self, transform_fn):
        """Returns an export copy with to/sender run through transform_fn."""
        model = super(Notification, self).for_export(transform_fn)
        model.to = transform_fn(model.to)
        model.sender = transform_fn(model.sender)
        return model
class Payload(_Model):
    """The data payload of a Notification.

    We extract this data from Notification to increase the total size budget
    available to the user, which is capped at 1MB/entity.
    """

    # Body of the payload.
    body = db.TextProperty()

    # Withheld from data exports.
    _PROPERTY_EXPORT_BLACKLIST = [body]

    def __init__(self, *args, **kwargs):
        super(Payload, self).__init__(*args, **kwargs)
        # Validate the intent eagerly so a bad value fails at construction
        # time rather than later at db.put() time.
        _IntentProperty().validate(kwargs.get('intent'))
# Module handle populated by register_module(); None until registration.
custom_module = None


def register_module():
    """Registers the module with the Registry."""

    def on_module_enabled():
        # Allow this module's view templates to be edited as text assets in
        # the dashboard filer.
        dashboard.filer.ALLOWED_ASSET_TEXT_BASES = (
            dashboard.filer.ALLOWED_ASSET_TEXT_BASES.union(
                ['views/notifications']))

    def on_module_disabled():
        # Undo the on_module_enabled() addition.
        dashboard.filer.ALLOWED_ASSET_TEXT_BASES = (
            dashboard.filer.ALLOWED_ASSET_TEXT_BASES.difference(
                ['views/notifications']))

    global custom_module  # pylint: disable=global-statement

    # Avert circular dependency. pylint: disable=g-import-not-at-top
    from modules.notifications import cron
    from modules.notifications import stats

    stats.register_analytic()
    cron_handlers = [(
        '/cron/process_pending_notifications',
        cron.ProcessPendingNotificationsHandler
    )]
    custom_module = custom_modules.Module(
        'Notifications', 'Student notification management system.',
        cron_handlers,
        [],
        notify_module_disabled=on_module_disabled,
        notify_module_enabled=on_module_enabled
    )

    class Service(services.Notifications):
        """Facade exposing Manager operations via the services registry."""

        def enabled(self):
            return custom_module.enabled

        def query(self, to, intent):
            return Manager.query(to, intent)

        def send_async(
            self, to, sender, intent, body, subject, audit_trail=None,
            retention_policy=None):
            return Manager.send_async(
                to, sender, intent, body, subject, audit_trail=audit_trail,
                retention_policy=retention_policy)

    services.notifications = Service()
    return custom_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stats generation for the notifications module."""
__author__ = [
'johncox@google.com (John Cox)',
]
import datetime
from models import analytics
from models import data_sources
from models import jobs
from modules.dashboard import tabs
from modules.notifications import notifications
_SERIALIZED_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
class _Result(object):
    """Aggregated notification counts, overall and per recency bin."""

    # Treating as module-protected. pylint: disable=protected-access

    def __init__(self, now):
        self.now = now
        self.last_hour = _Bin('hour', now - datetime.timedelta(hours=1))
        self.last_day = _Bin('day', now - datetime.timedelta(days=1))
        self.last_week = _Bin('week', now - datetime.timedelta(days=7))
        self.bins = [self.last_hour, self.last_day, self.last_week]
        totals = {'all': 0}
        for state in notifications.Status._STATES:
            totals[state] = 0
        self._totals = totals

    def add(self, state, dt):
        """Counts one notification in the totals and each matching bin."""
        # Datastore values may no longer be found in code; silently
        # discard if so.
        if state not in notifications.Status._STATES:
            return
        self._totals['all'] += 1
        self._totals[state] += 1
        for recency_bin in self.bins:
            if dt > recency_bin.cutoff:
                recency_bin.add(state)

    def failed(self):
        return self._totals[notifications.Status.FAILED]

    def pending(self):
        return self._totals[notifications.Status.PENDING]

    def succeeded(self):
        return self._totals[notifications.Status.SUCCEEDED]

    def total(self):
        return self._totals['all']
class _Bin(object):
    """Per-state notification counts within one recency window."""

    def __init__(self, name, cutoff):
        self.name = name
        self.cutoff = cutoff
        # Treating as module-protected. pylint: disable=protected-access
        self._data = dict.fromkeys(notifications.Status._STATES, 0)

    def add(self, state):
        self._data[state] += 1

    def failed(self):
        return self._data[notifications.Status.FAILED]

    def pending(self):
        return self._data[notifications.Status.PENDING]

    def succeeded(self):
        return self._data[notifications.Status.SUCCEEDED]

    def total(self):
        return sum(self._data.values())
class CountsGenerator(jobs.MapReduceJob):
    """Map/reduce job grouping notification enqueue dates by status."""

    @staticmethod
    def get_description():
        return 'notification'

    def entity_class(self):
        return notifications.Notification

    @staticmethod
    def map(notification):
        """Emits (state, serialized_enqueue_date) for one notification."""
        # Notification declares enqueue_date; there is no _enqueue_date
        # attribute, so the original read would raise AttributeError.
        # Serialize with the shared format so fill_values() can strptime()
        # the value back (str() would drop '.%f' for zero microseconds).
        yield (
            notifications.Status.from_notification(notification).state,
            notification.enqueue_date.strftime(_SERIALIZED_DATETIME_FORMAT)
        )

    @staticmethod
    def reduce(key, values):
        # One (state, [serialized_dates]) row per state.
        yield key, values
class NotificationsDataSource(data_sources.SynchronousQuery):
    """Feeds CountsGenerator results into the stats template."""

    @staticmethod
    def fill_values(app_context, template_values, job):
        """Builds a _Result from the job output into template_values."""
        now = datetime.datetime.utcnow()
        result = _Result(now)
        # Job rows are (state_name, [serialized_create_date, ...]).
        for state_name, create_dates in jobs.MapReduceJob.get_results(job):
            for create_date in create_dates:
                result.add(
                    state_name, datetime.datetime.strptime(
                        create_date, _SERIALIZED_DATETIME_FORMAT))
        template_values.update({'result': result})

    @staticmethod
    def required_generators():
        return [CountsGenerator]
def register_analytic():
    """Registers the notifications visualization on the analytics tab."""
    data_sources.Registry.register(NotificationsDataSource)
    tab_name = 'notifications'
    tab_title = 'Notifications'
    tabs.Registry.register(
        'analytics', tab_name, tab_title,
        [analytics.Visualization(
            tab_name, tab_title, 'stats.html',
            data_source_classes=[NotificationsDataSource])])
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide the capability to add extra links or text to the main navbar."""
__author__ = 'John Orr (jorr@google.com)'
import os
import appengine_config
from common import schema_fields
from controllers import utils
from models import courses
from models import custom_modules
from models import models
from google.appengine.api import users
# Course-settings key holding the list of extra tab definitions.
EXTRA_TABS_KEY = 'extra_tabs'
# Keys of the per-tab settings dict.
LABEL_KEY = 'label'
URL_KEY = 'url'
CONTENT_KEY = 'content'
POSITION_KEY = 'position'
VISIBILITY_KEY = 'visibility'
# Allowed values for POSITION_KEY.
POS_LEFT = 'left'
POS_RIGHT = 'right'
# Allowed values for VISIBILITY_KEY.
VIS_ALL = 'all'
VIS_STUDENT = 'student'

# Location of this module's Jinja templates.
TEMPLATES_DIR = os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules', 'extra_tabs', 'templates')
class ExtraTabHandler(utils.BaseHandler):
    """Renders the content page for one extra navbar tab."""

    URL = 'modules/extra_tabs/render'
    INDEX_QUERY_PARAM = 'index'

    def get(self):
        """Serves the tab content selected by the ?index= query parameter."""
        env = courses.Course.get_environ(self.get_course().app_context)
        tab_list = env['course'].get(EXTRA_TABS_KEY, [])
        try:
            index = int(self.request.get(self.INDEX_QUERY_PARAM))
            tab_data = tab_list[index]
        except (IndexError, TypeError, ValueError):
            # The index is user-supplied; a missing, non-numeric, or
            # out-of-range value is a 404, not an unhandled exception.
            self.error(404)
            return
        if not _is_visible(tab_data, self.get_student()):
            return
        self.template_value['navbar'] = {}
        self.template_value['content'] = tab_data['content']
        self.render('extra_tab_page.html', additional_dirs=[TEMPLATES_DIR])
def options_schema_provider(unused_course):
    """Builds the course-settings schema for the list of extra tabs."""
    # Schema for one tab entry.
    extra_tab_type = schema_fields.FieldRegistry(
        'Extra Tab',
        extra_schema_dict_values={'className': 'settings-list-item'})
    extra_tab_type.add_property(schema_fields.SchemaField(
        LABEL_KEY, 'Label', 'string', optional=True,
        description='The tab to be shown on the navbar.'))
    extra_tab_type.add_property(schema_fields.SchemaField(
        POSITION_KEY, 'Tab Position', 'string', optional=True, i18n=False,
        select_data=[(POS_LEFT, 'Left'), (POS_RIGHT, 'Right')]))
    extra_tab_type.add_property(schema_fields.SchemaField(
        VISIBILITY_KEY, 'Visibility', 'string', optional=True, i18n=False,
        select_data=[
            (VIS_ALL, 'Everyone'), (VIS_STUDENT, 'Registered students')]))
    extra_tab_type.add_property(schema_fields.SchemaField(
        URL_KEY, 'Tab URL', 'string', optional=True,
        description='If a URL is provided, the tab will link to that URL. '
        'Otherwise it will display the "tab content" in a page.'))
    extra_tab_type.add_property(schema_fields.SchemaField(
        CONTENT_KEY, 'Tab Content', 'html', optional=True))
    # The course setting itself is an array of the entry type above.
    return schema_fields.FieldArray(
        'course:' + EXTRA_TABS_KEY, 'Extra tabs',
        item_type=extra_tab_type,
        description=(
            'Extra tabs to appear on the course navbar.'),
        extra_schema_dict_values={
            'className': 'settings-list wide',
            'listAddLabel': 'Add a tab',
            'listRemoveLabel': 'Delete tab'})
def _get_current_student():
    """Returns the enrolled Student for the signed-in user, or None."""
    user = users.get_current_user()
    if user is None:
        return None
    return models.Student.get_enrolled_student_by_email(user.email())
def _is_visible(tab_data, student):
    """True unless the tab is student-only and the viewer isn't enrolled."""
    if tab_data.get(VISIBILITY_KEY) == VIS_STUDENT:
        return student is not None and student.is_enrolled
    return True
def _get_links(app_context, pos):
    """Collects (url, label) pairs for visible tabs at one navbar position."""
    env = courses.Course.get_environ(app_context)
    student = _get_current_student()
    links = []
    for tab_index, tab_data in enumerate(env['course'].get(EXTRA_TABS_KEY, [])):
        if tab_data.get(POSITION_KEY) != pos:
            continue
        if not _is_visible(tab_data, student):
            continue
        # Tabs without an explicit URL render their content via
        # ExtraTabHandler, addressed by list index.
        url = tab_data.get(URL_KEY) or '%s?%s=%s' % (
            ExtraTabHandler.URL, ExtraTabHandler.INDEX_QUERY_PARAM,
            tab_index)
        links.append((url, tab_data.get(LABEL_KEY)))
    return links
def left_links(app_context):
    """Returns (url, label) pairs for tabs on the left of the navbar."""
    return _get_links(app_context, POS_LEFT)
def right_links(app_context):
    """Returns (url, label) pairs for tabs on the right of the navbar."""
    return _get_links(app_context, POS_RIGHT)
# Module handle populated by register_module(); None until registration.
extra_tabs_module = None


def register_module():
    """Registers the extra-tabs module with the module registry."""

    def on_module_enabled():
        # Hook the settings schema into the course homepage section and the
        # link providers into both sides of the navbar.
        courses.Course.OPTIONS_SCHEMA_PROVIDERS.setdefault(
            courses.Course.SCHEMA_SECTION_HOMEPAGE, []).append(
                options_schema_provider)
        utils.CourseHandler.LEFT_LINKS.append(left_links)
        utils.CourseHandler.RIGHT_LINKS.append(right_links)

    def on_module_disabled():
        # Undo the on_module_enabled() hooks.
        courses.Course.OPTIONS_SCHEMA_PROVIDERS.setdefault(
            courses.Course.SCHEMA_SECTION_HOMEPAGE, []).remove(
                options_schema_provider)
        utils.CourseHandler.LEFT_LINKS.remove(left_links)
        utils.CourseHandler.RIGHT_LINKS.remove(right_links)

    global_routes = []
    namespaced_routes = [
        ('/' + ExtraTabHandler.URL, ExtraTabHandler)]

    global extra_tabs_module  # pylint: disable=global-statement
    extra_tabs_module = custom_modules.Module(
        'Extra Navbar Tabs',
        'Add tabs to the main navbar.',
        global_routes, namespaced_routes,
        notify_module_disabled=on_module_disabled,
        notify_module_enabled=on_module_enabled)
    return extra_tabs_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage Certificates.
Course creators will need to customize both the appearance of the certificate,
and also the logic used to determine when it has been earned by a student.
The qualification logic can be customized by:
* using the designated user interface in course settings
* editing the course.yaml file
* adding Python code to custom_criteria.py
The appearance of the certificate can be customized either system-wide, or else
on a course-by-course basis. To customize the certificate appearance
system-wide, edit the file templates/certificate.html in this module.
To make a course-specific certificate, upload a file named "certificate.html"
into the View Templates section of the Dashboard > Assets tab. Images and
resources used by this file should also be uploaded in Dashboard > Assets.
"""
__author__ = [
'Saifu Angto (saifu@google.com)',
'John Orr (jorr@google.com)']
import os
import StringIO
from mapreduce import context
from reportlab.lib import pagesizes
from reportlab.lib.units import inch
from reportlab.pdfgen import canvas
import appengine_config
from common import safe_dom
from common import schema_fields
from common import tags
from controllers import sites
from controllers import utils
from models import analytics
from models import courses
from models import custom_modules
from models import data_sources
from models import jobs
from models import models
from modules.analytics import student_aggregate
from modules.certificate import custom_criteria
from modules.dashboard import course_settings
from modules.dashboard import tabs
# Course-relative URL paths for the HTML and PDF certificate pages.
CERTIFICATE_HANDLER_PATH = 'certificate'
CERTIFICATE_PDF_HANDLER_PATH = 'certificate.pdf'
# Base URL for this module's static resources.
RESOURCES_PATH = '/modules/certificate/resources'
class ShowCertificateHandler(utils.BaseHandler):
    """Handler for student to print course certificate."""

    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        if not student_is_qualified(student, self.get_course()):
            # Unqualified students are bounced back to the course home page.
            self.redirect('/')
            return
        environ = self.app_context.get_environ()
        # Course-specific certificate.html uploaded as an asset overrides
        # this module's default template.
        templates_dir = os.path.join(
            appengine_config.BUNDLE_ROOT, 'modules', 'certificate', 'templates')
        template = self.get_template('certificate.html', [templates_dir])
        self.response.out.write(template.render({
            'student': student,
            'course': environ['course']['title'],
            'google_analytics_id': environ['course'].get('google_analytics_id')
        }))
class ShowCertificatePdfHandler(utils.BaseHandler):
    """Handler for student to download the course certificate as a PDF."""

    def _print_cert(self, out, course, student):
        """Draws the certificate for student into the out stream as a PDF."""
        c = canvas.Canvas(out, pagesize=pagesizes.landscape(pagesizes.LETTER))
        c.setTitle('Course Builder Certificate')
        # Draw the background image.
        image_path = os.path.join(
            appengine_config.BUNDLE_ROOT,
            'modules', 'certificate', 'resources', 'images', 'cert.png')
        # Open in binary mode and close deterministically: the original
        # leaked the file handle and read the PNG in text mode.
        with open(image_path, 'rb') as image_file:
            image_data = image_file.read()
        image = canvas.ImageReader(StringIO.StringIO(image_data))
        c.drawImage(
            image, 0, -1.5 * inch, width=11 * inch, preserveAspectRatio=True)
        # Lay out the localized certificate text.
        text = c.beginText()
        text.setTextOrigin(0.5 * inch, 4.5 * inch)
        text.setFont('Helvetica', 40)
        text.setFillColorRGB(75.0 / 255, 162.0 / 255, 65.0 / 255)
        text.textLine(self.gettext('Certificate of Completion'))
        text.setTextOrigin(0.5 * inch, 4.0 * inch)
        text.setFillColorRGB(0.4, 0.4, 0.4)
        text.setFont('Helvetica', 20)
        text.textLine(self.gettext('Presented to'))
        text.setTextOrigin(0.5 * inch, 2.3 * inch)
        text.textLine(self.gettext('for successfully completing the'))
        text.textLine(self.gettext('%(course)s course') % {'course': course})
        c.drawText(text)
        # Rule line with the student's name centered above it.
        c.setStrokeColorRGB(0.8, 0.8, 0.8)
        c.setLineWidth(0.1)
        c.line(0.5 * inch, 3.0 * inch, 10.5 * inch, 3.0 * inch)
        c.setFont('Helvetica', 24)
        c.setFillColorRGB(0.4, 0.4, 0.4)
        c.drawCentredString(5.0 * inch, 3.1 * inch, student.name)
        c.showPage()
        c.save()

    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        if not student_is_qualified(student, self.get_course()):
            self.redirect('/')
            return
        course = courses.Course.get_environ(self.app_context)['course']['title']
        self.response.headers['Content-Type'] = 'application/pdf'
        self.response.headers['Content-Disposition'] = (
            'attachment; filename=certificate.pdf')
        self._print_cert(self.response.out, course, student)
def _get_score_by_id(score_list, assessment_id):
    """Returns the score dict whose 'id' matches assessment_id, else None."""
    wanted = str(assessment_id)
    return next(
        (score for score in score_list if score['id'] == wanted), None)
def _prepare_custom_criterion(custom, student, course):
    """Validates a custom criterion name and returns its check callable."""
    assert hasattr(custom_criteria, custom), ((
        'custom criterion %s is not implemented '
        'as a function in custom_criteria.py.') % custom)
    assert (custom in custom_criteria.registration_table), ((
        'Custom criterion %s is not whitelisted '
        'in the registration_table in custom_criteria.py.') % custom)

    def _check_custom_criterion():
        # Normalize whatever the registered function returns to a boolean.
        return bool(getattr(custom_criteria, custom)(student, course))

    return _check_custom_criterion
def _prepare_assessment_criterion(score_list, criterion):
    """Validates an assessment criterion and returns its check callable.

    Args:
        score_list: list of score dicts as returned by course scoring.
        criterion: dict with 'assessment_id' and optional 'pass_percent'.

    Returns:
        A zero-argument callable returning True if the student meets the
        criterion.
    """
    score = _get_score_by_id(score_list, criterion['assessment_id'])
    assert score is not None, (
        'Invalid assessment id %s.' % criterion['assessment_id'])
    pass_percent = criterion.get('pass_percent', '')
    # Compare to the '' sentinel with !=, not 'is not': identity tests
    # against string literals depend on interning and are a SyntaxWarning
    # on CPython >= 3.8.
    if pass_percent != '':
        # Must be machine graded.
        assert not score['human_graded'], (
            'If pass_percent is provided, '
            'the assessment must be machine graded.')
        pass_percent = float(pass_percent)
        assert (pass_percent >= 0.0) and (pass_percent <= 100.0), (
            'pass_percent must be between 0 and 100.')
    else:
        # Must be peer graded.
        assert score['human_graded'], (
            'If pass_percent is not provided, '
            'the assessment must be human graded.')

    def _check_assessment_criterion():
        if not score['completed']:
            return False
        if pass_percent != '':
            return score['score'] >= pass_percent
        return True

    return _check_assessment_criterion
def student_is_qualified(student, course):
    """Determines whether the student has met criteria for a certificate.

    Args:
        student: models.models.Student. The student entity to test.
        course: models.courses.Course. The course which the student is
            enrolled in.

    Returns:
        True if the student is qualified, False otherwise.
    """
    environ = course.app_context.get_environ()
    score_list = course.get_all_scores(student)
    if not environ.get('certificate_criteria'):
        return False
    criteria_functions = []
    # First validate the correctness of _all_ provided criteria.
    for criterion in environ['certificate_criteria']:
        assessment_id = criterion.get('assessment_id', '')
        custom = criterion.get('custom_criteria', '')
        # Compare to the '' sentinel with equality, not identity: 'is'
        # against a string literal relies on interning and is a
        # SyntaxWarning on CPython >= 3.8.
        assert (assessment_id != '') or (custom != ''), (
            'assessment_id and custom_criteria cannot be both empty.')
        if custom != '':
            criteria_functions.append(
                _prepare_custom_criterion(custom, student, course))
        elif assessment_id != '':
            criteria_functions.append(
                _prepare_assessment_criterion(score_list, criterion))
        else:
            assert False, 'Invalid certificate criterion %s.' % criterion
    # All criteria are valid, now do the checking.
    for criterion_function in criteria_functions:
        if not criterion_function():
            return False
    return True
def get_certificate_table_entry(handler, student, course):
    """Returns a (title, content) pair for the student progress table.

    Content is a node list linking to the certificate pages when the
    student qualifies; otherwise an explanatory message string.
    """
    # I18N: Title of section on page showing certificates for course completion.
    title = handler.gettext('Certificate')
    if student_is_qualified(student, course):
        nl = safe_dom.NodeList()
        nl.append(
            safe_dom.A(
                CERTIFICATE_HANDLER_PATH
            ).add_text(
                # I18N: Label on control to navigate to page showing certificate
                handler.gettext('Click for certificate'))
        ).append(
            safe_dom.Text(' | ')
        ).append(
            safe_dom.A(
                CERTIFICATE_PDF_HANDLER_PATH
            ).add_text(
                # I18N: Link for a PDF.
                handler.gettext('Download PDF'))
        )
        return (title, nl)
    else:
        return (
            title,
            # I18N: Text indicating student has not yet completed a course.
            handler.gettext(
                'You have not yet met the course requirements for a '
                'certificate of completion.'))
def get_criteria_editor_schema(course):
    """Builds the settings-editor schema for certificate award criteria."""
    criterion_type = schema_fields.FieldRegistry(
        'Criterion',
        extra_schema_dict_values={'className': 'settings-list-item'})
    select_data = [('default', '-- Select requirement --'), (
        '', '-- Custom criterion --')]
    for unit in course.get_assessment_list():
        select_data.append((unit.unit_id, unit.title + (
            ' [Peer Graded]' if course.needs_human_grader(unit) else '')))
    criterion_type.add_property(schema_fields.SchemaField(
        'assessment_id', 'Requirement', 'string', optional=True,
        # The JS will only reveal the following description
        # for peer-graded assessments
        description='When specifying a peer graded assessment as criterion, '
        'the student should complete both the assessment '
        'and the minimum of peer reviews.',
        select_data=select_data,
        extra_schema_dict_values={
            'className': 'inputEx-Field assessment-dropdown'}))
    criterion_type.add_property(schema_fields.SchemaField(
        'pass_percent', 'Passing Percentage', 'string', optional=True,
        extra_schema_dict_values={
            'className': 'pass-percent'}))
    select_data = [('', '-- Select criterion method--')] + [(
        x, x) for x in custom_criteria.registration_table]
    criterion_type.add_property(schema_fields.SchemaField(
        'custom_criteria', 'Custom Criterion', 'string', optional=True,
        select_data=select_data,
        extra_schema_dict_values={
            'className': 'custom-criteria'}))
    # Marks which assessments are peer graded — presumably consumed by the
    # editor's JS (see the description note above); confirm against the
    # editor code. bool() replaces the redundant 'True if x else False'.
    is_peer_assessment_table = {}
    for unit in course.get_assessment_list():
        is_peer_assessment_table[unit.unit_id] = bool(
            course.needs_human_grader(unit))
    return schema_fields.FieldArray(
        'certificate_criteria', 'Certificate criteria',
        item_type=criterion_type,
        description='Certificate award criteria. Add the criteria which '
        'students must meet to be awarded a certificate of completion. '
        'In order to receive a certificate, '
        'the student must meet all the criteria.',
        extra_schema_dict_values={
            'is_peer_assessment_table': is_peer_assessment_table,
            'className': 'settings-list',
            'listAddLabel': 'Add a criterion',
            'listRemoveLabel': 'Delete criterion'})
# Counter names emitted by CertificatesEarnedGenerator.map and read back by
# CertificatesEarnedDataSource.
TOTAL_CERTIFICATES = 'total_certificates'
TOTAL_ACTIVE_STUDENTS = 'total_active_students'
TOTAL_STUDENTS = 'total_students'
class CertificatesEarnedGenerator(jobs.AbstractCountingMapReduceJob):
    """Map/reduce job counting certificates earned across all students."""

    @staticmethod
    def get_description():
        # Human-readable job name.
        return 'certificates earned'

    def build_additional_mapper_params(self, app_context):
        # The mapper runs outside a normal request, so hand it the namespace
        # explicitly; map() uses it to rebuild the course context.
        return {'course_namespace': app_context.get_namespace_name()}

    @staticmethod
    def entity_class():
        # Iterate over every Student entity.
        return models.Student

    @staticmethod
    def map(student):
        """Emits (counter_name, 1) pairs for each student examined."""
        params = context.get().mapreduce_spec.mapper.params
        ns = params['course_namespace']
        app_context = sites.get_course_index().get_app_context_for_namespace(ns)
        course = courses.Course(None, app_context=app_context)
        if student_is_qualified(student, course):
            yield(TOTAL_CERTIFICATES, 1)
        if student.scores:
            # "Active" means the student has at least one recorded score.
            yield(TOTAL_ACTIVE_STUDENTS, 1)
        yield(TOTAL_STUDENTS, 1)
class CertificatesEarnedDataSource(data_sources.SynchronousQuery):
    """Exposes CertificatesEarnedGenerator results to the analytics UI."""

    @staticmethod
    def required_generators():
        return [CertificatesEarnedGenerator]

    @classmethod
    def get_name(cls):
        return 'certificates_earned'

    @classmethod
    def get_title(cls):
        return 'Certificates Earned'

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        """Describes the single row of aggregate counts this source emits."""
        reg = schema_fields.FieldRegistry(
            'Certificates Earned',
            description='Scalar values aggregated over entire course giving '
            'counts of certificates earned/not-yet-earned. Only one row will '
            'ever be returned from this data source.')
        reg.add_property(schema_fields.SchemaField(
            TOTAL_STUDENTS, 'Total Students', 'integer',
            description='Total number of students in course'))
        reg.add_property(schema_fields.SchemaField(
            TOTAL_CERTIFICATES, 'Total Certificates', 'integer',
            description='Total number of certificates earned'))
        reg.add_property(schema_fields.SchemaField(
            TOTAL_ACTIVE_STUDENTS, 'Total Active Students', 'integer',
            description='Number of "active" students. These are students who '
            'have taken at least one assessment. Note that it is not likely '
            'that a student has achieved a certificate without also being '
            'considered "active".'))
        return reg.get_json_schema_dict()['properties']

    @staticmethod
    def fill_values(app_context, template_values, certificates_earned_job):
        """Copies job results into template_values, defaulting to zeros."""
        # Set defaults so the template renders even before the m/r job has
        # produced any results.
        template_values.update({
            TOTAL_CERTIFICATES: 0,
            TOTAL_ACTIVE_STUDENTS: 0,
            TOTAL_STUDENTS: 0,
        })
        # Override with actual values from m/r job, if present.
        template_values.update(
            jobs.MapReduceJob.get_results(certificates_earned_job))
def register_analytic():
    """Wires the certificates data source into the analytics dashboard tab."""
    data_sources.Registry.register(CertificatesEarnedDataSource)
    name = 'certificates_earned'
    title = 'Certificates Earned'
    certificates_earned = analytics.Visualization(
        name, title, 'certificates_earned.html',
        data_source_classes=[CertificatesEarnedDataSource])
    tabs.Registry.register('analytics', name, title, [certificates_earned])
class CertificateAggregator(
        student_aggregate.AbstractStudentAggregationComponent):
    """Adds certificate status to the per-student aggregate data source."""

    @classmethod
    def get_name(cls):
        return 'certificate'

    @classmethod
    def get_event_sources_wanted(cls):
        # Status is computed from the Student record, not from events.
        return []

    @classmethod
    def build_static_params(cls, unused_app_context):
        return None

    @classmethod
    def process_event(cls, event, static_params):
        # No events consumed, so nothing to process.
        return None

    @classmethod
    def produce_aggregate(cls, course, student, unused_static_params,
                          unused_event_items):
        """Returns this component's contribution to the student aggregate."""
        return {'earned_certificate': student_is_qualified(student, course)}

    @classmethod
    def get_schema(cls):
        return schema_fields.SchemaField(
            'earned_certificate', 'Earned Certificate', 'boolean',
            description='Whether the student has earned a course completion '
            'certificate based on the criteria in place when this fact was '
            'generated.')
# Module registration handle; populated by register_module().
custom_module = None


def register_module():
    """Registers this module in the registry."""

    def on_module_enabled():
        register_analytic()
        # The criteria editor uses a repeated-field (list) widget.
        course_settings.CourseSettingsRESTHandler.REQUIRED_MODULES.append(
            'inputex-list')
        courses.Course.OPTIONS_SCHEMA_PROVIDERS[
            courses.Course.SCHEMA_SECTION_COURSE].append(
                get_criteria_editor_schema)
        # Serve this module's templates, CSS, and JS on the settings page.
        course_settings.CourseSettingsHandler.ADDITIONAL_DIRS.append(
            os.path.dirname(__file__))
        course_settings.CourseSettingsHandler.EXTRA_CSS_FILES.append(
            'course_settings.css')
        course_settings.CourseSettingsHandler.EXTRA_JS_FILES.append(
            'course_settings.js')
        utils.StudentProfileHandler.EXTRA_STUDENT_DATA_PROVIDERS.append(
            get_certificate_table_entry)
        student_aggregate.StudentAggregateComponentRegistry.register_component(
            CertificateAggregator)

    def on_module_disabled():
        # Mirror image of on_module_enabled: unhook everything added there.
        course_settings.CourseSettingsRESTHandler.REQUIRED_MODULES.remove(
            'inputex-list')
        courses.Course.OPTIONS_SCHEMA_PROVIDERS[
            courses.Course.SCHEMA_SECTION_COURSE].remove(
                get_criteria_editor_schema)
        course_settings.CourseSettingsHandler.ADDITIONAL_DIRS.remove(
            os.path.dirname(__file__))
        course_settings.CourseSettingsHandler.EXTRA_CSS_FILES.remove(
            'course_settings.css')
        course_settings.CourseSettingsHandler.EXTRA_JS_FILES.remove(
            'course_settings.js')
        utils.StudentProfileHandler.EXTRA_STUDENT_DATA_PROVIDERS.remove(
            get_certificate_table_entry)

    global_routes = [
        (os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler)]
    namespaced_routes = [
        ('/' + CERTIFICATE_HANDLER_PATH, ShowCertificateHandler),
        ('/' + CERTIFICATE_PDF_HANDLER_PATH, ShowCertificatePdfHandler)]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Show Certificate',
        'A page to show student certificate.',
        global_routes, namespaced_routes,
        notify_module_disabled=on_module_disabled,
        notify_module_enabled=on_module_enabled)
    return custom_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File specifying custom certificate criteria functions.
Course authors may specify custom criteria for award of a certificate.
In order to be invoked, all of the following must all apply:
* The function name is specified as a custom_criteria field
in the certificate_criteria group of course.yaml.
* The function name is added to the registration_table whitelist below.
* The function is defined in this module.
The arguments and return type of the function are described in
example_custom_criterion below.
"""
__author__ = 'Glenn De Jonghe (gdejonghe@google.com)'
from models import transforms
# List of str. Holds whitelist of function names which may be invoked by
# the certificate_criteria > custom_criteria fields in course.yaml.
# Only functions named here (and defined in this module) can run.
registration_table = ['example_custom_criterion', 'power_searching_criteria']
def example_custom_criterion(unused_student, unused_course):
    """Example of what a custom criterion function should look like.

    Adapt or insert new functions with the same signature for custom
    criteria, and add the function's name to registration_table if it is
    an actual criterion. This example awards a certificate to every
    student in the course.

    Args:
        unused_student: models.models.Student. The student entity to test.
        unused_course: models.courses.Course. The course which the student
            is enrolled in. Test on this to implement course-specific
            criteria for earning a certificate.

    Returns:
        Boolean value indicating whether the student satisfies the
        criterion.
    """
    return True
def power_searching_criteria(student, unused_course):
    """Criterion for Power Searching with Google.

    The student earns a certificate by scoring above 66 on the final
    assessment (unit id 'Fin').
    """
    score_map = transforms.loads(student.scores or '{}')
    return score_map.get('Fin', 0) > 66
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ETL jobs for the i18n dashboard."""
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
import os
import sys
import zipfile
from babel import localedata
from common import utils as common_utils
from models import courses
from modules.i18n_dashboard import i18n_dashboard
from tools.etl import etl_lib
_LOG = logging.getLogger('coursebuilder.tools.etl')
def _die(message):
_LOG.critical(message)
sys.exit(1)
class _BaseJob(etl_lib.Job):
    """Common plumbing for i18n ETL jobs: argument handling and course setup."""

    @classmethod
    def _add_locales_argument(cls, parser):
        """Adds the optional --locales argument to an ETL job parser."""
        parser.add_argument(
            '--locales', default=[], type=lambda s: s.split(','),
            help='Comma-delimited list of locales (for example, "af") to '
            'export. If omitted, all locales except the default locale for the '
            'course will be exported. Passed locales must exist for the '
            'specified course.')

    @classmethod
    def _check_file_exists(cls, path):
        """Dies unless path exists on disk."""
        if not os.path.exists(path):
            _die('File does not exist: ' + path)

    @classmethod
    def _check_file_does_not_exist(cls, path):
        """Dies if path already exists (avoids clobbering output files)."""
        if os.path.exists(path):
            _die('File already exists: ' + path)

    @classmethod
    def _get_app_context_or_die(cls, course_url_prefix):
        """Resolves a course app_context from its URL prefix, or dies."""
        app_context = etl_lib.get_context(course_url_prefix)
        if not app_context:
            _die('Unable to find course with url prefix ' + course_url_prefix)
        return app_context

    @classmethod
    def _get_locales(
            cls, requested_locales, all_locales, default_locale,
            course_url_prefix):
        """Returns the sorted list of locales to operate on.

        Args:
            requested_locales: list of str. Locales the user asked for; may
                be empty, meaning "all non-default locales".
            all_locales: list of str. Every locale defined for the course.
            default_locale: str. The course's base locale.
            course_url_prefix: str. Used only for error messages.

        Returns:
            Sorted list of locale strings. Dies if any requested locale is
            not defined for the course.
        """
        # We do not want to include the base locale in this list, because
        # it is not something people can delete with this tool, and we do not
        # want it in the output .zipfile because we don't want users to upload
        # it.
        #
        # Bug fix: filter a copy instead of calling all_locales.remove().
        # The original mutated the caller's list in place and raised
        # ValueError if the default locale happened to be absent.
        all_locales = sorted(l for l in all_locales if l != default_locale)
        if not requested_locales:
            return all_locales
        all_locales = set(all_locales)
        requested_locales = set(requested_locales)
        missing_locales = requested_locales - all_locales
        if missing_locales:
            _die(
                'Requested locale%s %s not found for course at %s. Choices '
                'are: %s' % (
                    's' if len(missing_locales) > 1 else '',
                    ', '.join(sorted(missing_locales)), course_url_prefix,
                    ', '.join(sorted(all_locales))))
        return sorted(requested_locales)

    def run(self):
        """Override run() and setup app_context, course and a namespace."""
        # ETL import model is complex; run this import here not to interfere.
        from controllers import sites

        app_context = self._get_app_context_or_die(
            self.etl_args.course_url_prefix)
        course = courses.Course(None, app_context=app_context)
        sites.set_path_info(app_context.slug)
        courses.Course.set_current(course)
        try:
            with common_utils.Namespace(app_context.get_namespace_name()):
                super(_BaseJob, self).run()
        finally:
            # Always restore global state, even if the job dies.
            courses.Course.clear_current()
            sites.unset_path_info()
class DeleteTranslations(_BaseJob):
    """Deletes translations from a course based on locales.

    Usage for deleting all locales:
      sh scripts/etl.sh run modules.i18n_dashboard.jobs.DeleteTranslations \
        /target_course appid servername

    To delete specific locales:
      sh scripts/etl.sh run modules.i18n_dashboard.jobs.DeleteTranslations \
        /target_course appid servername \
        --job_args='--locales=en_US,fr'
    """

    def _configure_parser(self):
        self._add_locales_argument(self.parser)

    def main(self):
        app_context = self._get_app_context_or_die(
            self.etl_args.course_url_prefix)
        # No --locales means "every locale except the course default".
        locales = self._get_locales(
            self.args.locales, app_context.get_all_locales(),
            app_context.default_locale, self.etl_args.course_url_prefix)
        i18n_dashboard.TranslationDeletionRestHandler.delete_locales(
            courses.Course.get(app_context), locales)
class DownloadTranslations(_BaseJob):
    """Downloads .zip of .po files of translations.

    Usage for downloading all locales:
      sh scripts/etl.sh run modules.i18n_dashboard.jobs.DownloadTranslations \
        /target_course appid servername \
        --job_args='/tmp/download.zip'

    To download specific locales:
      sh scripts/etl.sh run modules.i18n_dashboard.jobs.DownloadTranslations \
        /target_course appid servername \
        --job_args='/tmp/download.zip --locales=en_US,fr'
    """

    # Values for the --export flag.
    _EXPORT_ALL = 'all'
    _EXPORT_NEW = 'new'
    _EXPORT_CHOICES = frozenset([
        _EXPORT_ALL,
        _EXPORT_NEW,
    ])

    def _configure_parser(self):
        self.parser.add_argument(
            'path', type=str, help='Path of the file to save output to')
        self.parser.add_argument(
            '--export', choices=self._EXPORT_CHOICES, default=self._EXPORT_ALL,
            type=str, help='What translation strings to export. Choose '
            '"%(new)s" to get items that are new or have out-of-date '
            'translations; choose "%(all)s" to get all items. Default: '
            '"%(all)s"' % ({
                'all': self._EXPORT_ALL,
                'new': self._EXPORT_NEW,
            }))
        self._add_locales_argument(self.parser)

    def main(self):
        app_context = self._get_app_context_or_die(
            self.etl_args.course_url_prefix)
        # Fail fast before doing any expensive work.
        self._check_file_does_not_exist(self.args.path)
        locales = self._get_locales(
            self.args.locales, app_context.get_all_locales(),
            app_context.default_locale, self.etl_args.course_url_prefix)
        download_handler = i18n_dashboard.TranslationDownloadRestHandler
        # Bug fix: honor the --export flag. The original passed the literal
        # 'all' here, silently ignoring --export=new.
        translations = download_handler.build_translations(
            courses.Course.get(app_context), locales, self.args.export)
        if not translations:
            _die(
                'No translations found for course at %s; exiting' % (
                    self.etl_args.course_url_prefix))
        with open(self.args.path, 'w') as f:
            i18n_dashboard.TranslationDownloadRestHandler.build_zip_file(
                courses.Course.get(app_context), f, translations, locales)
        _LOG.info('Translations saved to ' + self.args.path)
class TranslateToReversedCase(_BaseJob):
    """Translates a specified course to rEVERSED cASE.

    Useful for testing i18n coverage: every translatable string becomes
    visibly "translated" without needing a real translator.

    Usage.
      sh scripts/etl.sh run \
        modules.i18n_dashboard.jobs.TranslateToReversedCase \
        /target_course appid servername
    """

    def main(self):
        app_context = self._get_app_context_or_die(
            self.etl_args.course_url_prefix)
        i18n_dashboard.I18nReverseCaseHandler.translate_course(
            courses.Course.get(app_context))
class UploadTranslations(_BaseJob):
    """Uploads .po or .zip file containing translations.

    Usage:
      sh scripts/etl.sh run modules.i18n_dashboard.jobs.UploadTranslations \
        /target_course appid servername \
        --job_args='/tmp/file.zip'
    """

    _PO_EXTENSION = '.po'
    _ZIP_EXTENSION = '.zip'
    _EXTENSIONS = frozenset([
        _PO_EXTENSION,
        _ZIP_EXTENSION,
    ])
    _UPLOAD_HANDLER = i18n_dashboard.TranslationUploadRestHandler

    def _configure_parser(self):
        self.parser.add_argument(
            'path', type=str, help='.zip or .po file containing translations. '
            'If a .zip file is given, its internal structure is unimportant; '
            'all .po files it contains will be processed. We do no validation '
            'on file contents.')

    def main(self):
        # Validate extension and existence before touching the course.
        self._check_file(self.args.path)
        app_context = self._get_app_context_or_die(
            self.etl_args.course_url_prefix)
        extension = self._get_file_extension(self.args.path)
        course = courses.Course.get(app_context)
        self._configure_babel(course)
        # _check_file() above guarantees extension is one of these two, so
        # `translations` is always bound here.
        if extension == self._PO_EXTENSION:
            translations = self._process_po_file(self.args.path)
        elif extension == self._ZIP_EXTENSION:
            translations = self._process_zip_file(self.args.path)
        # Add the locales being uploaded to the UI.
        environ = course.get_environ(app_context)
        extra_locales = environ.setdefault('extra_locales', [])
        for locale in translations:
            if not any(
                    l[courses.Course.SCHEMA_LOCALE_LOCALE] == locale
                    for l in extra_locales):
                # New locales start out unavailable so an admin must
                # explicitly publish them.
                extra_locales.append({
                    courses.Course.SCHEMA_LOCALE_LOCALE: locale,
                    courses.Course.SCHEMA_LOCALE_AVAILABILITY: (
                        courses.Course.SCHEMA_LOCALE_AVAILABILITY_UNAVAILABLE)})
        course.save_settings(environ)
        # Make updates to the translations
        self._update_translations(course, translations)

    @classmethod
    def _check_file(cls, path):
        """Dies unless path exists and has a supported extension."""
        extension = cls._get_file_extension(path)
        if not extension or extension not in cls._EXTENSIONS:
            _die(
                'Invalid file extension: "%s". Choices are: %s' % (
                    extension, ', '.join(sorted(cls._EXTENSIONS))))
        cls._check_file_exists(path)

    @classmethod
    def _configure_babel(cls, course):
        """Preloads the babel locale data this job will need."""
        with common_utils.ZipAwareOpen():
            # Internally, babel uses the 'en' locale, and we must configure it
            # before we make babel calls.
            localedata.load('en')
            # Also load the course's default language.
            localedata.load(course.default_locale)

    @classmethod
    def _get_file_extension(cls, path):
        # Includes the leading dot (e.g. '.po'); '' if there is none.
        return os.path.splitext(path)[-1]

    @classmethod
    def _process_po_file(cls, po_file_path):
        """Parses one .po file into the handler's translations structure."""
        translations = cls._UPLOAD_HANDLER.build_translations_defaultdict()
        with open(po_file_path) as f:
            cls._UPLOAD_HANDLER.parse_po_file(translations, f.read())
        return translations

    @classmethod
    def _process_zip_file(cls, zip_file_path):
        """Parses every .po member of a .zip into one translations structure."""
        zf = zipfile.ZipFile(zip_file_path, 'r', allowZip64=True)
        translations = cls._UPLOAD_HANDLER.build_translations_defaultdict()
        for zipinfo in zf.infolist():
            # Skip non-.po members; directory layout is irrelevant.
            if cls._get_file_extension(zipinfo.filename) != cls._PO_EXTENSION:
                continue
            _LOG.info('Processing ' + zipinfo.filename)
            po_contents = zf.read(zipinfo.filename)
            cls._UPLOAD_HANDLER.parse_po_file(translations, po_contents)
        zf.close()
        return translations

    @classmethod
    def _update_translations(cls, course, translations):
        """Applies parsed translations to the course, logging any messages."""
        messages = []
        cls._UPLOAD_HANDLER.update_translations(course, translations, messages)
        for message in messages:
            _LOG.info(message)
| Python |
# coding: utf-8
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to support internationalization (i18n) workflow."""
__author__ = 'John Orr (jorr@google.com)'
import cgi
import collections
import cStringIO
import datetime
import logging
import os
import re
import StringIO
import sys
import urllib
from xml.dom import minidom
import zipfile
from babel import localedata
from babel.messages import catalog
from babel.messages import pofile
import jinja2
from webapp2_extras import i18n
import appengine_config
from common import caching
from common import crypto
from common import locales as common_locales
from common import resource
from common import safe_dom
from common import schema_fields
from common import tags
from common import utils as common_utils
from common import xcontent
from controllers import sites
from controllers import utils
from models import courses
from models import resources_display
from models import custom_modules
from models import custom_units
from models import jobs
from models import models
from models import roles
from models import transforms
from models.config import ConfigProperty
from models.counters import PerfCounter
from modules.dashboard import dashboard
from modules.dashboard import unit_lesson_editor
from modules.oeditor import oeditor
from tools import verify
from google.appengine.ext import db
# URL prefix for static resources served by this module.
RESOURCES_PATH = '/modules/i18n_dashboard/resources'

TEMPLATES_DIR = os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules', 'i18n_dashboard', 'templates')

# The path to the CSS file with application-wide i18n-related styling
GLOBAL_CSS = '/modules/i18n_dashboard/resources/css/global_i18n.css'

# Shorthand for the translation-diff verbs used throughout this module.
VERB_NEW = xcontent.SourceToTargetDiffMapping.VERB_NEW
VERB_CHANGED = xcontent.SourceToTargetDiffMapping.VERB_CHANGED
VERB_CURRENT = xcontent.SourceToTargetDiffMapping.VERB_CURRENT

# This permission grants the user access to the i18n dashboard and console.
ACCESS_PERMISSION = 'access_i18n_dashboard'
ACCESS_PERMISSION_DESCRIPTION = 'Can access I18n Dashboard.'

# Schema field value types that can carry translatable text.
TYPE_HTML = 'html'
TYPE_STRING = 'string'
TYPE_TEXT = 'text'
TYPE_URL = 'url'

# Filter for those schema fields which are translatable
TRANSLATABLE_FIELDS_FILTER = schema_fields.FieldFilter(
    type_names=[TYPE_HTML, TYPE_STRING, TYPE_TEXT, TYPE_URL],
    hidden_values=[False],
    i18n_values=[None, True],
    editable_values=[True])

# Here, using 'ln' because we need a language that Babel knows.
# Lingala ( http://en.wikipedia.org/wiki/Lingala ) is not likely to be
# a target language for courses hosted in CB in the next few years.
PSEUDO_LANGUAGE = 'ln'

# Module registration handle; set by this module's register function.
custom_module = None
class ResourceBundleKey(object):
    """Composite key (type, key, locale) identifying one resource bundle."""

    def __init__(self, type_str, key, locale):
        self._type = type_str
        self._key = key
        self._locale = locale

    def __str__(self):
        # Serialized form is 'type:key:locale'; see fromstring() for parsing.
        return '%s:%s:%s' % (self._type, self._key, self._locale)

    @property
    def locale(self):
        return self._locale

    @property
    def resource_key(self):
        """The (type, key) portion as a resource.Key, without the locale."""
        return resource.Key(self._type, self._key)

    @classmethod
    def fromstring(cls, key_str):
        """Parses 'type:key:...' — everything after the second colon is
        treated as the locale."""
        type_str, key, locale = key_str.split(':', 2)
        return cls(type_str, key, locale)

    @classmethod
    def from_resource_key(cls, resource_key, locale):
        """Builds a bundle key by attaching a locale to a resource.Key."""
        return cls(resource_key.type, resource_key.key, locale)
class NamedJsonDAO(models.BaseJsonDao):
    """Base class for DAOs of entities with named keys."""

    ENTITY_KEY_TYPE = models.BaseJsonDao.EntityKeyTypeName

    @classmethod
    def load_or_create(cls, resource_key):
        """Loads the DTO for resource_key; creates and saves one if absent."""
        dto = cls.load(str(resource_key))
        if not dto:
            dto = cls.create_blank(resource_key)
            cls.save(dto)
        return dto

    @classmethod
    def create_blank(cls, resource_key):
        # Unsaved DTO with an empty payload dict.
        return cls.DTO(str(resource_key), {})
class I18nProgressEntity(models.BaseEntity):
    """The base entity for storing i18n workflow information.

    Each entity represents one resource in the course.
    """

    # JSON-serialized I18nProgressDTO payload.
    data = db.TextProperty(indexed=False)
class I18nProgressDTO(object):
    """Lightweight data object tracking i18n workflow state per resource."""

    # Per-locale progress states, in increasing order of completion.
    NOT_STARTED = 0
    IN_PROGRESS = 1
    DONE = 2

    IS_I18N_KEY = 'is_i18n'
    PROGRESS_KEY = 'progress'

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def is_translatable(self):
        # Resources are translatable unless explicitly marked otherwise.
        return self.dict.get(self.IS_I18N_KEY, True)

    @is_translatable.setter
    def is_translatable(self, value):
        assert isinstance(value, bool)
        self.dict[self.IS_I18N_KEY] = value

    def get_progress(self, locale):
        """Returns the progress state for locale; NOT_STARTED if unknown."""
        per_locale = self.dict.get(self.PROGRESS_KEY)
        if not per_locale:
            return self.NOT_STARTED
        return per_locale.get(locale, self.NOT_STARTED)

    def set_progress(self, locale, value):
        """Records the progress state for one locale."""
        self.dict.setdefault(self.PROGRESS_KEY, {})[locale] = value

    def clear_progress(self, locale):
        """Forgets any recorded progress for locale; no-op if absent."""
        per_locale = self.dict.get(self.PROGRESS_KEY)
        if per_locale is not None:
            per_locale.pop(locale, None)
class I18nProgressDAO(NamedJsonDAO):
    """Access object for the i18n workflow data."""

    # DTO/entity pair this DAO persists.
    DTO = I18nProgressDTO
    ENTITY = I18nProgressEntity
class ResourceBundleEntity(models.BaseEntity):
    """The base entity for storing i18n resource bundles."""

    # JSON-serialized ResourceBundleDTO payload.
    data = db.TextProperty(indexed=False)
    # Indexed so bundles can be queried/deleted per locale.
    locale = db.StringProperty(indexed=True)
    created_on = db.DateTimeProperty(auto_now_add=True, indexed=False)
    # Maintained by ResourceBundleDAO.before_put.
    updated_on = db.DateTimeProperty(indexed=True)

    @classmethod
    def getsizeof(cls, entity):
        """Approximate in-memory size of an entity, for cache accounting.

        Shallow sum of the four property objects' sizes.
        """
        return (
            sys.getsizeof(entity.data) +
            sys.getsizeof(entity.locale) +
            sys.getsizeof(entity.created_on) +
            sys.getsizeof(entity.updated_on))
class ResourceBundleDTO(object):
    """The lightweight data transfer object for resource bundles.

    Resource bundles are keyed by (resource_type, resource_key, locale). The
    data stored in the dict follows the following pattern:

    {
      field_name_1: {
        type: <the value type for the field>,
        source_value: <only used for html type: the undecomposed source_value>,
        data: [
          # A list of source/target pairs. The list is a singleton for plain
          # string data, and is a list of decomposed chunks for html data
          {
            source_value: <the original untranslated string>,
            target_value: <the translated string>,
          }
        ]
      },
      field_name_2: ...
    }
    """

    def __init__(self, the_id, the_dict):
        # the_id: serialized ResourceBundleKey; the_dict: payload as above.
        self.id = the_id
        self.dict = the_dict
class ResourceBundleDAO(NamedJsonDAO):
    """Data access object for resource bundle information."""

    DTO = ResourceBundleDTO
    ENTITY = ResourceBundleEntity

    @classmethod
    def before_put(cls, dto, entity):
        """Keeps the entity's indexed locale/updated_on fields in sync."""
        resource_bundle_key = ResourceBundleKey.fromstring(dto.id)
        entity.locale = resource_bundle_key.locale
        entity.updated_on = datetime.datetime.utcnow()

    @classmethod
    def get_all_for_locale(cls, locale):
        """Loads every bundle DTO stored for one locale."""
        query = caching.iter_all(
            cls.ENTITY.all().filter('locale = ', locale))
        return [
            cls.DTO(entity.key().id_or_name(), transforms.loads(entity.data))
            for entity in query]

    @classmethod
    def delete_all_for_locale(cls, locale):
        """Deletes every bundle entity for one locale."""
        # It would be nice if AppEngine DB had a query formulation that
        # allowed for deletion, but apparently not so much. Here, at least
        # we are only round-tripping the keys, not the whole objects through
        # memory.
        db.delete(list(common_utils.iter_all(
            cls.ENTITY.all(keys_only=True).filter('locale = ', locale))))
class TableRow(object):
    """Base class for one row of the i18n dashboard table."""

    @property
    def name(self):
        """Display name of the row; concrete subclasses must override."""
        raise NotImplementedError()

    @property
    def class_name(self):
        """CSS class applied to the row; none by default."""
        return ''

    @property
    def spans_all_columns(self):
        """Whether this row spans every column (e.g. section headings)."""
        return False
def _build_resource_title(app_context, rsrc_type, rsrc):
    """Returns a display title for a resource.

    Units and lessons get course-aware title formatting; every other
    resource type delegates to its registered resource handler.
    """
    if rsrc_type == resources_display.ResourceUnit.TYPE:
        title = resources_display.display_unit_title(rsrc, app_context)
    elif rsrc_type == resources_display.ResourceLesson.TYPE:
        # rsrc is indexed as a pair here — presumably (unit, lesson);
        # confirm against callers.
        title = resources_display.display_lesson_title(
            rsrc[0], rsrc[1], app_context)
    else:
        resource_handler = resource.Registry.get(rsrc_type)
        title = resource_handler.get_resource_title(rsrc)
    return title
class ResourceRow(TableRow):
    """A row in the dashboard table which displays status of a CB resource."""

    # CSS classes and labels for the three translation-progress states.
    DONE_CLASS = 'done'
    DONE_STRING = 'Done'
    IN_PROGRESS_CLASS = 'in-progress'
    IN_PROGRESS_STRING = 'In progress'
    NOT_STARTED_CLASS = 'not-started'
    NOT_STARTED_STRING = 'Not started'
    NOT_TRANSLATABLE_CLASS = 'not-translatable'

    def __init__(
            self, course, rsrc, type_str, key,
            i18n_progress_dto=None, resource_key=None):
        """Either i18n_progress_dto or resource_key must be supplied; when
        the DTO is absent, a blank (unsaved) one is created from the key."""
        self._course = course
        self._resource = rsrc
        self._type = type_str
        self._key = key
        if i18n_progress_dto is None:
            assert resource_key
            self._i18n_progress_dto = I18nProgressDAO.create_blank(resource_key)
        else:
            self._i18n_progress_dto = i18n_progress_dto

    @property
    def name(self):
        return _build_resource_title(
            self._course.app_context, self._type, self._resource)

    @property
    def class_name(self):
        # Untranslatable rows are styled distinctly; others use defaults.
        if self._i18n_progress_dto.is_translatable:
            return ''
        else:
            return self.NOT_TRANSLATABLE_CLASS

    @property
    def resource_key(self):
        return resource.Key(self._type, self._key, course=self._course)

    @property
    def is_translatable(self):
        return self._i18n_progress_dto.is_translatable

    def status(self, locale):
        """Human-readable progress label for one locale."""
        progress = self._i18n_progress_dto.get_progress(locale)
        if progress == I18nProgressDTO.NOT_STARTED:
            return self.NOT_STARTED_STRING
        elif progress == I18nProgressDTO.IN_PROGRESS:
            return self.IN_PROGRESS_STRING
        else:
            return self.DONE_STRING

    def status_class(self, locale):
        """CSS class matching status() for one locale."""
        progress = self._i18n_progress_dto.get_progress(locale)
        if progress == I18nProgressDTO.NOT_STARTED:
            return self.NOT_STARTED_CLASS
        elif progress == I18nProgressDTO.IN_PROGRESS:
            return self.IN_PROGRESS_CLASS
        else:
            return self.DONE_CLASS

    def view_url(self, locale):
        """URL to view the resource in the given locale, if viewable."""
        resource_handler = resource.Registry.get(self._type)
        view_url = resource_handler.get_view_url(self._resource)
        if view_url:
            # NOTE(review): assumes view_url already contains a query
            # string ('&' separator) — confirm against resource handlers.
            view_url += '&hl=%s' % locale
        return view_url

    def edit_url(self, locale):
        # Link into the translation console for this resource+locale.
        return TranslationConsole.get_edit_url(
            ResourceBundleKey(self._type, self._key, locale))

    @property
    def base_view_url(self):
        return self.view_url(None)

    @property
    def base_edit_url(self):
        return resource.Registry.get(self._type).get_edit_url(self._key)
class SectionRow(TableRow):
    """A row in the table which serves as a section heading."""

    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        return self._name

    @property
    def class_name(self):
        return 'section-row'

    @property
    def spans_all_columns(self):
        # Headings span the whole table rather than per-locale columns.
        return True
class EmptyRow(SectionRow):
    """A multi-column row in the table which indicates an empty section."""

    def __init__(self, name='Empty section', class_name='empty_section'):
        super(EmptyRow, self).__init__(name)
        self._class_name = class_name

    @property
    def class_name(self):
        # Overrides SectionRow's fixed class with the configured one.
        return self._class_name
class IsTranslatableRestHandler(utils.BaseRESTHandler):
    """REST handler to respond to setting a resource as (non-)translatable."""

    URL = '/rest/modules/i18n_dashboard/is_translatable'
    XSRF_TOKEN_NAME = 'is-translatable'

    def post(self):
        """Sets the is_translatable flag for one resource.

        Expects a JSON 'request' parameter carrying an XSRF token and a
        payload with 'resource_key' and a boolean 'value'. Responds 401 if
        the caller may not edit the course outline.
        """
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN_NAME, {}):
            return
        if not unit_lesson_editor.CourseOutlineRights.can_edit(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return
        payload = request.get('payload')
        # Creates the progress record on first use of a resource.
        i18n_progress_dto = I18nProgressDAO.load_or_create(
            payload['resource_key'])
        i18n_progress_dto.is_translatable = payload['value']
        I18nProgressDAO.save(i18n_progress_dto)
        transforms.send_json_response(self, 200, 'OK', {}, None)
class BaseDashboardExtension(object):
    """Base for dashboard pages added by this module.

    Subclasses set ACTION and implement render(); register() hooks the
    action into the dashboard with the i18n access permission.
    """

    # Dashboard action name; must be set by subclasses.
    ACTION = None

    @classmethod
    def is_readonly(cls, course):
        # Course admins can lock translation editing via I18N settings.
        return course.app_context.get_environ()[
            'course'].get('prevent_translation_edits')

    @classmethod
    def format_readonly_message(cls):
        return safe_dom.Element('P').add_text(
            'Translation console is currently disabled. '
            'Course administrator can enable it via I18N Settings.')

    @classmethod
    def register(cls):
        def get_action(handler):
            # Instantiate per request; render() does the actual work.
            cls(handler).render()
        dashboard.DashboardHandler.add_custom_get_action(cls.ACTION, get_action)
        dashboard.DashboardHandler.map_action_to_permission(
            'get_%s' % cls.ACTION, ACCESS_PERMISSION)

    @classmethod
    def unregister(cls):
        dashboard.DashboardHandler.remove_custom_get_action(cls.ACTION)
        dashboard.DashboardHandler.unmap_action_to_permission(
            'get_%s' % cls.ACTION)

    def __init__(self, handler):
        """Initialize the class with a request handler.

        Args:
            handler: modules.dashboard.DashboardHandler. This is the handler
                which will do the rendering.
        """
        self.handler = handler
class TranslationsAndLocations(object):
    """Accumulates candidate translations and usage info for one message.

    One message id may occur in several locations with several (possibly
    blank) existing translations; this collects them for .po export.
    """

    def __init__(self):
        self._translations = set()
        self._locations = []
        self._comments = []
        self._previous_id = ''

    def add_translation(self, translation):
        """Adds a candidate translation, preferring nonblank over blank.

        Blank translations are kept only while no nonblank alternative
        exists; the first nonblank candidate evicts any accumulated blanks.
        """
        # Bug fix: this check must run BEFORE the add. The original checked
        # after adding, so the just-added nonblank value made any() true and
        # the purge never fired — and had it fired, it would have replaced
        # the set with a list, breaking later .add() calls.
        if translation and not any(self._translations):
            self._translations = set()
        # Don't add "translations" that are blank, unless we have no other
        # alternatives.
        if translation or not self._translations:
            self._translations.add(translation)

    def add_location(self, location):
        self._locations.append(location)

    def add_comment(self, comment):
        comment = unicode(comment)  # May be Node or NodeList.
        self._comments.append(comment)

    def set_previous_id(self, previous_id):
        self._previous_id = previous_id

    @property
    def locations(self):
        return self._locations

    @property
    def translations(self):
        return self._translations

    @property
    def comments(self):
        return self._comments

    @property
    def previous_id(self):
        return self._previous_id
class I18nDeletionHandler(BaseDashboardExtension):
    """Dashboard page offering bulk deletion of translations per locale."""

    ACTION = 'i18n_delete'

    def render(self):
        """Renders an object editor backed by TranslationDeletionRestHandler."""
        main_content = oeditor.ObjectEditor.get_html_for(
            self.handler,
            TranslationDeletionRestHandler.schema().get_json_schema(),
            TranslationDeletionRestHandler.schema().get_schema_dict(),
            '',
            self.handler.canonicalize_url(TranslationDeletionRestHandler.URL),
            # Return to the i18n dashboard after the delete completes.
            self.handler.get_action_url(I18nDashboardHandler.ACTION),
            save_button_caption='Delete', auto_return=True,
            required_modules=TranslationDeletionRestHandler.REQUIRED_MODULES,
            extra_js_files=['delete_translations.js'],
            additional_dirs=[TEMPLATES_DIR])
        self.handler.render_page({
            'page_title': self.handler.format_title(
                'I18n Translation Deletion'),
            'main_content': main_content})
class TranslationDeletionRestHandler(utils.BaseRESTHandler):
    """REST backend for the OEditor form that deletes translations."""

    URL = '/rest/modules/i18n_dashboard/i18n_deletion'
    XSRF_TOKEN_NAME = 'translation_deletion'
    # InputEx widgets the OEditor page must load to render the schema.
    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-hidden',
        'inputex-checkbox', 'inputex-list', 'inputex-uneditable',
    ]

    @classmethod
    def schema(cls):
        """Form schema: a checkbox list of {locale, checked, title} rows."""
        schema = schema_fields.FieldRegistry('Translation Deletion')
        locales_schema = schema_fields.FieldRegistry(
            None, description='locales')
        locales_schema.add_property(schema_fields.SchemaField(
            'locale', 'Locale', 'string', hidden=True, editable=False))
        locales_schema.add_property(schema_fields.SchemaField(
            'checked', None, 'boolean'))
        locales_schema.add_property(schema_fields.SchemaField(
            'title', None, 'string', optional=True, editable=False))
        schema.add_property(schema_fields.FieldArray(
            'locales', 'Languages', item_type=locales_schema,
            description='Select the languages whose translations you '
            'wish to delete.',
            extra_schema_dict_values={
                'className': (
                    'inputEx-Field inputEx-ListField '
                    'label-group label-group-list')}))
        return schema

    def get(self):
        """Populate the form with every course locale except the default."""
        course = self.get_course()
        default_locale = course.default_locale
        locales = []
        for locale in course.all_locales:
            if locale == default_locale:
                continue
            locales.append({
                'locale': locale,
                'checked': False,
                'title': common_locales.get_locale_display_name(locale)})
        payload_dict = {
            'locales': locales,
        }
        transforms.send_json_response(
            self, 200, 'Success.', payload_dict=payload_dict,
            xsrf_token=crypto.XsrfTokenManager.create_xsrf_token(
                self.XSRF_TOKEN_NAME))

    def _validate_inputs(self, course):
        """Parse and authorize the request; return checked locales.

        On any failure an error response is sent and the empty list is
        returned, so callers can simply bail out on a falsy result.
        """
        if appengine_config.PRODUCTION_MODE:
            transforms.send_json_response(
                self, 403, 'Not available in production.')
            return []
        try:
            request = models.transforms.loads(self.request.get('request'))
        except ValueError:
            transforms.send_json_response(
                self, 400, 'Malformed or missing "request" parameter.')
            return []
        try:
            payload = models.transforms.loads(request.get('payload', ''))
        except ValueError:
            transforms.send_json_response(
                self, 400, 'Malformed or missing "payload" parameter.')
            return []
        if not self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN_NAME, {}):
            return []
        try:
            locales = [l['locale'] for l in payload.get('locales')
                       if l.get('checked')]
        except (TypeError, ValueError, KeyError):
            transforms.send_json_response(
                self, 400, 'Locales specification not as expected.')
            return []
        if not locales:
            # Nice UI message when no locales selected.
            transforms.send_json_response(
                self, 400, 'Please select at least one language to delete.')
            return []
        for locale in locales:
            if not has_locale_rights(self.app_context, locale):
                transforms.send_json_response(self, 401, 'Access denied.')
                return []
        return locales

    @staticmethod
    def delete_locales(course, locales):
        """Remove progress rows, bundles, then settings for 'locales'."""
        # First remove progress indications.  If this fails or times out,
        # we haven't really lost any work; these can be rebuilt.
        i18n_progress_dtos = I18nProgressDAO.get_all()
        for i18n_progress_dto in i18n_progress_dtos:
            for locale in locales:
                i18n_progress_dto.clear_progress(locale)
        I18nProgressDAO.save_all(i18n_progress_dtos)

        # Now remove actual translations.
        for locale in locales:
            ResourceBundleDAO.delete_all_for_locale(locale)

        # When all of the foregoing has completed, remove the course
        # setting.  (Removing this earlier would be bad; removing this
        # tells the UI the locale is gone.  If we removed this first,
        # and then failed to remove locale items from the DB, confusion
        # would likely ensue)
        environ = course.get_environ(course.app_context)
        extra_locales = environ.get('extra_locales', [])
        for configured_locale in list(extra_locales):
            if configured_locale['locale'] in locales:
                extra_locales.remove(configured_locale)
        course.save_settings(environ)

    def put(self):
        """Verify inputs and return 200 OK to OEditor when all is well."""
        course = self.get_course()
        locales = self._validate_inputs(course)
        if not locales:
            return
        self.delete_locales(course, locales)
        transforms.send_json_response(self, 200, 'Success.')
class I18nDownloadHandler(BaseDashboardExtension):
    """Dashboard extension presenting the translation-download form."""

    ACTION = 'i18n_download'

    def render(self):
        """Render an OEditor form backed by TranslationDownloadRestHandler."""
        rest_cls = TranslationDownloadRestHandler
        dashboard = self.handler
        form_html = oeditor.ObjectEditor.get_html_for(
            dashboard,
            rest_cls.schema().get_json_schema(),
            rest_cls.schema().get_schema_dict(),
            '',
            dashboard.canonicalize_url(rest_cls.URL),
            dashboard.get_action_url(I18nDashboardHandler.ACTION),
            required_modules=rest_cls.REQUIRED_MODULES,
            save_button_caption='Download',
            extra_js_files=['download_translations.js'],
            additional_dirs=[TEMPLATES_DIR])
        page_title = dashboard.format_title(
            'I18n Translation Download')
        dashboard.render_page({
            'page_title': page_title,
            'main_content': form_html})
class TranslationDownloadRestHandler(utils.BaseRESTHandler):
    """REST backend exporting course translations as a .zip of .po files."""

    URL = '/rest/modules/i18n_dashboard/i18n_download'
    XSRF_TOKEN_NAME = 'translation_download'
    # InputEx widgets the OEditor page must load to render the schema.
    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-hidden',
        'inputex-checkbox', 'inputex-list', 'inputex-uneditable',
    ]

    @classmethod
    def schema(cls):
        """Form schema: export scope, locale checkboxes, output file name."""
        schema = schema_fields.FieldRegistry('Translation Download')
        schema.add_property(schema_fields.SchemaField(
            'export_what', 'Export Items', 'string',
            select_data=[
                ('new',
                 'Only items that are new or have out-of-date translations'),
                ('all', 'All translatable items')],
            description='Select what translation strings to export.'))
        locales_schema = schema_fields.FieldRegistry(
            None, description='locales')
        locales_schema.add_property(schema_fields.SchemaField(
            'locale', 'Locale', 'string', hidden=True, editable=False))
        locales_schema.add_property(schema_fields.SchemaField(
            'checked', None, 'boolean'))
        locales_schema.add_property(schema_fields.SchemaField(
            'title', None, 'string', optional=True, editable=False))
        schema.add_property(schema_fields.FieldArray(
            'locales', 'Languages', item_type=locales_schema,
            description='Select the languages whose translations you '
            'wish to export.',
            extra_schema_dict_values={
                'className': (
                    'inputEx-Field inputEx-ListField '
                    'label-group label-group-list')}))
        schema.add_property(schema_fields.SchemaField(
            'file_name', 'Download as File Named', 'string'))
        return schema

    def get(self):
        """Populate the form; the default and pseudo locales are excluded."""
        course = self.get_course()
        default_locale = course.default_locale
        locales = []
        for locale in course.all_locales:
            if locale == default_locale or locale == PSEUDO_LANGUAGE:
                continue
            locales.append({
                'locale': locale,
                'checked': True,
                'title': common_locales.get_locale_display_name(locale)})
        payload_dict = {
            'locales': locales,
            'file_name': course.title.lower().replace(' ', '_') + '.zip'
        }
        transforms.send_json_response(
            self, 200, 'Success.', payload_dict=payload_dict,
            xsrf_token=crypto.XsrfTokenManager.create_xsrf_token(
                self.XSRF_TOKEN_NAME))

    @staticmethod
    def build_translations(course, locales, export_what):
        """Build up a dictionary of all translated strings -> locale.

        For each {original-string,locale}, keep track of the course
        locations where this occurs, and each of the translations given.

        Args:
            course: The course for whose contents we are building
                translations.
            locales: Locales for which translations are desired.
            export_what: A string that tells us what should be added to the
                translations.  The value 'all' exports everything, translated
                or not, stale or not.  The value 'new' emits only things
                that have no translations, or whose translations are
                out-of-date with respect to the resource.
        Returns:
            Map of original-string -> locale -> TranslationsAndLocations
            instance.
        """
        app_context = course.app_context
        translations = collections.defaultdict(
            lambda: collections.defaultdict(TranslationsAndLocations))
        transformer = xcontent.ContentTransformer(
            config=I18nTranslationContext.get(app_context))
        resource_key_map = TranslatableResourceRegistry.get_resources_and_keys(
            course)

        # Preload all I18N progress DTOs; we'll need all of them.
        i18n_progress_dtos = I18nProgressDAO.get_all()
        progress_by_key = {p.id: p for p in i18n_progress_dtos}

        for locale in locales:
            # Preload all resource bundles for this locale; we need all
            # of them.
            resource_bundle_dtos = ResourceBundleDAO.get_all_for_locale(locale)
            bundle_by_key = {b.id: b for b in resource_bundle_dtos}

            for rsrc, resource_key in resource_key_map:
                key = ResourceBundleKey(
                    resource_key.type, resource_key.key, locale)

                # If we don't already have a resource bundle, make it.
                resource_bundle_dto = bundle_by_key.get(str(key))
                if not resource_bundle_dto:
                    resource_bundle_dto = ResourceBundleDAO.create_blank(key)
                    resource_bundle_dtos.append(resource_bundle_dto)
                    bundle_by_key[resource_bundle_dto.id] = resource_bundle_dto

                # If we don't already have a progress record, make it.
                i18n_progress_dto = progress_by_key.get(str(resource_key))
                if not i18n_progress_dto:
                    i18n_progress_dto = I18nProgressDAO.create_blank(
                        resource_key)
                    i18n_progress_dtos.append(i18n_progress_dto)
                    progress_by_key[i18n_progress_dto.id] = i18n_progress_dto

                # Act as though we are loading the interactive translation
                # page and then clicking 'save'.  This has the side-effect of
                # forcing us to have created the resource bundle and progress
                # DTOs, and ensures that the operation here has identical
                # behavior with manual operation, and there are thus fewer
                # opportunities to go sideways and slip between the cracks.
                binding, sections = (
                    TranslationConsoleRestHandler.build_sections_for_key(
                        key, course, resource_bundle_dto, transformer))
                TranslationConsoleRestHandler.update_dtos_with_section_data(
                    key, sections, resource_bundle_dto, i18n_progress_dto)
                TranslationDownloadRestHandler._collect_section_translations(
                    translations, sections, binding, export_what, locale, key,
                    resource_key, rsrc)
            ResourceBundleDAO.save_all(resource_bundle_dtos)
        I18nProgressDAO.save_all(i18n_progress_dtos)
        return translations

    @staticmethod
    def _collect_section_translations(translations, sections, binding,
                                      export_what, locale, key, resource_key,
                                      rsrc):
        """Fold one resource's translation sections into 'translations'."""
        # For each section in the translation, make a record of that
        # in an internal data store which is used to generate .po
        # files.
        for section in sections:
            section_name = section['name']
            section_type = section['type']
            description = (
                binding.find_field(section_name).description or '')
            for translation in section['data']:
                message = unicode(translation['source_value'] or '')
                translated_message = translation['target_value'] or ''
                is_current = translation['verb'] == VERB_CURRENT
                old_message = translation['old_source_value']

                # Skip exporting blank items; pointless.
                if not message:
                    continue
                # If not exporting everything, and the current
                # translation is up-to-date, don't export it.
                if export_what != 'all' and is_current:
                    continue

                # Set source string and location.
                t_and_l = translations[message][locale]
                t_and_l.add_location('GCB-1|%s|%s|%s' % (
                    section_name, section_type, str(key)))

                # Describe the location where the item is found.
                t_and_l.add_comment(description)
                try:
                    resource_handler = resource.Registry.get(resource_key.type)
                    title = resource_handler.get_resource_title(rsrc)
                    if title:
                        t_and_l.add_comment(title)
                except AttributeError:
                    # Under ETL, there is no real handler and title lookup
                    # fails.  In that case, we lose this data, which is non-
                    # essential.
                    pass

                # Add either the current translation (if current)
                # or the old translation as a remark (if we have one)
                if is_current:
                    t_and_l.add_translation(translated_message)
                else:
                    t_and_l.add_translation('')
                    if old_message:
                        t_and_l.set_previous_id(old_message)
                    if translated_message:
                        t_and_l.add_comment(
                            'Previously translated as: "%s"' %
                            translated_message)

    @staticmethod
    def build_babel_catalog_for_locale(course, translations, locale):
        """Assemble a Babel Catalog for one locale from 'translations'."""
        environ = course.get_environ(course.app_context)
        course_title = environ['course'].get('title')
        bugs_address = environ['course'].get('admin_user_emails')
        organization = environ['base'].get('nav_header')
        with common_utils.ZipAwareOpen():
            # Load metadata for locale to which we are translating.
            localedata.load(locale)
        cat = catalog.Catalog(
            locale=locale,
            project='Translation for %s of %s' % (locale, course_title),
            msgid_bugs_address=bugs_address,
            copyright_holder=organization)
        for tr_id in translations:
            if locale in translations[tr_id]:
                t_and_l = translations[tr_id][locale]
                # First translation becomes the message string; any others
                # are preserved as automatic comments.
                cat.add(
                    tr_id, string=t_and_l.translations.pop(),
                    locations=[(l, 0) for l in t_and_l.locations],
                    user_comments=t_and_l.comments,
                    auto_comments=['also translated as "%s"' % s
                                   for s in t_and_l.translations],
                    previous_id=t_and_l.previous_id)
        return cat

    @staticmethod
    def build_zip_file(course, out_stream, translations, locales):
        """Create a .zip file with one .po file for each translated language.

        Args:
            course: the Course object that we're building an export for.
            out_stream: An open file-like which can be written and seeked.
            translations: Map of string -> locale -> TranslationsAndLocations
                as returned from build_translations().
            locales: The set of locales for which we want to build .po files
        """
        app_context = course.app_context
        original_locale = app_context.default_locale
        with common_utils.ZipAwareOpen():
            # Load metadata for 'en', which Babel uses internally.
            localedata.load('en')

            # Load metadata for source language for course.
            localedata.load(original_locale)

        zf = zipfile.ZipFile(out_stream, 'w', allowZip64=True)
        try:
            for locale in locales:
                cat = (
                    TranslationDownloadRestHandler
                    .build_babel_catalog_for_locale(
                        course, translations, locale))
                filename = os.path.join(
                    'locale', locale, 'LC_MESSAGES', 'messages.po')
                content = cStringIO.StringIO()
                try:
                    pofile.write_po(content, cat, include_previous=True)
                    zf.writestr(filename, content.getvalue())
                finally:
                    content.close()
        finally:
            zf.close()

    def _send_response(self, out_stream, filename):
        """Emit the zip bytes as a browser download attachment."""
        self.response.content_type = 'application/octet-stream'
        self.response.content_disposition = (
            'attachment; filename="%s"' % filename)
        self.response.out.write(out_stream.getvalue())

    def _validate_inputs(self, course):
        """Parse and authorize the request.

        Returns:
            (locales, export_what, file_name) on success; (None, None, None)
            after an error response has already been sent.
        """
        if appengine_config.PRODUCTION_MODE:
            transforms.send_json_response(
                self, 403, 'Not available in production.')
            return None, None, None
        try:
            request = models.transforms.loads(self.request.get('request'))
        except ValueError:
            transforms.send_json_response(
                self, 400, 'Malformed or missing "request" parameter.')
            return None, None, None
        try:
            payload = models.transforms.loads(request.get('payload', ''))
        except ValueError:
            transforms.send_json_response(
                self, 400, 'Malformed or missing "payload" parameter.')
            return None, None, None
        if not self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN_NAME, {}):
            return None, None, None
        try:
            locales = [l['locale'] for l in payload.get('locales')
                       if l.get('checked') and l['locale'] != PSEUDO_LANGUAGE]
        except (TypeError, ValueError, KeyError):
            transforms.send_json_response(
                self, 400, 'Locales specification not as expected.')
            return None, None, None
        if not locales:
            # Nice UI message when no locales selected.
            transforms.send_json_response(
                self, 400, 'Please select at least one language to export.')
            return None, None, None
        for locale in locales:
            if not has_locale_rights(self.app_context, locale):
                transforms.send_json_response(self, 401, 'Access denied.')
                return None, None, None
        export_what = payload.get('export_what', 'new')
        file_name = payload.get(
            'file_name', course.title.lower().replace(' ', '_') + '.zip')
        return locales, export_what, file_name

    def put(self):
        """Verify inputs and return 200 OK to OEditor when all is well."""
        course = self.get_course()
        locales, _, _ = self._validate_inputs(course)
        if not locales:
            return
        transforms.send_json_response(self, 200, 'Success.')

    def post(self):
        """Actually generate the download content.

        This is a somewhat ugly solution to a somewhat ugly problem.
        The problem is this: The OEdtior form expects to see JSON
        responses, since it's meant for editing small well-structured
        objects.  Here, we're perverting that intent, and just using
        OEditor to present a form with options about the download.
        On successful "save", we have a hook that re-submits a form
        to hit the POST action, rather than the default PUT action,
        and that triggers the download.
        """
        course = self.get_course()
        locales, export_what, file_name = self._validate_inputs(course)
        if not locales:
            return
        translations = self.build_translations(course, locales, export_what)
        out_stream = StringIO.StringIO()
        # zip assumes stream has a real fp; fake it.
        out_stream.fp = out_stream
        try:
            self.build_zip_file(course, out_stream, translations, locales)
            self._send_response(out_stream, file_name)
        finally:
            out_stream.close()
class I18nUploadHandler(BaseDashboardExtension):
    """Dashboard extension presenting the translation-upload form."""

    ACTION = 'i18n_upload'

    def render(self):
        """Render an OEditor form backed by TranslationUploadRestHandler."""
        rest_cls = TranslationUploadRestHandler
        dashboard = self.handler
        form_html = oeditor.ObjectEditor.get_html_for(
            dashboard,
            rest_cls.SCHEMA.get_json_schema(),
            rest_cls.SCHEMA.get_schema_dict(),
            '',
            dashboard.canonicalize_url(rest_cls.URL),
            dashboard.get_action_url(I18nDashboardHandler.ACTION),
            required_modules=rest_cls.REQUIRED_MODULES,
            save_method='upload', save_button_caption='Upload',
            extra_js_files=['upload_translations.js'],
            additional_dirs=[TEMPLATES_DIR])
        dashboard.render_page({
            'page_title': dashboard.format_title('I18n Translation Upload'),
            'main_content': form_html,
        })
def translation_upload_generate_schema():
    """Build the OEditor schema for the translation-upload form."""
    registry = schema_fields.FieldRegistry('Translation Upload')
    file_field = schema_fields.SchemaField(
        'file', 'Translation File', 'file',
        # Not really optional, but oeditor marks un-filled mandatory field as
        # an error, and doesn't un-mark when the user has selected a file, so
        # cleaner to just not mark as error and catch missing files on
        # PUT/POST with a nice error message, which we had to do anyhow.
        optional=True,
        description='Use this option to nominate a .po file containing '
        'translations for a single language, or a .zip file containing '
        'multiple translated languages. The internal structure of the .zip '
        'file is unimportant; all files ending in ".po" will be considered.')
    registry.add_property(file_field)
    return registry
class TranslationUploadRestHandler(utils.BaseRESTHandler):
    """REST backend accepting .po/.zip uploads of course translations."""

    URL = '/rest/modules/i18n_dashboard/upload'
    XSRF_TOKEN_NAME = 'translation-upload'
    SCHEMA = translation_upload_generate_schema()
    REQUIRED_MODULES = ['inputex-hidden', 'inputex-select', 'inputex-string',
                        'inputex-uneditable', 'inputex-file',
                        'io-upload-iframe']

    class ProtocolError(Exception):
        """Raised when uploaded .po content violates the GCB-1 format."""
        pass

    def get(self):
        # Only purpose of GET is to hand the form an XSRF token.
        transforms.send_json_response(
            self, 200, 'Success.', payload_dict={'key': None},
            xsrf_token=crypto.XsrfTokenManager.create_xsrf_token(
                self.XSRF_TOKEN_NAME))

    @staticmethod
    def build_translations_defaultdict():
        # Build up set of all incoming translations as a nested dict:
        # locale -> bundle key -> {'original text': 'translated text'}
        return collections.defaultdict(lambda: collections.defaultdict(dict))

    @staticmethod
    def parse_po_file(translations, po_file_content):
        """Collect translations from .po file and group by bundle key."""
        pseudo_file = cStringIO.StringIO(po_file_content)
        the_catalog = pofile.read_po(pseudo_file)
        # All messages in one file must target a single locale; the locale
        # is recovered from each message's GCB-1 location marker.
        locale = None
        for message in the_catalog:
            for location, _ in message.locations:
                protocol, _, _, key = location.split('|', 4)
                if protocol != 'GCB-1':
                    raise TranslationUploadRestHandler.ProtocolError(
                        'Expected location format GCB-1, but had %s' %
                        protocol)
                message_locale = ResourceBundleKey.fromstring(key).locale
                if locale is None:
                    locale = message_locale
                elif locale != message_locale:
                    raise TranslationUploadRestHandler.ProtocolError(
                        'File has translations for both "%s" and "%s"' % (
                            locale, message_locale))
                translations[locale][key][message.id] = message.string

    @staticmethod
    def update_translations(course, translations, messages):
        """Apply uploaded translations to the course's resource bundles.

        Args:
            course: course whose resource bundles are updated.
            translations: nested dict locale -> bundle key ->
                {original text: translated text}, as built by parse_po_file.
            messages: list of strings; human-readable findings appended here.
        """
        app_context = course.app_context
        transformer = xcontent.ContentTransformer(
            config=I18nTranslationContext.get(app_context))
        i18n_progress_dtos = I18nProgressDAO.get_all()
        progress_by_key = {p.id: p for p in i18n_progress_dtos}
        resource_key_map = TranslatableResourceRegistry.get_resources_and_keys(
            course)
        for locale, resource_translations in translations.iteritems():
            used_resource_translations = set()
            num_resources = 0
            num_replacements = 0
            num_blank_translations = 0
            resource_bundle_dtos = ResourceBundleDAO.get_all_for_locale(locale)
            bundle_by_key = {b.id: b for b in resource_bundle_dtos}
            for _, resource_key in resource_key_map:
                num_resources += 1
                key = ResourceBundleKey(
                    resource_key.type, resource_key.key, locale)
                key_str = str(key)

                # Here, be permissive: just create the bundle or progress DTO
                # if it does not currently exist.  Guaranteed we won't have
                # translations for this resource, since we'd have created the
                # bundle on export, but this makes us 1:1 with the behavior on
                # manual edit and on export.
                resource_bundle_dto = bundle_by_key.get(key_str)
                if not resource_bundle_dto:
                    resource_bundle_dto = ResourceBundleDAO.create_blank(key)
                    resource_bundle_dtos.append(resource_bundle_dto)
                    bundle_by_key[resource_bundle_dto.id] = resource_bundle_dto
                i18n_progress_dto = progress_by_key.get(str(key.resource_key))
                if not i18n_progress_dto:
                    i18n_progress_dto = I18nProgressDAO.create_blank(
                        resource_key)
                    i18n_progress_dtos.append(i18n_progress_dto)
                    progress_by_key[i18n_progress_dto.id] = i18n_progress_dto

                # NOTE(review): this rebinds the outer name 'translations'
                # (the dict being iterated above).  Iteration is unaffected
                # because the iterator was already created, but the shadowing
                # is fragile -- consider renaming on a future edit.
                translations = resource_translations.get(key_str)
                if translations:
                    used_resource_translations.add(key_str)
                else:
                    # Even though we don't have translations for this
                    # resource, keep going; we want to update the progress
                    # DTO below.
                    translations = {}

                used_translations = set()
                _, sections = (
                    TranslationConsoleRestHandler.build_sections_for_key(
                        key, course, resource_bundle_dto, transformer))
                for section in sections:
                    for item in section['data']:
                        source_value = item['source_value']
                        if not isinstance(source_value, basestring):
                            source_value = unicode(source_value)  # convert num
                        if source_value not in translations:
                            messages.append(
                                'Did not find translation for "%s"' %
                                source_value)
                        elif translations[source_value]:
                            item['target_value'] = translations[source_value]
                            item['changed'] = True
                            used_translations.add(source_value)
                            num_replacements += 1
                        else:
                            used_translations.add(source_value)
                            num_blank_translations += 1
                for unused_translation in set(translations) - used_translations:
                    messages.append(
                        'Translation for "%s" present but not used.' %
                        unused_translation)
                TranslationConsoleRestHandler.update_dtos_with_section_data(
                    key, sections, resource_bundle_dto, i18n_progress_dto)
            for unused in (
                    set(resource_translations) - used_resource_translations):
                messages.append(
                    ('Translation file had %d items for resource "%s", but '
                     'course had no such resource.') % (
                         len(resource_translations[unused]), unused))
            messages.append(
                ('For %s, made %d total replacements in %d resources.  '
                 '%d items in the uploaded file did not have translations.') % (
                     common_locales.get_locale_display_name(locale),
                     num_replacements, num_resources, num_blank_translations))
            ResourceBundleDAO.save_all(resource_bundle_dtos)
        I18nProgressDAO.save_all(i18n_progress_dtos)

    def post(self):
        """Accept an uploaded .zip or .po file and apply its translations."""
        if appengine_config.PRODUCTION_MODE:
            transforms.send_json_response(
                self, 403, 'Not available in production.')
            return
        try:
            request = models.transforms.loads(self.request.get('request'))
        except ValueError:
            transforms.send_file_upload_response(
                self, 400, 'Malformed or missing "request" parameter.')
            return
        token = request.get('xsrf_token')
        if not token or not crypto.XsrfTokenManager.is_xsrf_token_valid(
                token, self.XSRF_TOKEN_NAME):
            transforms.send_file_upload_response(
                self, 403, 'Missing or invalid XSRF token.')
            return
        if 'file' not in self.request.POST:
            transforms.send_file_upload_response(
                self, 400, 'Must select a .zip or .po file to upload.')
            return
        upload = self.request.POST['file']
        if not isinstance(upload, cgi.FieldStorage):
            transforms.send_file_upload_response(
                self, 400, 'Must select a .zip or .po file to upload')
            return
        file_content = upload.file.read()
        if not file_content:
            transforms.send_file_upload_response(
                self, 400, 'The .zip or .po file must not be empty.')
            return

        # Get meta-data for supported locales loaded.  Need to do this before
        # attempting to parse .po file content.  Do this now, since we don't
        # rely on file names to establish locale, just bundle keys.  Since
        # bundle keys are in .po file content, and since we need locales
        # loaded to parse file content, resolve recursion by pre-emptively
        # just grabbing everything.
        for locale in self.app_context.get_all_locales():
            with common_utils.ZipAwareOpen():
                localedata.load(locale)

        # Build up set of all incoming translations as a nested dict:
        # locale -> bundle key -> {'original text': 'translated text'}
        translations = self.build_translations_defaultdict()
        try:
            try:
                zf = zipfile.ZipFile(cStringIO.StringIO(file_content), 'r')
                for item in zf.infolist():
                    if item.filename.endswith('.po'):
                        self.parse_po_file(translations, zf.read(item))
            except zipfile.BadZipfile:
                # Not a .zip archive; fall back to treating the upload as a
                # single .po file.
                try:
                    self.parse_po_file(translations, file_content)
                except UnicodeDecodeError:
                    transforms.send_file_upload_response(
                        self, 400,
                        'Uploaded file did not parse as .zip or .po file.')
                    return
        except TranslationUploadRestHandler.ProtocolError, ex:
            transforms.send_file_upload_response(self, 400, str(ex))
            return
        if not translations:
            transforms.send_file_upload_response(
                self, 400, 'No translations found in provided file.')
            return
        for locale in translations:
            if not has_locale_rights(self.app_context, locale):
                transforms.send_file_upload_response(
                    self, 401, 'Access denied.')
                return
        messages = []
        self.update_translations(self.get_course(), translations, messages)
        transforms.send_file_upload_response(
            self, 200, 'Success.', payload_dict={'messages': messages})
class I18nProgressManager(caching.RequestScopedSingleton):
    """Request-scoped lookup of I18N progress DTOs by resource key."""

    def __init__(self, course):
        self._course = course
        self._key_to_progress = None  # lazily filled on first lookup

    def _preload(self):
        # Single pass over all progress rows, keyed by normalized key string.
        self._key_to_progress = {
            str(resource.Key.fromstring(row.id)): row
            for row in I18nProgressDAO.get_all_iter()}

    def _get(self, rsrc, type_str, key):
        if self._key_to_progress is None:
            self._preload()
        resource_key = resource.Key(type_str, key)
        progress = self._key_to_progress.get(str(resource_key))
        return ResourceRow(
            self._course, rsrc, type_str, key,
            i18n_progress_dto=progress,
            resource_key=resource_key)

    @classmethod
    def get(cls, course, rsrc, type_str, key):
        # pylint: disable=protected-access
        return cls.instance(course)._get(rsrc, type_str, key)
# Tuning knobs for the in-process resource-bundle cache.

# all caches must have limits
MAX_GLOBAL_CACHE_SIZE_BYTES = 16 * 1024 * 1024

# we don't track deletions; deleted item will hang around this long
CACHE_ENTRY_TTL_SEC = 5 * 60

# Global memcache controls.
CAN_USE_RESOURCE_BUNDLE_IN_PROCESS_CACHE = ConfigProperty(
    'gcb_can_use_resource_bundle_in_process_cache', bool, (
        'Whether or not to cache I18N translations. For production this value '
        'should be on to enable maximum performance. For development this '
        'value should be off so you can see your changes to course content '
        'instantaneously.'), default_value=True)
class ProcessScopedResourceBundleCache(caching.ProcessScopedSingleton):
    """This class holds in-process global cache of VFS objects."""

    @classmethod
    def get_cache_len(cls):
        """Number of entries currently held by the process-wide cache."""
        # pylint: disable=protected-access
        items = ProcessScopedResourceBundleCache.instance()._cache.items
        return len(items.keys())

    @classmethod
    def get_cache_size(cls):
        """Total size, in bytes, of all cached entries."""
        # pylint: disable=protected-access
        return ProcessScopedResourceBundleCache.instance()._cache.total_size

    def __init__(self):
        lru = caching.LRUCache(max_size_bytes=MAX_GLOBAL_CACHE_SIZE_BYTES)
        lru.get_entry_size = self._get_entry_size
        self._cache = lru

    def _get_entry_size(self, key, value):
        # NOTE(review): falsy values (e.g. negative-cache None entries) are
        # charged 0 bytes -- even the key's size is not counted.  This
        # matches the original ternary's precedence; confirm it is intended.
        if not value:
            return 0
        return sys.getsizeof(key) + sys.getsizeof(value)

    @property
    def cache(self):
        return self._cache
class ResourceBundleCacheEntry(caching.AbstractCacheEntry):
    """Cache entry representing a file."""

    def __init__(self, entity):
        self.entity = entity
        self.created_on = datetime.datetime.utcnow()

    def getsizeof(self):
        """Approximate memory footprint of this entry in bytes."""
        entity_size = ResourceBundleEntity.getsizeof(self.entity)
        return entity_size + sys.getsizeof(self.created_on)

    def has_expired(self):
        """True once the entry has outlived CACHE_ENTRY_TTL_SEC."""
        now = datetime.datetime.utcnow()
        age = (now - self.created_on).total_seconds()
        return age > CACHE_ENTRY_TTL_SEC

    def is_up_to_date(self, key, update):
        """Whether the cached entity matches the freshly-fetched 'update'."""
        if update and self.entity:
            return update.updated_on == self.entity.updated_on
        # Only consistent when both cache and datastore agree it is absent.
        return not update and not self.entity

    def updated_on(self):
        return self.entity.updated_on if self.entity else None

    @classmethod
    def externalize(cls, key, entry):
        """Convert an entry back to a DTO; None for negative-cache entries."""
        entity = entry.entity
        if not entity:
            return None
        return ResourceBundleDAO.DTO(
            entity.key().id_or_name(), transforms.loads(entity.data))

    @classmethod
    def internalize(cls, key, entity):
        return cls(entity)
class ResourceBundleCacheConnection(caching.AbstractCacheConnection):
    """Cache connection binding ResourceBundleEntity to its cache entries."""

    PERSISTENT_ENTITY = ResourceBundleEntity
    CACHE_ENTRY = ResourceBundleCacheEntry

    @classmethod
    def init_counters(cls):
        super(ResourceBundleCacheConnection, cls).init_counters()
        cls.CACHE_INHERITED = PerfCounter(
            'gcb-models-ResourceBundleConnection-cache-inherited',
            'A number of times an object was obtained from the inherited vfs.')

    @classmethod
    def is_enabled(cls):
        return CAN_USE_RESOURCE_BUNDLE_IN_PROCESS_CACHE.value

    def __init__(self, namespace):
        super(ResourceBundleCacheConnection, self).__init__(namespace)
        # All connections share the single process-wide LRU cache.
        self.cache = ProcessScopedResourceBundleCache.instance().cache

    def get_updates_when_empty(self):
        """Load in all ResourceBundles when cache is empty."""
        q = self.PERSISTENT_ENTITY.all()
        for entity in caching.iter_all(q):
            self.put(entity.key().name(), entity)
            self.CACHE_UPDATE_COUNT.inc()

        # we don't have any updates to apply; all items are new
        return {}
# Perf counters exposing cache occupancy; values are polled on demand
# rather than incremented at each cache operation.
RB_CACHE_LEN = models.counters.PerfCounter(
    'gcb-models-ResourceBundleCacheConnection-cache-len',
    'A total number of items in cache.')
RB_CACHE_SIZE_BYTES = PerfCounter(
    'gcb-models-ResourceBundleCacheConnection-cache-bytes',
    'A total size of items in cache in bytes.')
RB_CACHE_LEN.poll_value = ProcessScopedResourceBundleCache.get_cache_len
RB_CACHE_SIZE_BYTES.poll_value = (
    ProcessScopedResourceBundleCache.get_cache_size)

ResourceBundleCacheConnection.init_counters()
class I18nResourceBundleManager(caching.RequestScopedSingleton):
    """Class that provides access to in-process ResourceBundle cache.

    This class only supports get() and does not intercept put() or delete()
    and is unaware of changes to ResourceBundles made in this very process.
    When ResourceBundles change, the changes will be picked up when a new
    instance of this class is created. If you are watching performance
    counters, you will see EVICT and EXPIRE being incremented, but not
    DELETE or PUT.
    """

    def __init__(self, namespace):
        self._conn = ResourceBundleCacheConnection.new_connection(namespace)

    def _get(self, key):
        # Fast path: cache hit with a concrete DTO.
        found, stream = self._conn.get(key)
        if found and stream:
            return stream
        entity = ResourceBundleDAO.ENTITY_KEY_TYPE.get_entity_by_key(
            ResourceBundleEntity, str(key))
        if entity:
            self._conn.put(key, entity)
            return ResourceBundleDAO.DTO(
                entity.key().id_or_name(), transforms.loads(entity.data))
        # Negative-cache the miss so repeated lookups skip the datastore.
        self._conn.CACHE_NOT_FOUND.inc()
        self._conn.put(key, None)
        return None

    def _get_multi(self, keys):
        results = []
        for key in keys:
            results.append(self._get(key))
        return results

    @classmethod
    def get(cls, app_context, key):
        # pylint: disable=protected-access
        return cls.instance(app_context.get_namespace_name())._get(key)

    @classmethod
    def get_multi(cls, app_context, keys):
        # pylint: disable=protected-access
        namespace = app_context.get_namespace_name()
        return cls.instance(namespace)._get_multi(keys)
class I18nTranslationContext(caching.RequestScopedSingleton):
    """Request-scoped holder of the xcontent transformer configuration."""

    def __init__(self, app_context):
        self.app_context = app_context
        self._xcontent_config = None  # built lazily on first use

    @classmethod
    def _init_xcontent_configuration(cls, app_context):
        # Start from xcontent's defaults, then widen them with every
        # registered custom tag that has translatable fields, so embedded
        # tag instances are decomposed/recomposed during translation.
        inline_tag_names = list(xcontent.DEFAULT_INLINE_TAG_NAMES)
        opaque_decomposable_tag_names = list(
            xcontent.DEFAULT_OPAQUE_DECOMPOSABLE_TAG_NAMES)
        recomposable_attributes_map = dict(
            xcontent.DEFAULT_RECOMPOSABLE_ATTRIBUTES_MAP)
        recomposable_attributes_map['HREF'] = {'A'}
        for tag_name, tag_cls in tags.Registry.get_all_tags().items():
            tag_schema = None
            try:
                tag_schema = tag_cls().get_schema(None)
            except Exception:  # pylint: disable=broad-except
                # One misbehaving tag must not break translation of the rest.
                logging.exception('Cannot get schema for %s', tag_name)
                continue
            index = schema_fields.FieldRegistryIndex(tag_schema)
            index.rebuild()
            for name in (
                TRANSLATABLE_FIELDS_FILTER.filter_field_registry_index(index)
            ):
                inline_tag_names.append(tag_name.upper())
                opaque_decomposable_tag_names.append(tag_name.upper())
                recomposable_attributes_map.setdefault(
                    name.upper(), set()).add(tag_name.upper())
        return xcontent.Configuration(
            inline_tag_names=inline_tag_names,
            opaque_decomposable_tag_names=opaque_decomposable_tag_names,
            recomposable_attributes_map=recomposable_attributes_map,
            omit_empty_opaque_decomposable=False,
            sort_attributes=True)

    def _get_xcontent_configuration(self):
        # Build once per request-scoped instance.
        if self._xcontent_config is None:
            self._xcontent_config = self._init_xcontent_configuration(
                self.app_context)
        return self._xcontent_config

    @classmethod
    def get(cls, app_context):
        # pylint: disable=protected-access
        return cls.instance(app_context)._get_xcontent_configuration()
def swapcase(text):
    """Swap case for full words with only alpha/num and punctutation marks."""
    # %-style format directives (e.g. %(name)s, %d) must survive unchanged,
    # so their case-swap is reverted after the general swap.
    format_directive = re.compile(r'\%(\([a-zA-Z]*\))?[DIEeFfS]')

    def _swap_text_node(node):
        swapped = node.nodeValue.swapcase()
        swapped = format_directive.sub(
            lambda m: m.group().swapcase(), swapped)
        # add lambda character at the end to test all code paths
        # properly handle a multibyte character in the content
        node.nodeValue = swapped + unichr(0x03BB)

    def _walk(root):
        for child in root.childNodes:
            if child.nodeType == minidom.Node.TEXT_NODE and child.nodeValue:
                _swap_text_node(child)
            if child.nodeType == minidom.Node.ELEMENT_NODE:
                _walk(child)

    try:
        dom = xcontent.TranslationIO.fromstring(text)
        _walk(dom.documentElement)
        return xcontent.TranslationIO.tostring(dom)
    except:  # pylint: disable=bare-except
        # Best-effort pseudo-translation: fall back to the original text.
        logging.exception('Failed swapcase() for: %s', text)
        return text
class I18nReverseCaseHandler(BaseDashboardExtension):
    """Provide "translation" that swaps case of letters."""

    ACTION = 'i18n_reverse_case'

    @classmethod
    def translate_course(cls, course):
        """Translates a course to rEVERSED cAPS.

        Args:
            course: The course for whose contents we are making translations.
        Returns:
            None.
        """
        cls._add_reverse_case_locale(course)
        po_file_content = cls._build_reverse_case_translations(course)
        cls._set_reverse_case_translations(course, po_file_content)

    @staticmethod
    def _add_reverse_case_locale(course):
        """Registers the pseudo-language as an (unavailable) extra locale."""
        environ = course.get_environ(course.app_context)
        extra_locales = environ.setdefault('extra_locales', [])
        # Idempotent: only add and save when the pseudo locale is absent.
        if not any(
                l[courses.Course.SCHEMA_LOCALE_LOCALE] == PSEUDO_LANGUAGE
                for l in extra_locales):
            extra_locales.append({
                courses.Course.SCHEMA_LOCALE_LOCALE: PSEUDO_LANGUAGE,
                courses.Course.SCHEMA_LOCALE_AVAILABILITY: (
                    courses.Course.SCHEMA_LOCALE_AVAILABILITY_UNAVAILABLE)})
            course.save_settings(environ)

    @staticmethod
    def _build_reverse_case_translations(course):
        """Builds .po file content whose translations are case-swapped.

        Returns:
            The serialized .po file contents as a string.
        """
        original_locale = course.app_context.default_locale
        with common_utils.ZipAwareOpen():
            # Load metadata for 'en', which Babel uses internally.
            localedata.load('en')
            # Load metadata for base course language.
            localedata.load(original_locale)
        translations = TranslationDownloadRestHandler.build_translations(
            course, [PSEUDO_LANGUAGE], 'all')
        cat = TranslationDownloadRestHandler.build_babel_catalog_for_locale(
            course, translations, PSEUDO_LANGUAGE)
        for message in cat:
            message.string = swapcase(message.id)
        # Bug fix: allocate the buffer before the try block. Previously the
        # buffer was created inside try, so a failure in its construction
        # would make the finally clause raise NameError on 'content'.
        content = cStringIO.StringIO()
        try:
            pofile.write_po(content, cat)
            return content.getvalue()
        finally:
            content.close()

    @staticmethod
    def _set_reverse_case_translations(course, po_file_content):
        """Parses the .po content and installs it as course translations."""
        translations = (
            TranslationUploadRestHandler.build_translations_defaultdict())
        TranslationUploadRestHandler.parse_po_file(
            translations, po_file_content)
        messages = []
        TranslationUploadRestHandler.update_translations(course, translations,
                                                         messages)
        # Surface per-resource upload diagnostics in the logs.
        for message in messages:
            logging.warning(message)

    def render(self):
        """Runs the pseudo-translation, then bounces back to the dashboard."""
        course = self.handler.get_course()
        self.translate_course(course)
        self.handler.redirect(
            self.handler.get_action_url(I18nDashboardHandler.ACTION))
class AbstractTranslatableResourceType(object):
    """Interface for resource types listed on the i18n dashboard.

    Implementations are registered via TranslatableResourceRegistry.register()
    and must provide all three classmethods below.
    """

    @classmethod
    def get_ordering(cls):
        # Relative dashboard position; see TranslatableResourceRegistry's
        # ORDERING_* constants.
        raise NotImplementedError('Derived classes must implement this.')

    @classmethod
    def get_title(cls):
        # Human-readable section title; must be unique across registrations.
        raise NotImplementedError('Derived classes must implement this.')

    @classmethod
    def get_resources_and_keys(cls, course):
        # Returns a list of (resource, resource.Key) pairs for this course.
        raise NotImplementedError('Derived classes must implement this.')
class TranslatableResourceRegistry(object):
    """Registry of resource types that appear on the i18n dashboard."""

    # Suggested values for AbstractTranslatableResourceType.get_ordering().
    ORDERING_FIRST = 0
    ORDERING_EARLY = 3
    ORDERING_MIDDLE = 5
    ORDERING_LATE = 8
    ORDERING_LAST = 10

    _RESOURCE_TYPES = []
    _RESOURCE_TITLES = set()

    @classmethod
    def register(cls, translatable_resource):
        """Adds a translatable resource type to the registry.

        Args:
            translatable_resource: an AbstractTranslatableResourceType subtype.
        Raises:
            ValueError: if a resource with the same title is already
                registered.
        """
        title = translatable_resource.get_title()
        if title in cls._RESOURCE_TITLES:
            raise ValueError(
                'Title "%s" is already registered as a translatable resource.' %
                title)
        cls._RESOURCE_TITLES.add(title)
        cls._RESOURCE_TYPES.append(translatable_resource)

    @classmethod
    def get_all(cls):
        """Returns registered resource types, sorted by ordering value."""
        # Fix: sorted() already returns a fresh list; the identity list
        # comprehension that previously wrapped it was redundant.
        return sorted(cls._RESOURCE_TYPES, key=lambda x: x.get_ordering())

    @classmethod
    def get_resources_and_keys(cls, course):
        """Returns (resource, key) pairs from all registered types, in order."""
        ret = []
        for resource_type in cls.get_all():
            ret += resource_type.get_resources_and_keys(course)
        return ret
class TranslatableResourceCourseSettings(AbstractTranslatableResourceType):
    """Exposes course settings sections as translatable resources."""

    @classmethod
    def get_ordering(cls):
        return TranslatableResourceRegistry.ORDERING_FIRST

    @classmethod
    def get_title(cls):
        return 'Course Settings'

    @classmethod
    def get_resources_and_keys(cls, course):
        # One (resource, key) pair per settings schema section, name order.
        return [
            (resources_display.ResourceCourseSettings.get_resource(
                course, section_name),
             resource.Key(resources_display.ResourceCourseSettings.TYPE,
                          section_name, course))
            for section_name in sorted(courses.Course.get_schema_sections())]
class TranslatableResourceCourseComponents(AbstractTranslatableResourceType):
    """Exposes units, lessons, and assessments as translatable resources."""

    @classmethod
    def get_ordering(cls):
        return TranslatableResourceRegistry.ORDERING_MIDDLE

    @classmethod
    def get_title(cls):
        return 'Course Outline'

    @classmethod
    def get_resources_and_keys(cls, course):
        """Returns (resource, key) pairs in course-outline display order.

        For each top-level unit: the unit itself, then (for ordinary units)
        its pre-assessment, its lessons, and its post-assessment, in that
        order. The emission order is significant: it defines the row order
        on the i18n dashboard.
        """
        ret = []
        for unit in course.get_units():
            # Child units (e.g. assessments owned by a unit) are skipped here;
            # they are emitted below under their parent unit.
            if course.get_parent_unit(unit.unit_id):
                continue
            if unit.is_custom_unit():
                key = custom_units.UnitTypeRegistry.i18n_resource_key(
                    course, unit)
                # Custom units without an i18n key are silently omitted.
                if key:
                    ret.append((unit, key))
            else:
                ret.append(
                    (unit, resources_display.ResourceUnitBase.key_for_unit(
                        unit, course)))
            if unit.type == verify.UNIT_TYPE_UNIT:
                if unit.pre_assessment:
                    assessment = course.find_unit_by_id(unit.pre_assessment)
                    ret.append(
                        (assessment,
                         resource.Key(
                             resources_display.ResourceAssessment.TYPE,
                             unit.pre_assessment, course)))
                for lesson in course.get_lessons(unit.unit_id):
                    ret.append(((unit, lesson),
                                resource.Key(
                                    resources_display.ResourceLesson.TYPE,
                                    lesson.lesson_id, course)))
                if unit.post_assessment:
                    assessment = course.find_unit_by_id(
                        unit.post_assessment)
                    ret.append(
                        (assessment,
                         resource.Key(
                             resources_display.ResourceAssessment.TYPE,
                             unit.post_assessment, course)))
        return ret
class TranslatableResourceQuestions(AbstractTranslatableResourceType):
    """Exposes questions as translatable resources."""

    @classmethod
    def get_ordering(cls):
        return TranslatableResourceRegistry.ORDERING_LATE

    @classmethod
    def get_title(cls):
        return 'Questions'

    @classmethod
    def get_resources_and_keys(cls, course):
        # The key type varies per question DTO, so it is computed per item.
        key_type_for = (
            resources_display.ResourceQuestionBase.get_question_key_type)
        return [
            (qu, resource.Key(key_type_for(qu), qu.id, course))
            for qu in models.QuestionDAO.get_all()]
class TranslatableResourceQuestionGroups(AbstractTranslatableResourceType):
    """Exposes question groups as translatable resources."""

    @classmethod
    def get_ordering(cls):
        return TranslatableResourceRegistry.ORDERING_LATE

    @classmethod
    def get_title(cls):
        return 'Question Groups'

    @classmethod
    def get_resources_and_keys(cls, course):
        group_type = resources_display.ResourceQuestionGroup.TYPE
        return [
            (qg, resource.Key(group_type, qg.id, course))
            for qg in models.QuestionGroupDAO.get_all()]
class TranslatableResourceHtmlHooks(AbstractTranslatableResourceType):
@classmethod
def get_ordering(cls):
return TranslatableResourceRegistry.ORDERING_LAST
@classmethod
def get_title(cls):
return 'HTML Hooks'
@classmethod
def get_resources_and_keys(cls, course):
ret = [(v, k)
for k, v in utils.ResourceHtmlHook.get_all(course).iteritems()]
ret.sort(key=lambda row: row[0][utils.ResourceHtmlHook.NAME])
return ret
class I18nDashboardHandler(BaseDashboardExtension):
    """Provides the logic for rendering the i18n workflow dashboard."""

    ACTION = 'i18n_dashboard'

    def __init__(self, handler):
        super(I18nDashboardHandler, self).__init__(handler)
        self.course = handler.get_course()
        all_locales = self.handler.app_context.get_all_locales()
        # First locale is the course's base language; the rest are
        # translation targets.
        self.main_locale = all_locales[0]
        self.extra_locales = all_locales[1:]

    def _make_table_section(self, data_rows, section_title):
        # Returns a spacer row, a section-title row, and then either the data
        # rows or a single empty placeholder row.
        rows = []
        rows.append(EmptyRow(name='', class_name='blank-row'))
        rows.append(SectionRow(section_title))
        if data_rows:
            rows += data_rows
        else:
            rows.append(EmptyRow())
        return rows

    def render(self):
        """Builds and renders the i18n dashboard page.

        Collects one table section per registered translatable resource type,
        restricts the locale columns to those the user may edit, and builds
        the action buttons (disabled with an alert in production mode, where
        these long operations would time out).
        """
        rows = []
        for resource_handler in TranslatableResourceRegistry.get_all():
            data_rows = []
            for rsrc, key in resource_handler.get_resources_and_keys(
                    self.course):
                data_rows.append(I18nProgressManager.get(
                    self.course, rsrc, key.type, key.key))
            rows += self._make_table_section(
                data_rows, resource_handler.get_title())
        # If no real resource rows were produced, show a single placeholder.
        if not [row for row in rows if type(row) is ResourceRow]:
            rows = [EmptyRow(name='No course content')]
        # Only show locale columns the current user is permitted to edit.
        permitted_locales = []
        for locale in self.extra_locales:
            if roles.Roles.is_user_allowed(
                self.handler.app_context, custom_module,
                locale_to_permission(locale)
            ):
                permitted_locales.append(locale)
        template_values = {
            'extra_locales': permitted_locales,
            'rows': rows,
            'num_columns': len(permitted_locales) + 1,
            'is_readonly': self.is_readonly(self.course),
        }
        # Course admins also get the base-locale column and the control to
        # toggle per-resource translatability.
        if roles.Roles.is_course_admin(self.handler.app_context):
            template_values['main_locale'] = self.main_locale
            template_values['is_translatable_xsrf_token'] = (
                crypto.XsrfTokenManager.create_xsrf_token(
                    IsTranslatableRestHandler.XSRF_TOKEN_NAME))
            template_values['num_columns'] += 1
        main_content = self.handler.get_template(
            'i18n_dashboard.html', [TEMPLATES_DIR]).render(template_values)
        edit_actions = [
            {
                'id': 'delete_translation',
                'caption': 'Delete Translations',
                'href': self.handler.get_action_url(
                    I18nDeletionHandler.ACTION),
            },
            {
                'id': 'upload_translation_files',
                'caption': 'Upload Translation Files',
                'href': self.handler.get_action_url(
                    I18nUploadHandler.ACTION),
            },
            {
                'id': 'download_translation_files',
                'caption': 'Download Translation Files',
                'href': self.handler.get_action_url(
                    I18nDownloadHandler.ACTION),
            },
        ]
        translate_actions = [
            {
                'id': 'translate_to_reverse_case',
                'caption': '"Translate" to rEVERSED cAPS',
                'href': self.handler.get_action_url(
                    I18nReverseCaseHandler.ACTION),
            },
        ]
        actions = []
        if not self.is_readonly(self.course):
            actions += translate_actions
            if len(self.course.all_locales) > 1:
                actions += edit_actions
            if appengine_config.PRODUCTION_MODE:
                # These operations are too slow for a production request;
                # replace their links with an explanatory alert.
                message = (
                    'This operation takes a substantial amount of time, and '
                    'is very likely to time out when executed from a web '
                    'browser talking to a production server. Alternatives '
                    'are to work with a development server or use the ETL '
                    'scripts to do translation upload/download. See the '
                    'file .../scripts/etl.sh in your Course Builder download for '
                    'complete instructions on usage. ')
                disabled_alert = 'javascript: alert("%s")' % message
                for action in actions:
                    del action['href']
                    action['action'] = disabled_alert
        actions += [
            {
                'id': 'edit_18n_settings',
                'caption': 'Edit I18N Settings',
                'href': self.handler.get_action_url(
                    'settings', extra_args={
                        'tab': 'i18n',
                        'exit_url': 'dashboard?action=i18n_dashboard',
                    })
            },
        ]
        self.handler.render_page({
            'page_title': self.handler.format_title('I18n Workflow'),
            'main_content': jinja2.utils.Markup(main_content),
            'sections': [{
                'title': 'Internationalization%s' % (
                    ' (readonly)' if self.is_readonly(
                        self.course) else ''),
                'actions': actions,
                'pre': ' ',
            }]
        })
class TranslationConsole(BaseDashboardExtension):
    """Renders the per-resource translation editing console."""

    ACTION = 'i18_console'

    @classmethod
    def get_edit_url(cls, key):
        # Dashboard URL that opens this console for the given bundle key.
        return 'dashboard?%s' % urllib.urlencode({
            'action': cls.ACTION,
            'key': key})

    def render(self):
        """Renders the object editor backed by TranslationConsoleRestHandler."""
        main_content = oeditor.ObjectEditor.get_html_for(
            self.handler,
            TranslationConsoleRestHandler.SCHEMA.get_json_schema(),
            TranslationConsoleRestHandler.SCHEMA.get_schema_dict(),
            self.handler.request.get('key'),
            self.handler.canonicalize_url(TranslationConsoleRestHandler.URL),
            self.handler.get_action_url(I18nDashboardHandler.ACTION),
            auto_return=False,
            required_modules=TranslationConsoleRestHandler.REQUIRED_MODULES,
            extra_css_files=['translation_console.css'],
            extra_js_files=['translation_console.js'],
            additional_dirs=[TEMPLATES_DIR])
        # Read-only courses get a message instead of the (already-built)
        # editor content.
        if self.is_readonly(self.handler.get_course()):
            main_content = self.format_readonly_message()
        self.handler.render_page({
            'page_title': self.handler.format_title('I18n Workflow'),
            'main_content': main_content})
def tc_generate_schema():
    """Builds the field schema for the translation console editor.

    The schema is a list of 'sections' (one per translatable field of the
    resource), each holding a list of 'data' items pairing an original
    source_value with an editable target_value. Property-addition order is
    significant: it defines the display order in the editor.
    """
    schema = schema_fields.FieldRegistry(
        'Translation Console', extra_schema_dict_values={
            'className': 'inputEx-Group translation-console'})
    schema.add_property(schema_fields.SchemaField(
        'title', 'Title', 'string', editable=False))
    schema.add_property(schema_fields.SchemaField(
        'key', 'ID', 'string', hidden=True))
    schema.add_property(schema_fields.SchemaField(
        'source_locale', 'Source Locale', 'string', hidden=True))
    schema.add_property(schema_fields.SchemaField(
        'target_locale', 'Target Locale', 'string', hidden=True))
    # One section per translatable field of the resource.
    section = schema_fields.FieldRegistry(
        None, 'section', extra_schema_dict_values={
            'className': 'inputEx-Group translation-item'})
    section.add_property(schema_fields.SchemaField(
        'name', '', 'string', hidden=True))
    section.add_property(schema_fields.SchemaField(
        'label', 'Name', 'string', editable=False))
    section.add_property(schema_fields.SchemaField(
        'type', 'Type', 'string', editable=False, optional=True))
    section.add_property(schema_fields.SchemaField(
        'source_value', 'source_value', 'string', hidden=True, optional=True))
    # One item per translatable chunk within the field.
    item = schema_fields.FieldRegistry(None, 'item')
    item.add_property(schema_fields.SchemaField(
        'source_value', 'Original', 'string', optional=True,
        extra_schema_dict_values={'_type': 'text', 'className': 'disabled'}))
    item.add_property(schema_fields.SchemaField(
        'target_value', 'Translated', 'string', optional=True,
        extra_schema_dict_values={'_type': 'text', 'className': 'active'}))
    item.add_property(schema_fields.SchemaField(
        'verb', 'Verb', 'number', hidden=True, optional=True))
    item.add_property(schema_fields.SchemaField(
        'old_source_value', 'Old Source Value', 'string', hidden=True,
        optional=True))
    item.add_property(schema_fields.SchemaField(
        'changed', 'Changed', 'boolean', hidden=True, optional=True))
    section.add_property(schema_fields.FieldArray(
        'data', 'Data', item_type=item,
        extra_schema_dict_values={}))
    schema.add_property(schema_fields.FieldArray(
        'sections', 'Sections', item_type=section))
    return schema
class TranslationConsoleRestHandler(utils.BaseRESTHandler):
    """REST backend for the translation console editor."""

    URL = '/rest/modules/i18n_dashboard/translation_console'
    XSRF_TOKEN_NAME = 'translation-console'
    SCHEMA = tc_generate_schema()
    REQUIRED_MODULES = [
        'inputex-hidden', 'inputex-list', 'inputex-string', 'inputex-textarea',
        'inputex-uneditable']

    def get(self):
        """Returns the editable sections for the requested bundle key."""

        def cmp_sections(section1, section2):
            """Comparator to sort the sections in schema order."""
            name1 = section1['name']
            name2 = section2['name']
            path1 = name1.split(':')
            path2 = name2.split(':')
            for part1, part2 in zip(path1, path2):
                # '[N]' path parts are array indices; compare numerically.
                if part1[0] == '[' and part1[-1] == ']':
                    assert part2[0] == '[' and part2[-1] == ']'
                    c = cmp(int(part1[1:-1]), int(part2[1:-1]))
                    if c != 0:
                        return c
                    else:
                        continue
                elif part1 != part2:
                    # Differing names: fall back to the schema's declared
                    # field order (via the binding built below).
                    name_no_index1, _ = (
                        schema_fields.FieldRegistry.compute_name(path1))
                    name_no_index2, _ = (
                        schema_fields.FieldRegistry.compute_name(path2))
                    return cmp(
                        binding.index.names_in_order.index(name_no_index1),
                        binding.index.names_in_order.index(name_no_index2))
            return cmp(len(path1), len(path2))

        key = ResourceBundleKey.fromstring(self.request.get('key'))
        if not has_locale_rights(self.app_context, key.locale):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': str(key)})
            return
        resource_bundle_dto = I18nResourceBundleManager.get(
            self.app_context, str(key))
        transformer = xcontent.ContentTransformer(
            config=I18nTranslationContext.get(self.app_context))
        course = self.get_course()
        binding, sections = self.build_sections_for_key(
            key, course, resource_bundle_dto, transformer)
        resource_key = key.resource_key
        resource_handler = resource.Registry.get(resource_key.type)
        rsrc = resource_handler.get_resource(course, resource_key.key)
        title = _build_resource_title(self.app_context, resource_key.type, rsrc)
        payload_dict = {
            'key': str(key),
            'title': unicode(title),
            'source_locale': self.app_context.default_locale,
            'target_locale': key.locale,
            'sections': sorted(sections, cmp=cmp_sections)
        }
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=payload_dict,
            xsrf_token=crypto.XsrfTokenManager.create_xsrf_token(
                self.XSRF_TOKEN_NAME))

    def put(self):
        """Saves (or, with validate=True, dry-run validates) translations."""
        request = transforms.loads(self.request.get('request'))
        key = ResourceBundleKey.fromstring(request['key'])
        validate = request.get('validate', False)
        if not self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN_NAME, {'key': str(key)}):
            return
        if not has_locale_rights(self.app_context, key.locale):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': str(key)})
            return
        payload = transforms.loads(request['payload'])
        payload_dict = transforms.json_to_dict(
            payload, self.SCHEMA.get_json_schema_dict())
        # Update the resource bundle
        resource_bundle_dto = ResourceBundleDAO.load_or_create(key)
        i18n_progress_dto = I18nProgressDAO.load_or_create(key.resource_key)
        self.update_dtos_with_section_data(
            key, payload_dict['sections'], resource_bundle_dto,
            i18n_progress_dto)
        if validate:
            # Dry run: report per-section status without persisting anything.
            section_names = [
                section['name'] for section in payload_dict['sections']]
            report = self._get_validation_report(
                key, section_names, resource_bundle_dto)
            transforms.send_json_response(self, 200, 'OK', payload_dict=report)
        else:
            I18nProgressDAO.save(i18n_progress_dto)
            ResourceBundleDAO.save(resource_bundle_dto)
            # Course settings are cached; drop the cache so translated
            # settings take effect immediately.
            if (key.resource_key.type ==
                resources_display.ResourceCourseSettings.TYPE):
                self.get_course().invalidate_cached_course_settings()
            transforms.send_json_response(self, 200, 'Saved.')

    def _get_validation_report(self, key, section_names, resource_bundle_dto):
        """Returns {section name: {status, errm, output}} via LazyTranslator."""
        report = {}
        for name in section_names:
            section = resource_bundle_dto.dict.get(name)
            if section is None:
                report[name] = {
                    'status': LazyTranslator.NOT_STARTED_TRANSLATION,
                    'errm': 'No translation saved yet'}
                continue
            source_value = (
                section['source_value'] if section['type'] == TYPE_HTML
                else section['data'][0]['source_value'])
            translator = LazyTranslator(
                self.app_context, key, source_value, section)
            # Forcing the translation populates translator.status/.errm.
            output = unicode(translator)
            report[name] = {
                'status': translator.status,
                'errm': translator.errm,
                'output': output}
        return report

    @staticmethod
    def update_dtos_with_section_data(key, sections, resource_bundle_dto,
                                      i18n_progress_dto):
        """Folds edited section data into the bundle and progress DTOs.

        Sections with at least one 'changed' item overwrite their entry in
        resource_bundle_dto; the progress DTO is then set to DONE,
        IN_PROGRESS, or NOT_STARTED for key.locale based on all items.
        """
        if not resource_bundle_dto:
            resource_bundle_dto = ResourceBundleDTO(key, {})
        for section in sections:
            changed = False
            data = []
            for item in section['data']:
                if item['changed']:
                    changed = True
                    data.append({
                        'source_value': item['source_value'],
                        'target_value': item['target_value']})
                elif item['verb'] == VERB_CHANGED:
                    # Keep the stale translation paired with the old source.
                    data.append({
                        'source_value': item['old_source_value'],
                        'target_value': item['target_value']})
                elif item['verb'] == VERB_CURRENT:
                    data.append({
                        'source_value': item['source_value'],
                        'target_value': item['target_value']})
                else:  # when it is VERB_NEW
                    pass
            if changed:
                source_value = None
                if section['type'] == TYPE_HTML:
                    source_value = section['source_value']
                resource_bundle_dto.dict[section['name']] = {
                    'type': section['type'],
                    'source_value': source_value,
                    'data': data,
                }
        # Update the progress
        any_done = False
        all_done = True
        for section in sections:
            for item in section['data']:
                # In theory, 'both_blank' will never happen, but
                # belt-and-suspenders.
                both_blank = (not item['source_value'] and
                              not item['target_value'])
                has_up_to_date_translation = (item['target_value'] and
                                              (item['verb'] == VERB_CURRENT or
                                               item['changed']))
                if both_blank or has_up_to_date_translation:
                    any_done = True
                else:
                    all_done = False
                # If we have a stale translation, but there is a value for it,
                # consider that to be in-progress.
                if (item['verb'] == VERB_CHANGED and not item['changed'] and
                    item['target_value']):
                    any_done = True
                    all_done = False
        if all_done:
            progress = I18nProgressDTO.DONE
        elif any_done:
            progress = I18nProgressDTO.IN_PROGRESS
        else:
            progress = I18nProgressDTO.NOT_STARTED
        i18n_progress_dto.set_progress(key.locale, progress)

    @staticmethod
    def build_sections_for_key(
            key, course, resource_bundle_dto, transformer):
        """Builds (binding, sections) describing translatable fields of key.

        Diffs the resource's current field values against any stored bundle,
        marking each chunk with a VERB_* value (new / current / changed).
        For non-default locales, pre-fills untranslated chunks from the
        installed gettext catalogs.
        """

        def add_known_translations_as_defaults(locale, sections):
            try:
                translations = i18n.get_store().get_translations(locale)
            except AssertionError:
                # We're in an environment, like ETL, where we cannot get_store()
                # because we're not in a request in the container so we don't
                # have a WSGIApplication. In that case, we return here and
                # accept some missing (nonessential) values in the output files.
                return
            for section in sections:
                for item in section['data']:
                    if item['verb'] == VERB_NEW:
                        # NOTE: The types of source values we are getting here
                        # include: unicode, str, float, and None. It appears
                        # to be harmless to force a conversion to unicode so
                        # that we are uniform in what we are asking for a
                        # translation for.
                        source_value = unicode(item['source_value'] or '')
                        if source_value:
                            target_value = translations.gettext(source_value)
                            # File under very weird: Mostly, the i18n library
                            # hands back unicode instances. However,
                            # sometimes it will give back a string. And
                            # sometimes, that string is the UTF-8 encoding of
                            # a unicode string. Convert it back to unicode,
                            # because trying to do reasonable things on such
                            # values (such as casting to unicode) will raise
                            # an exception.
                            if type(target_value) == str:
                                try:
                                    target_value = target_value.decode('utf-8')
                                except UnicodeDecodeError:
                                    pass
                            if target_value != source_value:
                                item['target_value'] = target_value
                                # Flag the text as needing accepted
                                item['verb'] = VERB_CHANGED

        schema = key.resource_key.get_schema(course)
        values = key.resource_key.get_data_dict(course)
        binding = schema_fields.ValueToTypeBinding.bind_entity_to_schema(
            values, schema)
        allowed_names = TRANSLATABLE_FIELDS_FILTER.filter_value_to_type_binding(
            binding)
        # Reconstruct the previously-saved source/target pairs, if any.
        existing_mappings = []
        if resource_bundle_dto:
            for name, value in resource_bundle_dto.dict.items():
                if value['type'] == TYPE_HTML:
                    source_value = value['source_value']
                    target_value = ''
                else:
                    source_value = value['data'][0]['source_value']
                    target_value = value['data'][0]['target_value']
                existing_mappings.append(xcontent.SourceToTargetMapping(
                    name, None, value['type'], source_value, target_value))
        mappings = xcontent.SourceToTargetDiffMapping.map_source_to_target(
            binding, allowed_names=allowed_names,
            existing_mappings=existing_mappings)
        map_lists_source_to_target = (
            xcontent.SourceToTargetDiffMapping.map_lists_source_to_target)
        sections = []
        for mapping in mappings:
            if mapping.type == TYPE_HTML:
                # HTML fields are decomposed into multiple translatable
                # chunks, each diffed against the saved chunks.
                html_existing_mappings = []
                if resource_bundle_dto:
                    field_dict = resource_bundle_dto.dict.get(mapping.name)
                    if field_dict:
                        html_existing_mappings = field_dict['data']
                context = xcontent.Context(
                    xcontent.ContentIO.fromstring(mapping.source_value))
                transformer.decompose(context)
                html_mappings = map_lists_source_to_target(
                    context.resource_bundle,
                    [m['source_value'] for m in html_existing_mappings])
                source_value = mapping.source_value
                data = []
                for html_mapping in html_mappings:
                    if html_mapping.target_value_index is not None:
                        target_value = html_existing_mappings[
                            html_mapping.target_value_index]['target_value']
                    else:
                        target_value = ''
                    data.append({
                        'source_value': html_mapping.source_value,
                        'old_source_value': html_mapping.target_value,
                        'target_value': target_value,
                        'verb': html_mapping.verb,
                        'changed': False})
            else:
                # Plain fields translate as one single chunk.
                old_source_value = ''
                if mapping.verb == VERB_CHANGED:
                    existing_mapping = (
                        xcontent.SourceToTargetMapping.find_mapping(
                            existing_mappings, mapping.name))
                    if existing_mapping:
                        old_source_value = existing_mapping.source_value
                source_value = ''
                data = [{
                    'source_value': mapping.source_value,
                    'old_source_value': old_source_value,
                    'target_value': mapping.target_value,
                    'verb': mapping.verb,
                    'changed': False}]
            # Skip sections whose chunks are all empty; nothing to translate.
            if any([item['source_value'] for item in data]):
                sections.append({
                    'name': mapping.name,
                    'label': mapping.label,
                    'type': mapping.type,
                    'source_value': source_value,
                    'data': data
                })
        if key.locale != course.app_context.default_locale:
            add_known_translations_as_defaults(key.locale, sections)
        return binding, sections
class I18nProgressDeferredUpdater(jobs.DurableJob):
    """Deferred job to update progress state."""

    @staticmethod
    def is_translatable_course():
        # Truthy when the current course declares any extra locales;
        # returns the extra_locales list itself, not a strict bool.
        app_context = sites.get_course_for_current_request()
        if not app_context:
            return False
        environ = courses.Course.get_environ(app_context)
        return environ.get('extra_locales', [])

    @staticmethod
    def on_lesson_changed(lesson):
        # Change hook: re-derive i18n progress for the edited lesson.
        if not I18nProgressDeferredUpdater.is_translatable_course():
            return
        key = resource.Key(
            resources_display.ResourceLesson.TYPE, lesson.lesson_id)
        I18nProgressDeferredUpdater.update_resource(key)

    @staticmethod
    def on_unit_changed(unit):
        if not I18nProgressDeferredUpdater.is_translatable_course():
            return
        key = resources_display.ResourceUnitBase.key_for_unit(unit)
        I18nProgressDeferredUpdater.update_resource(key)

    @staticmethod
    def on_questions_changed(question_dto_list):
        if not I18nProgressDeferredUpdater.is_translatable_course():
            return
        key_list = [
            resource.Key(
                resources_display.ResourceQuestionBase.get_question_key_type(
                    question_dto),
                question_dto.id)
            for question_dto in question_dto_list]
        I18nProgressDeferredUpdater.update_resource_list(key_list)

    @staticmethod
    def on_question_groups_changed(question_group_dto_list):
        if not I18nProgressDeferredUpdater.is_translatable_course():
            return
        key_list = [
            resource.Key(resources_display.ResourceQuestionGroup.TYPE,
                         question_group_dto.id)
            for question_group_dto in question_group_dto_list]
        I18nProgressDeferredUpdater.update_resource_list(key_list)

    @staticmethod
    def on_course_settings_changed(course_settings):
        if not I18nProgressDeferredUpdater.is_translatable_course():
            return
        app_context = sites.get_course_for_current_request()
        course = courses.Course.get(app_context)
        resources_and_keys = (
            TranslatableResourceCourseSettings.get_resources_and_keys(course))
        I18nProgressDeferredUpdater.update_resource_list([
            key for _, key in resources_and_keys])

    @classmethod
    def update_resource(cls, resource_key):
        cls.update_resource_list([resource_key])

    @classmethod
    def update_resource_list(cls, resource_key_list):
        # Enqueues this job for deferred execution; run() does the work.
        app_context = sites.get_course_for_current_request()
        cls(app_context, resource_key_list).submit()

    def __init__(self, app_context, resource_key_list):
        super(I18nProgressDeferredUpdater, self).__init__(app_context)
        self._resource_key_list = resource_key_list

    def run(self):
        # Fake a request URL to make sites.get_course_for_current_request work
        sites.set_path_info(self._app_context.slug)
        try:
            for resource_key in self._resource_key_list:
                self._update_progress_for_resource(resource_key)
        finally:
            sites.unset_path_info()

    def _update_progress_for_resource(self, resource_key):
        # Recomputes progress for every non-default locale, then persists.
        i18n_progress_dto = I18nProgressDAO.load_or_create(str(resource_key))
        for locale in self._app_context.get_all_locales():
            if locale != self._app_context.default_locale:
                key = ResourceBundleKey.from_resource_key(resource_key, locale)
                self._update_progress_for_locale(key, i18n_progress_dto)
        I18nProgressDAO.save(i18n_progress_dto)

    def _update_progress_for_locale(self, key, i18n_progress_dto):
        course = courses.Course(None, app_context=self._app_context)
        resource_bundle_dto = ResourceBundleDAO.load(str(key))
        transformer = xcontent.ContentTransformer(
            config=I18nTranslationContext.get(self._app_context))
        _, sections = TranslationConsoleRestHandler.build_sections_for_key(
            key, course, resource_bundle_dto, transformer)
        TranslationConsoleRestHandler.update_dtos_with_section_data(
            key, sections, resource_bundle_dto, i18n_progress_dto)
class LazyTranslator(object):
    """String-like proxy that translates its source value on first use.

    Instances stand in for string attributes of course objects; the actual
    translation work happens the first time the value is coerced to a
    string, and the result is cached in self.target_value.
    """

    # Status values, also reported in the console's validation report.
    NOT_STARTED_TRANSLATION = 0
    VALID_TRANSLATION = 1
    INVALID_TRANSLATION = 2

    @classmethod
    def json_encode(cls, obj):
        # Hook for the JSON serializer: emit the translated text.
        if isinstance(obj, cls):
            return unicode(obj)
        return None

    def __init__(self, app_context, key, source_value, translation_dict):
        assert source_value is None or isinstance(source_value, basestring)
        self._app_context = app_context
        self._key = key
        self.source_value = source_value
        self.target_value = None  # cache; filled lazily by __str__
        self.translation_dict = translation_dict
        self._status = self.NOT_STARTED_TRANSLATION
        self._errm = ''

    @property
    def status(self):
        return self._status

    @property
    def errm(self):
        return self._errm

    def __str__(self):
        # Lazily translate once; subsequent calls hit the cache.
        if self.target_value is not None:
            return self.target_value
        # Empty source strings will not be translated because they cannot be
        # edited in the TranslationConsole. If a translation for an empty string
        # is really required, the source string should be set to a I18N comment.
        if self.source_value is None or not self.source_value.strip():
            return ''
        if self.translation_dict['type'] == TYPE_HTML:
            self.target_value = self._translate_html()
        else:
            self.target_value = self._translate_text()
        return self.target_value

    def __len__(self):
        return len(unicode(self))

    def __add__(self, other):
        return unicode(self) + other

    def __mod__(self, other):
        return unicode(self) % other

    def upper(self):
        return unicode(self).upper()

    def lower(self):
        return unicode(self).lower()

    def _translate_text(self):
        # Plain fields have exactly one data item; use its target directly.
        self._status = self.VALID_TRANSLATION
        return self.translation_dict['data'][0]['target_value']

    def _translate_html(self):
        """Recomposes translated HTML; falls back with a report on mismatch."""
        self._status = self.INVALID_TRANSLATION
        try:
            context = xcontent.Context(xcontent.ContentIO.fromstring(
                self.source_value))
            transformer = xcontent.ContentTransformer(
                config=I18nTranslationContext.get(self._app_context))
            transformer.decompose(context)
            data_list = self.translation_dict['data']
            diff_mapping_list = (
                xcontent.SourceToTargetDiffMapping.map_lists_source_to_target(
                    context.resource_bundle, [
                        data['source_value']
                        for data in data_list]))
            # Saved translations with no matching chunk in the current
            # content count as misses.
            count_misses = 0
            if len(context.resource_bundle) < len(data_list):
                count_misses = len(data_list) - len(context.resource_bundle)
            resource_bundle = []
            for mapping in diff_mapping_list:
                if mapping.verb == VERB_CURRENT:
                    resource_bundle.append(
                        data_list[mapping.target_value_index]['target_value'])
                elif mapping.verb in [VERB_CHANGED, VERB_NEW]:
                    # Untranslated or stale chunk: emit the source text.
                    count_misses += 1
                    resource_bundle.append(
                        context.resource_bundle[mapping.source_value_index])
                else:
                    raise ValueError('Unknown verb: %s' % mapping.verb)
            errors = []
            transformer.recompose(context, resource_bundle, errors)
            body = xcontent.ContentIO.tostring(context.tree)
            if count_misses == 0 and not errors:
                self._status = self.VALID_TRANSLATION
                return body
            else:
                parts = 'part' if count_misses == 1 else 'parts'
                are = 'is' if count_misses == 1 else 'are'
                self._errm = (
                    'The content has changed and {n} {parts} of the '
                    'translation {are} out of date.'.format(
                        n=count_misses, parts=parts, are=are))
                return self._detailed_error(self._errm, self._fallback(body))
        except Exception as ex:  # pylint: disable=broad-except
            logging.exception('Unable to translate: %s', self.source_value)
            self._errm = str(ex)
            return self._detailed_error(
                str(ex), self._fallback(self.source_value))

    def _fallback(self, default_body):
        """Try to fallback to the last known good translation."""
        source_value = self.translation_dict['source_value']
        try:
            resource_bundle = [
                item['target_value'] for item in self.translation_dict['data']]
            context = xcontent.Context(
                xcontent.ContentIO.fromstring(source_value))
            transformer = xcontent.ContentTransformer(
                config=I18nTranslationContext.get(self._app_context))
            transformer.decompose(context)
            transformer.recompose(context, resource_bundle, [])
            return xcontent.ContentIO.tostring(context.tree)
        except Exception:  # pylint: disable=broad-except
            logging.exception('Unable to fallback translate: %s', source_value)
            return default_body

    def _detailed_error(self, msg, body):
        # Users allowed to translate this locale get an inline error panel
        # with an edit link; everyone else just sees the fallback body.
        if roles.Roles.is_user_allowed(
            self._app_context, custom_module,
            locale_to_permission(self._app_context.get_current_locale())
        ):
            template_env = self._app_context.get_template_environ(
                self._app_context.get_current_locale(), [TEMPLATES_DIR])
            template = template_env.get_template('lazy_loader_error.html')
            return template.render({
                'error_message': msg,
                'edit_url': TranslationConsole.get_edit_url(self._key),
                'body': body})
        else:
            return body
def set_attribute(course, key, thing, attribute_name, translation_dict):
    """Replaces thing.<attribute_name> with a LazyTranslator wrapping it."""
    # TODO(jorr): Need to be able to deal with hierarchical names from the
    # schema, not just top-level names.
    assert hasattr(thing, attribute_name)
    translator = LazyTranslator(
        course.app_context, key, getattr(thing, attribute_name),
        translation_dict)
    setattr(thing, attribute_name, translator)
def is_translation_required():
    """Returns True if current locale is different from the course default."""
    app_context = sites.get_course_for_current_request()
    if not app_context:
        return False
    current_locale = app_context.get_current_locale()
    if not current_locale:
        return False
    return current_locale != app_context.default_locale
@appengine_config.timeandlog('translate_lessons')
def translate_lessons(course, locale):
    """Attaches LazyTranslators to lesson attributes for the given locale.

    Fetches all lessons' resource bundles in one batch; lessons with no
    bundle for this locale are left untouched.
    """
    lesson_list = course.get_lessons_for_all_units()
    key_list = [
        str(ResourceBundleKey(
            resources_display.ResourceLesson.TYPE, lesson.lesson_id, locale))
        for lesson in lesson_list]
    bundle_list = I18nResourceBundleManager.get_multi(
        course.app_context, key_list)
    # key_list, lesson_list and bundle_list are index-aligned.
    for key, lesson, bundle in zip(key_list, lesson_list, bundle_list):
        if bundle is not None:
            for name, translation_dict in bundle.dict.items():
                set_attribute(course, key, lesson, name, translation_dict)
@appengine_config.timeandlog('translate_units')
def translate_units(course, locale):
    """Rewrites unit fields with LazyTranslators for the given locale.

    Unlike translate_lessons, units are updated through their schema
    binding and written back via UnitTools.apply_updates.
    """
    unit_list = course.get_units()
    key_list = []
    for unit in unit_list:
        key = resources_display.ResourceUnitBase.key_for_unit(unit, course)
        key_list.append(ResourceBundleKey(key.type, key.key, locale))
    bundle_list = I18nResourceBundleManager.get_multi(
        course.app_context, key_list)
    unit_tools = resources_display.UnitTools(course)
    # key_list, unit_list and bundle_list are index-aligned.
    for key, unit, bundle in zip(key_list, unit_list, bundle_list):
        if bundle is None:
            # No translations stored for this unit/locale; leave original.
            continue
        schema = key.resource_key.get_schema(course)
        # Only extract the fields that actually have translations.
        data_dict = unit_tools.unit_to_dict(unit, keys=bundle.dict.keys())
        binding = schema_fields.ValueToTypeBinding.bind_entity_to_schema(
            data_dict, schema)
        for name, translation_dict in bundle.dict.items():
            source_value = binding.name_to_value[name].value
            binding.name_to_value[name].value = LazyTranslator(
                course.app_context, key, source_value, translation_dict)
        # NOTE(review): errors from apply_updates are collected but not
        # reported here — confirm this is intentional.
        errors = []
        unit_tools.apply_updates(unit, data_dict, errors)
@appengine_config.timeandlog('translate_html_hooks', duration_only=True)
def translate_html_hooks(html_hooks_dict):
    """Substitute translated HTML-hook content for the current locale.

    Mutates html_hooks_dict in place, replacing each hook's content with
    its (lazily translated) value when a bundle exists for it.
    """
    if not is_translation_required():
        return
    app_context = sites.get_course_for_current_request()
    # NOTE(review): 'course' appears unused in this function -- confirm
    # before removing.
    course = courses.Course(None, app_context=app_context)
    locale = app_context.get_current_locale()
    key_list = [
        ResourceBundleKey(utils.ResourceHtmlHook.TYPE, name, locale) for
        name in html_hooks_dict.iterkeys()]
    bundle_list = I18nResourceBundleManager.get_multi(app_context, key_list)
    for key, bundle in zip(key_list, bundle_list):
        if bundle is None:
            # No translation stored for this hook/locale.
            continue
        schema = utils.ResourceHtmlHook.get_schema(None, None)
        hook_name = key.resource_key.key
        values = utils.ResourceHtmlHook.to_data_dict(
            hook_name, html_hooks_dict[hook_name])
        binding = schema_fields.ValueToTypeBinding.bind_entity_to_schema(
            values, schema)
        for name, translation_dict in bundle.dict.items():
            source_value = binding.name_to_value[name].value
            binding.name_to_value[name].value = LazyTranslator(
                app_context, key, source_value, translation_dict)
        # Write the translated content back into the caller's dict.
        html_hooks_dict[hook_name] = values[utils.ResourceHtmlHook.CONTENT]
@appengine_config.timeandlog('translate_course', duration_only=True)
def translate_course(course):
    """Apply unit and lesson translations for the request's locale."""
    if not is_translation_required():
        return
    # Batch memcache reads while the whole course is translated.
    models.MemcacheManager.begin_readonly()
    try:
        locale = sites.get_course_for_current_request().get_current_locale()
        translate_units(course, locale)
        translate_lessons(course, locale)
    finally:
        models.MemcacheManager.end_readonly()
def translate_course_env(env):
    """Substitute translated course settings into the env dict in place.

    One bundle is looked up per course-settings schema section; sections
    without a stored bundle for the current locale are left untouched.
    """
    if not is_translation_required():
        return
    app_context = sites.get_course_for_current_request()
    locale = app_context.get_current_locale()
    key_list = [
        ResourceBundleKey(
            resources_display.ResourceCourseSettings.TYPE, key, locale)
        for key in courses.Course.get_schema_sections()]
    bundle_list = I18nResourceBundleManager.get_multi(app_context, key_list)
    course = courses.Course.get(app_context)
    for key, bundle in zip(key_list, bundle_list):
        if bundle is None:
            continue
        schema = key.resource_key.get_schema(course)
        binding = schema_fields.ValueToTypeBinding.bind_entity_to_schema(
            env, schema)
        for name, translation_dict in bundle.dict.items():
            field = binding.name_to_value[name]
            source_value = field.value
            # The LazyTranslator defers actual translation until the value
            # is read.
            field.value = LazyTranslator(
                app_context, key, source_value, translation_dict)
def translate_dto_list(course, dto_list, resource_key_list):
    """Wrap translated fields of each DTO's dict in LazyTranslators.

    dto_list and resource_key_list are parallel lists; DTOs without a
    stored bundle for the current locale are left untouched.
    """
    if not is_translation_required():
        return
    app_context = sites.get_course_for_current_request()
    locale = app_context.get_current_locale()
    key_list = [
        ResourceBundleKey(key.type, key.key, locale)
        for key in resource_key_list]
    bundle_list = I18nResourceBundleManager.get_multi(app_context, key_list)
    for key, dto, bundle in zip(key_list, dto_list, bundle_list):
        if bundle is None:
            continue
        schema = key.resource_key.get_schema(course)
        binding = schema_fields.ValueToTypeBinding.bind_entity_to_schema(
            dto.dict, schema)
        for name, translation_dict in bundle.dict.items():
            source_value = binding.name_to_value[name].value
            binding.name_to_value[name].value = LazyTranslator(
                app_context, key, source_value, translation_dict)
def translate_question_dto(dto_list):
    """Substitute translations into question DTOs for the current locale."""
    if not is_translation_required():
        return
    app_context = sites.get_course_for_current_request()
    course = courses.Course.get(app_context)
    key_list = [
        resource.Key(
            resources_display.ResourceQuestionBase.get_question_key_type(dto),
            dto.id)
        for dto in dto_list]
    translate_dto_list(course, dto_list, key_list)
def translate_question_group_dto(dto_list):
    """Substitute translations into question-group DTOs for this locale."""
    if not is_translation_required():
        return
    app_context = sites.get_course_for_current_request()
    course = courses.Course.get(app_context)
    key_list = []
    for dto in dto_list:
        key_list.append(resource.Key(
            resources_display.ResourceQuestionGroup.TYPE, dto.id))
    translate_dto_list(course, dto_list, key_list)
def has_locale_rights(app_context, locale):
    """Check both the module access permission and the locale permission."""
    if not roles.Roles.is_user_allowed(
            app_context, dashboard.custom_module, ACCESS_PERMISSION):
        return False
    return roles.Roles.is_user_allowed(
        app_context, custom_module, locale_to_permission(locale))
def locale_to_permission(locale):
    """Return the name of the permission guarding translation of a locale."""
    return 'translate_' + locale
def permissions_callback(app_context):
    """Yield one translation permission per configured extra locale."""
    extra_locales = app_context.get_environ().get('extra_locales', [])
    for entry in extra_locales:
        locale = entry['locale']
        yield roles.Permission(
            locale_to_permission(locale),
            'Can submit translations for the locale "%s".' % locale)
# Escape characters recognized inside PO-file string literals.
BABEL_ESCAPES = {
    'n': '\n',
    't': '\t',
    'r': '\r'
}


def denormalize(s):
    """Convert a normalized (quoted, escaped) PO-file string to plain text.

    Each line of *s* is a quoted fragment such as '"foo\\n"'; the quotes are
    stripped, backslash escapes are reified via BABEL_ESCAPES (unknown
    escapes fall back to the escaped character itself), and the fragments
    are joined.

    Args:
        s: normalized multi-line PO string.

    Returns:
        The denormalized plain string.
    """
    def reify_escapes(text):
        ret = []
        text_iter = iter(text)
        for c in text_iter:
            if c == '\\':
                # Use the next() builtin instead of the Python-2-only
                # iterator .next() method; works on Python 2.6+ and 3.
                escaped_char = next(text_iter)
                ret.append(BABEL_ESCAPES.get(escaped_char, escaped_char))
            else:
                ret.append(c)
        return ''.join(ret)
    # Strip the surrounding quotes from each line before reifying escapes.
    return ''.join(reify_escapes(line[1:-1]) for line in s.splitlines())
def notify_module_enabled():
    """Wire the i18n dashboard into the rest of the system at module start."""
    # Resource types that can carry translations.
    TranslatableResourceRegistry.register(TranslatableResourceCourseSettings)
    TranslatableResourceRegistry.register(TranslatableResourceCourseComponents)
    TranslatableResourceRegistry.register(TranslatableResourceQuestions)
    TranslatableResourceRegistry.register(TranslatableResourceQuestionGroups)
    TranslatableResourceRegistry.register(TranslatableResourceHtmlHooks)
    # Dashboard navigation and permissions.
    dashboard.DashboardHandler.add_nav_mapping(
        I18nDashboardHandler.ACTION, 'I18N')
    dashboard.DashboardHandler.add_external_permission(
        ACCESS_PERMISSION, ACCESS_PERMISSION_DESCRIPTION)
    roles.Roles.register_permissions(
        custom_module, permissions_callback)
    # Carry translation entities along when a course is imported.
    courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT.add(ResourceBundleEntity)
    courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT.add(I18nProgressEntity)
    # Admin UI handlers.
    I18nDashboardHandler.register()
    I18nDeletionHandler.register()
    I18nDownloadHandler.register()
    I18nUploadHandler.register()
    I18nReverseCaseHandler.register()
    TranslationConsole.register()
    # Hooks that substitute translations when content is loaded.
    courses.Course.POST_LOAD_HOOKS.append(translate_course)
    courses.Course.COURSE_ENV_POST_LOAD_HOOKS.append(translate_course_env)
    models.QuestionDAO.POST_LOAD_HOOKS.append(translate_question_dto)
    models.QuestionGroupDAO.POST_LOAD_HOOKS.append(translate_question_group_dto)
    transforms.CUSTOM_JSON_ENCODERS.append(LazyTranslator.json_encode)
    utils.ApplicationHandler.EXTRA_GLOBAL_CSS_URLS.append(GLOBAL_CSS)
    utils.HtmlHooks.POST_LOAD_CALLBACKS.append(translate_html_hooks)
    # Hooks that track i18n progress when source content is saved.
    unit_lesson_editor.LessonRESTHandler.POST_SAVE_HOOKS.append(
        I18nProgressDeferredUpdater.on_lesson_changed)
    unit_lesson_editor.CommonUnitRESTHandler.POST_SAVE_HOOKS.append(
        I18nProgressDeferredUpdater.on_unit_changed)
    models.QuestionDAO.POST_SAVE_HOOKS.append(
        I18nProgressDeferredUpdater.on_questions_changed)
    models.QuestionGroupDAO.POST_SAVE_HOOKS.append(
        I18nProgressDeferredUpdater.on_question_groups_changed)
    courses.Course.COURSE_ENV_POST_SAVE_HOOKS.append(
        I18nProgressDeferredUpdater.on_course_settings_changed)
    # Implementation in Babel 0.9.6 is buggy; replace with corrected version.
    pofile.denormalize = denormalize
def register_module():
    """Registers this module in the registry.

    Returns:
        The custom_modules.Module instance for the i18n dashboard.
    """
    global_routes = [
        (os.path.join(RESOURCES_PATH, 'js', '.*'), tags.JQueryHandler),
        (os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler)]
    namespaced_routes = [
        (TranslationConsoleRestHandler.URL, TranslationConsoleRestHandler),
        (TranslationDeletionRestHandler.URL, TranslationDeletionRestHandler),
        (TranslationDownloadRestHandler.URL, TranslationDownloadRestHandler),
        (TranslationUploadRestHandler.URL, TranslationUploadRestHandler),
        (IsTranslatableRestHandler.URL, IsTranslatableRestHandler)]
    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'I18N Dashboard Module',
        # Fixed grammar of the admin-facing description (was 'A module
        # provide i18n workflow.').
        'A module that provides an i18n workflow.',
        global_routes, namespaced_routes,
        notify_module_enabled=notify_module_enabled)
    return custom_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = [
'Neema Kotonya (neemak@google.com)',
'Gun Pinyo (gunpinyo@google.com)'
]
import os
from xml.etree import cElementTree
import appengine_config
from common import schema_fields
from common import tags
from controllers import sites
from models import custom_modules
# URI roots for this module's routes: the module itself, its static
# resources, and the bundled MathJax distribution.
MATH_MODULE_URI = '/modules/math'
RESOURCES_URI = MATH_MODULE_URI + '/resources'
MATHJAX_URI = MATH_MODULE_URI + '/MathJax'
class MathTag(tags.ContextAwareTag):
    """Custom tag for mathematical notation using MathJax."""

    binding_name = 'gcb-math'

    @classmethod
    def name(cls):
        return 'Mathematical Formula'

    @classmethod
    def vendor(cls):
        return 'gcb'

    def get_icon_url(self):
        return RESOURCES_URI + '/math.png'

    def render(self, node, context):
        """Emit a <script> element MathJax will typeset."""
        script_elt = cElementTree.XML('<script/>')
        # The formula is "text" type in the schema and so is presented in the
        # tag's body.
        script_elt.text = node.text
        is_mathml = node.attrib.get('input_type') == 'MML'
        script_elt.set('type', 'math/mml' if is_mathml else 'math/tex')
        return script_elt

    def rollup_header_footer(self, context):
        """Include MathJax library only when a math tag is present."""
        header = tags.html_string_to_element_tree("""
            <script src="%s/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
            </script>""" % MATHJAX_URI)
        footer = tags.html_string_to_element_tree('')
        return (header, footer)

    def get_schema(self, unused_handler):
        """Describe the editable attributes: input type and formula body."""
        reg = schema_fields.FieldRegistry(MathTag.name())
        input_type_field = schema_fields.SchemaField(
            'input_type', 'Type', 'string', i18n=False,
            optional=True,
            select_data=[('TeX', 'TeX'), ('MML', 'MML')],
            extra_schema_dict_values={'value': 'TeX'},
            description=('Select either "TeX" or "MML" to write math'
                         ' script in TeX or MathML respectively.'))
        formula_field = schema_fields.SchemaField(
            'formula', 'Mathematical formula', 'text',
            optional=True,
            description=('Provide mathematical script'
                         ' which will be displayed'))
        reg.add_property(input_type_field)
        reg.add_property(formula_field)
        return reg
# Module registration handle; populated by register_module().
custom_module = None
def register_module():
    """Registers this module for use."""

    def on_module_enable():
        tags.Registry.add_tag_binding(MathTag.binding_name, MathTag)

    def on_module_disable():
        tags.Registry.remove_tag_binding(MathTag.binding_name)

    def _zip_handler(archive_name):
        # Serve files straight out of a bundled zip archive.
        return sites.make_zip_handler(os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib', archive_name))

    global_routes = [
        (RESOURCES_URI + '/.*', tags.ResourcesHandler),
        (MATHJAX_URI + '/(fonts/.*)', _zip_handler('mathjax-fonts-2.3.0.zip')),
        (MATHJAX_URI + '/(.*)', _zip_handler('mathjax-2.3.0.zip'))]
    namespaced_routes = []

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Mathematical Formula Display',
        'Provides a custom tag to embed mathematical formulas using TeX or '
        'MML.',
        global_routes, namespaced_routes,
        notify_module_disabled=on_module_disable,
        notify_module_enabled=on_module_enable)
    return custom_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A feature to include questionnaires in lessons.
Usage:
Include a form in the HTML of a lesson and give the form a unique id.
E.g.,
<form id="questionnaire-1">
<label>Name: <input name="name"></label><br>
Sex:
<label>Female <input name="sex" type="radio" value="female"></label>
<label>Male <input name="sex" type="radio" value="male"></label>
</form>
Then add a Questionnaire tag to the page, using the custom component toolbox
in the rich text editor. Enter the id of the form into the tag. This will
add a button to your page which the student presses to submit their
responses. Responses are stored, and are shown and can be edited on
subsequent visits to the page.
"""
__author__ = 'Neema Kotonya (neemak@google.com)'
import os
from xml.etree import cElementTree
import appengine_config
from common import jinja_utils
from common import schema_fields
from common import tags
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import custom_modules
from models import data_sources
from models import models
from models import transforms
# Route root for this module's static resources.
RESOURCES_PATH = '/modules/questionnaire/resources'
# XSRF token scope shared by the tag renderer and the REST handler.
QUESTIONNAIRE_XSRF_TOKEN_NAME = 'questionnaire'
TEMPLATES_DIR = os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules', 'questionnaire', 'templates')
class QuestionnaireTag(tags.ContextAwareTag):
    """A custom tag to manage submission and repopulation of questionnaires."""

    binding_name = 'gcb-questionnaire'

    @classmethod
    def name(cls):
        return 'Questionnaire'

    @classmethod
    def vendor(cls):
        return 'gcb'

    def get_icon_url(self):
        return '/modules/questionnaire/resources/img/icon.png'

    def render(self, node, context):
        """Renders the submit button."""
        xsrf_token = XsrfTokenManager.create_xsrf_token(
            QUESTIONNAIRE_XSRF_TOKEN_NAME)
        form_id = node.attrib.get('form-id')
        button_label = node.attrib.get('button-label')
        disabled = (node.attrib.get('disabled') == 'true')
        post_message = node.text
        # Only enrolled students can submit; the template can use
        # 'registered' to adapt the button accordingly.
        user = context.handler.get_user()
        registered = False
        if user and models.Student.get_enrolled_student_by_email(user.email()):
            registered = True
        template_vals = {
            'xsrf_token': xsrf_token,
            'form_id': form_id,
            'button_label': button_label,
            'disabled': disabled,
            'registered': registered,
            'post_message': post_message,
        }
        template = jinja_utils.get_template(
            'questionnaire.html', [TEMPLATES_DIR])
        button = template.render(template_vals)
        return tags.html_string_to_element_tree(button)

    def get_schema(self, unused_handler):
        """Describe the attributes editable in the component editor."""
        reg = schema_fields.FieldRegistry('Questionnaire')
        reg.add_property(
            schema_fields.SchemaField(
                'form-id', 'Form ID', 'string', optional=True, i18n=False,
                description=(
                    'Enter a unique ID for this form. Note this must be '
                    'unique across your whole course. Then use this ID '
                    'as the ID attribute of your form element.')))
        reg.add_property(
            schema_fields.SchemaField(
                'button-label', 'Button Label', 'string', optional=True,
                i18n=True, description=(
                    'Text to be shown on submit button.')))
        reg.add_property(
            schema_fields.SchemaField(
                'disabled', 'Disabled', 'boolean', optional=True,
                description=(
                    'Use this option to render the form with data but leave '
                    'all the fields disabled. This is used to display the '
                    'results of a questionnaire on a different page.')))
        reg.add_property(
            schema_fields.SchemaField(
                'post-message', 'Post Message', 'text', optional=True,
                i18n=True, description=(
                    'Text shown to the student after they submit their '
                    'responses.')))
        return reg

    def rollup_header_footer(self, context):
        """Attach the questionnaire JS once per page; no header content."""
        header = cElementTree.Comment('Empty header')
        footer = cElementTree.Element('script')
        footer.attrib['src'] = (
            '/modules/questionnaire/resources/js/questionnaire.js')
        return (header, footer)
class StudentFormEntity(models.StudentPropertyEntity):
    """Per-student storage for one questionnaire's submitted form data."""

    @classmethod
    def load_or_create(cls, student, form_id):
        """Fetch the student's entity for form_id, creating it if absent."""
        existing = cls.get(student, form_id)
        if existing is not None:
            return existing
        created = cls.create(student, form_id)
        created.value = '{}'
        created.put()
        return created
class QuestionnaireHandler(BaseRESTHandler):
    """The REST Handler provides GET and PUT methods for the form data."""

    URL = '/rest/modules/questionnaire'

    # JSON schema used to (de)serialize the stored form-data payload.
    SCHEMA = {
        'type': 'object',
        'properties': {
            'form_data': {'type': 'string', 'optional': 'true'}
        }
    }

    def get(self):
        """GET method is called when the page with the questionnaire loads."""
        key = self.request.get('key')
        if not self.assert_xsrf_token_or_fail(
                self.request, QUESTIONNAIRE_XSRF_TOKEN_NAME, {}):
            return
        # NOTE(review): unlike post(), the failure paths below return
        # without sending any JSON response -- confirm the client-side JS
        # tolerates an empty body here.
        user = self.get_user()
        if user is None:
            return
        student = models.Student.get_enrolled_student_by_email(user.email())
        if student is None:
            return
        entity = StudentFormEntity.load_or_create(student, key)
        if entity.value is None:
            return
        form_dict = transforms.loads(entity.value)
        transforms.send_json_response(
            self, 200, None,
            payload_dict=transforms.dict_to_json(form_dict, self.SCHEMA))

    def post(self):
        """POST method called when the student submits answers."""
        # I18N: Message to show the user was not allowed to access to resource
        access_denied = self.gettext('Access denied.')
        # I18N: Message to acknowledge successful submission of the
        # questionnaire
        response_submitted = self.gettext('Response submitted.')
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        if not self.assert_xsrf_token_or_fail(
                request, QUESTIONNAIRE_XSRF_TOKEN_NAME, {}):
            return
        user = self.get_user()
        if user is None:
            transforms.send_json_response(self, 401, access_denied, {})
            return
        student = models.Student.get_enrolled_student_by_email(user.email())
        if student is None:
            transforms.send_json_response(self, 401, access_denied, {})
            return
        payload_json = request.get('payload')
        payload_dict = transforms.json_to_dict(payload_json, self.SCHEMA)
        form_data = StudentFormEntity.load_or_create(student, key)
        # Overwrite any previous submission for this questionnaire.
        form_data.value = transforms.dumps(payload_dict)
        form_data.put()
        transforms.send_json_response(self, 200, response_submitted)
class QuestionnaireDataSource(data_sources.AbstractDbTableRestDataSource):
    """Data source to export all questions responses for all students."""

    @classmethod
    def get_name(cls):
        return 'questionnaire_responses'

    @classmethod
    def get_title(cls):
        return 'Questionnaire Responses'

    @classmethod
    def exportable(cls):
        return True

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        """Describe one export row: user, questionnaire, name/value pairs."""
        reg = schema_fields.FieldRegistry('Questionnaire Response',
                                          description='Course sub-components')
        reg.add_property(schema_fields.SchemaField(
            'user_id', 'User ID', 'string',
            description='Student ID encrypted with a session-specific key'))
        reg.add_property(schema_fields.SchemaField(
            'questionnaire_id', 'Questionnaire ID', 'string',
            description='ID of questionnaire.'))
        form_data = schema_fields.FieldRegistry(None, 'form_data')
        form_data.add_property(schema_fields.SchemaField(
            'name', 'Field Name', 'string', optional=True,
            description='The questionnaire field name.'))
        form_data.add_property(schema_fields.SchemaField(
            'value', 'Field Value', 'string', optional=True,
            description='The student response in the questionnaire field.'))
        reg.add_property(schema_fields.FieldArray(
            'form_data', 'Form Data', item_type=form_data))
        return reg.get_json_schema_dict()['properties']

    @classmethod
    def get_entity_class(cls):
        return StudentFormEntity

    @classmethod
    def _postprocess_rows(cls, unused_app_context, source_context,
                          unused_schema, unused_log, unused_page_number,
                          form_entities):
        """Convert stored entities into export rows with obfuscated IDs."""

        def to_string(value):
            # Preserve None and strings as-is; stringify everything else.
            if value is None or isinstance(value, basestring):
                return value
            else:
                return str(value)

        transform_fn = cls._build_transform_fn(source_context)
        response_list = []
        for entity in form_entities:
            # Split the key name once only: the questionnaire id portion
            # may itself contain '-'.
            student_id, questionnaire_id = entity.key().name().split('-', 1)
            form_data = [
                {
                    'name': to_string(item.get('name')),
                    'value': to_string(item.get('value'))
                } for item in (
                    transforms.loads(entity.value).get('form_data', []))]
            response_list.append({
                'user_id': transform_fn(student_id),
                'questionnaire_id': questionnaire_id,
                'form_data': form_data
            })
        return response_list
# Module registration handle; populated by register_module().
questionnaire_module = None
def register_module():
    """Registers the questionnaire module, its tag, routes and data source."""

    def on_module_enabled():
        tags.Registry.add_tag_binding(
            QuestionnaireTag.binding_name, QuestionnaireTag)
        data_sources.Registry.register(QuestionnaireDataSource)

    global_routes = [
        (os.path.join(RESOURCES_PATH, 'js', '.*'), tags.JQueryHandler),
        (os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler)]
    namespaced_routes = [(QuestionnaireHandler.URL, QuestionnaireHandler)]
    global questionnaire_module  # pylint: disable=global-statement
    questionnaire_module = custom_modules.Module(
        'Questionnaire',
        # Fix: the adjacent literals previously joined without spaces,
        # producing "answer.The" and "canbe" in the rendered description.
        'Can create a questionnaire for students to answer. '
        'The responses submitted by students will be saved as a form which '
        'can be reviewed at a later date.',
        global_routes, namespaced_routes,
        notify_module_enabled=on_module_enabled)
    return questionnaire_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Gun Pinyo (gunpinyo@google.com)'
import os
from xml.etree import cElementTree
import appengine_config
from common import schema_fields
from common import tags
from controllers import sites
from models import custom_modules
from modules.oeditor import oeditor
# URI roots: the module itself, its static resources, and the bundled
# CodeMirror distribution.
CODETAGS_MODULE_URI = '/modules/code_tags'
CODETAGS_RESOURCES_URI = CODETAGS_MODULE_URI + '/resources'
CODEMIRROR_URI = CODETAGS_MODULE_URI + '/codemirror'
# (mode-name, display-label) pairs offered in the tag editor's Language
# drop-down; the mode name is passed to CodeMirror as the data-mode.
SELECT_DATA = [
    # codemirror does not have plain text mode
    # however by setting an incorrect mode it will default to plain text
    ('', 'Plain Text'),
    ('htmlmixed', 'Html'),
    ('javascript', 'JavaScript'),
    ('css', 'CSS'),
    ('python', 'Python'),
    ('ruby', 'Ruby'),
    ('shell', 'Shell'),
    ('xml', 'XML'),
    ('xquery', 'XQuery'),
    ('yaml', 'Yaml'),
    ('perl', 'Perl'),
    ('php', 'PHP'),
    ('coffeescript', 'CoffeeScript'),
    ('clike', 'C (and relative)'),
    ('apl', 'apl'),
    ('asterisk', 'asterisk'),
    ('clojure', 'clojure'),
    ('cobol', 'cobol'),
    ('commonlisp', 'commonlisp'),
    ('cypher', 'cypher'),
    ('d', 'd'),
    ('diff', 'diff'),
    ('django', 'django'),
    ('dtd', 'dtd'),
    ('dylan', 'dylan'),
    ('ecl', 'ecl'),
    ('eiffel', 'eiffel'),
    ('erlang', 'erlang'),
    ('fortran', 'fortran'),
    ('gas', 'gas'),
    ('gfm', 'gfm'),
    ('gherkin', 'gherkin'),
    ('go', 'go'),
    ('groovy', 'groovy'),
    ('haml', 'haml'),
    ('haskell', 'haskell'),
    ('haxe', 'haxe'),
    ('htmlembedded', 'htmlembedded'),
    ('http', 'http'),
    ('jade', 'jade'),
    ('jinja2', 'jinja2'),
    ('julia', 'julia'),
    ('kotlin', 'kotlin'),
    ('livescript', 'livescript'),
    ('lua', 'lua'),
    ('markdown', 'markdown'),
    ('mirc', 'mirc'),
    ('mllike', 'mllike'),
    ('nginx', 'nginx'),
    ('ntriples', 'ntriples'),
    ('octave', 'octave'),
    ('pascal', 'pascal'),
    ('pegjs', 'pegjs'),
    ('pig', 'pig'),
    ('properties', 'properties'),
    ('puppet', 'puppet'),
    ('q', 'q'),
    ('r', 'r'),
    ('rpm', 'rpm'),
    ('rst', 'rst'),
    ('rust', 'rust'),
    ('sass', 'sass'),
    ('scheme', 'scheme'),
    ('sieve', 'sieve'),
    ('slim', 'slim'),
    ('smalltalk', 'smalltalk'),
    ('smarty', 'smarty'),
    ('smartymixed', 'smartymixed'),
    ('solr', 'solr'),
    ('sparql', 'sparql'),
    ('sql', 'sql'),
    ('stex', 'stex'),
    ('tcl', 'tcl'),
    ('tiddlywiki', 'tiddlywiki'),
    ('tiki', 'tiki'),
    ('toml', 'toml'),
    ('turtle', 'turtle'),
    ('vb', 'vb'),
    ('vbscript', 'vbscript'),
    ('velocity', 'velocity'),
    ('verilog', 'verilog'),
    ('z80', 'z80'),
]
class CodeTag(tags.ContextAwareTag):
    """Custom tag for showing piece of code using CodeMirror."""

    binding_name = 'gcb-code'

    @classmethod
    def name(cls):
        return 'Embedded Code'

    @classmethod
    def vendor(cls):
        return 'gcb'

    @classmethod
    def extra_js_files(cls):
        # The popup editor script is only useful when highlighting is on.
        if oeditor.CAN_HIGHLIGHT_CODE.value:
            return ['code_tags_popup.js']
        else:
            return []

    @classmethod
    def additional_dirs(cls):
        return [os.path.join(
            appengine_config.BUNDLE_ROOT, 'modules', 'code_tags', 'resources')]

    def render(self, node, context):
        """Render the tag body as a read-only CodeMirror <code> element."""
        code_elt = cElementTree.Element('code')
        code_elt.text = node.text or ''
        code_elt.set('class', 'codemirror-container-readonly')
        # Fix: default to '' ("Plain Text" per SELECT_DATA) when no mode
        # attribute is present; ElementTree cannot serialize a None
        # attribute value.
        code_elt.set('data-mode', node.attrib.get('mode', ''))
        return code_elt

    def rollup_header_footer(self, context):
        """Include CodeMirror library only when a code tag is present."""
        if oeditor.CAN_HIGHLIGHT_CODE.value:
            header = tags.html_string_to_element_tree(
                '<script src="%s/lib/codemirror.js"></script>'
                '<link rel="stylesheet" href="%s/lib/codemirror.css">'
                '<script src="%s/addon/mode/loadmode.js"></script>'
                '<link rel="stylesheet" href="%s/code_tags.css">' % (
                    CODEMIRROR_URI, CODEMIRROR_URI, CODEMIRROR_URI,
                    CODETAGS_RESOURCES_URI))
            footer = tags.html_string_to_element_tree(
                '<script src="%s/code_tags.js">'
                '</script>' % CODETAGS_RESOURCES_URI)
        else:
            # Without highlighting only a small CSS file is needed.
            header = cElementTree.Element('link')
            header.attrib['rel'] = 'stylesheet'
            header.attrib['href'] = '%s/code_tags_no_highlight.css' % (
                CODETAGS_RESOURCES_URI)
            footer = cElementTree.Comment('Empty footer')
        return (header, footer)

    def get_icon_url(self):
        return CODETAGS_RESOURCES_URI + '/code_tags.png'

    def get_schema(self, unused_handler):
        """Describe the editable attributes: language mode and code body."""
        reg = schema_fields.FieldRegistry(CodeTag.name())
        reg.add_property(
            schema_fields.SchemaField(
                'mode', 'Language', 'string',
                optional=True,
                select_data=SELECT_DATA))
        reg.add_property(
            schema_fields.SchemaField(
                'code', 'Code', 'text',
                optional=True,
                description=('The code which will be displayed.')))
        return reg
# Module registration handle; populated by register_module().
custom_module = None
def register_module():
    """Registers this module for use."""

    def on_module_enable():
        tags.Registry.add_tag_binding(CodeTag.binding_name, CodeTag)

    def on_module_disable():
        tags.Registry.remove_tag_binding(CodeTag.binding_name)

    global_routes = [
        (CODETAGS_RESOURCES_URI + '/.*', tags.ResourcesHandler),
        (CODEMIRROR_URI + '/(.*)', sites.make_zip_handler(os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib/codemirror-4.5.0.zip')))]
    namespaced_routes = []
    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Code Editor and Code Example Display',
        # Fix: the adjacent literals previously joined as "...editor
        # andallow..."; add the missing space at the boundary.
        'Allow teacher to use a proper code editor and '
        'allow student to see a proper piece of code',
        global_routes, namespaced_routes,
        notify_module_enabled=on_module_enable,
        notify_module_disabled=on_module_disable)
    return custom_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module allowing manual marking of unit/lesson progress."""
__author__ = 'Mike Gainer (mgainer@google.com)'
from common import utils as common_utils
from controllers import utils
from models import custom_modules
from models import transforms
# Module registration handle; populated by register_module().
custom_module = None

MODULE_NAME = 'Manual Progress'
# XSRF action name shared by all progress REST handlers in this module.
XSRF_ACTION = 'manual_progress'

# The *RESTHandler._perform_checks overrides widen the base tuple.
# pylint: disable=unbalanced-tuple-unpacking
class ProgressRESTBase(utils.BaseRESTHandler):
    """Shared request validation for the manual-progress REST handlers."""

    def _perform_checks(self):
        """Validate XSRF token, key, student and course for this request.

        Returns:
            Tuple (success, key, student, course). When success is False an
            error response has already been sent to the client (for XSRF
            failures, by assert_xsrf_token_or_fail itself).
        """
        success = False
        key = self.request.params.get('key')
        student = self.get_student()
        course = self.get_course()
        if not self.assert_xsrf_token_or_fail(
                self.request.params, XSRF_ACTION, {'key': key}):
            pass  # assert_xsrf_token_or_fail already sent the response.
        elif not key:
            transforms.send_json_response(
                self, 400, 'Bad Request.', {})
        elif not student or student.is_transient or not student.is_enrolled:
            transforms.send_json_response(
                self, 401, 'Access Denied.', {'key': key})
        elif not course:
            transforms.send_json_response(
                self, 400, 'Bad Request.', {'key': key})
        elif not self.app_context.is_editable_fs():
            # Read-only filesystems do not accept forced progress.
            transforms.send_json_response(
                self, 401, 'Access Denied.', {'key': key})
        else:
            success = True
        return success, key, student, course

    def _send_success_response(self, key, status):
        """Send a 200 response carrying the key and a progress status."""
        transforms.send_json_response(
            self, 200, 'OK.', {'key': key,
                               'status': status})
class CourseProgressRESTHandler(ProgressRESTBase):
    """REST handler to read or force course-level completion."""

    URI = '/rest/student/progress/course'

    def _perform_checks(self):
        # Widens the base tuple: course is replaced by its progress tracker.
        progress = None
        success, key, student, course = (
            super(CourseProgressRESTHandler, self)._perform_checks())
        if success:
            progress = course.get_progress_tracker()
        return success, key, student, progress

    def _send_success_response(self, key, student, progress):
        # Note: takes (student, progress) and derives the status value the
        # base class expects.
        super(CourseProgressRESTHandler, self)._send_success_response(
            key,
            progress.get_course_status(
                progress.get_or_create_progress(student)))

    def get(self):
        """Report the student's course completion status."""
        success, key, student, progress = self._perform_checks()
        if success:
            self._send_success_response(key, student, progress)

    def post(self):
        """Force the whole course to completed for this student."""
        success, key, student, progress = self._perform_checks()
        if success:
            progress.force_course_completed(student)
            self._send_success_response(key, student, progress)
class UnitProgressRESTHandler(ProgressRESTBase):
    """REST handler to read or force unit-level completion."""

    URI = '/rest/student/progress/unit'

    def _perform_checks(self):
        # Widens the base tuple with the unit named by 'key' and the
        # course progress tracker.
        unit = None
        progress = None
        success, key, student, course = (
            super(UnitProgressRESTHandler, self)._perform_checks())
        if success:
            progress = course.get_progress_tracker()
            unit = course.find_unit_by_id(key)
            if not unit:
                success = False
                transforms.send_json_response(
                    self, 400, 'Bad Request.', {'key': key})
        return success, key, student, unit, progress

    def _send_success_response(self, key, student, unit, progress):
        super(UnitProgressRESTHandler, self)._send_success_response(
            key,
            progress.get_unit_status(
                progress.get_or_create_progress(student),
                unit.unit_id))

    def get(self):
        """Report the student's completion status for one unit."""
        success, key, student, unit, progress = self._perform_checks()
        if success:
            self._send_success_response(key, student, unit, progress)

    def post(self):
        """Force a unit completed; only allowed for manual-progress units."""
        success, key, student, unit, progress = self._perform_checks()
        if success:
            if not unit.manual_progress:
                success = False
                transforms.send_json_response(
                    self, 401, 'Access Denied.', {'key': key})
            else:
                progress.force_unit_completed(student, unit.unit_id)
                self._send_success_response(key, student, unit, progress)
class LessonProgressRESTHandler(ProgressRESTBase):
    """REST handler to read or force lesson-level completion."""

    URI = '/rest/student/progress/lesson'

    def _perform_checks(self):
        # Widens the base tuple with the lesson named by 'key' and the
        # course progress tracker.
        lesson = None
        progress = None
        success, key, student, course = (
            super(LessonProgressRESTHandler, self)._perform_checks())
        if success:
            progress = course.get_progress_tracker()
            # Search every lesson in the course for a matching id.
            lesson = common_utils.find(lambda l: str(l.lesson_id) == key,
                                       course.get_lessons_for_all_units())
            if not lesson:
                success = False
                transforms.send_json_response(
                    self, 400, 'Bad Request.', {'key': key})
        return success, key, student, lesson, progress

    def _send_success_response(self, key, student, lesson, progress):
        super(LessonProgressRESTHandler, self)._send_success_response(
            key,
            progress.get_lesson_status(
                progress.get_or_create_progress(student),
                lesson.unit_id,
                lesson.lesson_id))

    def get(self):
        """Report the student's completion status for one lesson."""
        success, key, student, lesson, progress = self._perform_checks()
        if success:
            self._send_success_response(key, student, lesson, progress)

    def post(self):
        """Force a lesson completed; only allowed with manual progress."""
        success, key, student, lesson, progress = self._perform_checks()
        if success:
            if not lesson.manual_progress:
                success = False
                transforms.send_json_response(
                    self, 401, 'Access Denied.', {'key': key})
            else:
                progress.put_html_completed(
                    student, lesson.unit_id, lesson.lesson_id)
                self._send_success_response(key, student, lesson, progress)
def register_module():
    """Registers the manual-progress REST handlers with the runtime."""
    global custom_module  # pylint: disable=global-statement

    handlers = [
        (CourseProgressRESTHandler.URI, CourseProgressRESTHandler),
        (UnitProgressRESTHandler.URI, UnitProgressRESTHandler),
        (LessonProgressRESTHandler.URI, LessonProgressRESTHandler),
    ]
    custom_module = custom_modules.Module(
        MODULE_NAME,
        'Manual marking of unit/lesson progress',
        [], handlers)
    return custom_module
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module that adds the ability to do ajax requests."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
from common import tags
from models import custom_modules
MODULE_NAME = 'Ajax Registry Library'
# Module registration
custom_module = None
def register_module():
    """Registers this module in the registry."""
    global custom_module  # pylint: disable=global-statement

    # Serve this module's static assets globally, across all courses.
    routes = [('/modules/ajax_registry/assets/.*', tags.ResourcesHandler)]
    custom_module = custom_modules.Module(
        MODULE_NAME, 'Provides library to register ajax calls', routes, [])
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the admin panel."""
__author__ = 'John Orr (jorr@google.com)'
from common import safe_dom
def assemble_sanitized_message(text, link):
    """Builds a sanitized NodeList: optional text plus a 'Learn more' link.

    Args:
        text: optional plain text shown before the link; skipped if falsy.
        link: optional URL; when given, a 'Learn more...' anchor opening in
            a new window is appended.

    Returns:
        A safe_dom.NodeList with the assembled nodes.
    """
    nodes = safe_dom.NodeList()
    if text:
        nodes.append(safe_dom.Text(text))
    if link:
        anchor = safe_dom.Element('a', href=link, target='_blank')
        anchor.add_text('Learn more...')
        nodes.append(anchor)
    return nodes
# Tab descriptions shown in the admin panel.  Each is a sanitized NodeList,
# optionally followed by a 'Learn more...' link to the public wiki.
COURSES_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/CreateNewCourse')

DEPLOYMENT_DESCRIPTION = assemble_sanitized_message("""
These deployment settings are configurable by editing the Course Builder code
before uploading it to Google App Engine.
""", 'https://code.google.com/p/course-builder/wiki/AdminPage')

METRICS_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/AdminPage')

SETTINGS_DESCRIPTION = assemble_sanitized_message(
    None, 'https://code.google.com/p/course-builder/wiki/AdminPage')
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting configuration property editor and REST operations."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import cgi
import urllib
import appengine_config
from common.utils import Namespace
from controllers import sites
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import config
from models import courses
from models import models
from models import roles
from models import transforms
from modules.oeditor import oeditor
from google.appengine.api import users
from google.appengine.ext import db
# This is a template because the value type is not yet known.
SCHEMA_JSON_TEMPLATE = """
{
"id": "Configuration Property",
"type": "object",
"description": "Configuration Property Override",
"properties": {
"name" : {"type": "string"},
"value": {"optional": true, "type": "%s"},
"is_draft": {"type": "boolean"}
}
}
"""
# This is a template because the doc_string is not yet known.
SCHEMA_ANNOTATIONS_TEMPLATE = [
(['title'], 'Configuration Property Override'),
(['properties', 'name', '_inputex'], {
'label': 'Name', '_type': 'uneditable'}),
oeditor.create_bool_select_annotation(
['properties', 'is_draft'], 'Status', 'Pending', 'Active',
description='<strong>Active</strong>: This value is active and '
'overrides all other defaults.<br/><strong>Pending</strong>: This '
'value is not active yet, and the default settings still apply.')]
class ConfigPropertyRights(object):
    """Manages view/edit rights for configuration properties.

    All permissions reduce to 'is the caller a super admin': editing is the
    primary right and every other right delegates to it.
    """

    @classmethod
    def can_edit(cls):
        return roles.Roles.is_super_admin()

    @classmethod
    def can_view(cls):
        return cls.can_edit()

    @classmethod
    def can_delete(cls):
        return cls.can_edit()

    @classmethod
    def can_add(cls):
        return cls.can_edit()
class ConfigPropertyEditor(object):
    """An editor for any configuration property.

    Mixed into admin page handlers; supplies GET actions that render the
    oeditor UI for a registered ConfigProperty, and POST actions that create
    or delete its datastore override entity.
    """

    # Map of configuration property type into inputex type.
    type_map = {str: 'string', int: 'integer', bool: 'boolean'}

    @classmethod
    def get_schema_annotations(cls, config_property):
        """Gets editor specific schema annotations."""
        doc_string = '%s Default: \'%s\'.' % (
            config_property.doc_string, config_property.default_value)
        # Copy the shared template so the per-property 'value' annotation
        # does not accumulate on the module-level list across calls.
        item_dict = [] + SCHEMA_ANNOTATIONS_TEMPLATE
        item_dict.append((
            ['properties', 'value', '_inputex'], {
                'label': 'Value',
                '_type': '%s' % cls.get_value_type(config_property),
                'description': doc_string}))
        return item_dict

    @classmethod
    def get_value_type(cls, config_property):
        """Gets an editor specific type for the property.

        Raises:
            Exception: if the property's Python type is not in type_map.
        """
        # Use .get() so an unsupported type reaches the explicit error below;
        # the previous indexed lookup raised a bare KeyError first, making
        # the 'Unknown type' branch unreachable.
        value_type = cls.type_map.get(config_property.value_type)
        if not value_type:
            raise Exception('Unknown type: %s', config_property.value_type)
        if config_property.value_type == str and config_property.multiline:
            return 'text'
        return value_type

    @classmethod
    def get_schema_json(cls, config_property):
        """Gets JSON schema for configuration property."""
        return SCHEMA_JSON_TEMPLATE % cls.get_value_type(config_property)

    def get_add_course(self):
        """Handles 'add_course' action and renders new course entry editor."""
        if roles.Roles.is_super_admin():
            exit_url = '%s?tab=courses' % self.LINK_URL
        else:
            exit_url = self.request.referer
        rest_url = CoursesItemRESTHandler.URI

        template_values = {}
        template_values['page_title'] = self.format_title('Add Course')
        template_values['main_content'] = oeditor.ObjectEditor.get_html_for(
            self, CoursesItemRESTHandler.SCHEMA_JSON,
            CoursesItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            None, rest_url, exit_url,
            auto_return=True,
            save_button_caption='Add New Course')
        self.render_page(template_values)

    def get_config_edit(self):
        """Handles 'edit' property action."""
        key = self.request.get('name')
        if not key:
            # Bug fix: return after redirecting; execution previously fell
            # through and indexed the registry with an empty key.
            self.redirect('%s?action=settings' % self.URL)
            return

        # Bug fix: use .get() so an unknown key redirects instead of
        # raising KeyError (the 'if not item' check was otherwise dead).
        item = config.Registry.registered.get(key)
        if not item:
            self.redirect('%s?action=settings' % self.URL)
            return

        template_values = {}
        template_values['page_title'] = self.format_title('Edit Settings')

        exit_url = '%s?tab=settings#%s' % (self.LINK_URL, cgi.escape(key))
        rest_url = '/rest/config/item'
        delete_url = '%s?%s' % (
            self.LINK_URL,
            urllib.urlencode({
                'action': 'config_reset',
                'name': key,
                'xsrf_token': cgi.escape(
                    self.create_xsrf_token('config_reset'))}))

        template_values['main_content'] = oeditor.ObjectEditor.get_html_for(
            self, ConfigPropertyEditor.get_schema_json(item),
            ConfigPropertyEditor.get_schema_annotations(item),
            key, rest_url, exit_url, delete_url=delete_url)

        self.render_page(template_values, in_tab='settings')

    def post_config_override(self):
        """Handles 'override' property action."""
        name = self.request.get('name')

        # Find item in registry.
        item = None
        if name and name in config.Registry.registered.keys():
            item = config.Registry.registered[name]
        if not item:
            # Bug fix: the format string was missing '%s' (raising TypeError
            # at request time), and the handler fell through after redirect.
            self.redirect('%s?tab=settings' % self.LINK_URL)
            return

        with Namespace(appengine_config.DEFAULT_NAMESPACE_NAME):
            # Add new entity if does not exist.
            try:
                entity = config.ConfigPropertyEntity.get_by_key_name(name)
            except db.BadKeyError:
                entity = None
            if not entity:
                # Seed the override with the property's current value, as a
                # draft, so overriding alone does not change behavior yet.
                entity = config.ConfigPropertyEntity(key_name=name)
                entity.value = str(item.value)
                entity.is_draft = True
                entity.put()

            models.EventEntity.record(
                'override-property', users.get_current_user(),
                transforms.dumps({
                    'name': name, 'value': str(entity.value)}))

        self.redirect('%s?%s' % (self.URL, urllib.urlencode(
            {'action': 'config_edit', 'name': name})))

    def post_config_reset(self):
        """Handles 'reset' property action."""
        name = self.request.get('name')

        # Find item in registry.
        item = None
        if name and name in config.Registry.registered.keys():
            item = config.Registry.registered[name]
        if not item:
            # Bug fix: return after redirecting; execution previously fell
            # through into the delete logic below.
            self.redirect('%s?tab=settings' % self.LINK_URL)
            return

        with Namespace(appengine_config.DEFAULT_NAMESPACE_NAME):
            # Delete if exists.
            try:
                entity = config.ConfigPropertyEntity.get_by_key_name(name)
                if entity:
                    old_value = entity.value
                    entity.delete()

                    models.EventEntity.record(
                        'delete-property', users.get_current_user(),
                        transforms.dumps({
                            'name': name, 'value': str(old_value)}))
            except db.BadKeyError:
                pass

        self.redirect('%s?tab=settings' % self.URL)
class CoursesPropertyRights(object):
    """Manages view/edit rights for configuration properties."""

    @classmethod
    def can_add(cls):
        """A super admin, or an admin of any existing course, may add."""
        if roles.Roles.is_super_admin():
            return True
        return any(
            roles.Roles.is_course_admin(course_context)
            for course_context in sites.get_all_courses())
class CoursesItemRESTHandler(BaseRESTHandler):
    """Provides REST API for course entries."""

    URI = '/rest/courses/item'

    # Schema for a new course entry: a unique name (used to build the course
    # slug/namespace), a display title, and the course admin's email.
    SCHEMA_JSON = """
    {
        "id": "Course Entry",
        "type": "object",
        "description": "Course Entry",
        "properties": {
            "name": {"type": "string"},
            "title": {"type": "string"},
            "admin_email": {"type": "string"}
            }
    }
    """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    # inputex display annotations for the schema above.
    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'New Course Entry'),
        (['properties', 'name', '_inputex'], {'label': 'Unique Name'}),
        (['properties', 'title', '_inputex'], {'label': 'Course Title'}),
        (['properties', 'admin_email', '_inputex'], {
            'label': 'Course Admin Email'})]

    def get(self):
        """Handles HTTP GET verb; returns defaults for the new-course form."""
        if not CoursesPropertyRights.can_add():
            transforms.send_json_response(
                self, 401, 'Access denied.')
            return

        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict={
                'name': 'new_course',
                'title': 'My New Course',
                'admin_email': self.get_user().email()},
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'add-course-put'))

    def put(self):
        """Handles HTTP PUT verb; creates and initializes a new course."""
        request = transforms.loads(self.request.get('request'))
        # XSRF and permission checks must run before the payload is trusted.
        if not self.assert_xsrf_token_or_fail(
                request, 'add-course-put', {}):
            return
        if not CoursesPropertyRights.can_add():
            transforms.send_json_response(
                self, 401, 'Access denied.')
            return

        payload = request.get('payload')
        json_object = transforms.loads(payload)
        name = json_object.get('name')
        title = json_object.get('title')
        admin_email = json_object.get('admin_email')

        # Add the new course entry.
        errors = []
        entry = sites.add_new_course_entry(name, title, admin_email, errors)
        if not entry:
            errors.append('Error adding a new course entry.')
        if errors:
            # 412 carries the accumulated validation messages to the editor.
            transforms.send_json_response(self, 412, '\n'.join(errors))
            return

        # We can't expect our new configuration being immediately available due
        # to datastore queries consistency limitations. So we will instantiate
        # our new course here and not use the normal sites.get_all_courses().
        app_context = sites.get_all_courses(entry)[0]

        # Update course with a new title and admin email.
        new_course = courses.Course(None, app_context=app_context)
        if not new_course.init_new_course_settings(title, admin_email):
            transforms.send_json_response(
                self, 412,
                'Added new course entry, but failed to update title and/or '
                'admin email. The course.yaml file already exists and must be '
                'updated manually.')
            return

        transforms.send_json_response(
            self, 200, 'Added.', {'entry': entry})
class ConfigPropertyItemRESTHandler(BaseRESTHandler):
    """Provides REST API for a configuration property."""

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        key = self.request.get('key')
        if not ConfigPropertyRights.can_view():
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        item = None
        if key and key in config.Registry.registered.keys():
            item = config.Registry.registered[key]
        if not item:
            # Bug fix: return after redirecting; the handler previously fell
            # through and emitted a JSON response on top of the redirect.
            self.redirect('/admin?action=settings')
            return

        try:
            entity = config.ConfigPropertyEntity.get_by_key_name(key)
        except db.BadKeyError:
            entity = None
        if not entity:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
        else:
            entity_dict = {'name': key, 'is_draft': entity.is_draft}
            # Stored values are strings; convert back to the declared type.
            entity_dict['value'] = transforms.string_to_value(
                entity.value, item.value_type)
            json_payload = transforms.dict_to_json(
                entity_dict,
                transforms.loads(
                    ConfigPropertyEditor.get_schema_json(item)))
            transforms.send_json_response(
                self, 200, 'Success.',
                payload_dict=json_payload,
                xsrf_token=XsrfTokenManager.create_xsrf_token(
                    'config-property-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        if not self.assert_xsrf_token_or_fail(
                request, 'config-property-put', {'key': key}):
            return
        if not ConfigPropertyRights.can_edit():
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        item = None
        if key and key in config.Registry.registered.keys():
            item = config.Registry.registered[key]
        if not item:
            # Bug fix: return after redirecting; execution previously fell
            # through and dereferenced the missing item below.
            self.redirect('/admin?action=settings')
            return

        try:
            entity = config.ConfigPropertyEntity.get_by_key_name(key)
        except db.BadKeyError:
            entity = None
        if not entity:
            # Bug fix: a missing entity (None, no exception raised) used to
            # crash with AttributeError further down; report 404 instead.
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        payload = request.get('payload')
        json_object = transforms.loads(payload)
        new_value = item.value_type(json_object['value'])

        # Validate the value.
        errors = []
        if item.validator:
            item.validator(new_value, errors)
        if errors:
            transforms.send_json_response(self, 412, '\n'.join(errors))
            return

        # Update entity.
        old_value = entity.value
        entity.value = str(new_value)
        entity.is_draft = json_object['is_draft']
        entity.put()
        if item.after_change:
            item.after_change(item, old_value)

        models.EventEntity.record(
            'put-property', users.get_current_user(), transforms.dumps({
                'name': key,
                'before': str(old_value), 'after': str(entity.value)}))
        transforms.send_json_response(self, 200, 'Saved.')
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Site administration functionality."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import cgi
import cStringIO
import datetime
import os
import sys
import time
import urllib
import messages
import appengine_config
from common import jinja_utils
from common import safe_dom
from common import tags
from common import utils as common_utils
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import ReflectiveRequestHandler
import models
from models import config
from models import counters
from models import courses
from models import custom_modules
from models import roles
from models.config import ConfigProperty
import modules.admin.config
from modules.admin.config import ConfigPropertyEditor
from modules.dashboard import dashboard
from modules.dashboard import tabs
from modules.oeditor import oeditor
from google.appengine.api import users
import google.appengine.api.app_identity as app
# URL prefix under which this module's static assets are served.
RESOURCES_PATH = '/modules/admin/resources'

# Directory holding this module's Jinja templates.
TEMPLATE_DIR = os.path.join(appengine_config.BUNDLE_ROOT, 'modules', 'admin')

# Guards the admin 'Console' tab and its 'console_run' action, which execute
# arbitrary Python on the server; keep disabled unless deliberately enabled.
DIRECT_CODE_EXECUTION_UI_ENABLED = False

# A time this module was initialized.
BEGINNING_OF_TIME = time.time()

DELEGATED_ACCESS_IS_NOT_ALLOWED = """
You must be an actual admin user to continue.
Users with the delegated admin rights are not allowed."""
def escape(text):
    """Escapes HTML in text; falsy input (None, '') is returned unchanged."""
    if not text:
        return text
    return cgi.escape(text)
def evaluate_python_code(code):
    """Compiles and evaluates a Python script in a restricted environment.

    Captures anything the script prints to stdout.  Returns a tuple of
    (captured output, success flag); on failure the output ends with the
    error message.  NOTE: this executes arbitrary code and must only ever
    be reachable by trusted administrators.
    """
    normalized = code.replace('\r\n', '\n')
    capture = cStringIO.StringIO()
    original_stdout = sys.stdout
    try:
        sys.stdout = capture
        try:
            exec(compile(normalized, '<string>', 'exec'), globals())  # pylint: disable=exec-used
        except Exception as e:  # pylint: disable=broad-except
            capture.write('Error: %s' % e)
            return capture.getvalue(), False
    finally:
        # Always restore stdout, even if compilation/execution blew up.
        sys.stdout = original_stdout
    return capture.getvalue(), True
class WelcomeHandler(ApplicationHandler, ReflectiveRequestHandler):
    """First-run 'welcome' page: explore the sample course, create the
    first course, or jump to global settings."""

    default_action = 'welcome'
    get_actions = [default_action]
    post_actions = ['explore_sample', 'add_first_course', 'configure_settings']

    # Enable other modules to make changes to sample course import.
    # Each member must be a function of the form:
    #     callback(course, errors)
    COPY_SAMPLE_COURSE_HOOKS = []

    # Enable other modules to put global warnings on the welcome page.  This
    # is useful when you want to ask for permission from the installation
    # administrator, and you want to be absolutely certain the administrator
    # has seen the request.  Items appended here must be callable, taking
    # no parameters.  The return value will be inserted onto the welcome.html
    # page; see the loop adding 'item_form_content' to the page.
    WELCOME_FORM_HOOKS = []

    # Items on this list are called back when the welcome page has been
    # submitted.  These should take two parameters: the course just created
    # and the page handler object.
    POST_HOOKS = []

    def get_template(self, template_name):
        """Loads a Jinja template from this module's template directory."""
        return jinja_utils.get_template(template_name, [TEMPLATE_DIR])

    def can_view(self):
        """Checks if current user has viewing rights."""
        action = self.request.get('action')
        if action == 'add_first_course':
            return modules.admin.config.CoursesPropertyRights.can_add()
        return roles.Roles.is_super_admin()

    def can_edit(self):
        """Checks if current user has editing rights."""
        return self.can_view()

    def get(self):
        """Requires a signed-in, authorized user before dispatching."""
        user = users.get_current_user()
        if not user:
            self.redirect(
                users.create_login_url('/admin/welcome'), normalize=False)
            return
        if not self.can_view():
            return
        super(WelcomeHandler, self).get()

    def post(self):
        """Dispatches the POST action, then runs registered POST_HOOKS."""
        if not self.can_edit():
            return
        app_context = super(WelcomeHandler, self).post()
        common_utils.run_hooks(self.POST_HOOKS, app_context, self)

    def _redirect(self, app_context, url):
        # Bind this handler to the selected course before redirecting.
        self.app_context = app_context
        self.redirect(url)

    def get_welcome(self):
        """Renders the welcome page with an XSRF token per POST action."""
        template_values = {}
        template_values['version'] = os.environ['GCB_PRODUCT_VERSION']
        template_values['course_count'] = len(sites.get_all_courses())
        template_values['add_first_xsrf'] = self.create_xsrf_token(
            'add_first_course')
        template_values['explore_sample_xsrf'] = self.create_xsrf_token(
            'explore_sample')
        template_values['configure_settings_xsrf'] = self.create_xsrf_token(
            'configure_settings')
        template_values['global_admin_url'] = GlobalAdminHandler.LINK_URL
        welcome_form_content = []
        for hook in self.WELCOME_FORM_HOOKS:
            welcome_form_content.append(hook())
        template_values['welcome_form_content'] = welcome_form_content
        self.response.write(
            self.get_template('welcome.html').render(template_values))

    def _make_new_course(self, uid, title):
        """Make a new course entry."""
        errors = []
        admin_email = users.get_current_user().email()
        entry = sites.add_new_course_entry(
            uid, title, admin_email, errors)
        if errors:
            raise Exception(errors)
        app_context = sites.get_all_courses(entry)[0]
        new_course = models.courses.Course(None, app_context=app_context)
        new_course.init_new_course_settings(title, admin_email)
        return app_context

    def _copy_sample_course(self, uid):
        """Make a fresh copy of sample course."""
        src_app_context = sites.get_all_courses('course:/:/:')[0]
        dst_app_context = self._make_new_course(uid, '%s (%s)' % (
            src_app_context.get_title(), os.environ['GCB_PRODUCT_VERSION']))
        errors = []
        dst_course = courses.Course(None, dst_app_context)
        dst_course.import_from(src_app_context, errors)
        dst_course.save()
        if not errors:
            # Give other modules a chance to adjust the imported copy.
            common_utils.run_hooks(
                self.COPY_SAMPLE_COURSE_HOOKS, dst_app_context, errors)
        if errors:
            raise Exception(errors)
        return dst_app_context

    def post_explore_sample(self):
        """Navigate to or import sample course."""
        course = None
        # Check both the legacy 'sample' namespace and the version-suffixed
        # one; import a fresh copy only when neither exists.
        for uid in ['sample', 'sample_%s' % os.environ[
                'GCB_PRODUCT_VERSION'].replace('.', '_')]:
            course = sites.get_course_index(
                ).get_app_context_for_namespace('ns_%s' % uid)
            if not course:
                course = self._copy_sample_course(uid)
                break
        assert course is not None
        self._redirect(course, '/dashboard')
        return course

    def post_add_first_course(self):
        """Adds first course to the deployment."""
        uid = 'first'
        course = sites.get_course_index().get_course_for_path('/%s' % uid)
        if course:
            self._redirect(course, '/dashboard')
            return course
        course = self._make_new_course(uid, 'My First Course')
        self._redirect(course, '/dashboard')
        return course

    def post_configure_settings(self):
        """Sends the administrator to the global settings page."""
        self.redirect('/admin/global')
class BaseAdminHandler(ConfigPropertyEditor):
"""Base class holding methods required for administration of site."""
ACTION = 'admin'
DEFAULT_TAB = 'courses'
default_tab_action = 'admin'
@classmethod
def bind_tabs(cls):
    """Registers all admin console tabs with the tabs registry."""

    def bind(key, label, handler, href=None):
        # Tabs given an explicit href open external pages in a new window;
        # the rest render in place via 'handler'.
        if href:
            target = '_blank'
        else:
            href = 'admin?action=admin&tab=%s' % key
            target = None
        tabs.Registry.register(
            cls.ACTION, key, label, contents=handler, href=href,
            target=target)

    bind('courses', 'Courses', cls.get_courses)
    bind('settings', 'Site Settings', cls.get_settings)
    bind('perf', 'Metrics', cls.get_perf)
    bind('deployment', 'Deployment', cls.get_deployment)
    # The console executes arbitrary Python; only expose when enabled.
    if DIRECT_CODE_EXECUTION_UI_ENABLED:
        bind('console', 'Console', cls.get_console)
    if appengine_config.gcb_appstats_enabled():
        bind('stats', 'Appstats', None, href='/admin/stats/')
    # Link to the GAE dashboard in production, or the local dev console.
    if appengine_config.PRODUCTION_MODE:
        app_id = app.get_application_id()
        href = (
            'https://appengine.google.com/'
            'dashboard?app_id=s~%s' % app_id)
        bind('gae', 'Google App Engine', None, href=href)
    else:
        bind(
            'gae', 'Google App Engine', None,
            href='http://localhost:8000/')
    bind('welcome', 'Welcome', None, href='/admin/welcome')
    bind(
        'help', 'Site Help', None,
        href='https://code.google.com/p/course-builder/wiki/AdminPage')
    bind(
        'news', 'News', None,
        href=(
            'https://groups.google.com/forum/'
            '?fromgroups#!forum/course-builder-announce'))
@classmethod
def unbind_tabs(cls):
    """Removes every tab registered under this class's action group."""
    tabs.Registry.unregister_group(cls.ACTION)
@classmethod
def bind_get_actions(cls):
    """Exposes the admin GET actions on the host handler class."""
    cls.get_actions.extend([cls.ACTION, 'add_course', 'config_edit'])
@classmethod
def unbind_get_actions(cls):
    """Withdraws the admin GET actions from the host handler class."""
    for action in (cls.ACTION, 'add_course', 'config_edit'):
        cls.get_actions.remove(action)
@classmethod
def bind_post_actions(cls):
    """Exposes the admin POST actions on the host handler class."""
    actions = ['config_override', 'config_reset']
    if DIRECT_CODE_EXECUTION_UI_ENABLED:
        # Arbitrary code execution: registered only when explicitly enabled.
        actions.append('console_run')
    cls.post_actions.extend(actions)
@classmethod
def unbind_post_actions(cls):
    """Withdraws the admin POST actions from the host handler class."""
    actions = ['config_override', 'config_reset']
    if DIRECT_CODE_EXECUTION_UI_ENABLED:
        actions.append('console_run')
    for action in actions:
        cls.post_actions.remove(action)
def can_view(self, unused_action):
    """Checks if current user has viewing rights."""
    # Overrides method in DashboardHandler
    if self.request.get('action') == 'add_course':
        # Adding a course is open to any course admin, not just super admins.
        return modules.admin.config.CoursesPropertyRights.can_add()
    return roles.Roles.is_super_admin()
def can_edit(self):
    """Checks if current user has editing rights."""
    # Overrides method in DashboardHandler; editing rights mirror viewing.
    return self.can_view(self.ACTION)
def get_course_picker(self, destination=None):
    """Returns the course picker, always targeting '/admin'."""
    # NOTE(review): the 'destination' argument is ignored and the picker is
    # pinned to '/admin' — presumably intentional for the admin console;
    # confirm against the base class contract.
    return super(BaseAdminHandler, self).get_course_picker(
        destination='/admin')
def get_admin(self):
    """The main entry point to the admin console."""
    requested = self.request.get('tab') or self.DEFAULT_TAB
    tab = tabs.Registry.get_tab(self.ACTION, requested)
    if not tab:
        self.error(404)
        return
    tab.contents(self)
def render_dict(self, source_dict, title):
    """Renders a dictionary as an ordered list, sorted by key."""
    content = safe_dom.NodeList()
    content.append(safe_dom.Element('h3').add_text(title))
    ol = safe_dom.Element('ol')
    content.append(ol)
    for key in sorted(source_dict.keys()):
        value = source_dict[key]
        # ConfigProperty objects render their current value, not the object.
        if isinstance(value, ConfigProperty):
            value = value.value
        ol.add_child(
            safe_dom.Element('li').add_text('%s: %s' % (key, value)))
    return content
def get_perf(self):
    """Shows server performance counters page."""
    template_values = {}
    template_values['page_title'] = self.format_title('Metrics')
    template_values['page_description'] = messages.METRICS_DESCRIPTION

    perf_counters = {}

    # built in counters
    perf_counters['gcb-admin-uptime-sec'] = long(
        time.time() - BEGINNING_OF_TIME)

    # config counters
    perf_counters['gcb-config-overrides'] = len(
        config.Registry.get_overrides())
    perf_counters['gcb-config-age-sec'] = (
        long(time.time()) - config.Registry.last_update_time)
    perf_counters['gcb-config-update-time-sec'] = (
        config.Registry.last_update_time)
    perf_counters['gcb-config-update-index'] = config.Registry.update_index

    # add all registered counters
    all_counters = counters.Registry.registered.copy()
    for name in all_counters.keys():
        global_value = all_counters[name].global_value
        if not global_value:
            # No cross-instance aggregate available for this counter.
            global_value = 'NA'
        perf_counters[name] = '%s / %s' % (
            all_counters[name].value, global_value)

    template_values['main_content'] = self.render_dict(
        perf_counters, 'In-process Performance Counters (local/global)')
    self.render_page(template_values)
def _make_routes_dom(self, parent_element, routes, caption):
    """Renders a captioned, sorted list of routes as nested DOM lists."""
    if not routes:
        return
    # Collect printable route names, dropping falsy entries.
    route_names = [str(route) for route in routes if route]
    outer = safe_dom.Element('ul')
    parent_element.add_child(outer)
    outer.add_child(safe_dom.Element('li').add_text(caption))
    inner = safe_dom.Element('ul')
    outer.add_child(inner)
    for route_name in sorted(route_names):
        if route_name:
            inner.add_child(safe_dom.Element('li').add_text(route_name))
def get_deployment(self):
    """Shows server environment and deployment information page."""
    template_values = {}
    template_values['page_title'] = self.format_title('Deployment')
    template_values['page_description'] = messages.DEPLOYMENT_DESCRIPTION

    # modules, with their global and namespaced routes
    module_content = safe_dom.NodeList()
    module_content.append(
        safe_dom.Element('h3').add_text('Modules'))
    ol = safe_dom.Element('ol')
    module_content.append(ol)
    for name in sorted(custom_modules.Registry.registered_modules.keys()):
        enabled_text = ''
        if name not in custom_modules.Registry.enabled_module_names:
            enabled_text = ' (disabled)'
        li = safe_dom.Element('li').add_text('%s%s' % (name, enabled_text))
        ol.add_child(li)
        amodule = custom_modules.Registry.registered_modules.get(name)
        self._make_routes_dom(
            li, amodule.global_routes, 'Global Routes')
        self._make_routes_dom(
            li, amodule.namespaced_routes, 'Namespaced Routes')

    # Custom tags.
    tag_content = safe_dom.NodeList()
    tag_content.append(
        safe_dom.Element('h3').add_text('Custom Tags'))
    ol = safe_dom.Element('ol')
    tag_content.append(ol)
    tag_bindings = tags.get_tag_bindings()
    for name in sorted(tag_bindings.keys()):
        clazz = tag_bindings.get(name)
        tag = clazz()
        vendor = tag.vendor()
        ol.add_child(safe_dom.Element('li').add_text(
            '%s: %s: %s' % (name, tag.__class__.__name__, vendor)))

    # Yaml file content.
    yaml_content = safe_dom.NodeList()
    yaml_content.append(
        safe_dom.Element('h3').add_text('Contents of ').add_child(
            safe_dom.Element('code').add_text('app.yaml')))
    ol = safe_dom.Element('ol')
    yaml_content.append(ol)
    # Bug fix: close the file handle promptly instead of leaking it until
    # garbage collection.
    yaml_path = os.path.join(os.path.dirname(__file__), '../../app.yaml')
    with open(yaml_path, 'r') as yaml_file:
        yaml_lines = yaml_file.readlines()
    for line in yaml_lines:
        ol.add_child(safe_dom.Element('li').add_text(line))

    # Application identity.
    app_id = app.get_application_id()
    app_dict = {}
    app_dict['application_id'] = escape(app_id)
    app_dict['default_ver_hostname'] = escape(
        app.get_default_version_hostname())

    template_values['main_content'] = safe_dom.NodeList().append(
        self.render_dict(app_dict, 'About the Application')
    ).append(
        module_content
    ).append(
        tag_content
    ).append(
        yaml_content
    ).append(
        self.render_dict(os.environ, 'Server Environment Variables'))

    self.render_page(template_values)
def get_settings(self):
"""Shows configuration properties information page."""
template_values = {}
template_values['page_title'] = self.format_title('Settings')
template_values['page_description'] = messages.SETTINGS_DESCRIPTION
content = safe_dom.NodeList()
content.append(safe_dom.Element(
'link', rel='stylesheet',
href='/modules/admin/resources/css/admin.css'))
table = safe_dom.Element('table', className='gcb-config').add_child(
safe_dom.Element('tr').add_child(
safe_dom.Element('th').add_text('Name')
).add_child(
safe_dom.Element('th').add_text('Current Value')
).add_child(
safe_dom.Element('th').add_text('Actions')
).add_child(
safe_dom.Element('th').add_text('Description')
))
content.append(
safe_dom.Element('h3').add_text('All Settings')
).append(table)
def get_style_for(value, value_type):
"""Formats CSS style for given value."""
style = ''
if not value or value_type in [int, long, bool]:
style = 'text-align: center;'
return style
def get_action_html(caption, args, onclick=None, idName=None):
"""Formats actions <a> link."""
a = safe_dom.Element(
'a', href='%s?%s' % (
self.LINK_URL, urllib.urlencode(args)),
className='gcb-button'
).add_text(caption)
if onclick:
a.add_attribute(onclick=onclick)
if idName:
a.add_attribute(id=idName)
return a
def get_actions(name, override):
"""Creates actions appropriate to an item."""
if override:
return get_action_html('Edit', {
'action': 'config_edit', 'name': name}, idName=name)
else:
return safe_dom.Element(
'form',
action='%s?%s' % (
self.LINK_URL,
urllib.urlencode(
{'action': 'config_override', 'name': name})),
method='POST'
).add_child(
safe_dom.Element(
'input', type='hidden', name='xsrf_token',
value=self.create_xsrf_token('config_override'))
).add_child(
safe_dom.Element(
'button', className='gcb-button', type='submit', id=name
).add_text('Override'))
def get_doc_string(item, default_value):
"""Formats an item documentation string for display."""
doc_string = item.doc_string
if not doc_string:
doc_string = 'No documentation available.'
if isinstance(doc_string, safe_dom.NodeList) or isinstance(
doc_string, safe_dom.Node):
return safe_dom.NodeList().append(doc_string).append(
safe_dom.Text(' Default: \'%s\'.' % default_value))
doc_string = ' %s Default: \'%s\'.' % (doc_string, default_value)
return safe_dom.Text(doc_string)
def get_lines(value):
"""Convert \\n line breaks into <br> and escape the lines."""
escaped_value = safe_dom.NodeList()
for line in str(value).split('\n'):
escaped_value.append(
safe_dom.Text(line)).append(safe_dom.Element('br'))
return escaped_value
# get fresh properties and their overrides
unused_overrides = config.Registry.get_overrides(force_update=True)
registered = config.Registry.registered.copy()
db_overrides = config.Registry.db_overrides.copy()
names_with_draft = config.Registry.names_with_draft.copy()
count = 0
for name in sorted(registered.keys()):
count += 1
item = registered[name]
has_environ_value, unused_environ_value = item.get_environ_value()
# figure out what kind of override this is
class_current = ''
if has_environ_value:
class_current = 'gcb-env-diff'
if item.name in db_overrides:
class_current = 'gcb-db-diff'
if item.name in names_with_draft:
class_current = 'gcb-db-draft'
# figure out default and current value
default_value = item.default_value
value = item.value
if default_value:
default_value = str(default_value)
if value:
value = str(value)
style_current = get_style_for(value, item.value_type)
tr = safe_dom.Element('tr')
table.add_child(tr)
tr.add_child(
safe_dom.Element(
'td', style='white-space: nowrap;').add_text(item.name))
td_value = safe_dom.Element('td').add_child(get_lines(value))
if style_current:
td_value.add_attribute(style=style_current)
if class_current:
td_value.add_attribute(className=class_current)
tr.add_child(td_value)
tr.add_child(
safe_dom.Element(
'td', style='white-space: nowrap;', align='center'
).add_child(get_actions(
name, name in db_overrides or name in names_with_draft)))
tr.add_child(
safe_dom.Element(
'td').add_child(get_doc_string(item, default_value)))
table.add_child(
safe_dom.Element('tr').add_child(
safe_dom.Element(
'td', colspan='4', align='right'
).add_text('Total: %s item(s)' % count)))
content.append(
safe_dom.Element('p').add_child(
safe_dom.Element('strong').add_text('Legend')
).add_text(':').add_text("""
For each property, the value shown corresponds to, in
descending order of priority:
""").add_child(
safe_dom.Element('span', className='gcb-db-diff').add_child(
safe_dom.Entity(' ')
).add_text(
'[ the value override set via this page ]'
).add_child(safe_dom.Entity(' '))
).add_text(', ').add_child(
safe_dom.Element('span', className='gcb-db-draft').add_child(
safe_dom.Entity(' ')
).add_text(
'[ the default value with pending value override ]'
).add_child(safe_dom.Entity(' '))
).add_text(', ').add_child(
safe_dom.Element('span', className='gcb-env-diff').add_child(
safe_dom.Entity(' ')
).add_text(
'[ the environment value in app.yaml ]'
).add_child(safe_dom.Entity(' '))
).add_text(', ').add_text("""
and the [ default value ] in the Course Builder codebase.
"""))
template_values['main_content'] = content
self.render_page(template_values)
def get_courses(self):
    """Renders the admin page listing every course available on this site."""
    template_values = {}
    template_values['page_title'] = self.format_title('Courses')
    template_values['page_description'] = messages.COURSES_DESCRIPTION

    content = safe_dom.NodeList()
    # "Add Course" button, a clearing spacer, and the section heading.
    content.append(
        safe_dom.Element(
            'a', id='add_course', className='gcb-button gcb-pull-right',
            role='button', href='%s?action=add_course' % self.LINK_URL
        ).add_text('Add Course'))
    content.append(
        safe_dom.Element('div', style='clear: both; padding-top: 2px;'))
    content.append(safe_dom.Element('h3').add_text('All Courses'))

    table = safe_dom.Element('table')
    content.append(table)
    header_row = safe_dom.Element('tr')
    for heading in ('Course Title', 'Context Path', 'Content Location',
                    'Student Data Location'):
        header_row.add_child(safe_dom.Element('th').add_text(heading))
    table.add_child(header_row)

    count = 0
    # Courses are listed alphabetically by (case-insensitive) title.
    for course in sorted(sites.get_all_courses(),
                         key=lambda course: course.get_title().lower()):
        count += 1
        title = course.get_title()
        slug = course.get_slug()
        namespace_text = 'namespace: %s' % course.get_namespace_name()
        # Read-write courses live in the datastore; read-only ones on disk.
        if course.fs.is_read_write():
            location = namespace_text
        else:
            location = 'disk: %s' % sites.abspath(
                course.get_home_folder(), '/')
        if slug == '/':
            dashboard_href = '/dashboard'
        else:
            dashboard_href = '%s/dashboard' % slug
        anchor = safe_dom.Element('a', href=dashboard_href).add_text(title)

        row = safe_dom.Element('tr')
        row.add_child(
            safe_dom.Element('td').add_child(anchor).add_child(
                safe_dom.Text('')))
        row.add_child(safe_dom.Element('td').add_text(slug))
        row.add_child(safe_dom.Element('td').add_text(location))
        row.add_child(safe_dom.Element('td').add_text(namespace_text))
        table.add_child(row)

    table.add_child(
        safe_dom.Element('tr').add_child(
            safe_dom.Element('td', colspan='4', align='right').add_text(
                'Total: %s item(s)' % count)))
    template_values['main_content'] = content
    self.render_page(template_values)
def get_console(self):
    """Shows interactive Python console page."""
    template_values = {}
    template_values['page_title'] = self.format_title('Console')
    # Check rights: only a direct super admin may open the console.
    if not roles.Roles.is_direct_super_admin():
        template_values['main_content'] = DELEGATED_ACCESS_IS_NOT_ALLOWED
        self.render_page(template_values)
        return
    # Build the warning text, the instructions and the code-entry form.
    content = safe_dom.NodeList()
    content.append(
        safe_dom.Element('p').add_child(
            safe_dom.Element('i').add_child(
                safe_dom.Element('strong').add_text('WARNING!')
            ).add_text("""
The Interactive Console has the same
access to the application's environment and services as a .py file
inside the application itself. Be careful, because this means writes
to your data store will be executed for real!""")
        )
    ).append(
        safe_dom.Element('p').add_child(
            safe_dom.Element('strong').add_text("""
Input your Python code below and press "Run Program" to execute.""")
        )
    ).append(
        # The form posts to the console_run action, guarded by an XSRF token.
        safe_dom.Element(
            'form',
            action='%s?action=console_run' % self.LINK_URL,
            method='POST'
        ).add_child(
            safe_dom.Element(
                'input', type='hidden', name='xsrf_token',
                value=self.create_xsrf_token('console_run'))
        ).add_child(
            safe_dom.Element(
                'textarea', style='width: 95%; height: 200px;',
                name='code')
        ).add_child(
            safe_dom.Element('p', align='center').add_child(
                safe_dom.Element(
                    'button', className='gcb-button', type='submit'
                ).add_text('Run Program')
            )
        )
    )
    template_values['main_content'] = content
    self.render_page(template_values)
def post_console_run(self):
    """Runs code submitted from the console page and renders the results."""
    template_values = {}
    template_values['page_title'] = self.format_title('Execution Results')
    # Only a direct super admin may execute arbitrary code.
    if not roles.Roles.is_direct_super_admin():
        template_values['main_content'] = DELEGATED_ACCESS_IS_NOT_ALLOWED
        self.render_page(template_values)
        return

    # Execute the submitted code and time it.
    code = self.request.get('code')
    started_at = time.time()
    output, results = evaluate_python_code(code)
    duration = long(time.time() - started_at)
    status = 'SUCCESS' if results else 'FAILURE'

    # Render the submitted listing, the status summary and captured output.
    content = safe_dom.NodeList()
    content.append(
        safe_dom.Element('h3').add_text('Submitted Python Code'))
    listing = safe_dom.Element('ol')
    content.append(listing)
    for line in code.split('\n'):
        listing.add_child(safe_dom.Element('li').add_text(line))

    summary = safe_dom.Element('ol')
    summary.add_child(
        safe_dom.Element('li').add_text('Status: %s' % status))
    summary.add_child(
        safe_dom.Element('li').add_text('Duration (sec): %s' % duration))
    content.append(safe_dom.Element('h3').add_text('Execution Results'))
    content.append(summary)
    content.append(safe_dom.Element('h3').add_text('Program Output'))
    content.append(
        safe_dom.Element('blockquote').add_child(
            safe_dom.Element('pre').add_text(output)))

    template_values['main_content'] = content
    self.render_page(template_values, in_tab='console')
class AdminHandler(BaseAdminHandler, dashboard.DashboardHandler):
    """Handler to present admin settings in namespaced context."""

    # The binding URL for this handler.
    URL = '/admin'

    # The URL used in relative addresses of this handler.
    LINK_URL = 'admin'

    def format_title(self, text):
        """Prefixes the dashboard page title with the Admin section name."""
        return super(AdminHandler, self).format_title('Admin > %s' % text)

    def get_template(self, template_name, dirs):
        """Resolves templates, searching this module's directory first."""
        return super(AdminHandler, self).get_template(
            template_name, [TEMPLATE_DIR] + dirs)

    def render_page(self, template_values, in_action=None, in_tab=None):
        """Renders via the dashboard, defaulting the action and the tab."""
        action = in_action or self.ACTION
        tab = in_tab or self.request.get('tab') or self.DEFAULT_TAB
        super(AdminHandler, self).render_page(
            template_values, in_action=action, in_tab=tab)
class GlobalAdminHandler(
    BaseAdminHandler, ApplicationHandler, ReflectiveRequestHandler):
    """Handler to present admin settings in global context."""

    # The binding URL for this handler
    URL = '/admin/global'

    # The URL used in relative addresses of this handler
    LINK_URL = '/admin/global'

    # List of functions which are used to generate content displayed at the top
    # of every dashboard page. Use this with caution, as it is extremely
    # invasive of the UX. Each function receives the handler as arg and returns
    # an object to be inserted into a Jinja template (e.g. a string, a safe_dom
    # Node or NodeList, or a jinja2.Markup).
    PAGE_HEADER_HOOKS = []

    # ReflectiveRequestHandler wiring: 'admin' is both the default and the
    # only GET action; no POST actions are declared here.
    default_action = 'admin'
    get_actions = [default_action]
    post_actions = []

    def format_title(self, text):
        """Builds the browser title for a global admin page."""
        return 'Course Builder > Admin > %s' % text

    def _get_top_nav(self, in_action, in_tab):
        """Builds the top navigation bars.

        Args:
            in_action: unused here; kept for interface parity with the
                dashboard's navigation builder.
            in_tab: string or None. The tab to mark selected; falls back to
                the request's 'tab' parameter, then DEFAULT_TAB.

        Returns:
            list of safe_dom.NodeList: [main nav bar, tab sub-nav bar].
        """
        nav_bars = []
        nav = safe_dom.NodeList()
        # 'Site Admin' is always marked selected on this handler.
        nav.append(safe_dom.Element(
            'a', href=self.URL, className='selected'
        ).add_text('Site Admin'))
        nav.append(safe_dom.Element(
            'a',
            href='https://code.google.com/p/course-builder/wiki/Dashboard',
            target='_blank'
        ).add_text('Help'))
        nav.append(safe_dom.Element(
            'a',
            href=(
                'https://groups.google.com/forum/?fromgroups#!categories/'
                'course-builder-forum/general-troubleshooting'),
            target='_blank'
        ).add_text('Support'))
        nav_bars.append(nav)
        tab_name = in_tab or self.request.get('tab') or self.DEFAULT_TAB
        sub_nav = safe_dom.NodeList()
        tab_group = tabs.Registry.get_tab_group(self.default_action)
        for tab in tab_group:
            # Tabs with contents render in-page; others link out directly.
            if tab.contents:
                href = '%s?tab=%s' % (self.LINK_URL, tab.name)
            else:
                href = tab.href
            target = tab.target or '_self'
            sub_nav.append(
                safe_dom.A(
                    href,
                    className=('selected' if tab.name == tab_name else ''),
                    target=target)
                .add_text(tab.title))
        nav_bars.append(sub_nav)
        return nav_bars

    def get(self):
        """Dispatches GET requests after login and permission checks."""
        tab = self.request.get('tab')
        # Preserve the requested tab across the login redirect.
        if tab:
            destination = '%s?tab=%s' % (self.LINK_URL, tab)
        else:
            destination = self.LINK_URL
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(destination), normalize=False)
            return
        if not self.can_view(self.ACTION):
            # In production hide the page outright; in dev, allow a login
            # retry so a developer can switch accounts.
            if appengine_config.PRODUCTION_MODE:
                self.error(403)
            else:
                self.redirect(
                    users.create_login_url(destination), normalize=False)
            return
        # Force reload of properties. It's expensive, but admin deserves it!
        config.Registry.get_overrides(force_update=True)
        super(GlobalAdminHandler, self).get()

    def post(self):
        """Dispatches POST requests; non-editors are bounced to the root."""
        if not self.can_edit():
            self.redirect('/', normalize=False)
            return
        return super(GlobalAdminHandler, self).post()

    def get_template(self, template_name, dirs):
        """Sets up an environment and Gets jinja template."""
        dashboard_template_dir = os.path.join(
            appengine_config.BUNDLE_ROOT, 'modules', 'dashboard')
        return jinja_utils.get_template(
            template_name, dirs + [dashboard_template_dir], handler=self)

    def render_page(self, template_values, in_action=None, in_tab=None):
        """Fills in the common template values and writes the rendered page.

        Args:
            template_values: dict. Must contain 'page_title'; navigation,
                footer, version and user-link keys are added here.
            in_action: string or None. Passed through to nav construction.
            in_tab: string or None. The tab to mark selected.
        """
        page_title = template_values['page_title']
        template_values['header_title'] = page_title
        template_values['page_headers'] = [
            hook(self) for hook in self.PAGE_HEADER_HOOKS]
        template_values['breadcrumbs'] = page_title
        template_values['top_nav'] = self._get_top_nav(in_action, in_tab)
        template_values['gcb_course_base'] = '/'
        template_values['user_nav'] = safe_dom.NodeList().append(
            safe_dom.Text('%s | ' % users.get_current_user().email())
        ).append(
            safe_dom.Element(
                'a', href=users.create_logout_url(self.request.uri)
            ).add_text('Logout'))
        template_values[
            'page_footer'] = 'Page created on: %s' % datetime.datetime.now()
        template_values['coursebuilder_version'] = (
            os.environ['GCB_PRODUCT_VERSION'])
        template_values['application_id'] = app.get_application_id()
        template_values['application_version'] = (
            os.environ['CURRENT_VERSION_ID'])
        template_values['can_highlight_code'] = oeditor.CAN_HIGHLIGHT_CODE.value
        # The template requires 'sections' to exist, even if empty.
        if not template_values.get('sections'):
            template_values['sections'] = []
        self.response.write(
            self.get_template('view.html', []).render(template_values))
def notify_module_enabled():
    """Binds admin tabs and handler actions when the module is enabled."""
    BaseAdminHandler.bind_tabs()
    for handler in (AdminHandler, GlobalAdminHandler):
        handler.bind_get_actions()
        handler.bind_post_actions()
def notify_module_disabled():
    """Unbinds admin tabs and handler actions when the module is disabled."""
    BaseAdminHandler.unbind_tabs()
    for handler in (AdminHandler, GlobalAdminHandler):
        handler.unbind_get_actions()
        handler.unbind_post_actions()
# Module instance shared with the custom_modules registry; populated by
# register_module() below.
custom_module = None
def register_module():
    """Registers this module in the registry."""
    # Handlers served outside any course namespace.
    global_routes = [
        (GlobalAdminHandler.URL, GlobalAdminHandler),
        ('/admin/welcome', WelcomeHandler),
        ('/rest/config/item',
         modules.admin.config.ConfigPropertyItemRESTHandler),
        ('/rest/courses/item', modules.admin.config.CoursesItemRESTHandler),
        (os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler),
    ]
    # Handlers served within each course's namespace.
    namespaced_routes = [(AdminHandler.URL, AdminHandler)]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Site Admin',
        'A set of pages for Course Builder site administrator.',
        global_routes, namespaced_routes,
        notify_module_enabled=notify_module_enabled,
        notify_module_disabled=notify_module_disabled)
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resources to be indexed and searched over by the search module."""
__author__ = 'Ellis Michael (emichael@google.com)'
import collections
import datetime
import gettext
import HTMLParser
import logging
import operator
import os
import Queue
import re
import robotparser
import urllib
import urlparse
from xml.dom import minidom
import jinja2
import appengine_config
from common import jinja_utils
from models import models
from modules.announcements import announcements
from google.appengine.api import search
from google.appengine.api import urlfetch
# Scheme/base used when resolving relative URLs found in course content.
PROTOCOL_PREFIX = 'http://'

# Base URL of the YouTube GData API used to fetch video metadata.
YOUTUBE_DATA_URL = 'https://gdata.youtube.com/feeds/api/videos/'

# Base URL of the YouTube timed-text (caption track) API.
YOUTUBE_TIMED_TEXT_URL = 'https://youtube.com/api/timedtext'

# The limit (in seconds) for the time that elapses before a new transcript
# fragment should be started. A lower value results in more fine-grain indexing
# and more docs in the index.
YOUTUBE_CAPTION_SIZE_SECS = 30
class URLNotParseableException(Exception):
    """Raised when the resource at a URL cannot be fetched or parsed."""
    pass
class ResourceHTMLParser(HTMLParser.HTMLParser):
    """Parser that extracts text content, links and a title from HTML."""

    # Text found inside these tags is never treated as page content.
    IGNORED_TAGS = ['script', 'style']

    def __init__(self, url):
        HTMLParser.HTMLParser.__init__(self)
        self.content_list = []  # text chunks, in document order
        self._links = []  # absolute URLs of <a href=...> targets
        self._title = ''
        # Counts how many instances of each tag are currently open.
        self.tag_tracker = collections.Counter()
        self.url = url  # base URL for resolving relative hrefs

    def handle_starttag(self, tag, attrs):
        """Records anchor targets and tracks tag nesting depth."""
        attr_map = dict(attrs)
        if tag == 'a' and 'href' in attr_map:
            self._links.append(
                urlparse.urljoin(self.url, attr_map['href']))
        self.tag_tracker[tag] += 1

    def handle_endtag(self, tag):
        if self.tag_tracker[tag] > 0:
            self.tag_tracker[tag] -= 1

    def handle_data(self, data):
        """Invoked every time the parser encounters the page's inner content."""
        # Any text seen while a <title> is open accumulates into the title.
        if self.tag_tracker['title']:
            if self._title:
                self._title += '\n%s' % data
            else:
                self._title = data
        stripped = data.strip()
        inside_ignored = any(
            self.tag_tracker[tag] for tag in self.IGNORED_TAGS)
        if stripped and not inside_ignored:
            self.content_list.append(stripped)

    def get_content(self):
        return '\n'.join(self.content_list)

    def get_links(self):
        return self._links

    def get_title(self):
        return self._title
def get_parser_for_html(url, ignore_robots=False):
    """Returns a ResourceHTMLParser with the parsed data."""
    # Honor robots.txt unless explicitly told not to.
    if not (ignore_robots or _url_allows_robots(url)):
        raise URLNotParseableException('robots.txt disallows access to URL: %s'
                                       % url)
    parser = ResourceHTMLParser(url)
    try:
        result = urlfetch.fetch(url)
        if result.status_code not in [200, 304]:
            raise ValueError
        if not any(content_type in result.headers['Content-type']
                   for content_type in ['text/html', 'xml']):
            raise ValueError
        if not isinstance(result.content, unicode):
            result.content = result.content.decode('utf-8')
        parser.feed(result.content)
    except BaseException as e:
        # Any failure (fetch, status, content type, decode, parse) is
        # reported uniformly as an unparseable URL.
        raise URLNotParseableException('Could not parse file at URL: %s\n%s' %
                                       (url, e))
    return parser
def get_minidom_from_xml(url, ignore_robots=False):
    """Returns a minidom representation of an XML file at url."""
    # Honor robots.txt unless explicitly told not to.
    if not (ignore_robots or _url_allows_robots(url)):
        raise URLNotParseableException('robots.txt disallows access to URL: %s'
                                       % url)
    try:
        result = urlfetch.fetch(url)
    except urlfetch.Error as e:
        raise URLNotParseableException('Could not parse file at URL: %s. %s' %
                                       (url, e))
    if result.status_code not in [200, 304]:
        raise URLNotParseableException('Bad status code (%s) for URL: %s' %
                                       (result.status_code, url))
    try:
        # minidom wants a byte string, so re-encode unicode payloads.
        if isinstance(result.content, unicode):
            result.content = result.content.encode('utf-8')
        return minidom.parseString(result.content)
    except BaseException as e:
        raise URLNotParseableException(
            'Error parsing XML document at URL: %s. %s' % (url, e))
def _url_allows_robots(url):
    """Checks robots.txt for user agent * at URL.

    Args:
        url: string. The URL whose robots.txt policy should be checked.

    Returns:
        bool. True if the site's robots.txt permits user agent '*' to
        fetch url.

    Raises:
        URLNotParseableException: if robots.txt could not be retrieved.
    """
    url = url.encode('utf-8')
    try:
        parts = urlparse.urlparse(url)
        base = urlparse.urlunsplit((
            parts.scheme, parts.netloc, '', None, None))
        rp = robotparser.RobotFileParser(url=urlparse.urljoin(
            base, '/robots.txt'))
        rp.read()
    except BaseException as e:
        # Fixed typo in log message: 'retreive' -> 'retrieve'.
        logging.info('Could not retrieve robots.txt for URL: %s', url)
        raise URLNotParseableException(e)
    else:
        return rp.can_fetch('*', url)
def get_locale_filtered_announcement_list(course):
    """Returns the course's announcements filtered to the current locale.

    Args:
        course: the course whose app_context supplies the current locale.

    Returns:
        The announcement entities applicable to the course's current locale.
    """
    # TODO(jorr): Restrict search in announcements by all tracking labels,
    # not just locale.
    announcement_list = (
        announcements.AnnouncementEntity.get_announcements())
    # Uses a protected LabelDAO helper; no public locale-filtering API exists.
    # pylint: disable=protected-access
    return models.LabelDAO._apply_locale_labels_to_locale(
        course.app_context.get_current_locale(), announcement_list)
    # pylint: enable=protected-access
class Resource(object):
    """Abstract superclass for an indexable resource."""

    # Each subclass should define this constant.
    TYPE_NAME = 'Resource'

    # Fields a subclass needs returned with a search result.
    RETURNED_FIELDS = []

    # Fields a subclass needs returned as snippets in the search result.
    # In most cases, this should be one field.
    SNIPPETED_FIELDS = []

    # Days that should elapse before a resource is re-indexed. Nonnegative.
    FRESHNESS_THRESHOLD_DAYS = 0

    @classmethod
    def generate_all(
        cls, course, timestamps):  # pylint: disable=unused-argument
        """A generator returning objects of type cls in the course.

        This generator should yield resources based on the last indexed time
        in timestamps.

        Args:
            course: models.courses.course. the course to index.
            timestamps: dict from doc_ids to last indexed datetimes.

        Yields:
            A sequence of Resource objects.
        """
        # The base implementation is a generator that halts immediately.
        # All subclass implementations must also be generators for memory-
        # management reasons.
        return
        yield  # pylint: disable=unreachable

    @classmethod
    def _get_doc_id(cls, *unused_vargs):
        """Subclasses should implement this with identifying fields as args."""
        raise NotImplementedError

    @classmethod
    def _indexed_within_num_days(cls, timestamps, doc_id, num_days):
        """Determines whether doc_id was indexed in the last num_days days."""
        try:
            last_indexed = timestamps[doc_id]
        except (KeyError, TypeError):
            # Missing entry, or timestamps is not a mapping at all.
            return False
        age = datetime.datetime.utcnow() - last_indexed
        return age <= datetime.timedelta(num_days)

    def get_document(self):
        """Return a search.Document to be indexed."""
        raise NotImplementedError

    def get_links(self):
        """External links to be indexed should be stored in self.links."""
        return getattr(self, 'links', [])

    def get_unit_id(self):
        return getattr(self, 'unit_id', None)
class Result(object):
    """The abstract superclass for a result returned by the search module."""

    def get_html(self):
        """Return an HTML fragment to be used in the results page."""
        raise NotImplementedError

    @classmethod
    def _generate_html_from_template(cls, template_name, template_value):
        """Generates marked-up HTML from template."""
        template_dirs = [os.path.join(
            appengine_config.BUNDLE_ROOT,
            'modules', 'search', 'results_templates')]
        template = jinja_utils.get_template(template_name, template_dirs)
        return jinja2.Markup(template.render(template_value))

    @classmethod
    def _get_returned_field(cls, result, field):
        """Returns the value of a field in result, '' if none exists."""
        try:
            return result[field][0].value
        except (KeyError, IndexError, AttributeError):
            return ''

    @classmethod
    def _get_snippet(cls, result):
        """Returns the value of the snippet in result, '' if none exists."""
        try:
            return result.expressions[0].value
        except (AttributeError, IndexError):
            return ''
class LessonResource(Resource):
    """A lesson in a course."""

    TYPE_NAME = 'Lesson'
    RETURNED_FIELDS = ['title', 'unit_id', 'lesson_id', 'url']
    SNIPPETED_FIELDS = ['content']
    FRESHNESS_THRESHOLD_DAYS = 3

    @classmethod
    def generate_all(cls, course, timestamps):
        """Yields a LessonResource for each available, stale lesson."""
        for lesson in course.get_lessons_for_all_units():
            unit = course.find_unit_by_id(lesson.unit_id)
            doc_id = cls._get_doc_id(lesson.unit_id, lesson.lesson_id)
            # Only index lessons that are publicly visible.
            if not (lesson.now_available and unit.now_available):
                continue
            # Skip lessons that were indexed recently enough.
            if cls._indexed_within_num_days(
                    timestamps, doc_id, cls.FRESHNESS_THRESHOLD_DAYS):
                continue
            try:
                yield LessonResource(lesson)
            except HTMLParser.HTMLParseError as e:
                logging.info(
                    'Error parsing objectives for Lesson %s.%s: %s',
                    lesson.unit_id, lesson.lesson_id, e)
                continue

    @classmethod
    def _get_doc_id(cls, unit_id, lesson_id):
        return '%s_%s_%s' % (cls.TYPE_NAME, unit_id, lesson_id)

    def __init__(self, lesson):
        """Extracts plain text and outbound links from the lesson HTML."""
        super(LessonResource, self).__init__()
        self.unit_id = lesson.unit_id
        self.lesson_id = lesson.lesson_id
        self.title = unicode(lesson.title)
        if lesson.notes:
            self.notes = urlparse.urljoin(PROTOCOL_PREFIX, lesson.notes)
        else:
            self.notes = ''
        if lesson.objectives:
            parser = ResourceHTMLParser(PROTOCOL_PREFIX)
            parser.feed(unicode(lesson.objectives))
            self.content = parser.get_content()
            self.links = parser.get_links()
        else:
            self.content = ''

    def get_document(self):
        """Builds the search.Document for this lesson."""
        unit_id_text = str(self.unit_id) if self.unit_id else ''
        lesson_url = 'unit?unit=%s&lesson=%s' % (
            self.unit_id, self.lesson_id)
        return search.Document(
            doc_id=self._get_doc_id(self.unit_id, self.lesson_id),
            fields=[
                search.TextField(name='unit_id', value=unit_id_text),
                search.TextField(name='title', value=self.title),
                search.TextField(name='content', value=self.content),
                search.TextField(name='url', value=lesson_url),
                search.TextField(name='type', value=self.TYPE_NAME),
                search.DateField(
                    name='date', value=datetime.datetime.utcnow())])
class LessonResult(Result):
    """An object for a lesson in search results."""

    def __init__(self, search_result):
        super(LessonResult, self).__init__()
        for field in ('url', 'title', 'unit_id'):
            setattr(self, field,
                    self._get_returned_field(search_result, field))
        self.snippet = self._get_snippet(search_result)

    def get_html(self):
        """Renders this lesson hit with the basic results template."""
        # I18N: Displayed in search results; denotes a lesson link.
        lesson_string = gettext.gettext('Lesson')
        return self._generate_html_from_template('basic.html', {
            'result_title': '%s - %s' % (self.title, lesson_string),
            'result_url': self.url,
            'result_snippet': jinja2.Markup(self.snippet),
        })
class ExternalLinkResource(Resource):
    """An external link from a course."""

    TYPE_NAME = 'ExternalLink'
    RETURNED_FIELDS = ['title', 'url']
    SNIPPETED_FIELDS = ['content']
    FRESHNESS_THRESHOLD_DAYS = 15

    # TODO(emichael): Allow the user to turn off external links in the dashboard

    @classmethod
    def generate_all_from_dist_dict(cls, link_dist, link_unit_id, timestamps):
        """Generate all external links from a map from URL to distance.

        Args:
            link_dist: dict. a map from URL to distance in the link graph from
                the course.
            link_unit_id: dict. A map from URL to the unit ID under which
                the link is found.
            timestamps: dict from doc_ids to last indexed datetimes. An empty
                dict indicates that all documents should be generated.

        Yields:
            A sequence of ExternalLinkResource.
        """
        url_queue = Queue.LifoQueue()
        # Seed the queue in ascending-distance order; since the queue is
        # LIFO, URLs are then popped largest-distance first.
        for url, dist in sorted(link_dist.iteritems(),
                                key=operator.itemgetter(1)):
            url_queue.put(url)
        while not url_queue.empty():
            url = url_queue.get()
            doc_id = cls._get_doc_id(url)
            # Skip URLs that were indexed recently enough.
            if (cls._indexed_within_num_days(timestamps, doc_id,
                                             cls.FRESHNESS_THRESHOLD_DAYS)):
                continue
            dist = link_dist[url]
            unit_id = link_unit_id.get(url)
            # NOTE(review): 'break' (not 'continue') abandons the entire
            # crawl at the first URL with distance > 1. Given the LIFO pop
            # order above, this presumes no seed in link_dist has distance
            # > 1 — confirm against callers.
            if dist > 1:
                break
            try:
                resource = ExternalLinkResource(url, unit_id)
            except URLNotParseableException as e:
                logging.info(e)
            else:
                # Only pages at distance 0 contribute further links, so the
                # crawl never goes deeper than distance 1.
                if dist < 1:
                    for new_link in resource.get_links():
                        if new_link not in link_dist:
                            link_dist[new_link] = dist + 1
                            url_queue.put(new_link)
                            link_unit_id[new_link] = unit_id
                yield resource

    def __init__(self, url, unit_id):
        """Fetches url and extracts its text, title and outbound links.

        Raises URLNotParseableException (from get_parser_for_html) when the
        page cannot be fetched or parsed.
        """
        # distance is the distance from the course material in the link graph,
        # where a lesson notes page has a distance of 0
        super(ExternalLinkResource, self).__init__()
        self.url = url
        self.unit_id = unit_id
        parser = get_parser_for_html(url)
        self.content = parser.get_content()
        self.title = parser.get_title()
        self.links = parser.get_links()

    @classmethod
    def _get_doc_id(cls, url):
        return '%s_%s' % (cls.TYPE_NAME, url)

    def get_document(self):
        """Builds the search.Document for this external link."""
        return search.Document(
            doc_id=self._get_doc_id(self.url),
            fields=[
                search.TextField(name='title', value=self.title),
                search.TextField(name='content', value=self.content),
                search.TextField(name='url', value=self.url),
                search.TextField(
                    name='unit_id',
                    value=str(self.unit_id) if self.unit_id else ''),
                search.TextField(name='type', value=self.TYPE_NAME),
                search.DateField(name='date',
                                 value=datetime.datetime.utcnow())])
class ExternalLinkResult(Result):
    """An object for an external link in the search results."""

    def __init__(self, search_result):
        super(ExternalLinkResult, self).__init__()
        for field in ('url', 'title', 'unit_id'):
            setattr(self, field,
                    self._get_returned_field(search_result, field))
        self.snippet = self._get_snippet(search_result)

    def get_html(self):
        """Renders this link hit with the basic results template."""
        return self._generate_html_from_template('basic.html', {
            'result_title': self.title,
            'result_url': self.url,
            'result_snippet': jinja2.Markup(self.snippet),
        })
class YouTubeFragmentResource(Resource):
    """An object for a YouTube transcript fragment in search results."""

    TYPE_NAME = 'YouTubeFragment'
    RETURNED_FIELDS = ['title', 'video_id', 'start', 'thumbnail_url']
    SNIPPETED_FIELDS = ['content']
    FRESHNESS_THRESHOLD_DAYS = 30

    @classmethod
    def generate_all(cls, course, timestamps):
        """Generate all YouTubeFragments for a course."""
        # TODO(emichael): Handle the existence of a single video in multiple
        # places in a course.

        # Matches <gcb-youtube videoid="..."> custom tags and captures the
        # video id.
        youtube_ct_regex = r"""<[ ]*gcb-youtube[^>]+videoid=['"]([^'"]+)['"]"""
        for lesson in course.get_lessons_for_all_units():
            unit = course.find_unit_by_id(lesson.unit_id)
            # Only index publicly visible lessons.
            # NOTE(review): assumes find_unit_by_id never returns None for a
            # lesson's unit_id — confirm.
            if not (lesson.now_available and unit.now_available):
                continue
            lesson_url = 'unit?unit=%s&lesson=%s' % (
                lesson.unit_id, lesson.lesson_id)
            # Index the lesson's primary video if its fragments are stale.
            if lesson.video and not cls._indexed_within_num_days(
                    timestamps, lesson.video, cls.FRESHNESS_THRESHOLD_DAYS):
                for fragment in cls._get_fragments_for_video(
                        lesson.unit_id, lesson.video, lesson_url):
                    yield fragment
            # Also index videos embedded in the objectives HTML.
            # NOTE(review): re.search yields only the first tag per lesson;
            # additional embedded videos are not matched.
            match = re.search(youtube_ct_regex, unicode(lesson.objectives))
            if match:
                for video_id in match.groups():
                    if not cls._indexed_within_num_days(
                            timestamps, video_id,
                            cls.FRESHNESS_THRESHOLD_DAYS):
                        for fragment in cls._get_fragments_for_video(
                                lesson.unit_id, video_id, lesson_url):
                            yield fragment
        # Index videos embedded in published announcements, when the
        # announcements module is enabled.
        if announcements.custom_module.enabled:
            for entity in get_locale_filtered_announcement_list(course):
                if entity.is_draft:
                    continue
                announcement_url = 'announcements#%s' % entity.key()
                match = re.search(youtube_ct_regex, entity.html)
                if match:
                    for video_id in match.groups():
                        if not cls._indexed_within_num_days(
                                timestamps, video_id,
                                cls.FRESHNESS_THRESHOLD_DAYS):
                            for fragment in cls._get_fragments_for_video(
                                    None, video_id, announcement_url):
                                yield fragment

    @classmethod
    def _indexed_within_num_days(cls, timestamps, video_id, num_days):
        """Checks freshness via any fragment doc belonging to video_id.

        Fragment doc ids share the prefix '<TYPE_NAME>_<video_id>_', so the
        first matching doc id stands in for the whole video.
        """
        for doc_id in timestamps:
            if doc_id.startswith(cls._get_doc_id(video_id, '')):
                return super(
                    YouTubeFragmentResource, cls)._indexed_within_num_days(
                        timestamps, doc_id, num_days)
        return False

    @classmethod
    def _get_fragments_for_video(cls, unit_id, video_id, url_in_course):
        """Get all of the transcript fragment docs for a specific video."""
        try:
            (transcript, title, thumbnail_url) = cls._get_video_data(video_id)
        except BaseException as e:
            logging.info('Could not parse YouTube video with id %s.\n%s',
                         video_id, e)
            return []
        # Aggregate the fragments into YOUTUBE_CAPTION_SIZE_SECS time chunks
        fragments = transcript.getElementsByTagName('text')
        aggregated_fragments = []
        # This parser is only used for unescaping HTML entities
        parser = HTMLParser.HTMLParser()
        while fragments:
            current_start = float(fragments[0].attributes['start'].value)
            current_text = []
            # Consume fragments until the next one starts more than
            # YOUTUBE_CAPTION_SIZE_SECS after the chunk began.
            while (fragments and
                   float(fragments[0].attributes['start'].value) -
                   current_start < YOUTUBE_CAPTION_SIZE_SECS):
                current_text.append(parser.unescape(
                    fragments.pop(0).firstChild.nodeValue))
            aggregated_fragment = YouTubeFragmentResource(
                video_id, unit_id, url_in_course, current_start,
                '\n'.join(current_text), title, thumbnail_url)
            aggregated_fragments.append(aggregated_fragment)
        return aggregated_fragments

    @classmethod
    def _get_video_data(cls, video_id):
        """Returns (track_minidom, title, thumbnail_url) for a video."""
        try:
            vid_info = get_minidom_from_xml(
                urlparse.urljoin(YOUTUBE_DATA_URL, video_id),
                ignore_robots=True)
            title = vid_info.getElementsByTagName(
                'title')[0].firstChild.nodeValue
            thumbnail_url = vid_info.getElementsByTagName(
                'media:thumbnail')[0].attributes['url'].value
        except (URLNotParseableException, IOError,
                IndexError, AttributeError) as e:
            # Metadata is best-effort: fall back to empty title/thumbnail
            # rather than failing the whole video.
            logging.error('Could not parse video info for video id %s.\n%s',
                          video_id, e)
            title = ''
            thumbnail_url = ''
        # TODO(emichael): Handle the existence of multiple tracks
        url = urlparse.urljoin(YOUTUBE_TIMED_TEXT_URL,
                               '?v=%s&type=list' % video_id)
        tracklist = get_minidom_from_xml(url, ignore_robots=True)
        tracks = tracklist.getElementsByTagName('track')
        if not tracks:
            raise URLNotParseableException('No tracks for video %s' % video_id)
        # Only the first caption track is used.
        track_name = tracks[0].attributes['name'].value
        track_lang = tracks[0].attributes['lang_code'].value
        track_id = tracks[0].attributes['id'].value
        url = urlparse.urljoin(YOUTUBE_TIMED_TEXT_URL, urllib.quote(
            '?v=%s&lang=%s&name=%s&id=%s' %
            (video_id, track_lang, track_name, track_id), '?/=&'))
        transcript = get_minidom_from_xml(url, ignore_robots=True)
        return (transcript, title, thumbnail_url)

    @classmethod
    def _get_doc_id(cls, video_id, start_time):
        return '%s_%s_%s' % (cls.TYPE_NAME, video_id, start_time)

    def __init__(self, video_id, unit_id, url, start, text, video_title,
                 thumbnail_url):
        """Stores one aggregated transcript chunk starting at `start` secs."""
        super(YouTubeFragmentResource, self).__init__()
        self.url = url
        self.video_id = video_id
        self.unit_id = unit_id
        self.start = start
        self.text = text
        self.video_title = video_title
        self.thumbnail_url = thumbnail_url

    def get_document(self):
        """Builds the search.Document for this transcript fragment."""
        return search.Document(
            doc_id=self._get_doc_id(self.video_id, self.start),
            fields=[
                search.TextField(name='title', value=self.video_title),
                search.TextField(name='video_id', value=self.video_id),
                search.TextField(
                    name='unit_id',
                    value=str(self.unit_id) if self.unit_id else ''),
                search.TextField(name='content', value=self.text),
                search.NumberField(name='start', value=self.start),
                search.TextField(name='thumbnail_url',
                                 value=self.thumbnail_url),
                search.TextField(name='url', value=self.url),
                search.TextField(name='type', value=self.TYPE_NAME),
                search.DateField(name='date',
                                 value=datetime.datetime.utcnow())])
class YouTubeFragmentResult(Result):
    """An object for a YouTube video fragment in search results."""

    def __init__(self, search_result):
        """Copies the returned fields of one search hit onto attributes."""
        super(YouTubeFragmentResult, self).__init__()
        self.doc_id = search_result.doc_id
        self.title = self._get_returned_field(search_result, 'title')
        self.video_id = self._get_returned_field(search_result, 'video_id')
        self.unit_id = self._get_returned_field(search_result, 'unit_id')
        self.start = self._get_returned_field(search_result, 'start')
        self.thumbnail_url = self._get_returned_field(search_result,
                                                      'thumbnail_url')
        self.url = self._get_returned_field(search_result, 'url')
        self.snippet = self._get_snippet(search_result)

    def get_html(self):
        """Renders this result through the youtube.html template."""
        template_value = {
            'result_title': self.title,
            'result_url': self.url,
            'video_id': self.video_id,
            'start_time': self.start,
            'thumbnail_url': self.thumbnail_url,
            'result_snippet': jinja2.Markup(self.snippet)
        }
        return self._generate_html_from_template('youtube.html', template_value)
class AnnouncementResource(Resource):
    """An announcement in a course."""

    TYPE_NAME = 'Announcement'
    RETURNED_FIELDS = ['title', 'url']
    SNIPPETED_FIELDS = ['content']
    # Announcements indexed within this many days are not re-indexed on
    # incremental runs.
    FRESHNESS_THRESHOLD_DAYS = 1

    @classmethod
    def generate_all(cls, course, timestamps):
        """Yields an AnnouncementResource for each indexable announcement.

        Drafts and announcements indexed within FRESHNESS_THRESHOLD_DAYS
        (per timestamps) are skipped. Announcements whose HTML fails to
        parse are logged and skipped rather than aborting the run.
        """
        if announcements.custom_module.enabled:
            for entity in get_locale_filtered_announcement_list(course):
                doc_id = cls._get_doc_id(entity.key())
                if not (entity.is_draft or cls._indexed_within_num_days(
                        timestamps, doc_id, cls.FRESHNESS_THRESHOLD_DAYS)):
                    try:
                        yield AnnouncementResource(entity)
                    except HTMLParser.HTMLParseError as e:
                        logging.info('Error parsing Announcement %s: %s',
                                     entity.title, e)
                        continue

    def __init__(self, announcement):
        """Extracts the indexable plain text from one announcement entity."""
        super(AnnouncementResource, self).__init__()
        self.title = announcement.title
        self.key = announcement.key()
        # Strip the announcement HTML down to text for the 'content' field.
        parser = ResourceHTMLParser(PROTOCOL_PREFIX)
        parser.feed(announcement.html)
        self.content = parser.get_content()

    @classmethod
    def _get_doc_id(cls, key):
        # Datastore key makes the doc id unique per announcement.
        return '%s_%s' % (cls.TYPE_NAME, key)

    def get_document(self):
        """Builds the search.Document for this announcement."""
        return search.Document(
            doc_id=self._get_doc_id(self.key),
            fields=[
                search.TextField(name='title', value=self.title),
                search.TextField(name='content', value=self.content),
                search.TextField(name='url',
                                 value='announcements#%s' % self.key),
                search.TextField(name='type', value=self.TYPE_NAME),
                search.DateField(name='date',
                                 value=datetime.datetime.utcnow())])
class AnnouncementResult(Result):
    """An object for an announcement in search results."""

    def __init__(self, search_result):
        """Copies the returned fields of one search hit onto attributes."""
        super(AnnouncementResult, self).__init__()
        self.url = self._get_returned_field(search_result, 'url')
        self.title = self._get_returned_field(search_result, 'title')
        self.unit_id = None  # Announcements are definitionally not in units.
        self.snippet = self._get_snippet(search_result)

    def get_html(self):
        """Renders this result through the generic basic.html template."""
        # I18N: Displayed in search results; denotes an announcement link.
        announcement_string = gettext.gettext('Announcement')
        template_value = {
            'result_title': '%s - %s' % (self.title, announcement_string),
            'result_url': self.url,
            'result_snippet': jinja2.Markup(self.snippet)
        }
        return self._generate_html_from_template('basic.html', template_value)
# Register new resource types here
# Each entry pairs an indexable Resource subclass with the Result class used
# to render its hits; get_returned_fields, get_snippeted_fields,
# generate_all_documents and process_results are all driven by this table.
RESOURCE_TYPES = [
    (LessonResource, LessonResult),
    (ExternalLinkResource, ExternalLinkResult),
    (YouTubeFragmentResource, YouTubeFragmentResult),
    (AnnouncementResource, AnnouncementResult)
]
def get_returned_fields():
    """Returns a list of fields that should be returned in a search result."""
    # 'type' is always needed so process_results can dispatch on it.
    fields = {'type'}
    for res_type, _unused_result_type in RESOURCE_TYPES:
        fields.update(res_type.RETURNED_FIELDS)
    return list(fields)
def get_snippeted_fields():
    """Returns a list of fields that should be snippeted in a search result."""
    fields = set()
    for res_type, _unused_result_type in RESOURCE_TYPES:
        fields.update(res_type.SNIPPETED_FIELDS)
    return list(fields)
def generate_all_documents(course, timestamps):
    """A generator for all docs for a given course.

    Args:
        course: models.courses.Course. the course to be indexed.
        timestamps: dict from doc_ids to last indexed datetimes. An empty dict
            indicates that all documents should be generated.
    Yields:
        A sequence of search.Document. If a document is within the freshness
        threshold, no document will be generated. This function does not modify
        timestamps.
    """
    # Link distance (0 for lesson notes, 1 for links found inside resources)
    # and owning unit for every external URL discovered during the crawl.
    link_dist = {}
    link_unit_id = {}
    for resource_type, unused_result_type in RESOURCE_TYPES:
        for resource in resource_type.generate_all(course, timestamps):
            unit_id = resource.get_unit_id()
            if isinstance(resource, LessonResource) and resource.notes:
                link_dist[resource.notes] = 0
                link_unit_id[resource.notes] = unit_id
            for link in resource.get_links():
                link_dist[link] = 1
                # Bug fix: record the unit for each discovered link. The old
                # code re-assigned link_unit_id[resource.notes] here, so links
                # other than the notes URL never got a unit association.
                link_unit_id[link] = unit_id
            yield resource.get_document()
    for resource in ExternalLinkResource.generate_all_from_dist_dict(
            link_dist, link_unit_id, timestamps):
        yield resource.get_document()
def process_results(results):
    """Generate result objects for the results of a query."""
    type_map = dict(
        (res_type.TYPE_NAME, result_cls)
        for res_type, result_cls in RESOURCE_TYPES)
    processed = []
    for hit in results:
        try:
            result_cls = type_map[hit['type'][0].value]
        except (AttributeError, IndexError, KeyError) as e:
            # If there is no type information, we cannot process the result
            logging.error("%s. Couldn't process result", e)
        else:
            processed.append(result_cls(hit))
    return processed
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Search module that uses Google App Engine's full text search."""
__author__ = 'Ellis Michael (emichael@google.com)'
import collections
import gettext
import logging
import math
import mimetypes
import os
import time
import traceback
import jinja2
import resources
import webapp2
import appengine_config
from common import safe_dom
from controllers import sites
from controllers import utils
from models import config
from models import counters
from models import courses
from models import custom_modules
from models import jobs
from models import transforms
from google.appengine.api import namespace_manager
from google.appengine.api import search
from google.appengine.ext import db
MODULE_NAME = 'Full Text Search'

# Admin-settable flag gating the daily cron indexing job (see CronHandler).
CAN_INDEX_ALL_COURSES_IN_CRON = config.ConfigProperty(
    'gcb_can_index_automatically', bool, safe_dom.Text(
        'Whether the search module can automatically index the course daily '
        'using a cron job. If enabled, this job would index the course '
        'incrementally so that only new items or items which have not been '
        'recently indexed are indexed.'),
    default_value=False)

# Performance counters exported by this module.
SEARCH_QUERIES_MADE = counters.PerfCounter(
    'gcb-search-queries-made',
    'The number of student queries made to the search module.')
SEARCH_RESULTS_RETURNED = counters.PerfCounter(
    'gcb-search-results-returned',
    'The number of search results returned across all student queries.')
SEARCH_FAILURES = counters.PerfCounter(
    'gcb-search-failures',
    'The number of search failure messages returned across all student '
    'queries.')

# Per-locale index name template; '%s' is filled in with the locale code.
INDEX_NAME = 'gcb_search_index_loc_%s'
# Number of search results shown per page.
RESULTS_LIMIT = 10
GCB_SEARCH_FOLDER_NAME = os.path.normpath('/modules/search/')
# Maximum attempts to index a single document before giving up on it.
MAX_RETRIES = 5

# I18N: Message displayed on search results page when error occurs.
SEARCH_ERROR_TEXT = gettext.gettext('Search is currently unavailable.')
class ModuleDisabledException(Exception):
    """Exception thrown when the search module is disabled."""
def get_index(namespace, locale):
    """Returns the per-locale full-text index for the given namespace."""
    assert locale, 'Must have a non-null locale'
    index_name = INDEX_NAME % locale
    return search.Index(name=index_name, namespace=namespace)
def index_all_docs(course, incremental):
    """Index all of the docs for a given models.Course object.

    Args:
        course: models.courses.Course. the course to index.
        incremental: boolean. whether or not to index only new or out-of-date
            items.
    Returns:
        A dict with three keys.
        'num_indexed_docs' maps to an int, the number of documents added to the
            index.
        'doc_types' maps to a counter with resource types as keys mapping to
            the number of that resource added to the index.
        'indexing_time_secs' maps to a float representing the number of seconds
            the indexing job took.
    Raises:
        ModuleDisabledException: The search module is currently disabled.
    """
    if not custom_module.enabled:
        raise ModuleDisabledException('The search module is disabled.')

    start_time = time.time()
    index = get_index(
        course.app_context.get_namespace_name(),
        course.app_context.get_current_locale())
    # For incremental runs, seed the metadata with what is already indexed so
    # fresh documents are skipped by the resource generators.
    timestamps, doc_types = (_get_index_metadata(index) if incremental
                             else ({}, {}))
    for doc in resources.generate_all_documents(course, timestamps):
        retry_count = 0
        while retry_count < MAX_RETRIES:
            try:
                index.put(doc)
                timestamps[doc.doc_id] = doc['date'][0].value
                doc_types[doc.doc_id] = doc['type'][0].value
                break
            # Use py3-compatible 'as' syntax, consistent with the rest of
            # this file (the old 'except search.Error, e' form was py2-only).
            except search.Error as e:
                if e.results[0].code == search.OperationResult.TRANSIENT_ERROR:
                    retry_count += 1
                    if retry_count >= MAX_RETRIES:
                        logging.error(
                            'Multiple transient errors indexing doc_id: %s',
                            doc.doc_id)
                else:
                    # Non-transient failure; don't retry this document.
                    logging.error('Failed to index doc_id: %s', doc.doc_id)
                    break
    indexed_doc_types = collections.Counter(doc_types.values())
    return {'num_indexed_docs': len(timestamps),
            'doc_types': indexed_doc_types,
            'indexing_time_secs': time.time() - start_time}
def clear_index(namespace, locale):
    """Delete all docs in the index for a given models.Course object.

    Args:
        namespace: str. the namespace whose index should be cleared.
        locale: str. the locale selecting which per-locale index to clear.
    Returns:
        A dict with one key, 'deleted_docs', mapping to the total number of
        documents removed from the index.
    Raises:
        ModuleDisabledException: The search module is currently disabled.
    """
    if not custom_module.enabled:
        raise ModuleDisabledException('The search module is disabled.')

    index = get_index(namespace, locale)
    total_docs = 0
    doc_ids = [document.doc_id for document in index.get_range(ids_only=True)]
    while doc_ids:
        # get_range returns at most one page of ids, so accumulate the count
        # on every pass. The old code only counted the first page, under-
        # reporting 'deleted_docs' for indices larger than one page.
        total_docs += len(doc_ids)
        index.delete(doc_ids)
        doc_ids = [document.doc_id
                   for document in index.get_range(ids_only=True)]
    return {'deleted_docs': total_docs}
def _get_index_metadata(index):
    """Returns dict from doc_id to timestamp and one from doc_id to doc_type."""
    timestamps = {}
    doc_types = {}
    cursor = search.Cursor()
    # Page through every document in the index, 1000 at a time, fetching only
    # the two metadata fields we need.
    while cursor:
        options = search.QueryOptions(
            limit=1000,
            cursor=cursor,
            returned_fields=['date', 'type'])
        page = index.search(search.Query(query_string='', options=options))
        cursor = page.cursor
        for doc in page:
            timestamps[doc.doc_id] = doc['date'][0].value
            doc_types[doc.doc_id] = doc['type'][0].value
    return timestamps, doc_types
def fetch(course, query_string, offset=0, limit=RESULTS_LIMIT):
    """Return an HTML fragment with the results of a search for query_string.

    Args:
        course: models.courses.Course. the course to search.
        query_string: str. the user's specified query.
        offset: int. the number of results to skip.
        limit: int. the number of results to return.
    Returns:
        A dict with two keys.
        'results' maps to an ordered list of resources.Result objects.
        'total_found' maps to the total number of results in the index which
            match query_string.
    Raises:
        ModuleDisabledException: The search module is currently disabled.
    """
    if not custom_module.enabled:
        raise ModuleDisabledException('The search module is disabled.')

    index = get_index(
        course.app_context.get_namespace_name(),
        course.app_context.get_current_locale())
    try:
        # TODO(emichael): Don't compute these for every query
        returned_fields = resources.get_returned_fields()
        snippeted_fields = resources.get_snippeted_fields()
        options = search.QueryOptions(
            limit=limit,
            offset=offset,
            returned_fields=returned_fields,
            number_found_accuracy=100,
            snippeted_fields=snippeted_fields)
        query = search.Query(query_string=query_string, options=options)
        results = index.search(query)
    except search.Error:
        # NOTE(review): search.Error covers malformed user queries among
        # other failures; all are treated as an empty result set rather
        # than an error page.
        logging.info('Failed searching for: %s', query_string)
        return {'results': None, 'total_found': 0}

    processed_results = resources.process_results(results)
    return {'results': processed_results, 'total_found': results.number_found}
class SearchHandler(utils.BaseHandler):
    """Handler for generating the search results page."""

    def get(self):
        """Process GET request: run the query and render search.html."""
        # TODO(emichael): move timing to Javascript
        if not custom_module.enabled:
            self.error(404)
            return

        student = self.personalize_page_and_get_enrolled(
            supports_transient_student=True)
        if not student:
            return

        try:
            start = time.time()
            # TODO(emichael): Don't use get because it can't handle utf-8
            query = self.request.get('query')
            offset = self.request.get('offset')
            self.template_value['navbar'] = {}
            if query:
                try:
                    offset = int(offset)
                except (ValueError, TypeError):
                    offset = 0
                self.template_value['query'] = query
                SEARCH_QUERIES_MADE.inc()
                response = fetch(self.get_course(), query, offset=offset)
                # Drop results from units the student's track cannot see.
                response = self.filter(response, student)
                self.template_value['time'] = '%.2f' % (time.time() - start)
                self.template_value['search_results'] = response['results']

                total_found = response['total_found']
                # Pagination links, RESULTS_LIMIT results per page.
                if offset + RESULTS_LIMIT < total_found:
                    self.template_value['next_link'] = (
                        'search?query=%s&offset=%d' %
                        (query, offset + RESULTS_LIMIT))
                if offset - RESULTS_LIMIT >= 0:
                    self.template_value['previous_link'] = (
                        'search?query=%s&offset=%d' %
                        (query, offset - RESULTS_LIMIT))
                # NOTE(review): relies on Python 2 integer division to
                # produce a whole page number.
                self.template_value['page_number'] = offset / RESULTS_LIMIT + 1
                self.template_value['total_pages'] = int(math.ceil(
                    float(total_found) / RESULTS_LIMIT))

                if response['results']:
                    SEARCH_RESULTS_RETURNED.inc(len(response['results']))

        # TODO(emichael): Remove this check when the unicode issue is fixed in
        # dev_appserver.
        except UnicodeEncodeError as e:
            SEARCH_FAILURES.inc()
            if not appengine_config.PRODUCTION_MODE:
                # This message will only be displayed to the course author in
                # dev, so it does not need to be I18N'd
                self.template_value['search_error'] = (
                    'There is a known issue in App Engine\'s SDK '
                    '(code.google.com/p/googleappengine/issues/detail?id=9335) '
                    'which causes an error when generating search snippets '
                    'which contain non-ASCII characters. This error does not '
                    'occur in the production environment, so you can safely '
                    'run your course with unicode characters on appspot.com.')
                logging.error('[Unicode/Dev server issue] Error rendering the '
                              'search page: %s.', e)
            else:
                self.template_value['search_error'] = SEARCH_ERROR_TEXT
                logging.error('Error rendering the search page: %s. %s',
                              e, traceback.format_exc())
        except Exception as e:  # pylint: disable=broad-except
            # Catch-all so the page still renders with an error banner.
            SEARCH_FAILURES.inc()
            self.template_value['search_error'] = SEARCH_ERROR_TEXT
            logging.error('Error rendering the search page: %s. %s',
                          e, traceback.format_exc())
        finally:
            # Always render the page, with or without results.
            path = sites.abspath(self.app_context.get_home_folder(),
                                 GCB_SEARCH_FOLDER_NAME)
            template = self.get_template('search.html', additional_dirs=[path])
            self.template_value['navbar'] = {}
            self.response.out.write(template.render(self.template_value))

    def filter(self, response, student):
        """Removes results in units outside the student's track.

        Results with no unit_id (e.g. announcements) are always kept.
        Note that 'total_found' is recomputed from the filtered page only.
        """
        if not response['results']:
            return response

        filtered_results = []
        available_unit_ids = set(
            str(unit.unit_id) for unit in
            self.get_course().get_track_matching_student(student))
        for result in response['results']:
            if not result.unit_id or str(result.unit_id) in available_unit_ids:
                filtered_results.append(result)
        return {
            'results': filtered_results,
            'total_found': len(filtered_results)
        }
class AssetsHandler(webapp2.RequestHandler):
    """Content handler for assets associated with search."""

    def get(self):
        """Respond to HTTP GET methods: serve a static file under assets/."""
        if not custom_module.enabled:
            self.error(404)
            return

        path = self.request.path
        if path.startswith('/'):
            path = path[1:]
        path = os.path.normpath(path)

        if os.path.basename(os.path.dirname(path)) != 'assets':
            # Bug fix: must stop here. The original code fell through after
            # error(404) and served the file anyway, defeating the
            # assets-directory restriction.
            self.error(404)
            return

        resource_file = os.path.join(appengine_config.BUNDLE_ROOT, path)
        mimetype = mimetypes.guess_type(resource_file)[0]
        if mimetype is None:
            mimetype = 'application/octet-stream'

        try:
            sites.set_static_resource_cache_control(self)
            self.response.status = 200
            self.response.headers['Content-Type'] = mimetype
            # Use a context manager so the file handle is always closed
            # (the original leaked it).
            with open(resource_file) as stream:
                self.response.write(stream.read())
        except IOError:
            self.error(404)
class SearchDashboardHandler(object):
    """Should only be inherited by DashboardHandler, not instantiated."""

    def get_search(self):
        """Renders course indexing view.

        Shows the status of whichever job (indexing or clearing) ran most
        recently, plus the buttons (with XSRF tokens) to start new jobs.
        """
        template_values = {'page_title': self.format_title('Search')}
        mc_template_value = {}
        mc_template_value['module_enabled'] = custom_module.enabled
        indexing_job = IndexCourse(self.app_context).load()
        clearing_job = ClearIndex(self.app_context).load()
        if indexing_job and (not clearing_job or
                             indexing_job.updated_on > clearing_job.updated_on):
            if indexing_job.status_code in [jobs.STATUS_CODE_STARTED,
                                            jobs.STATUS_CODE_QUEUED]:
                mc_template_value['status_message'] = 'Indexing in progress.'
                mc_template_value['job_in_progress'] = True
            elif indexing_job.status_code == jobs.STATUS_CODE_COMPLETED:
                mc_template_value['indexed'] = True
                mc_template_value['last_updated'] = (
                    indexing_job.updated_on.strftime(
                        utils.HUMAN_READABLE_DATETIME_FORMAT))
                mc_template_value['index_info'] = transforms.loads(
                    indexing_job.output)
            elif indexing_job.status_code == jobs.STATUS_CODE_FAILED:
                mc_template_value['status_message'] = (
                    'Indexing job failed with error: %s' % indexing_job.output)
        elif clearing_job:
            if clearing_job.status_code in [jobs.STATUS_CODE_STARTED,
                                            jobs.STATUS_CODE_QUEUED]:
                mc_template_value['status_message'] = 'Clearing in progress.'
                mc_template_value['job_in_progress'] = True
            elif clearing_job.status_code == jobs.STATUS_CODE_COMPLETED:
                mc_template_value['status_message'] = (
                    'The index has been cleared.')
            elif clearing_job.status_code == jobs.STATUS_CODE_FAILED:
                mc_template_value['status_message'] = (
                    'Clearing job failed with error: %s' % clearing_job.output)
        else:
            mc_template_value['status_message'] = (
                'No indexing job has been run yet.')
        mc_template_value['index_course_xsrf_token'] = self.create_xsrf_token(
            'index_course')
        mc_template_value['clear_index_xsrf_token'] = self.create_xsrf_token(
            'clear_index')
        template_values['main_content'] = jinja2.Markup(self.get_template(
            'search_dashboard.html', [os.path.dirname(__file__)]
        ).render(mc_template_value, autoescape=True))
        self.render_page(template_values)

    def post_index_course(self):
        """Submits a new indexing operation."""
        try:
            incremental = self.request.get('incremental') == 'true'
            check_jobs_and_submit(IndexCourse(self.app_context, incremental),
                                  self.app_context)
        except db.TransactionFailedError:
            # Double submission from multiple browsers, just pass
            pass
        self.redirect('/dashboard?action=search')

    def post_clear_index(self):
        """Submits a new index-clearing operation."""
        try:
            check_jobs_and_submit(ClearIndex(self.app_context),
                                  self.app_context)
        except db.TransactionFailedError:
            # Double submission from multiple browsers, just pass
            pass
        self.redirect('/dashboard?action=search')
class CronHandler(utils.BaseHandler):
    """Iterates through all courses and starts an indexing job for each one.

    All jobs should be submitted through the transactional check_jobs_and_submit
    method to prevent multiple index operations from running at the same time.
    If an index job is currently running when this cron job attempts to start
    one, this operation will be a noop for that course.
    """

    def get(self):
        """Start an index job for each course."""
        cron_logger = logging.getLogger('modules.search.cron')
        self.response.headers['Content-Type'] = 'text/plain'

        if CAN_INDEX_ALL_COURSES_IN_CRON.value:
            counter = 0
            for context in sites.get_all_courses():
                namespace = context.get_namespace_name()
                counter += 1
                try:
                    check_jobs_and_submit(IndexCourse(context), context)
                except db.TransactionFailedError as e:
                    # A job is already pending for this course; skip it.
                    cron_logger.info(
                        'Failed to submit job #%s in namespace %s: %s',
                        counter, namespace, e)
                else:
                    cron_logger.info(
                        'Index job #%s submitted for namespace %s.',
                        counter, namespace)
            cron_logger.info('All %s indexing jobs started; cron job complete.',
                             counter)
        else:
            cron_logger.info('Automatic indexing disabled. Cron job halting.')

        self.response.write('OK\n')
@db.transactional(xg=True)
def check_jobs_and_submit(job, app_context):
    """Determines whether an indexing job is running and submits if not."""
    busy_codes = [jobs.STATUS_CODE_STARTED, jobs.STATUS_CODE_QUEUED]
    indexing_job = IndexCourse(app_context).load()
    clearing_job = ClearIndex(app_context).load()
    for existing in (indexing_job, clearing_job):
        if existing and existing.status_code in busy_codes:
            raise db.TransactionFailedError('Index job is currently running.')
    job.non_transactional_submit()
class IndexCourse(jobs.DurableJob):
    """A job that indexes the course."""

    @staticmethod
    def get_description():
        return 'course index'

    def __init__(self, app_context, incremental=True):
        """Args: incremental: bool. If True, skip recently indexed docs."""
        super(IndexCourse, self).__init__(app_context)
        self.incremental = incremental

    def run(self):
        """Index the course, once per allowed locale; returns summed stats."""
        namespace = namespace_manager.get_namespace()
        logging.info('Running indexing job for namespace %s. Incremental: %s',
                     namespace_manager.get_namespace(), self.incremental)
        app_context = sites.get_app_context_for_namespace(namespace)

        # Make a request URL to make sites.get_course_for_current_request work
        sites.set_path_info(app_context.slug)

        # Aggregate per-locale stats into a single report.
        indexing_stats = {
            'num_indexed_docs': 0,
            'doc_types': collections.Counter(),
            'indexing_time_secs': 0,
            'locales': []
        }
        for locale in app_context.get_allowed_locales():
            app_context.set_current_locale(locale)
            course = courses.Course(None, app_context=app_context)
            stats = index_all_docs(course, self.incremental)
            indexing_stats['num_indexed_docs'] += stats['num_indexed_docs']
            indexing_stats['doc_types'] += stats['doc_types']
            indexing_stats['indexing_time_secs'] += stats['indexing_time_secs']
            indexing_stats['locales'].append(locale)
        return indexing_stats
class ClearIndex(jobs.DurableJob):
    """A job that clears the index for a course."""

    @staticmethod
    def get_description():
        return 'clear course index'

    def run(self):
        """Clear the per-locale indices; returns summed deletion stats."""
        namespace = namespace_manager.get_namespace()
        logging.info('Running clearing job for namespace %s.', namespace)
        app_context = sites.get_app_context_for_namespace(namespace)

        clear_stats = {
            'deleted_docs': 0,
            'locales': []
        }
        for locale in app_context.get_allowed_locales():
            stats = clear_index(namespace, locale)
            clear_stats['deleted_docs'] += stats['deleted_docs']
            clear_stats['locales'].append(locale)
        return clear_stats
# Module registration
# Set by register_module(); holds this module's custom_modules.Module instance.
custom_module = None
def register_module():
    """Registers this module in the registry."""
    # Routes shared across the whole deployment (assets, cron entry point).
    global_routes = [
        ('/modules/search/assets/.*', AssetsHandler),
        ('/cron/search/index_courses', CronHandler)
    ]
    # Routes served within each course's URL prefix.
    namespaced_routes = [
        ('/search', SearchHandler)
    ]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        MODULE_NAME,
        'Provides search capabilities for courses',
        global_routes, namespaced_routes)
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courses module."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from common import resource
from controllers import assessments
from controllers import lessons
from controllers import utils
from models import content
from models import resources_display
from models import custom_modules
from models import roles
from tools import verify
# Permission names registered by this module and their dashboard descriptions.
# NOTE(review): the 'All_' capitalization is unusual for constants but these
# names are part of the module's public surface, so they are kept as-is.
All_LOCALES_PERMISSION = 'can_pick_all_locales'
All_LOCALES_DESCRIPTION = 'Can pick all locales, including unavailable ones.'

SEE_DRAFTS_PERMISSION = 'can_see_draft_content'
SEE_DRAFTS_DESCRIPTION = 'Can see lessons and assessments with draft status.'

# Set by register_module(); holds this module's custom_modules.Module instance.
custom_module = None
def can_pick_all_locales(app_context):
    """Whether the current user may select any locale, even unavailable ones."""
    return roles.Roles.is_user_allowed(
        app_context, custom_module, All_LOCALES_PERMISSION)


def can_see_drafts(app_context):
    """Whether the current user may view draft lessons and assessments."""
    return roles.Roles.is_user_allowed(
        app_context, custom_module, SEE_DRAFTS_PERMISSION)
def register_module():
    """Registers this module in the registry."""

    def on_module_enabled():
        # Register permissions and the displayable resource types this
        # module contributes.
        roles.Roles.register_permissions(custom_module, permissions_callback)
        resource.Registry.register(resources_display.ResourceCourseSettings)
        resource.Registry.register(resources_display.ResourceUnit)
        resource.Registry.register(resources_display.ResourceAssessment)
        resource.Registry.register(resources_display.ResourceLink)
        resource.Registry.register(resources_display.ResourceLesson)
        resource.Registry.register(utils.ResourceHtmlHook)

    def permissions_callback(unused_app_context):
        # Permissions exposed on the roles admin page.
        return [
            roles.Permission(All_LOCALES_PERMISSION, All_LOCALES_DESCRIPTION),
            roles.Permission(SEE_DRAFTS_PERMISSION, SEE_DRAFTS_DESCRIPTION)
        ]

    # provide parser to verify
    verify.parse_content = content.parse_string_in_scope

    # setup routes
    courses_routes = [
        ('/', lessons.CourseHandler),
        ('/activity', lessons.UnitHandler),
        ('/answer', assessments.AnswerHandler),
        ('/assessment', lessons.AssessmentHandler),
        ('/course', lessons.CourseHandler),
        ('/forum', utils.ForumHandler),
        ('/preview', utils.PreviewHandler),
        ('/register', utils.RegisterHandler),
        ('/rest/locale', utils.StudentLocaleRESTHandler),
        ('/review', lessons.ReviewHandler),
        ('/reviewdashboard', lessons.ReviewDashboardHandler),
        ('/student/editstudent', utils.StudentEditStudentHandler),
        ('/student/settracks', utils.StudentSetTracksHandler),
        ('/student/home', utils.StudentProfileHandler),
        ('/student/unenroll', utils.StudentUnenrollHandler),
        ('/unit', lessons.UnitHandler)]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Course',
        'A set of pages for delivering an online course.',
        [], courses_routes,
        notify_module_enabled=on_module_enabled)
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing GIFT format."""
__author__ = 'borislavr@google.com (Boris Roussev)'
import logging
from pyparsing import alphanums
from pyparsing import Combine
from pyparsing import LineEnd
from pyparsing import Literal
from pyparsing import nums
from pyparsing import OneOrMore
from pyparsing import Optional
from pyparsing import ParseException
from pyparsing import restOfLine
from pyparsing import SkipTo
from pyparsing import Suppress
from pyparsing import Word
class ParseError(Exception):
    """Exception raised to show that a validation failed."""
def to_dict(node):
    """Recursively converts a nested tuple/list structure into dicts/lists.

    Lists map element-wise; a tuple of (key, value) tuples becomes a dict;
    a single (key, value) tuple becomes a one-entry dict; anything else is
    returned unchanged.
    """
    if isinstance(node, list):
        return [to_dict(child) for child in node]
    if not isinstance(node, tuple):
        return node
    if isinstance(node[0], tuple):
        return dict((pair[0], to_dict(pair[1])) for pair in node)
    return {node[0]: to_dict(node[1])}
def sep(separator):
    """Makes a suppressed (non-token-emitting) separator parser element."""
    literal = Literal(separator)
    return Suppress(literal)
def strip_spaces(value):
    """Parse action stripping surrounding whitespace from the first token."""
    token = value[0]
    return token.strip()
def make_int(value):
    """Parse action converting the first token to an int."""
    token = value[0]
    return int(token)


def make_true(unused_value):
    """Parse action yielding the constant True."""
    return True


def make_false(unused_value):
    """Parse action yielding the constant False."""
    return False


def make_float(tokens):
    """Parse action converting the first token to a float."""
    token = tokens[0]
    return float(token)
def batch(tokens, size=3):
    """Groups tokens into consecutive size-length tuples, dropping leftovers."""
    iterator = iter(tokens)
    return zip(*(size * [iterator]))
def set_multi_answer_question(toks, question_type):
    """Builds a ('question', ...) structure of question_type from tokens."""
    choices = next(entry[1] for entry in toks
                   if isinstance(entry, tuple) and entry[0] == 'choices')
    return ('question', (
        ('type', question_type),
        ('title', toks.title),
        ('task', toks.task),
        ('choices', choices)))
# multi choice question parse actions
def set_multi_choice_answer(toks):
    """Converts one parsed multiple-choice answer into attribute tuples."""
    if toks.sign == '=':
        # '=' marks the single correct answer; it always scores 100.
        score = 100
    else:
        score = toks.weight
    return (
        ('sign', toks.sign),
        ('score', score),
        ('text', toks.answer),
        ('feedback', toks.feedback))
def set_multi_choice_answers(toks):
    """Parse action wrapping all parsed answers under a 'choices' key."""
    return ('choices', toks.asList())


def set_multi_choice_question(toks):
    """Parse action building a 'multi_choice' question structure."""
    return set_multi_answer_question(toks, 'multi_choice')


# short answer question parse actions
# Short-answer grammars reuse the multiple-choice answer actions verbatim.
set_short_answer = set_multi_choice_answer
set_short_answers = set_multi_choice_answers


def set_short_answer_question(toks):
    """Parse action building a 'short_answer' question structure."""
    return set_multi_answer_question(toks, 'short_answer')
# true false question parse actions
def set_true_false_question(toks):
    """Parse action building a 'true_false' question structure."""
    only_choice = (('text', toks.answer), ('feedback', toks.feedback))
    return ('question', (
        ('type', 'true_false'),
        ('title', toks.title),
        ('task', toks.task),
        ('choices', [only_choice])))
# match answer question parse actions
def set_match_answer(toks):
    """Converts one parsed matching pair into attribute tuples."""
    pair = (
        ('lhs', toks.lhs),
        ('rhs', toks.rhs),
        ('feedback', toks.feedback))
    return pair
def set_match_answers(toks):
    """Parse action wrapping all matching pairs under a 'choices' key."""
    return ('choices', toks.asList())


def set_match_answer_question(toks):
    """Parse action building a 'match_answer' question structure."""
    return set_multi_answer_question(toks, 'match_answer')
# missing word question parse actions
def set_missing_word_question(toks):
    """Parse action building a 'missing_word' question structure."""
    choices = next(entry[1] for entry in toks
                   if isinstance(entry, tuple) and entry[0] == 'choices')
    body = (
        ('type', 'missing_word'),
        ('prefix', toks.prefix),
        ('choices', choices),
        ('suffix', toks.suffix))
    return ('question', body)
# numeric question parse actions
def set_numeric_question(toks):
    """Parse action building a 'numeric' question structure."""
    answer = (
        ('text', toks.answer),
        ('error', toks.error),
        ('min', toks.min),
        ('max', toks.max),
        ('feedback', toks.feedback))
    return ('question', (
        ('type', 'numeric'),
        ('title', toks.title),
        ('task', toks.task),
        ('choices', [answer])))
def set_essay_question(toks):
    """Parse action building an 'essay' question structure (no choices)."""
    return ('question', (
        ('type', 'essay'),
        ('title', toks.title),
        ('task', toks.task)))


def set_questions(toks):
    """Parse action wrapping all parsed questions under a 'questions' key."""
    return ('questions', toks)
class GiftParser(object):
    """Parser for GIFT format questions.

    The class attributes below are pyparsing grammar fragments assembled once
    at class definition time; `bnf` is the top-level grammar matching one or
    more questions. Use the parse()/parse_questions() classmethods as entry
    points. The ordering of alternatives in `question` matters: more specific
    grammars must be tried before more permissive ones.
    """

    # separators, which have been suppressed
    double_colon = sep('::')
    colon = sep(':')
    span = sep('..')
    left_curly = sep('{')
    right_curly = sep('}')
    equals = sep('=')
    tilda = sep('~')
    percent = sep('%')
    arrow = sep('->')
    pound = sep('#')
    dbl_fwd_slash = sep('//')

    # integer signs
    plus = Literal('+')
    minus = Literal('-')

    # Boolean literals used by true/false questions: {T}, {TRUE}, {F}, {FALSE}.
    bool_true = (
        Literal('TRUE') |
        Literal('T')).setParseAction(make_true)
    bool_false = (
        Literal('FALSE') |
        Literal('F')).setParseAction(make_false)
    boolean = bool_true | bool_false

    plus_or_minus = plus | minus
    number = Word(nums)
    integer = Combine(
        Optional(plus_or_minus) + number).setParseAction(make_int)
    unsigned_float = Combine(
        Word(nums) +
        Optional(Word('.', nums))).setParseAction(make_float)
    signed_float = Combine(
        Optional(plus_or_minus) +
        Word(nums) +
        Optional(Word('.', nums))).setParseAction(make_float)

    blank_lines = Suppress(LineEnd() + OneOrMore(LineEnd()))
    # '// ...' line comments; attached to each question grammar via ignore().
    comment = dbl_fwd_slash + restOfLine

    # '::name::' — the optional question title delimited by double colons.
    title = (double_colon +
             SkipTo(double_colon).setParseAction(
                 strip_spaces)('title') +
             double_colon)
    # Question text: everything up to the '{' that opens the answer block.
    task = SkipTo(left_curly).setParseAction(
        strip_spaces)('task')

    # Multiple choice questions with one correct answer.
    #
    # // question: 1 name: Grants tomb
    # ::Grants tomb::Who is buried in Grant's tomb in New York City? {
    # =Grant
    # ~No one
    # #Was true for 12 years
    # ~Napoleon
    # #He was buried in France
    # ~Churchill
    # #He was buried in England
    # ~Mother Teresa
    # #She was buried in India }
    #
    # Multiple choice questions with multiple right answers.
    #
    # What two people are entombed in Grant's tomb? {
    # ~%-100%No one
    # ~%50%Grant
    # ~%50%Grant's wife
    # ~%-100%Grant's father}

    # An answer ends where the next answer (or the closing brace) begins.
    eof_multi_choice_answer = equals | tilda | right_curly
    # The extended terminator also stops at '#', which starts feedback.
    ext_eof_multi_choice_answer = pound | eof_multi_choice_answer

    # '# hello world ~'
    multi_choice_feedback = Combine(
        pound +
        SkipTo(eof_multi_choice_answer).setParseAction(strip_spaces))

    # 'answer #'
    multi_choice_answer_text = SkipTo(
        ext_eof_multi_choice_answer).setParseAction(strip_spaces)

    # '%50%'-style weight; only meaningful on '~' (partial credit) choices.
    weight = Combine(percent + integer + percent).setParseAction(make_int)

    multi_choice_answer = (
        (Literal('=')('sign') |
         Literal('~')('sign') + Optional(weight, default=0)('weight')) +
        multi_choice_answer_text('answer') +
        Optional(multi_choice_feedback, default='')('feedback')
    ).setParseAction(set_multi_choice_answer)
    multi_choice_answers = OneOrMore(multi_choice_answer)

    multi_choice_question = (
        Optional(title, default='') +
        task +
        left_curly +
        multi_choice_answers.setParseAction(set_multi_choice_answers) +
        right_curly
    ).setParseAction(set_multi_choice_question)
    multi_choice_question.ignore(comment)

    # True-false questions.
    # Sample:
    # // question: 0 name: TrueStatement using {T} style
    # ::TrueStatement about Grant::Grant was buried in a tomb in NY.{T}
    #
    # // question: 0 name: FalseStatement using {FALSE} style
    # ::FalseStatement about sun::The sun rises in the West.{FALSE}
    true_false_feedback = Combine(
        pound +
        SkipTo(right_curly).setParseAction(strip_spaces))

    true_false_answer = (
        left_curly +
        boolean('answer') +
        Optional(true_false_feedback, default='')('feedback') +
        right_curly)

    true_false_question = (
        Optional(title, default='') +
        task +
        true_false_answer
    ).setParseAction(set_true_false_question)
    true_false_question.ignore(comment)

    # Short answer questions.
    # Samples:
    # Who's buried in Grant's tomb?{=Grant =Ulysses S. Grant =Ulysses Grant}
    # Two plus two equals {=four =4}
    eof_short_answer_answer = equals | right_curly
    ext_eof_short_answer = pound | eof_short_answer_answer

    short_answer_feedback = Combine(
        pound +
        SkipTo(eof_short_answer_answer).setParseAction(strip_spaces))

    short_answer_text = SkipTo(ext_eof_short_answer).setParseAction(
        strip_spaces)

    short_answer = (
        equals +
        short_answer_text('answer') +
        Optional(short_answer_feedback, default='')('feedback')
    ).setParseAction(set_short_answer)

    # NOTE(review): unlike the other question forms, this one requires a
    # trailing LineEnd after '}' — presumably to disambiguate from
    # multi-choice; confirm against the test corpus.
    short_answers = (
        OneOrMore(short_answer) +
        right_curly +
        LineEnd())

    short_answer_question = (
        Optional(title, default='') +
        task +
        left_curly +
        short_answers.setParseAction(set_short_answers)
    ).setParseAction(set_short_answer_question)
    short_answer_question.ignore(comment)

    # Matching questions.
    # Sample:
    # Match the following countries with their corresponding capitals. {
    # =Canada -> Ottawa
    # =Italy -> Rome
    # =Japan -> Tokyo
    # =India -> New Delhi
    # }
    eof_match_answer = equals | right_curly
    ext_eof_match_answer = pound | equals | right_curly

    match_feedback = Combine(
        pound +
        SkipTo(eof_match_answer).setParseAction(strip_spaces))

    lhs = SkipTo(arrow).setParseAction(strip_spaces)

    match_answer = (
        equals +
        lhs('lhs') +
        arrow +
        SkipTo(ext_eof_match_answer)('rhs') +
        Optional(match_feedback, default='')('feedback')
    ).setParseAction(set_match_answer)

    # At least three pairs are required (two explicit + OneOrMore).
    match_answers = (
        left_curly +
        match_answer + match_answer + OneOrMore(match_answer) +
        right_curly)

    match_question = (
        Optional(title, default='') +
        task +
        match_answers.setParseAction(set_match_answers)
    ).setParseAction(set_match_answer_question)
    match_question.ignore(comment)

    # Missing word questions.
    #
    # CB costs {~lots of money =nothing ~a small amount} to download.
    missing_word_answers = multi_choice_answers
    prefix = SkipTo(left_curly)
    suffix = Combine(OneOrMore(Word(alphanums)))

    missing_word_question = (
        prefix('prefix') +
        left_curly +
        missing_word_answers.setParseAction(set_multi_choice_answers) +
        right_curly +
        suffix('suffix')
    ).setParseAction(set_missing_word_question)

    # Numeric questions.
    # No support for multiple numeric answers.
    # Sample: When was Ulysses S. Grant born?{#1822:5}
    numeric_single_answer = (
        left_curly +
        pound +
        signed_float.setParseAction(make_float)('answer') +
        Optional(
            colon +
            unsigned_float.setParseAction(make_float)('error')) +
        Optional(match_feedback, default='')('feedback') +
        right_curly)

    # Range form: {#min..max}
    numeric_range_answer = (
        left_curly +
        pound +
        signed_float.setParseAction(make_float)('min') +
        span +
        signed_float.setParseAction(make_float)('max') +
        right_curly)

    numeric_answer = (
        numeric_range_answer |
        numeric_single_answer)

    numeric_question = (
        Optional(title, default='') +
        task +
        numeric_answer
    ).setParseAction(set_numeric_question)
    numeric_question.ignore(comment)

    # Essay questions.
    # Write a short biography of Dag Hammarskjold. {}
    essay_answer = left_curly + right_curly

    essay_question = (
        Optional(title, default='') +
        task +
        essay_answer
    ).setParseAction(set_essay_question)
    essay_question.ignore(comment)

    # Try the most constrained grammars first; multi_choice is near the end
    # because its answer text grammar is the most permissive.
    question = (
        essay_question |
        match_question |
        numeric_question |
        missing_word_question |
        multi_choice_question |
        true_false_question |
        short_answer_question)

    bnf = OneOrMore(question)

    @classmethod
    def parse(cls, text):
        """Parses GIFT text into a pyparsing result tree.

        Raises:
            ParseError: if the text is not valid GIFT syntax.
        """
        try:
            return cls.bnf.parseString(text)
        except ParseException as e:
            logging.exception('Invalid GIFT syntax: %s', text)
            raise ParseError(e.msg)

    @classmethod
    def parse_questions(cls, text):
        """Parses new-line separated GIFT questions into CB question dicts."""
        tree = cls.parse(text)
        return [GiftAdapter().convert_to_question(node) for node in tree]
class GiftAdapter(object):
    """Converts a GIFT-formatted question to a CB question dict."""

    QUESTION_TYPES = ['multi_choice', 'true_false', 'short_answer', 'numeric']

    def normalize_score(self, score):
        """Map a GIFT percentage weight (0..100) onto CB's 0..1 scale."""
        return score / 100.0

    def convert_to_question(self, result):
        """Convert one parsed ('question', ...) node into a CB question dict."""
        parsed = to_dict(result[1])
        out = self.build_question(parsed)
        if out['type'] == 'multi_choice':
            # '{=a =b}'-style questions are really short answers; reclassify.
            out['type'] = self.determine_question_type(out)
        return self.add_choices(out)

    def build_question(self, src):
        """Builds a question dictionary from a ParseResult object.

        Raises:
            ValueError: if the parsed type is not one CB supports.
        """
        kind = src['type']
        if kind not in self.QUESTION_TYPES:
            raise ValueError(
                'Unsupported question type: %s' % kind)
        return {
            'type': kind,
            'question': src['task'],
            # Fall back to the question text when no title was given.
            'description': src['title'] or src['task'],
            'choices': src['choices'],
        }

    def add_choices(self, question):
        """Dispatch to the per-type choice/grader expansion."""
        kind = question['type']
        if kind == 'true_false':
            return self.add_true_false_choices(question)
        if kind == 'multi_choice':
            return self.add_multi_choice_answers(question)
        if kind == 'short_answer':
            return self.add_short_answer_choices(question)
        if kind == 'numeric':
            # Numeric questions are graded as short answers with a numeric
            # matcher.
            question['type'] = 'short_answer'
            return self.add_numeric_choices(question)
        raise ParseError(
            'Unsupported question type: %s' % kind)

    def add_true_false_choices(self, question):
        """Expand the single boolean choice into a True/False multi-choice."""
        question['type'] = 'multi_choice'
        question['multiple_selections'] = False
        source = question['choices'][0]
        true_score = 1.0 if source['text'] else 0.0
        question['choices'] = [
            {'text': 'True',
             'score': true_score,
             'feedback': source['feedback']},
            {'text': 'False',
             'score': 1.0 - true_score,
             'feedback': source['feedback']},
        ]
        return question

    def add_numeric_choices(self, question):
        """Turn numeric choices into graders using the 'numeric' matcher."""
        choices = question.pop('choices')
        question['rows'] = '1'
        question['columns'] = '100'
        question['graders'] = [{
            'score': 1.0,
            'response': choice['text'],
            'feedback': choice['feedback'],
            'matcher': 'numeric',
        } for choice in choices]
        return question

    def add_short_answer_choices(self, question):
        """Turn short-answer choices into case-insensitive graders."""
        choices = question.pop('choices')
        question['rows'] = '1'
        question['columns'] = '100'
        question['graders'] = [{
            'score': self.normalize_score(choice['score']),
            'response': choice['text'],
            'feedback': choice['feedback'],
            'matcher': 'case_insensitive',
        } for choice in choices]
        return question

    def add_multi_choice_answers(self, question):
        """Validate choices, then normalize scores and drop the GIFT sign."""
        # {'text': 'c', 'score': 1.0, 'feedback': 'fb', 'sign': '='} ..}]}
        question['choices'] = [dict(choice) for choice in question['choices']]
        self.validate_multi_choice(question)
        question['multiple_selections'] = self.is_multiple_selection(question)
        for choice in question['choices']:
            choice['score'] = self.normalize_score(choice['score'])
            del choice['sign']
        return question

    def determine_question_type(self, question_dict):
        """Classify all-'=' choice sets as short answers."""
        for choice in question_dict['choices']:
            if choice['sign'] != '=':
                return 'multi_choice'
        return 'short_answer'

    def validate_multi_choice(self, question_dict):
        """Reject choice sets CB cannot grade.

        Raises:
            ParseError: fewer than two choices, weights not summing to 100,
                or more than one '=' (correct) choice.
        """
        choices = question_dict['choices']
        signs = [choice['sign'] for choice in choices]
        if len(signs) < 2:
            msg_template = 'Multi-choice question with one choice: %s'
            logging.error(msg_template, question_dict)
            raise ParseError(msg_template % question_dict)
        # NOTE(review): GIFT allows negative weights (e.g. ~%-100%), which
        # cannot sum to 100 — such questions are rejected here; confirm that
        # is the intended CB policy.
        total = sum(choice['score'] for choice in choices)
        if total != 100:
            msg_template = "Choices' weights do not add up to 100: %s"
            logging.error(msg_template, question_dict)
            raise ParseError(msg_template % question_dict)
        if signs.count('=') > 1:
            msg_template = ('Multi-choice single-select question with more '
                            'than one correct choice: %s')
            logging.error(msg_template, question_dict)
            raise ParseError(msg_template % question_dict)

    def is_multiple_selection(self, question_dict):
        """True when every choice is a '~' (multi-select) choice."""
        signs = [choice['sign'] for choice in question_dict['choices']]
        if signs.count('=') == 1:
            return False
        if all(sign == '~' for sign in signs):
            return True
        # {=c1 =c2 ~c3} is invalid
        raise ParseError('Unexpected choice types: %s' % question_dict)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing question tags."""
__author__ = 'sll@google.com (Sean Lip)'
import logging
import os
import jinja2
import appengine_config
from common import jinja_utils
from common import schema_fields
from common import tags
from models import custom_modules
from models import models as m_models
from models import resources_display
from models import transforms
RESOURCES_PATH = '/modules/assessment_tags/resources'
@appengine_config.timeandlog('render_question', duration_only=True)
def render_question(
    quid, instanceid, embedded=False, weight=None, progress=None):
    """Generates the HTML for a question.

    Args:
        quid: String. The question id.
        instanceid: String. The unique reference id for the question instance
            (different instances of the same question in a page will have
            different instanceids).
        embedded: Boolean. Whether this question is embedded within a container
            object (e.g. a question group).
        weight: number. The weight to be used when grading the question in a
            scored lesson. This value is cast to a float and, if this cast
            fails, defaults to 1.0.
        progress: None, 0 or 1. If None, no progress marker should be shown. If
            0, a 'not-started' progress marker should be shown. If 1, a
            'complete' progress marker should be shown.

    Returns:
        a Jinja markup string that represents the HTML for the question, or a
        plain placeholder string when the question cannot be rendered.
    """
    try:
        question_dto = m_models.QuestionDAO.load(quid)
    except Exception:  # pylint: disable=broad-except
        # A malformed quid must not break the whole page; render a
        # placeholder instead.
        logging.exception('Invalid question: %s', quid)
        return '[Invalid question]'
    if not question_dto:
        return '[Question deleted]'

    # The weight usually arrives as a string tag attribute; coerce to float,
    # falling back to 1.0 on missing or unparseable input.
    if weight is None:
        weight = 1.0
    else:
        try:
            weight = float(weight)
        except ValueError:
            weight = 1.0

    template_values = question_dto.dict
    template_values['embedded'] = embedded
    template_values['instanceid'] = instanceid
    template_values['resources_path'] = RESOURCES_PATH
    if progress is not None:
        template_values['progress'] = progress

    template_file = None
    # js_data is serialized into the page for the client-side grader.
    js_data = {}
    if question_dto.type == question_dto.MULTIPLE_CHOICE:
        template_file = 'templates/mc_question.html'
        multi = template_values['multiple_selections']
        template_values['button_type'] = 'checkbox' if multi else 'radio'
        # Only score/feedback go to the client; the choice text is rendered
        # server-side by the template.
        choices = [{
            'score': choice['score'], 'feedback': choice.get('feedback')
        } for choice in template_values['choices']]
        js_data['choices'] = choices
    elif question_dto.type == question_dto.SHORT_ANSWER:
        template_file = 'templates/sa_question.html'
        js_data['graders'] = template_values['graders']
        js_data['hint'] = template_values.get('hint')
        js_data['defaultFeedback'] = template_values.get('defaultFeedback')
        # The following two lines are included for backwards compatibility with
        # v1.5 questions that do not have the row and column properties set.
        template_values['rows'] = template_values.get(
            'rows',
            resources_display.SaQuestionConstants.DEFAULT_HEIGHT_ROWS)
        template_values['columns'] = template_values.get(
            'columns',
            resources_display.SaQuestionConstants.DEFAULT_WIDTH_COLUMNS)
    else:
        return '[Unsupported question type]'

    # Display the weight as an integer if it is sufficiently close to an
    # integer. Otherwise, round it to 2 decimal places. This ensures that the
    # weights displayed to the student are exactly the same as the weights that
    # are used for grading.
    weight = (int(round(weight)) if abs(weight - round(weight)) < 1e-6
              else round(weight, 2))
    template_values['displayed_weight'] = weight

    # Embedded questions get their weight from the enclosing container, so it
    # is only sent to the client for standalone questions.
    if not embedded:
        js_data['weight'] = float(weight)
    template_values['js_data'] = transforms.dumps(js_data)

    template = jinja_utils.get_template(
        template_file, [os.path.dirname(__file__)])
    return jinja2.utils.Markup(template.render(template_values))
class QuestionTag(tags.BaseTag):
    """A custom tag that renders a single question inside lesson content."""

    binding_name = 'question'

    def get_icon_url(self):
        """Icon shown for this tag in the rich-text editor toolbar."""
        return '/modules/assessment_tags/resources/question.png'

    @classmethod
    def name(cls):
        return 'Question'

    @classmethod
    def vendor(cls):
        return 'gcb'

    def render(self, node, handler):
        """Renders the referenced question as an element tree."""
        attrs = node.attrib
        instanceid = attrs.get('instanceid')

        # Progress markers are only shown to enrolled students viewing an
        # unscored lesson.
        progress = None
        show_progress = (
            hasattr(handler, 'student') and
            not handler.student.is_transient and
            not handler.lesson_is_scored)
        if show_progress:
            tracker = handler.get_course().get_progress_tracker()
            progress = tracker.get_component_progress(
                handler.student, handler.unit_id, handler.lesson_id,
                instanceid)

        html_string = render_question(
            attrs.get('quid'), instanceid, embedded=False,
            weight=attrs.get('weight'), progress=progress)
        return tags.html_string_to_element_tree(html_string)

    def get_schema(self, handler):
        """Get the schema for specifying the question."""
        available = []
        if handler:
            for question in m_models.QuestionDAO.get_all():
                # q.id is an int; the schema requires string select values.
                available.append((unicode(question.id), question.description))
        if not available:
            return self.unavailable_schema('No questions available')

        reg = schema_fields.FieldRegistry('Question')
        reg.add_property(
            schema_fields.SchemaField(
                'quid', 'Question', 'string', optional=True, i18n=False,
                select_data=available))
        reg.add_property(
            schema_fields.SchemaField(
                'weight', 'Weight', 'string', optional=True, i18n=False,
                extra_schema_dict_values={'value': '1'},
                description='The number of points for a correct answer.'))
        return reg
class QuestionGroupTag(tags.BaseTag):
    """A tag for rendering question groups."""

    binding_name = 'question-group'

    def get_icon_url(self):
        # Icon shown for this tag in the rich-text editor toolbar.
        return '/modules/assessment_tags/resources/question_group.png'

    @classmethod
    def name(cls):
        return 'Question Group'

    @classmethod
    def vendor(cls):
        return 'gcb'

    def render(self, node, handler):
        """Renders a question group as an element tree."""
        qgid = node.attrib.get('qgid')
        group_instanceid = node.attrib.get('instanceid')
        question_group_dto = m_models.QuestionGroupDAO.load(qgid)
        if not question_group_dto:
            return tags.html_string_to_element_tree('[Deleted question group]')

        template_values = question_group_dto.dict
        template_values['embedded'] = False
        template_values['instanceid'] = group_instanceid
        template_values['resources_path'] = RESOURCES_PATH

        # Progress markers are only shown to enrolled students viewing an
        # unscored lesson.
        if (hasattr(handler, 'student') and not handler.student.is_transient
            and not handler.lesson_is_scored):
            progress = handler.get_course().get_progress_tracker(
                ).get_component_progress(
                    handler.student, handler.unit_id, handler.lesson_id,
                    group_instanceid)
            template_values['progress'] = progress

        template_values['question_html_array'] = []
        js_data = {}
        for ind, item in enumerate(question_group_dto.dict['items']):
            quid = item['question']
            # Each embedded question gets a compound instance id of the form
            # <group instanceid>.<index>.<question id>.
            question_instanceid = '%s.%s.%s' % (group_instanceid, ind, quid)
            template_values['question_html_array'].append(render_question(
                quid, question_instanceid, weight=item['weight'],
                embedded=True
            ))
            js_data[question_instanceid] = item
        template_values['js_data'] = transforms.dumps(js_data)

        template_file = 'templates/question_group.html'
        template = jinja_utils.get_template(
            template_file, [os.path.dirname(__file__)])

        html_string = template.render(template_values)
        return tags.html_string_to_element_tree(html_string)

    def get_schema(self, handler):
        """Get the schema for specifying the question group."""
        question_group_list = []
        if handler:
            question_groups = m_models.QuestionGroupDAO.get_all()
            question_group_list = [(
                unicode(q.id),  # q.id is a number; schema requires a string
                q.description) for q in question_groups]
        if not question_group_list:
            return self.unavailable_schema('No question groups available')

        reg = schema_fields.FieldRegistry('Question Group')
        reg.add_property(
            schema_fields.SchemaField(
                'qgid', 'Question Group', 'string', optional=True, i18n=False,
                select_data=question_group_list))
        return reg
custom_module = None
def register_module():
    """Registers this module in the registry.

    Returns:
        The custom_modules.Module instance created for this module.
    """

    def when_module_enabled():
        # Register custom tags.
        tags.Registry.add_tag_binding(
            QuestionTag.binding_name, QuestionTag)
        tags.Registry.add_tag_binding(
            QuestionGroupTag.binding_name, QuestionGroupTag)
        # Keep question tags out of editors where they are not meaningful.
        for binding_name in (QuestionTag.binding_name,
                             QuestionGroupTag.binding_name):
            for scope in (tags.EditorBlacklists.COURSE_SCOPE,
                          tags.EditorBlacklists.DESCRIPTIVE_SCOPE):
                tags.EditorBlacklists.register(binding_name, scope)

    def when_module_disabled():
        # Unregister custom tags.
        tags.Registry.remove_tag_binding(QuestionTag.binding_name)
        tags.Registry.remove_tag_binding(QuestionGroupTag.binding_name)
        # Bug fix: this tuple previously read "(QuestionTag, binding_name,"
        # — a stray comma that iterated over the class object and a stale
        # name instead of QuestionTag.binding_name, so the editor blacklist
        # entries were never unregistered (mirror of when_module_enabled).
        for binding_name in (QuestionTag.binding_name,
                             QuestionGroupTag.binding_name):
            for scope in (tags.EditorBlacklists.COURSE_SCOPE,
                          tags.EditorBlacklists.DESCRIPTIVE_SCOPE):
                tags.EditorBlacklists.unregister(binding_name, scope)

    # Add a static handler for icons shown in the rich text editor.
    global_routes = [(
        os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler)]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Question tags',
        'A set of tags for rendering questions within a lesson body.',
        global_routes,
        [],
        notify_module_enabled=when_module_enabled,
        notify_module_disabled=when_module_disabled)
    return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage Announcements."""
__author__ = 'Saifu Angto (saifu@google.com)'
import datetime
import urllib
from common import tags
from common import utils
from common.schema_fields import FieldArray
from common.schema_fields import FieldRegistry
from common.schema_fields import SchemaField
from controllers.utils import BaseHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import ReflectiveRequestHandler
from controllers.utils import XsrfTokenManager
from models.resources_display import LabelGroupsHelper
from models import custom_modules
from models import entities
from models import models
from models import notify
from models import resources_display
from models import roles
from models import transforms
from models.models import MemcacheManager
from models.models import Student
from modules.oeditor import oeditor
from google.appengine.ext import db
class AnnouncementsRights(object):
    """Manages view/edit rights for announcements."""

    @classmethod
    def can_view(cls, unused_handler):
        """Anyone may view announcements."""
        return True

    @classmethod
    def can_edit(cls, handler):
        """Only course admins may edit announcements."""
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        """Deleting requires the same rights as editing."""
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        """Adding requires the same rights as editing."""
        return cls.can_edit(handler)

    @classmethod
    def apply_rights(cls, handler, items):
        """Filter out items that current user can't see."""
        if AnnouncementsRights.can_edit(handler):
            return items
        # Non-editors only see published (non-draft) announcements.
        return [item for item in items if not item.is_draft]
class AnnouncementsHandler(BaseHandler, ReflectiveRequestHandler):
    """Handler for announcements."""

    default_action = 'list'
    get_actions = [default_action, 'edit']
    post_actions = ['add', 'delete']

    @classmethod
    def get_child_routes(cls):
        """Add child handlers for REST."""
        return [('/rest/announcements/item', AnnouncementsItemRESTHandler)]

    def get_action_url(self, action, key=None):
        """Build a canonical /announcements URL for the given action."""
        params = {'action': action}
        if key:
            params['key'] = key
        return self.canonicalize_url(
            '/announcements?%s' % urllib.urlencode(params))

    def format_items_for_template(self, items):
        """Formats a list of entities into template values."""
        children = []
        for entity in items:
            item = transforms.entity_to_dict(entity)

            # add 'edit' actions
            if AnnouncementsRights.can_edit(self):
                item['edit_action'] = self.get_action_url(
                    'edit', key=item['key'])
                item['delete_xsrf_token'] = self.create_xsrf_token('delete')
                item['delete_action'] = self.get_action_url(
                    'delete', key=item['key'])
            children.append(item)

        output = {'children': children}

        # add 'add' action
        if AnnouncementsRights.can_edit(self):
            output['add_xsrf_token'] = self.create_xsrf_token('add')
            output['add_action'] = self.get_action_url('add')
        return output

    def _render(self):
        """Render the announcements page with its navbar highlighted."""
        self.template_value['navbar'] = {'announcements': True}
        self.render('announcements.html')

    def get_list(self):
        """Shows a list of announcements."""
        user = self.personalize_page_and_get_user()
        student = None
        if user is None:
            transient_student = True
        else:
            student = Student.get_enrolled_student_by_email(user.email())
            transient_student = not student
        self.template_value['transient_student'] = transient_student

        items = AnnouncementsRights.apply_rights(
            self, AnnouncementEntity.get_announcements())
        # Non-admins only see announcements matching their track labels.
        if not roles.Roles.is_course_admin(self.get_course().app_context):
            items = models.LabelDAO.apply_course_track_labels_to_student_labels(
                self.get_course(), student, items)

        self.template_value['announcements'] = self.format_items_for_template(
            items)
        self._render()

    def get_edit(self):
        """Shows an editor for an announcement."""
        user = self.personalize_page_and_get_user()
        if not user or not AnnouncementsRights.can_edit(self):
            self.error(401)
            return

        key = self.request.get('key')
        schema = AnnouncementsItemRESTHandler.SCHEMA(
            'Announcement',
            self.get_course().get_course_announcement_list_email())
        exit_url = self.canonicalize_url(
            '/announcements#%s' % urllib.quote(key, safe=''))
        rest_url = self.canonicalize_url('/rest/announcements/item')

        editor_html = oeditor.ObjectEditor.get_html_for(
            self,
            schema.get_json_schema(),
            schema.get_schema_dict(),
            key, rest_url, exit_url,
            required_modules=AnnouncementsItemRESTHandler.REQUIRED_MODULES)
        self.template_value['content'] = editor_html
        self._render()

    def post_delete(self):
        """Deletes an announcement."""
        if not AnnouncementsRights.can_delete(self):
            self.error(401)
            return

        entity = AnnouncementEntity.get(self.request.get('key'))
        if entity:
            entity.delete()
        self.redirect('/announcements')

    def post_add(self):
        """Adds a new announcement and redirects to an editor for it."""
        if not AnnouncementsRights.can_add(self):
            self.error(401)
            return

        announcement = AnnouncementEntity()
        announcement.title = 'Sample Announcement'
        announcement.date = datetime.datetime.now().date()
        announcement.html = 'Here is my announcement!'
        announcement.is_draft = True
        announcement.put()
        self.redirect(self.get_action_url('edit', key=announcement.key()))
class AnnouncementsItemRESTHandler(BaseRESTHandler):
    """Provides REST API for an announcement."""

    # inputEx editor widget modules required to render the announcement form.
    REQUIRED_MODULES = [
        'inputex-date', 'gcb-rte', 'inputex-select', 'inputex-string',
        'inputex-uneditable', 'inputex-checkbox', 'inputex-list',
        'inputex-hidden']

    @classmethod
    def SCHEMA(cls, title, announcement_email):
        """Builds the editor schema for an announcement.

        Args:
            title: string. Display title of the schema/editor form.
            announcement_email: string. Course announcement list address; used
                only to build the Send Email field description.

        Returns:
            A FieldRegistry describing all editable announcement fields.
        """
        schema = FieldRegistry(title)
        schema.add_property(SchemaField(
            'key', 'ID', 'string', editable=False,
            extra_schema_dict_values={'className': 'inputEx-Field keyHolder'}))
        schema.add_property(SchemaField(
            'title', 'Title', 'string', optional=True))
        schema.add_property(SchemaField(
            'html', 'Body', 'html', optional=True,
            extra_schema_dict_values={
                'supportCustomTags': tags.CAN_USE_DYNAMIC_TAGS.value,
                'excludedCustomTags': tags.EditorBlacklists.COURSE_SCOPE}))
        schema.add_property(SchemaField(
            'date', 'Date', 'date',
            optional=True, extra_schema_dict_values={
                '_type': 'date', 'dateFormat': 'Y-m-d',
                'valueFormat': 'Y-m-d'}))
        schema.add_property(FieldArray(
            'label_groups', 'Labels',
            item_type=LabelGroupsHelper.make_labels_group_schema_field(),
            extra_schema_dict_values={
                'className': 'inputEx-Field label-group-list'}))
        schema.add_property(SchemaField(
            'send_email', 'Send Email', 'boolean', optional=True,
            extra_schema_dict_values={
                'description':
                    AnnouncementsItemRESTHandler.get_send_email_description(
                        announcement_email)}))
        schema.add_property(SchemaField(
            'is_draft', 'Status', 'boolean',
            select_data=[
                (True, resources_display.DRAFT_TEXT),
                (False, resources_display.PUBLISHED_TEXT)],
            extra_schema_dict_values={'className': 'split-from-main-group'}))
        return schema

    @classmethod
    def get_send_email_description(cls, announcement_email):
        """Get the description for Send Email field."""
        if announcement_email:
            return 'Email will be sent to : ' + announcement_email
        return 'Announcement list not configured.'

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        key = self.request.get('key')

        try:
            entity = AnnouncementEntity.get(key)
        except db.BadKeyError:
            # Treat a malformed key the same as a missing entity (404 below).
            entity = None
        if not entity:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        # Drafts are filtered out for non-editors by apply_rights.
        viewable = AnnouncementsRights.apply_rights(self, [entity])
        if not viewable:
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        entity = viewable[0]

        schema = AnnouncementsItemRESTHandler.SCHEMA(
            'Announcement',
            self.get_course().get_course_announcement_list_email())

        entity_dict = transforms.entity_to_dict(entity)
        # Expand the flat labels field into the structured label_groups form
        # the editor expects.
        entity_dict['label_groups'] = (
            LabelGroupsHelper.announcement_labels_to_dict(entity))

        json_payload = transforms.dict_to_json(
            entity_dict, schema.get_json_schema_dict())
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'announcement-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'announcement-put', {'key': key}):
            return

        if not AnnouncementsRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        entity = AnnouncementEntity.get(key)
        if not entity:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        schema = AnnouncementsItemRESTHandler.SCHEMA(
            'Announcement',
            self.get_course().get_course_announcement_list_email())

        payload = request.get('payload')
        update_dict = transforms.json_to_dict(
            transforms.loads(payload), schema.get_json_schema_dict())

        # Labels are persisted as flat text on the entity, not as the
        # structured label_groups the editor sends.
        entity.labels = utils.list_to_text(
            LabelGroupsHelper.decode_labels_group(
                update_dict.get('label_groups')))
        transforms.dict_to_entity(entity, update_dict)
        entity.put()

        email_sent = False
        if entity.send_email:
            email_manager = notify.EmailManager(self.get_course())
            email_sent = email_manager.send_announcement(
                entity.title, entity.html)

        # The save succeeded either way; the message reports any email
        # delivery caveat back to the editor UI.
        if entity.send_email and not email_sent:
            if not self.get_course().get_course_announcement_list_email():
                message = 'Saved. Announcement list not configured.'
            else:
                message = 'Saved, but there was an error sending email.'
        else:
            message = 'Saved.'
        transforms.send_json_response(self, 200, message)
class AnnouncementEntity(entities.BaseEntity):
    """A class that represents a persistent database entity of announcement."""
    title = db.StringProperty(indexed=False)
    date = db.DateProperty()
    html = db.TextProperty(indexed=False)
    labels = db.StringProperty(indexed=False)
    is_draft = db.BooleanProperty()
    send_email = db.BooleanProperty()

    # Key under which the announcement list is cached in memcache.
    memcache_key = 'announcements'

    @classmethod
    def get_announcements(cls, allow_cached=True):
        """Return announcements, newest first, via memcache when allowed."""
        items = MemcacheManager.get(cls.memcache_key)
        needs_fetch = (not allow_cached) or items is None
        if needs_fetch:
            items = AnnouncementEntity.all().order('-date').fetch(1000)

            # TODO(psimakov): prepare to exceed 1MB max item size
            # read more here: http://stackoverflow.com
            # /questions/5081502/memcache-1-mb-limit-in-google-app-engine
            MemcacheManager.set(cls.memcache_key, items)
        return items

    def put(self):
        """Do the normal put() and also invalidate memcache."""
        saved = super(AnnouncementEntity, self).put()
        MemcacheManager.delete(self.memcache_key)
        return saved

    def delete(self):
        """Do the normal delete() and invalidate memcache."""
        super(AnnouncementEntity, self).delete()
        MemcacheManager.delete(self.memcache_key)
custom_module = None
def register_module():
    """Registers this module in the registry.

    Returns:
        The custom_modules.Module instance created for this module.
    """
    handlers = [('/announcements', AnnouncementsHandler)]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Course Announcements',
        'A set of pages for managing course announcements.',
        [], handlers)
    return custom_module
return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course explorer module."""
__author__ = 'Rahul Singal (rahulsingal@google.com)'
from common import safe_dom
from controllers import utils
from models import custom_modules
from models.config import ConfigProperty
from models.models import StudentProfileDAO
from modules.course_explorer import student
from google.appengine.api import users
# Site-wide feature flag: when True, '/' serves the course explorer page;
# when False, '/' redirects to the default course's preview page.
GCB_ENABLE_COURSE_EXPLORER_PAGE = ConfigProperty(
    'gcb_enable_course_explorer_page', bool,
    safe_dom.NodeList().append(
        safe_dom.Element('p').add_text("""
If this option is selected, "/" redirects to the course explorer page.
Otherwise, it redirects to the preview page for the default course.""")
    ), False, multiline=False, validator=None)

# Module registration handle; populated by register_module().
custom_module = None
class ExplorerPageInitializer(utils.PageInitializer):
    """Page initializer for explorer page.

    Allow links to the course explorer to be added
    to the navbars of all course pages.
    """

    @classmethod
    def initialize(cls, template_values):
        """Injects explorer-related flags into the page template values."""
        template_values['show_course_explorer_tab'] = (
            GCB_ENABLE_COURSE_EXPLORER_PAGE.value)
        current_user = users.get_current_user()
        if not current_user:
            return
        # Signed-in visitor: record whether a global profile exists.
        profile = StudentProfileDAO.get_profile_by_user_id(
            current_user.user_id())
        template_values['has_global_profile'] = profile is not None
def register_module():
    """Registers this module in the registry."""
    # Install our page initializer so all course pages can link back to
    # the explorer.
    utils.PageInitializerService.set(ExplorerPageInitializer)

    # These routes live outside of any single course namespace.
    routes = [
        ('/', student.IndexPageHandler),
        ('/explorer', student.AllCoursesHandler),
        (r'/explorer/assets/(.*)', student.AssetsHandler),
        ('/explorer/courses', student.RegisteredCoursesHandler),
        ('/explorer/profile', student.ProfileHandler),
    ]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Course Explorer',
        'A set of pages for delivering an online course.',
        routes, [])
    return custom_module
def unregister_module():
    """Unregisters this module in the registry.

    Returns:
        The custom_modules.Module instance created by register_module().
    """
    # Set the page initializer back to the default.
    utils.PageInitializerService.set(utils.DefaultPageInitializer)
    # Return the Module instance (the counterpart of register_module's
    # return value), not the imported `custom_modules` package, which the
    # previous code returned by typo.
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting courses viewed by a student."""
__author__ = 'Rahul Singal (rahulsingal@google.com)'
import mimetypes
import os
import course_explorer
import webapp2
import appengine_config
from common import jinja_utils
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import PageInitializerService
from controllers.utils import XsrfTokenManager
from models import courses as Courses
from models import transforms
from models.models import StudentProfileDAO
from models.roles import Roles
from google.appengine.api import users
# We want to use views file in both /views and /modules/course_explorer/views.
TEMPLATE_DIRS = [
    os.path.join(appengine_config.BUNDLE_ROOT, 'views'),
    os.path.join(
        appengine_config.BUNDLE_ROOT, 'modules', 'course_explorer', 'views'),
]

# XSRF action id protecting the global student-rename POST handler.
STUDENT_RENAME_GLOBAL_XSRF_TOKEN_ID = 'rename-student-global'

# Int. Maximum number of bytes App Engine's db.StringProperty can store.
_STRING_PROPERTY_MAX_BYTES = 500
class IndexPageHandler(webapp2.RequestHandler):
    """Routes requests for the site root URL."""

    def get(self):
        """Redirects '/' to the explorer, a course, or the welcome page."""
        if course_explorer.GCB_ENABLE_COURSE_EXPLORER_PAGE.value:
            self.redirect('/explorer')
            return

        index = sites.get_course_index()
        all_courses = index.get_all_courses()
        if not all_courses:
            # No courses exist yet; send the visitor to initial setup.
            self.redirect('/admin/welcome')
            return

        # Prefer the course mounted at '/', else fall back to the first one.
        target = index.get_course_for_path('/') or all_courses[0]
        self.redirect(ApplicationHandler.canonicalize_url_for(
            target, '/course?use_last_location=true'))
class BaseStudentHandler(webapp2.RequestHandler):
    """Base Handler for a student's courses."""

    def __init__(self, *args, **kwargs):
        super(BaseStudentHandler, self).__init__(*args, **kwargs)
        # Values handed to Jinja templates by the concrete handlers.
        self.template_values = {}
        self.initialize_student_state()

    def get_locale_for_user(self):
        """Chooses locale for a user."""
        return 'en_US'  # TODO(psimakov): choose proper locale from profile

    def initialize_student_state(self):
        """Initialize course information related to student.

        Populates self.enrolled_courses_dict and self.courses_progress_dict
        from the current user's global profile (when one exists) and adds
        registration-related template values.
        """
        PageInitializerService.get().initialize(self.template_values)
        self.enrolled_courses_dict = {}
        self.courses_progress_dict = {}
        user = users.get_current_user()
        if not user:
            # Anonymous visitor: leave both dicts empty.
            return
        profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
        if not profile:
            # Signed in, but no global profile yet.
            return
        self.template_values['register_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('register-post'))
        self.enrolled_courses_dict = transforms.loads(profile.enrollment_info)
        if self.enrolled_courses_dict:
            self.template_values['has_enrolled_courses'] = True
        if profile.course_info:
            self.courses_progress_dict = transforms.loads(profile.course_info)

    def get_public_courses(self):
        """Get all the public courses."""
        public_courses = []
        for course in sites.get_all_courses():
            # A course is listed if it is available to this (whitelisted)
            # user, or if the user administers it.
            if ((course.now_available and Roles.is_user_whitelisted(course))
                or Roles.is_course_admin(course)):
                public_courses.append(course)
        return public_courses

    def is_enrolled(self, course):
        """Returns true if student is enrolled else false."""
        return bool(
            self.enrolled_courses_dict.get(course.get_namespace_name()))

    def is_completed(self, course):
        """Returns true if student has completed course else false."""
        info = self.courses_progress_dict.get(course.get_namespace_name())
        # A recorded 'final_grade' marks the course as completed.
        if info and 'final_grade' in info:
            return True
        return False

    def get_course_info(self, course):
        """Returns course info required in views."""
        info = sites.ApplicationContext.get_environ(course)
        slug = course.get_slug()
        course_preview_url = slug
        if slug == '/':
            # The course mounted at the root is previewed at '/course' and
            # gets an empty slug for the templates.
            course_preview_url = '/course'
            slug = ''
        info['course']['slug'] = slug
        info['course']['course_preview_url'] = course_preview_url
        info['course']['is_registered'] = self.is_enrolled(course)
        info['course']['is_completed'] = self.is_completed(course)
        return info

    def get_enrolled_courses(self, courses):
        """Returns list of courses registered by student."""
        enrolled_courses = []
        for course in courses:
            if self.is_enrolled(course):
                enrolled_courses.append(self.get_course_info(course))
        return enrolled_courses

    def initialize_page_and_get_user(self):
        """Adds basic fields to template and returns the user, or None."""
        self.template_values['course_info'] = Courses.COURSE_TEMPLATE_DICT
        self.template_values['course_info']['course'] = {
            'locale': self.get_locale_for_user()}
        user = users.get_current_user()
        if not user:
            self.template_values['loginUrl'] = users.create_login_url('/')
        else:
            self.template_values['email'] = user.email()
            self.template_values['is_super_admin'] = Roles.is_super_admin()
            self.template_values['logoutUrl'] = users.create_logout_url('/')
        return user

    def is_valid_xsrf_token(self, action):
        """Returns True if the current request carries a valid XSRF token."""
        token = self.request.get('xsrf_token')
        return token and XsrfTokenManager.is_xsrf_token_valid(token, action)
class NullHtmlHooks(object):
    """Provide a non-null callback object for pages asking for hooks.

    In contexts where we have no single course to use to determine
    hook contents, we simply return blank content.
    """

    def insert(self, unused_name):
        """Returns empty hook content, whatever the requested name."""
        return ''
class ProfileHandler(BaseStudentHandler):
    """Global profile handler for a student."""

    def _storable_in_string_property(self, value):
        """Returns True if value fits in a db.StringProperty when stored."""
        # db.StringProperty can hold 500B. len(1_unicode_char) == 1, so len() is
        # not a good proxy for unicode string size. Instead, cast to utf-8-
        # encoded str first.
        return len(value.encode('utf-8')) <= _STRING_PROPERTY_MAX_BYTES

    def get(self):
        """Handles GET requests.

        Renders the global profile page. Requires the course explorer to be
        enabled and the visitor to be signed in; otherwise 404s or redirects.
        """
        if not course_explorer.GCB_ENABLE_COURSE_EXPLORER_PAGE.value:
            # The page only exists when the explorer feature is on.
            self.error(404)
            return
        user = self.initialize_page_and_get_user()
        if not user:
            self.redirect('/explorer')
            return
        courses = self.get_public_courses()
        self.template_values['student'] = (
            StudentProfileDAO.get_profile_by_user_id(user.user_id()))
        self.template_values['navbar'] = {'profile': True}
        self.template_values['courses'] = self.get_enrolled_courses(courses)
        # Token protecting the rename POST handled below.
        self.template_values['student_edit_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token(
                STUDENT_RENAME_GLOBAL_XSRF_TOKEN_ID))
        self.template_values['html_hooks'] = NullHtmlHooks()
        self.template_values['student_preferences'] = {}
        template = jinja_utils.get_template(
            'profile.html', TEMPLATE_DIRS)
        self.response.write(template.render(self.template_values))

    def post(self):
        """Handles post requests: updates the student's global nick name."""
        if not self.is_valid_xsrf_token(STUDENT_RENAME_GLOBAL_XSRF_TOKEN_ID):
            self.error(403)
            return
        user = self.initialize_page_and_get_user()
        if not user:
            self.redirect('/explorer')
            return
        new_name = self.request.get('name')
        # Reject empty names and names too large for a db.StringProperty.
        if not (new_name and self._storable_in_string_property(new_name)):
            self.error(400)
            return
        StudentProfileDAO.update(
            user.user_id(), None, nick_name=new_name, profile_only=True)
        self.redirect('/explorer/profile')
class AllCoursesHandler(BaseStudentHandler):
    """Handles list of courses that can be viewed by a student."""

    def get(self):
        """Renders the explorer page listing every public course."""
        if not course_explorer.GCB_ENABLE_COURSE_EXPLORER_PAGE.value:
            self.error(404)
            return
        self.initialize_page_and_get_user()

        visible = self.get_public_courses()
        self.template_values.update({
            'courses': [self.get_course_info(c) for c in visible],
            'navbar': {'course_explorer': True},
            'html_hooks': NullHtmlHooks(),
            'student_preferences': {},
        })
        page = jinja_utils.get_template('course_explorer.html', TEMPLATE_DIRS)
        self.response.write(page.render(self.template_values))
class RegisteredCoursesHandler(BaseStudentHandler):
    """Handles registered courses view for a student."""

    def get(self):
        """Renders the 'my courses' page for the current student."""
        if not course_explorer.GCB_ENABLE_COURSE_EXPLORER_PAGE.value:
            self.error(404)
            return
        self.initialize_page_and_get_user()

        all_public = self.get_public_courses()
        mine = self.get_enrolled_courses(all_public)
        self.template_values.update({
            'courses': mine,
            'navbar': {'mycourses': True},
            # Offer enrollment only while public courses remain unjoined.
            'can_enroll_more_courses': len(all_public) - len(mine) > 0,
            'html_hooks': NullHtmlHooks(),
            'student_preferences': {},
        })
        page = jinja_utils.get_template('course_explorer.html', TEMPLATE_DIRS)
        self.response.write(page.render(self.template_values))
class AssetsHandler(webapp2.RequestHandler):
    """Handles asset file for the home page."""

    def get_mime_type(self, filename, default='application/octet-stream'):
        """Guesses a Content-Type from the filename, falling back to default."""
        guess = mimetypes.guess_type(filename)[0]
        if guess is None:
            return default
        return guess

    def get(self, path):
        """Serves a static asset from the bundle's assets directory.

        Args:
            path: str. Asset path relative to <BUNDLE_ROOT>/assets, taken
                from the (untrusted) request URL.
        """
        if not course_explorer.GCB_ENABLE_COURSE_EXPLORER_PAGE.value:
            self.error(404)
            return

        # The path comes straight from the URL; refuse anything that could
        # escape the assets directory (e.g. '../app.yaml').
        base = os.path.normpath(
            os.path.join(appengine_config.BUNDLE_ROOT, 'assets'))
        filename = os.path.normpath(os.path.join(base, path))
        if not filename.startswith(base + os.sep):
            self.error(404)
            return

        try:
            # Binary mode: many assets (images, fonts) are not text, and
            # text mode could corrupt them.
            with open(filename, 'rb') as f:
                self.response.headers['Content-Type'] = (
                    self.get_mime_type(filename))
                self.response.write(f.read())
        except IOError:
            # Missing or unreadable file: report 404 instead of a 500.
            self.error(404)
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide the capability for registered students to invite others.
Setup:
Include the text of the invitation email in course.yaml with the key:
course:
invitation_email:
sender_email: <email_address_in_from_field>
subject_template: <text_of_the_email>
body_template: <text_of_the_email>
The templates can use Jinja includes for the following variables:
sender_name: The name of the current student, as entered in the
registration form.
unsubscribe_url: A URL for the recipient to use to unsubscribe from
future emails.
The invitation_email settings in course.yaml can also be edited in the
Dashboard under Settings > Course Options.
"""
__author__ = 'John Orr (jorr@google.com)'
import logging
import os
import re
import jinja2
import appengine_config
from common import crypto
from common import safe_dom
from common import schema_fields
from common import tags
from controllers import utils
from models import courses
from models import custom_modules
from models import models
from models import transforms
from modules.dashboard import course_settings
from modules.notifications import notifications
from modules.unsubscribe import unsubscribe
# The intent recorded for the emails sent by the notifications module
INVITATION_INTENT = 'course_invitation'

# Name of the course-settings schema section this module contributes to.
COURSE_SETTINGS_SCHEMA_SECTION = 'invitation'

# URL prefix under which this module's static resources are served.
RESOURCES_PATH = '/modules/invitation/resources'

# Directory holding this module's Jinja templates.
TEMPLATES_DIR = os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules', 'invitation', 'templates')

# Keys into the course:invitation_email settings in course.yaml.
INVITATION_EMAIL_KEY = 'invitation_email'
SENDER_EMAIL_KEY = 'sender_email'
SUBJECT_TEMPLATE_KEY = 'subject_template'
BODY_TEMPLATE_KEY = 'body_template'

# In order to prevent spamming, the number of invitations which can be sent per
# user is limited.
MAX_EMAILS = 100
def is_email_valid(email):
    """Returns True if email looks like a well-formed address.

    Args:
        email: str. The address to validate.

    Returns:
        bool. Whether the address matches a simple name@domain.tld pattern.
        (Previously this leaked the re match object / None; wrapping in
        bool() keeps all boolean-context callers working and makes the
        result serializable.)
    """
    # TODO(jorr): Use google.appengine.api.mail.is_email_valid when Issue 7471
    # is resolved:
    # https://code.google.com/p/googleappengine/issues/detail?id=7471
    return bool(re.match(
        r'^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}$', email,
        flags=re.IGNORECASE))
class InvitationEmail(object):
    """Assembles and sends one invitation email from course settings."""

    @classmethod
    def is_available(cls, handler):
        """Truthy when course.yaml defines sender, subject and body."""
        env = handler.app_context.get_environ()
        email_env = env['course'].get(INVITATION_EMAIL_KEY, {})
        return (
            email_env.get(SENDER_EMAIL_KEY)
            and email_env.get(SUBJECT_TEMPLATE_KEY)
            and email_env.get(BODY_TEMPLATE_KEY))

    def __init__(self, handler, recipient_email, sender_name):
        """Reads the invitation templates from the handler's course env.

        Args:
            handler: request handler; supplies the course environment and
                the unsubscribe URL builder.
            recipient_email: str. Address the invitation is sent to.
            sender_name: str. Name of the student issuing the invitation.
        """
        self.recipient_email = recipient_email
        env = handler.app_context.get_environ()
        email_env = env['course'].get(INVITATION_EMAIL_KEY)
        self.sender_email = email_env[SENDER_EMAIL_KEY]
        self.subject_template = email_env[SUBJECT_TEMPLATE_KEY]
        self.body_template = email_env[BODY_TEMPLATE_KEY]
        # Variables exposed to the subject/body Jinja templates.
        self.email_vars = {
            'sender_name': sender_name,
            'unsubscribe_url': unsubscribe.get_unsubscribe_url(
                handler, recipient_email)
        }

    def _render(self, template, env):
        # Coerce template to unicode in case it is a LazyTranslator.
        template = unicode(template)
        return jinja2.Template(template).render(env)

    @property
    def subject(self):
        """The rendered subject line."""
        return self._render(self.subject_template, self.email_vars)

    @property
    def body(self):
        """The rendered message body."""
        return self._render(self.body_template, self.email_vars)

    def send(self):
        """Queues the email via the notifications subsystem."""
        notifications.Manager.send_async(
            self.recipient_email,
            self.sender_email,
            INVITATION_INTENT,
            self.body,
            self.subject,
            audit_trail=self.email_vars
        )
class InvitationStudentProperty(models.StudentPropertyEntity):
    """Entity to hold the list of people already invited.

    The JSON-encoded value has the shape {EMAIL_LIST_KEY: [email, ...]}.
    """

    PROPERTY_NAME = 'invitation-student-property'
    EMAIL_LIST_KEY = 'email_list'

    @classmethod
    def load_or_create(cls, student):
        """Loads the student's invitation record, creating an empty one."""
        entity = cls.get(student, cls.PROPERTY_NAME)
        if entity is None:
            entity = cls.create(student, cls.PROPERTY_NAME)
            entity.value = '{}'
            entity.put()
        return entity

    def _email_list(self):
        """Returns the list of addresses already invited (may be empty)."""
        return transforms.loads(self.value).get(self.EMAIL_LIST_KEY, [])

    def is_in_invited_list(self, email):
        """Returns True if an invitation was already sent to this address."""
        return email in self._email_list()

    def append_to_invited_list(self, email_list):
        """Adds addresses to the invited set; duplicates are dropped."""
        value_dict = transforms.loads(self.value)
        email_set = set(value_dict.get(self.EMAIL_LIST_KEY, []))
        email_set.update(email_list)
        value_dict[self.EMAIL_LIST_KEY] = list(email_set)
        self.value = transforms.dumps(value_dict)

    def invited_list_size(self):
        """Returns the number of distinct addresses already invited.

        Bug fix: this previously returned len() of the whole value dict --
        i.e. the number of top-level keys, at most 1 -- rather than the
        number of invited emails, which effectively disabled the
        MAX_EMAILS anti-spam cap.
        """
        return len(self._email_list())

    def for_export(self, transform_fn):
        """Return a sanitized version of the model, with anonymized data."""
        # Anonymize the email addresses in the email list and drop any
        # additional data in the data value field.
        model = super(InvitationStudentProperty, self).for_export(transform_fn)
        value_dict = transforms.loads(model.value)
        email_list = value_dict.get(self.EMAIL_LIST_KEY, [])
        clean_email_list = [transform_fn(email) for email in email_list]
        model.value = transforms.dumps({self.EMAIL_LIST_KEY: clean_email_list})
        return model
class InvitationHandler(utils.BaseHandler):
    """Renders the student invitation panel."""

    URL = 'modules/invitation'

    def __init__(self):
        super(InvitationHandler, self).__init__()
        self.email_vars = {}

    def render_for_email(self, template):
        """Expands a Jinja template string against the email variables."""
        return jinja2.Template(template).render(self.email_vars)

    def get(self):
        """Shows the invitation page to an enrolled student."""
        user = self.personalize_page_and_get_user()
        if user is None:
            self.redirect('/course')
            return

        enrolled = models.Student.get_enrolled_student_by_email(user.email())
        if enrolled is None:
            self.redirect('/course')
            return

        if not InvitationEmail.is_available(self):
            self.redirect('/course')
            return

        preview = InvitationEmail(self, user.email(), enrolled.name)
        self.template_value['navbar'] = {}
        self.template_value['xsrf_token'] = (
            crypto.XsrfTokenManager.create_xsrf_token(
                InvitationRESTHandler.XSRF_SCOPE))
        self.template_value['subject'] = preview.subject
        self.template_value['body'] = preview.body

        page = self.get_template('invitation.html', [TEMPLATES_DIR])
        self.response.out.write(page.render(self.template_value))
class InvitationRESTHandler(utils.BaseRESTHandler):
    """Custom REST handler for the invitation panel."""

    URL = 'rest/modules/invitation'

    XSRF_SCOPE = 'invitation'

    SCHEMA = {
        'type': 'object',
        'properties': {
            'emailList': {'type': 'string', 'optional': 'true'}
        }
    }

    def before_method(self, verb, path):
        # Handler needs to be locale-aware because the messages must be
        # localized.
        self._old_locale = self.app_context.get_current_locale()
        new_locale = self.get_locale_for(self.request, self.app_context)
        self.app_context.set_current_locale(new_locale)

    def after_method(self, verb, path):
        # Restore the locale saved in before_method.
        self.app_context.set_current_locale(self._old_locale)

    def post(self):
        """Handle POST requests.

        Validates the XSRF token, the user and the course configuration,
        then sends an invitation to each well-formed, not-yet-invited,
        not-unsubscribed, not-already-enrolled address.
        """
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(request, self.XSRF_SCOPE, {}):
            return

        user = self.get_user()
        if not user:
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        student = models.Student.get_enrolled_student_by_email(user.email())
        if not student:
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        if not InvitationEmail.is_available(self):
            transforms.send_json_response(self, 500, 'Unavailable.', {})
            return

        payload_json = request.get('payload')
        payload_dict = transforms.json_to_dict(payload_json, self.SCHEMA)
        # emailList is optional in SCHEMA, so guard against it being absent
        # (payload_dict.get(...) may return None).
        email_set = {
            email.strip()
            for email in (payload_dict.get('emailList') or '').split(',')
            if email.strip()}

        if not email_set:
            transforms.send_json_response(
                # I18N: Error indicating no email addresses were submitted.
                self, 400, self.gettext('Error: Empty email list'))
            return

        invitation_data = InvitationStudentProperty.load_or_create(student)

        # Limit the number of emails a user can send, to prevent spamming.
        # NOTE: the '%' interpolation is applied AFTER gettext() in all the
        # messages below; interpolating first (as the previous code did)
        # passes an already-expanded string to gettext, so the translation
        # catalog lookup can never match.
        if invitation_data.invited_list_size() + len(email_set) > MAX_EMAILS:
            missing_count = MAX_EMAILS - invitation_data.invited_list_size()
            # I18N: Error indicating that the user cannot add the desired
            # list of additional email addresses to the list of invitations;
            # the total size of the list with the additions would be more
            # than any single user is allowed to send. No email addresses
            # were added to the list to send, and no further email messages
            # were sent.
            transforms.send_json_response(self, 200, self.gettext(
                'This exceeds your email cap. Number of remaining '
                'invitations: %s. No messages sent.') % missing_count)
            return

        messages = []
        for email in email_set:
            if not is_email_valid(email):
                # I18N: Error indicating an email addresses is not well-formed.
                messages.append(self.gettext(
                    'Error: Invalid email "%s"') % email)
            elif invitation_data.is_in_invited_list(email):
                # I18N: Error indicating an email addresses is already known.
                messages.append(self.gettext(
                    'Error: You have already sent an invitation email to "%s"')
                    % email)
            elif unsubscribe.has_unsubscribed(email):
                # No message to the user, for privacy reasons
                logging.info('Declined to send email to unsubscribed user')
            elif models.Student.get_enrolled_student_by_email(email):
                # No message to the user, for privacy reasons
                logging.info('Declined to send email to registered user')
            else:
                InvitationEmail(self, email, student.name).send()

        invitation_data.append_to_invited_list(email_set)
        invitation_data.put()

        if messages:
            # I18N: Error indicating not all email messages were sent.
            messages.insert(0, self.gettext(
                'Not all messages were sent (%s / %s):') % (
                    len(email_set) - len(messages), len(email_set)))
            transforms.send_json_response(self, 400, '\n'.join(messages))
        else:
            transforms.send_json_response(
                self, 200,
                # I18N: Success message indicating number of emails sent.
                self.gettext('OK, %s messages sent') % len(email_set))
def get_course_settings_fields():
    """Builds the invitation-related course settings schema providers."""

    def _field(name, label, field_type, description, css_class, **kwargs):
        # All invitation settings are optional and share the
        # 'course:invitation_email:' namespace.
        return schema_fields.SchemaField(
            'course:invitation_email:' + name, label, field_type,
            description=description,
            extra_schema_dict_values={'className': css_class},
            optional=True, **kwargs)

    enable = _field(
        'enabled', 'Enable Invitations', 'boolean',
        'Enable students to send emails inviting others to the course.',
        'invitation-enable inputEx-Field inputEx-CheckBox')
    sender_email = _field(
        'sender_email', 'Invitation Origin Email', 'string',
        'The email address shown as the sender for invitation emails '
        'to this course.',
        'invitation-data inputEx-Field', i18n=False)
    subject_template = _field(
        'subject_template', 'Invitation Subject Line', 'string',
        'The subject line in invitation emails to this course. '
        'Use the string {{sender_name}} to include the name of the student '
        'issuing the invitation in the subject line.',
        'invitation-data inputEx-Field')
    body_template = _field(
        'body_template', 'Invitation Body', 'text',
        'The body of invitation emails to this course. '
        'Use the string {{sender_name}} to include the name of the student '
        'issuing the invitation. To avoid spamming, you should always '
        'include the string {{unsubscribe_url}} in your message to include '
        'a link which the recipient can use to unsubscribe from future '
        'invitations.',
        'invitation-data inputEx-Field')

    # Providers are callables taking the course and returning the field.
    return (
        lambda unused_course: enable,
        lambda unused_course: sender_email,
        lambda unused_course: subject_template,
        lambda unused_course: body_template)
def get_student_profile_invitation_link(handler, unused_student, unused_course):
    """Returns a (title, link) pair for the profile page's invitation row."""
    email_env = handler.app_context.get_environ()['course'].get(
        INVITATION_EMAIL_KEY, {})
    if not email_env.get('enabled'):
        return (None, None)

    # I18N: Title encouraging user to invite friends to join a course
    invitation_title = handler.gettext('Invite Friends')

    if InvitationEmail.is_available(handler):
        invitation_link = safe_dom.A(
            InvitationHandler.URL
            # I18N: Label on control asking user to invite friends to join.
        ).add_text(handler.gettext(
            'Click to send invitations to family and friends'))
    else:
        # I18N: Inviting friends to join a course is not currently enabled.
        invitation_link = safe_dom.Text(handler.gettext(
            'Invitations not currently available'))

    return (invitation_title, invitation_link)
def get_student_profile_sub_unsub_link(handler, student, unused_course):
    """Returns a (title, message) pair describing the email subscription."""
    email = student.email

    # I18N: Control allowing user to subscribe/unsubscribe from email invitation
    sub_unsub_title = handler.gettext('Subscribe/Unsubscribe')

    sub_unsub_message = safe_dom.NodeList()
    if unsubscribe.has_unsubscribed(email):
        sub_unsub_message.append(safe_dom.Text(
            # I18N: Message - user has unsubscribed from email invitations.
            handler.gettext(
                'You are currently unsubscribed from course-related emails.')))
        sub_unsub_message.append(safe_dom.A(
            unsubscribe.get_resubscribe_url(handler, email)
        ).add_text(
            # I18N: Control allowing user to re-subscribe to email invitations.
            handler.gettext('Click here to re-subscribe.')))
    else:
        sub_unsub_message.append(safe_dom.Text(
            # I18N: Text indicating user has opted in to email invitations.
            handler.gettext(
                'You are currently receiving course-related emails. ')))
        sub_unsub_message.append(safe_dom.A(
            unsubscribe.get_unsubscribe_url(handler, email)
        ).add_text(
            # I18N: Control allowing user to unsubscribe from email invitations.
            handler.gettext('Click here to unsubscribe.')))

    return (sub_unsub_title, sub_unsub_message)
# Module registration handle; populated by register_module().
custom_module = None


def register_module():
    """Registers this module in the registry."""
    course_settings_fields = get_course_settings_fields()

    def on_module_enabled():
        # Hook our settings into the course options schema and the student
        # profile page, and expose our templates/JS to the settings editor.
        courses.Course.OPTIONS_SCHEMA_PROVIDERS[
            COURSE_SETTINGS_SCHEMA_SECTION] += course_settings_fields
        utils.StudentProfileHandler.EXTRA_STUDENT_DATA_PROVIDERS += [
            get_student_profile_invitation_link,
            get_student_profile_sub_unsub_link]
        course_settings.CourseSettingsHandler.ADDITIONAL_DIRS.append(
            TEMPLATES_DIR)
        course_settings.CourseSettingsHandler.EXTRA_JS_FILES.append(
            'invitation_course_settings.js')

    def on_module_disabled():
        # Undo everything on_module_enabled() installed, item by item.
        for field in course_settings_fields:
            courses.Course.OPTIONS_SCHEMA_PROVIDERS[
                COURSE_SETTINGS_SCHEMA_SECTION].remove(field)
        utils.StudentProfileHandler.EXTRA_STUDENT_DATA_PROVIDERS.remove(
            get_student_profile_invitation_link)
        utils.StudentProfileHandler.EXTRA_STUDENT_DATA_PROVIDERS.remove(
            get_student_profile_sub_unsub_link)
        course_settings.CourseSettingsHandler.ADDITIONAL_DIRS.remove(
            TEMPLATES_DIR)
        course_settings.CourseSettingsHandler.EXTRA_JS_FILES.remove(
            'invitation_course_settings.js')

    # Static resources are served globally; the invitation page and its
    # REST endpoint are served per course namespace.
    global_routes = [
        (os.path.join(RESOURCES_PATH, '.*'), tags.ResourcesHandler)]
    namespaced_routes = [
        ('/' + InvitationHandler.URL, InvitationHandler),
        ('/' + InvitationRESTHandler.URL, InvitationRESTHandler)]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Invitation Page',
        'A page to invite others to register.',
        global_routes, namespaced_routes,
        notify_module_disabled=on_module_disabled,
        notify_module_enabled=on_module_enabled)
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cron job definitions for the review subsystem."""
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
from controllers import sites
from controllers import utils
from models import courses
from modules.review import review
from google.appengine.api import namespace_manager
# Module-scoped logger for the review cron handlers.
_LOG = logging.getLogger('modules.reviews.cron')
logging.basicConfig()
class ExpireOldAssignedReviewsHandler(utils.BaseHandler):
    """Iterates through all units in all courses, expiring old review steps.

    The system will run a maximum of one of these jobs at any given time. This
    is enforced by the 10 minute execution time limit on cron jobs plus the
    scheduler, which is configured to run this every 15 minutes.

    Write operations done by this handler must be atomic since admins may visit
    this page at any time, kicking off any number of runs.
    """

    def get(self):
        """Runs the expiry operation once for each peer-reviewed unit."""
        try:
            self.response.headers['Content-Type'] = 'text/plain'

            # namespace_string -> [{
            #     'id': unit_id_string, 'review_window_mins': int}]
            namespace_to_units = {}
            for context in sites.get_all_courses():
                namespace = context.get_namespace_name()
                namespace_to_units[namespace] = []
                course = courses.Course(None, context)
                for unit in course.get_peer_reviewed_units():
                    namespace_to_units[namespace].append({
                        'review_window_mins': (
                            unit.workflow.get_review_window_mins()),
                        'id': str(unit.unit_id),
                    })

            total_count = 0
            total_expired_count = 0
            total_exception_count = 0
            _LOG.info('Begin expire_old_assigned_reviews cron')

            for namespace, units in namespace_to_units.iteritems():
                _LOG.info(
                    'Begin processing course in namespace "%s"; %s unit%s '
                    'found', namespace, len(units),
                    '' if len(units) == 1 else 's')

                for unit in units:
                    _LOG.info('Begin processing unit %s', unit['id'])
                    # Expiry queries and writes must run in the unit's own
                    # namespace.
                    namespace_manager.set_namespace(namespace)
                    expired_keys, exception_keys = (
                        review.Manager.expire_old_reviews_for_unit(
                            unit['review_window_mins'], unit['id']))

                    unit_expired_count = len(expired_keys)
                    unit_exception_count = len(exception_keys)
                    unit_total_count = (
                        unit_expired_count + unit_exception_count)
                    total_expired_count += unit_expired_count
                    # Bug fix: this previously added total_exception_count
                    # to itself, so per-unit exception counts were never
                    # accumulated and the total always reported 0.
                    total_exception_count += unit_exception_count
                    total_count += unit_total_count

                    _LOG.info(
                        'End processing unit %s. Expired: %s, Exceptions: '
                        '%s, Total: %s', unit['id'], unit_expired_count,
                        unit_exception_count, unit_total_count)

                _LOG.info('Done processing namespace "%s"', namespace)

            _LOG.info(
                'End expire_old_assigned_reviews cron. Expired: %s, '
                'Exceptions : %s, Total: %s',
                total_expired_count, total_exception_count, total_count)
            self.response.write('OK\n')
        except Exception:  # pylint: disable=broad-except
            # Deliberately swallow errors so the cron endpoint never fails
            # with a 500, but log them instead of hiding them entirely.
            _LOG.exception('expire_old_assigned_reviews cron failed')
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for displaying peer review analytics."""
__author__ = 'Sean Lip (sll@google.com)'
from models import analytics
from models import courses
from models import data_sources
from models import jobs
from models import transforms
from modules.dashboard import tabs
from modules.review import peer
class PeerReviewStatsGenerator(jobs.AbstractCountingMapReduceJob):
    """Counts review summaries keyed by (unit, completed review count)."""

    @staticmethod
    def get_description():
        return 'peer review'

    def entity_class(self):
        return peer.ReviewSummary

    @staticmethod
    def map(review_summary):
        # The key combines the unit and the number of completed reviews so
        # the counting reducer produces per-(unit, count) totals.
        composite_key = '%s:%s' % (
            review_summary.unit_id, review_summary.completed_count)
        yield (composite_key, 1)
class PeerReviewStatsSource(data_sources.SynchronousQuery):
    """Synchronous data source backing the peer-review analytics display."""

    @staticmethod
    def required_generators():
        return [PeerReviewStatsGenerator]

    @staticmethod
    def fill_values(app_context, template_values, job):
        """Aggregates job results into per-unit histograms for the template.

        Args:
            app_context: application context used to load the course and
                walk its peer-reviewed units in display order.
            template_values: dict. Mutated in place; gains
                'serialized_units' (list of {unit_id, title, stats} dicts)
                and 'serialized_units_json' (the same list as JSON).
            job: the completed PeerReviewStatsGenerator job whose results
                are read via jobs.MapReduceJob.get_results().
        """
        # What we want to produce as output is a list of review results for
        # each unit, ordered by where the unit appears in the course.
        # For each unit, we produce a dict of {unit_id, title, stats}
        # The unit_id and title are from the unit itself.
        #
        # The 'stats' item is an array. In the 0th position of the
        # array, we give the number of peer reviews requests that have
        # had 0 completed responses. In the 1th position, those with
        # 1 response, and so on.
        # The 'stats' array in each unit's dict must be the same length,
        # and thus is right-padded with zeroes as appropriate.

        # First, generate a stats list for each unit. This will have
        # a ragged right edge.
        counts_by_unit = {}
        max_completed_count = 0
        for unit_and_count, quantity in jobs.MapReduceJob.get_results(job):
            # Burst values.  The map key was built as '<unit_id>:<count>';
            # split from the right with maxsplit=1 so a unit id that itself
            # contains ':' cannot break the 2-tuple unpacking (the count,
            # being an integer, never contains ':').
            unit, completed_count = unit_and_count.rsplit(':', 1)
            completed_count = int(completed_count)
            quantity = int(quantity)
            max_completed_count = max(completed_count, max_completed_count)

            # Ensure the array for the unit exists and is long enough.
            unit_stats = counts_by_unit[unit] = counts_by_unit.get(unit, [])
            unit_stats.extend([0] * (completed_count - len(unit_stats) + 1))

            # Install the quantity of reviews with N responses for this unit.
            unit_stats[completed_count] = quantity

        # Fix the ragged right edge by padding all arrays out to a length
        # corresponding to the maximum number of completed responses for
        # any peer-review request.
        for unit_stats in counts_by_unit.values():
            unit_stats.extend([0] * (max_completed_count - len(unit_stats) + 1))

        # Now march through the units, in course order and generate the
        # {unit_id, title, stats} dicts used for display.
        serialized_units = []
        course = courses.Course(None, app_context=app_context)
        for unit in course.get_peer_reviewed_units():
            if unit.unit_id in counts_by_unit:
                serialized_units.append({
                    'stats': counts_by_unit[unit.unit_id],
                    'title': unit.title,
                    'unit_id': unit.unit_id,
                })
        template_values.update({
            'serialized_units': serialized_units,
            'serialized_units_json': transforms.dumps(serialized_units),
        })
def register_analytic():
    """Registers the peer-review data source and dashboard visualization."""
    data_sources.Registry.register(PeerReviewStatsSource)
    visualization = analytics.Visualization(
        'peer_review', 'Peer Review', 'stats.html',
        data_source_classes=[PeerReviewStatsSource])
    tabs.Registry.register(
        'analytics', 'peer_review', 'Peer Review', [visualization])
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the review subsystem."""
__author__ = [
'johncox@google.com (John Cox)',
]
import datetime
import random
from models import counters
from models import custom_modules
from models import entities
from models import student_work
from models import utils
import models.review
from modules.review import domain
from modules.review import peer
from modules.review import stats
from google.appengine.ext import db
# In-process increment-only performance counters.
#
# The groups below mirror the Manager operations they instrument.  Each
# operation typically has -start/-success/-failed counters bracketing the
# call, plus finer-grained outcome counters.

# --- add_reviewer() ---
COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY = counters.PerfCounter(
    'gcb-pr-add-reviewer-bad-summary-key',
    'number of times add_reviewer() failed due to a bad review summary key')
COUNTER_ADD_REVIEWER_SET_ASSIGNER_KIND_HUMAN = counters.PerfCounter(
    'gcb-pr-add-reviewer-set-assigner-kind-human',
    ("number of times add_reviewer() changed an existing step's assigner_kind "
     'to ASSIGNER_KIND_HUMAN'))
COUNTER_ADD_REVIEWER_CREATE_REVIEW_STEP = counters.PerfCounter(
    'gcb-pr-add-reviewer-create-review-step',
    'number of times add_reviewer() created a new review step')
COUNTER_ADD_REVIEWER_EXPIRED_STEP_REASSIGNED = counters.PerfCounter(
    'gcb-pr-add-reviewer-expired-step-reassigned',
    'number of times add_reviewer() reassigned an expired step')
COUNTER_ADD_REVIEWER_FAILED = counters.PerfCounter(
    'gcb-pr-add-reviewer-failed',
    'number of times add_reviewer() had a fatal error')
COUNTER_ADD_REVIEWER_REMOVED_STEP_UNREMOVED = counters.PerfCounter(
    'gcb-pr-add-reviewer-removed-step-unremoved',
    'number of times add_reviewer() unremoved a removed review step')
COUNTER_ADD_REVIEWER_START = counters.PerfCounter(
    'gcb-pr-add-reviewer-start',
    'number of times add_reviewer() has started processing')
COUNTER_ADD_REVIEWER_SUCCESS = counters.PerfCounter(
    'gcb-pr-add-reviewer-success',
    'number of times add_reviewer() completed successfully')
COUNTER_ADD_REVIEWER_UNREMOVED_STEP_FAILED = counters.PerfCounter(
    'gcb-pr-add-reviewer-unremoved-step-failed',
    ('number of times add_reviewer() failed on an unremoved step with a fatal '
     'error'))

# --- get_assignment_candidates_query() ---
COUNTER_ASSIGNMENT_CANDIDATES_QUERY_RESULTS_RETURNED = counters.PerfCounter(
    'gcb-pr-assignment-candidates-query-results-returned',
    ('number of results returned by the query returned by '
     'get_assignment_candidates_query()'))

# --- delete_reviewer() ---
COUNTER_DELETE_REVIEWER_ALREADY_REMOVED = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-already-removed',
    ('number of times delete_reviewer() called on review step with removed '
     'already True'))
COUNTER_DELETE_REVIEWER_FAILED = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-failed',
    'number of times delete_reviewer() had a fatal error')
COUNTER_DELETE_REVIEWER_START = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-start',
    'number of times delete_reviewer() has started processing')
COUNTER_DELETE_REVIEWER_STEP_MISS = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-step-miss',
    'number of times delete_reviewer() found a missing review step')
COUNTER_DELETE_REVIEWER_SUCCESS = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-success',
    'number of times delete_reviewer() completed successfully')
COUNTER_DELETE_REVIEWER_SUMMARY_MISS = counters.PerfCounter(
    'gcb-pr-review-delete-reviewer-summary-miss',
    'number of times delete_reviewer() found a missing review summary')

# --- expire_review() ---
COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION = counters.PerfCounter(
    'gcb-pr-expire-review-cannot-transition',
    ('number of times expire_review() was called on a review step that could '
     'not be transitioned to REVIEW_STATE_EXPIRED'))
COUNTER_EXPIRE_REVIEW_FAILED = counters.PerfCounter(
    'gcb-pr-expire-review-failed',
    'number of times expire_review() had a fatal error')
COUNTER_EXPIRE_REVIEW_START = counters.PerfCounter(
    'gcb-pr-expire-review-start',
    'number of times expire_review() has started processing')
COUNTER_EXPIRE_REVIEW_STEP_MISS = counters.PerfCounter(
    'gcb-pr-expire-review-step-miss',
    'number of times expire_review() found a missing review step')
COUNTER_EXPIRE_REVIEW_SUCCESS = counters.PerfCounter(
    'gcb-pr-expire-review-success',
    'number of times expire_review() completed successfully')
COUNTER_EXPIRE_REVIEW_SUMMARY_MISS = counters.PerfCounter(
    'gcb-pr-expire-review-summary-miss',
    'number of times expire_review() found a missing review summary')

# --- expire_old_reviews_for_unit() ---
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_EXPIRE = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-expire',
    'number of records expire_old_reviews_for_unit() has expired')
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SKIP = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-skip',
    ('number of times expire_old_reviews_for_unit() skipped a record due to an '
     'error'))
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_START = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-start',
    'number of times expire_old_reviews_for_unit() has started processing')
COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SUCCESS = counters.PerfCounter(
    'gcb-pr-expire-old-reviews-for-unit-success',
    'number of times expire_old_reviews_for_unit() completed successfully')
COUNTER_EXPIRY_QUERY_KEYS_RETURNED = counters.PerfCounter(
    'gcb-pr-expiry-query-keys-returned',
    'number of keys returned by the query returned by get_expiry_query()')

# --- get_new_review() ---
COUNTER_GET_NEW_REVIEW_ALREADY_ASSIGNED = counters.PerfCounter(
    'gcb-pr-get-new-review-already-assigned',
    ('number of times get_new_review() rejected a candidate because the '
     'reviewer is already assigned to or has already completed it'))
COUNTER_GET_NEW_REVIEW_ASSIGNMENT_ATTEMPTED = counters.PerfCounter(
    'gcb-pr-get-new-review-assignment-attempted',
    'number of times get_new_review() attempted to assign a candidate')
COUNTER_GET_NEW_REVIEW_CANNOT_UNREMOVE_COMPLETED = counters.PerfCounter(
    'gcb-pr-get-new-review-cannot-unremove-completed',
    ('number of times get_new_review() failed because the reviewer already had '
     'a completed, removed review step'))
COUNTER_GET_NEW_REVIEW_FAILED = counters.PerfCounter(
    'gcb-pr-get-new-review-failed',
    'number of times get_new_review() had a fatal error')
# NOTE(review): counter name says 'none-assignable' while the constant says
# NOT_ASSIGNABLE; preserved as-is for dashboard continuity.
COUNTER_GET_NEW_REVIEW_NOT_ASSIGNABLE = counters.PerfCounter(
    'gcb-pr-get-new-review-none-assignable',
    'number of times get_new_review() failed to find an assignable review')
COUNTER_GET_NEW_REVIEW_REASSIGN_EXISTING = counters.PerfCounter(
    'gcb-pr-get-new-review-reassign-existing',
    ('number of times get_new_review() unremoved and reassigned an existing '
     'review step'))
COUNTER_GET_NEW_REVIEW_START = counters.PerfCounter(
    'gcb-pr-get-new-review-start',
    'number of times get_new_review() has started processing')
COUNTER_GET_NEW_REVIEW_SUCCESS = counters.PerfCounter(
    'gcb-pr-get-new-review-success',
    'number of times get_new_review() found and assigned a new review')
COUNTER_GET_NEW_REVIEW_SUMMARY_CHANGED = counters.PerfCounter(
    'gcb-pr-get-new-review-summary-changed',
    ('number of times get_new_review() rejected a candidate because the review '
     'summary changed during processing'))

# --- get_review_step_keys_by() ---
COUNTER_GET_REVIEW_STEP_KEYS_BY_KEYS_RETURNED = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-keys-returned',
    'number of keys get_review_step_keys_by() returned')
COUNTER_GET_REVIEW_STEP_KEYS_BY_FAILED = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-failed',
    'number of times get_review_step_keys_by() had a fatal error')
COUNTER_GET_REVIEW_STEP_KEYS_BY_START = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-start',
    'number of times get_review_step_keys_by() started processing')
COUNTER_GET_REVIEW_STEP_KEYS_BY_SUCCESS = counters.PerfCounter(
    'gcb-pr-get-review-step-keys-by-success',
    'number of times get_review_step_keys_by() completed successfully')

# --- get_submission_and_review_step_keys() ---
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_FAILED = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step-keys-failed',
    'number of times get_submission_and_review_step_keys() had a fatal error')
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_RETURNED = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step-keys-keys-returned',
    'number of keys get_submission_and_review_step_keys() returned')
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_START = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step-keys-start',
    ('number of times get_submission_and_review_step_keys() has begun '
     'processing'))
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUBMISSION_MISS = (
    counters.PerfCounter(
        'gcb-pr-get-submission-and-review-step-keys-submission-miss',
        ('number of times get_submission_and_review_step_keys() failed to find '
         'a submission_key')))
# NOTE(review): the counter name below uses 'step_keys' (underscore) unlike
# its siblings; preserved as-is since renaming would break dashboards.
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUCCESS = counters.PerfCounter(
    'gcb-pr-get-submission-and-review-step_keys-success',
    ('number of times get_submission-and-review-step-keys() completed '
     'successfully'))

# --- start_review_process_for() ---
COUNTER_START_REVIEW_PROCESS_FOR_ALREADY_STARTED = counters.PerfCounter(
    'gcb-pr-start-review-process-for-already-started',
    ('number of times start_review_process_for() called when review already '
     'started'))
COUNTER_START_REVIEW_PROCESS_FOR_FAILED = counters.PerfCounter(
    'gcb-pr-start-review-process-for-failed',
    'number of times start_review_process_for() had a fatal error')
COUNTER_START_REVIEW_PROCESS_FOR_START = counters.PerfCounter(
    'gcb-pr-start-review-process-for-start',
    'number of times start_review_process_for() has started processing')
COUNTER_START_REVIEW_PROCESS_FOR_SUCCESS = counters.PerfCounter(
    'gcb-pr-start-review-process-for-success',
    'number of times start_review_process_for() completed successfully')

# --- write_review() ---
COUNTER_WRITE_REVIEW_COMPLETED_ASSIGNED_STEP = counters.PerfCounter(
    'gcb-pr-write-review-completed-assigned-step',
    'number of times write_review() transitioned an assigned step to completed')
COUNTER_WRITE_REVIEW_COMPLETED_EXPIRED_STEP = counters.PerfCounter(
    'gcb-pr-write-review-completed-expired-step',
    'number of times write_review() transitioned an expired step to completed')
COUNTER_WRITE_REVIEW_CREATED_NEW_REVIEW = counters.PerfCounter(
    'gcb-pr-write-review-created-new-review',
    'number of times write_review() created a new review')
COUNTER_WRITE_REVIEW_FAILED = counters.PerfCounter(
    'gcb-pr-write-review-failed',
    'number of times write_review() had a fatal error')
COUNTER_WRITE_REVIEW_REVIEW_MISS = counters.PerfCounter(
    'gcb-pr-write-review-review-miss',
    'number of times write_review() found a missing review')
COUNTER_WRITE_REVIEW_START = counters.PerfCounter(
    'gcb-pr-write-review-start',
    'number of times write_review() started processing')
COUNTER_WRITE_REVIEW_STEP_MISS = counters.PerfCounter(
    'gcb-pr-write-review-step-miss',
    'number of times write_review() found a missing review step')
COUNTER_WRITE_REVIEW_SUMMARY_MISS = counters.PerfCounter(
    'gcb-pr-write-review-summary-miss',
    'number of times write_review() found a missing review summary')
COUNTER_WRITE_REVIEW_SUCCESS = counters.PerfCounter(
    'gcb-pr-write-review-success',
    'number of times write_review() completed successfully')
COUNTER_WRITE_REVIEW_UPDATED_EXISTING_REVIEW = counters.PerfCounter(
    'gcb-pr-write-review-updated-existing-review',
    'number of times write_review() updated an existing review')

# Number of entities to fetch when querying for all review steps that meet
# given criteria. Ideally we'd cursor through results rather than setting a
# ceiling, but for now let's allow as many removed results as unremoved.
_REVIEW_STEP_QUERY_LIMIT = 2 * domain.MAX_UNREMOVED_REVIEW_STEPS
class Manager(object):
"""Object that manages the review subsystem."""
@classmethod
def add_reviewer(cls, unit_id, submission_key, reviewee_key, reviewer_key):
    """Adds a reviewer for a submission.

    If there is no pre-existing review step, one will be created.

    Attempting to add an existing unremoved step in REVIEW_STATE_ASSIGNED or
    REVIEW_STATE_COMPLETED is an error.

    If there is an existing unremoved review in REVIEW_STATE_EXPIRED, it
    will be put in REVIEW_STATE_ASSIGNED. If there is a removed review in
    REVIEW_STATE_ASSIGNED or REVIEW_STATE_EXPIRED, it will be put in
    REVIEW_STATE_ASSIGNED and unremoved. If it is in REVIEW_STATE_COMPLETED,
    it will be unremoved but its state will not change. In all these cases
    the assigner kind will be set to ASSIGNER_KIND_HUMAN.

    Args:
        unit_id: string. Unique identifier for a unit.
        submission_key: db.Key of models.student_work.Submission. The
            submission being registered.
        reviewee_key: db.Key of models.models.Student. The student who
            authored the submission.
        reviewer_key: db.Key of models.models.Student. The student to add as
            a reviewer.

    Raises:
        domain.TransitionError: if there is a pre-existing review step found
            in domain.REVIEW_STATE_ASSIGNED|COMPLETED.

    Returns:
        db.Key of written review step.
    """
    try:
        COUNTER_ADD_REVIEWER_START.inc()
        key = cls._add_reviewer(
            unit_id, submission_key, reviewee_key, reviewer_key)
        COUNTER_ADD_REVIEWER_SUCCESS.inc()
        return key
    except Exception:
        COUNTER_ADD_REVIEWER_FAILED.inc()
        # Bare raise preserves the original traceback; 'raise e' would
        # re-raise from this frame and lose it under Python 2.
        raise
@classmethod
@db.transactional(xg=True)
def _add_reviewer(cls, unit_id, submission_key, reviewee_key, reviewer_key):
    """Transactional worker for add_reviewer(); returns the step's db.Key."""
    existing_step = peer.ReviewStep.get_by_key_name(
        peer.ReviewStep.key_name(submission_key, reviewer_key))
    if existing_step:
        # A step for this (submission, reviewer) pair already exists;
        # transition it rather than creating a duplicate.
        return cls._add_reviewer_update_step(existing_step)
    return cls._add_new_reviewer(
        unit_id, submission_key, reviewee_key, reviewer_key)
@classmethod
def _add_new_reviewer(
    cls, unit_id, submission_key, reviewee_key, reviewer_key):
    """Creates and persists a fresh summary/step pair; returns the step key."""
    # Synthesize the summary key up front so the step can reference it
    # without an extra synchronous put round-trip.
    summary_key = db.Key.from_path(
        peer.ReviewSummary.kind(),
        peer.ReviewSummary.key_name(submission_key))
    summary = peer.ReviewSummary(
        assigned_count=1, reviewee_key=reviewee_key,
        submission_key=submission_key, unit_id=unit_id)
    step = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_HUMAN,
        review_summary_key=summary_key, reviewee_key=reviewee_key,
        reviewer_key=reviewer_key, state=domain.REVIEW_STATE_ASSIGNED,
        submission_key=submission_key, unit_id=unit_id)
    # pylint: disable=unbalanced-tuple-unpacking,unpacking-non-sequence
    step_key, written_summary_key = entities.put([step, summary])
    # Sanity-check that the datastore agreed with the synthesized key.
    if written_summary_key != summary_key:
        COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY.inc()
        raise AssertionError(
            'Synthesized invalid review summary key %s' % repr(summary_key))
    COUNTER_ADD_REVIEWER_CREATE_REVIEW_STEP.inc()
    return step_key
@classmethod
def _add_reviewer_update_step(cls, step):
    """Transitions an existing step for a human-requested (re)assignment.

    See add_reviewer() for the state machine this implements.  Counter
    increments are deferred until after the datastore put succeeds so a
    failed transaction does not inflate them.

    Args:
        step: peer.ReviewStep. The pre-existing step to transition.

    Raises:
        AssertionError: if the step references a missing review summary.
        domain.TransitionError: if the step is unremoved and already in
            REVIEW_STATE_ASSIGNED or REVIEW_STATE_COMPLETED.

    Returns:
        db.Key of the written review step.
    """
    should_increment_human = False
    should_increment_reassigned = False
    should_increment_unremoved = False
    summary = entities.get(step.review_summary_key)

    if not summary:
        COUNTER_ADD_REVIEWER_BAD_SUMMARY_KEY.inc()
        raise AssertionError(
            'Found invalid review summary key %s' % repr(
                step.review_summary_key))

    if not step.removed:
        # Unremoved: only an expired step may be reassigned.
        if step.state == domain.REVIEW_STATE_EXPIRED:
            should_increment_reassigned = True
            step.state = domain.REVIEW_STATE_ASSIGNED
            summary.decrement_count(domain.REVIEW_STATE_EXPIRED)
            summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
        elif (step.state == domain.REVIEW_STATE_ASSIGNED or
              step.state == domain.REVIEW_STATE_COMPLETED):
            COUNTER_ADD_REVIEWER_UNREMOVED_STEP_FAILED.inc()
            raise domain.TransitionError(
                'Unable to add new reviewer to step %s' % (
                    repr(step.key())),
                step.state, domain.REVIEW_STATE_ASSIGNED)
    else:
        # Removed: unremove it.  Completed steps keep their state (the
        # summary was already decremented on removal, so re-add the count);
        # expired steps go back to assigned.
        should_increment_unremoved = True
        step.removed = False
        if step.state != domain.REVIEW_STATE_EXPIRED:
            summary.increment_count(step.state)
        else:
            should_increment_reassigned = True
            step.state = domain.REVIEW_STATE_ASSIGNED
            summary.decrement_count(domain.REVIEW_STATE_EXPIRED)
            summary.increment_count(domain.REVIEW_STATE_ASSIGNED)

    # Any path through here was requested by a human.
    if step.assigner_kind != domain.ASSIGNER_KIND_HUMAN:
        should_increment_human = True
        step.assigner_kind = domain.ASSIGNER_KIND_HUMAN

    step_key = entities.put([step, summary])[0]

    # Increment counters only after the put succeeded.
    if should_increment_human:
        COUNTER_ADD_REVIEWER_SET_ASSIGNER_KIND_HUMAN.inc()
    if should_increment_reassigned:
        COUNTER_ADD_REVIEWER_EXPIRED_STEP_REASSIGNED.inc()
    if should_increment_unremoved:
        COUNTER_ADD_REVIEWER_REMOVED_STEP_UNREMOVED.inc()

    return step_key
@classmethod
def delete_reviewer(cls, review_step_key):
    """Deletes the given review step.

    We do not physically delete the review step; we mark it as removed,
    meaning it will be ignored from most queries and the associated review
    summary will have its corresponding state count decremented. Calling
    this method on a removed review step is an error.

    Args:
        review_step_key: db.Key of models.student_work.ReviewStep. The
            review step to delete.

    Raises:
        domain.RemovedError: if called on a review step that has already
            been marked removed.
        KeyError: if there is no review step with the given key, or if the
            step references a review summary that does not exist.

    Returns:
        db.Key of deleted review step.
    """
    try:
        COUNTER_DELETE_REVIEWER_START.inc()
        key = cls._mark_review_step_removed(review_step_key)
        COUNTER_DELETE_REVIEWER_SUCCESS.inc()
        return key
    except Exception:
        COUNTER_DELETE_REVIEWER_FAILED.inc()
        # Bare raise preserves the original traceback ('raise e' does not
        # under Python 2).
        raise
@classmethod
@db.transactional(xg=True)
def _mark_review_step_removed(cls, review_step_key):
    """Flags a step removed and decrements its summary's state count."""
    step = entities.get(review_step_key)
    if not step:
        COUNTER_DELETE_REVIEWER_STEP_MISS.inc()
        raise KeyError(
            'No review step found with key %s' % repr(review_step_key))
    if step.removed:
        # Double-removal is a caller error, not an idempotent no-op.
        COUNTER_DELETE_REVIEWER_ALREADY_REMOVED.inc()
        raise domain.RemovedError(
            'Cannot remove step %s' % repr(review_step_key), step.removed)

    summary = entities.get(step.review_summary_key)
    if not summary:
        COUNTER_DELETE_REVIEWER_SUMMARY_MISS.inc()
        raise KeyError(
            'No review summary found with key %s' % repr(
                step.review_summary_key))

    step.removed = True
    summary.decrement_count(step.state)
    written_keys = entities.put([step, summary])
    return written_keys[0]
@classmethod
def expire_review(cls, review_step_key):
    """Puts a review step in state REVIEW_STATE_EXPIRED.

    Args:
        review_step_key: db.Key of models.student_work.ReviewStep. The
            review step to expire.

    Raises:
        domain.RemovedError: if called on a step that is removed.
        domain.TransitionError: if called on a review step that cannot be
            transitioned to REVIEW_STATE_EXPIRED (that is, it is already in
            REVIEW_STATE_COMPLETED or REVIEW_STATE_EXPIRED).
        KeyError: if there is no review with the given key, or the step
            references a review summary that does not exist.

    Returns:
        db.Key of the expired review step.
    """
    try:
        COUNTER_EXPIRE_REVIEW_START.inc()
        key = cls._transition_state_to_expired(review_step_key)
        COUNTER_EXPIRE_REVIEW_SUCCESS.inc()
        return key
    except Exception:
        COUNTER_EXPIRE_REVIEW_FAILED.inc()
        # Bare raise preserves the original traceback ('raise e' does not
        # under Python 2).
        raise
@classmethod
@db.transactional(xg=True)
def _transition_state_to_expired(cls, review_step_key):
    """Transactional worker for expire_review(); returns the step's key.

    Validation order matters: missing step, then removed, then state, then
    missing summary — callers' counters rely on which error fires first.
    """
    step = entities.get(review_step_key)

    if not step:
        COUNTER_EXPIRE_REVIEW_STEP_MISS.inc()
        raise KeyError(
            'No review step found with key %s' % repr(review_step_key))

    if step.removed:
        COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION.inc()
        raise domain.RemovedError(
            'Cannot transition step %s' % repr(review_step_key),
            step.removed)

    # Completed and already-expired steps cannot be expired (again).
    if step.state in (
            domain.REVIEW_STATE_COMPLETED, domain.REVIEW_STATE_EXPIRED):
        COUNTER_EXPIRE_REVIEW_CANNOT_TRANSITION.inc()
        raise domain.TransitionError(
            'Cannot transition step %s' % repr(review_step_key),
            step.state, domain.REVIEW_STATE_EXPIRED)

    summary = entities.get(step.review_summary_key)

    if not summary:
        COUNTER_EXPIRE_REVIEW_SUMMARY_MISS.inc()
        raise KeyError(
            'No review summary found with key %s' % repr(
                step.review_summary_key))

    # Move the summary's count from the old state bucket to 'expired'.
    summary.decrement_count(step.state)
    step.state = domain.REVIEW_STATE_EXPIRED
    summary.increment_count(step.state)
    return entities.put([step, summary])[0]
@classmethod
def expire_old_reviews_for_unit(cls, review_window_mins, unit_id):
    """Finds and expires all old review steps for a single unit.

    Args:
        review_window_mins: int. Number of minutes before we expire reviews
            assigned by domain.ASSIGNER_KIND_AUTO.
        unit_id: string. Id of the unit to restrict the query to.

    Returns:
        2-tuple of list of db.Key of peer.ReviewStep. 0th element is keys
        that were written successfully; 1st element is keys that we failed
        to update.
    """
    query = cls.get_expiry_query(review_window_mins, unit_id)
    mapper = utils.QueryMapper(
        query, counter=COUNTER_EXPIRY_QUERY_KEYS_RETURNED, report_every=100)
    expired_keys = []
    exception_keys = []

    def map_fn(review_step_key, expired_keys, exception_keys):
        # Per-key callback run by the mapper; accumulates into the two
        # closed-over lists.
        try:
            expired_keys.append(cls.expire_review(review_step_key))
        except:  # All errors are the same. pylint: disable=bare-except
            # Skip. Either the entity was updated between the query and
            # the update, meaning we don't need to expire it; or we ran into
            # a transient datastore error, meaning we'll expire it next
            # time.
            COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SKIP.inc()
            exception_keys.append(review_step_key)

    COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_START.inc()
    mapper.run(map_fn, expired_keys, exception_keys)
    COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_EXPIRE.inc(
        increment=len(expired_keys))
    COUNTER_EXPIRE_OLD_REVIEWS_FOR_UNIT_SUCCESS.inc()
    return expired_keys, exception_keys
@classmethod
def get_assignment_candidates_query(cls, unit_id):
    """Gets query that returns candidates for new review assignment.

    New assignment candidates are scoped to a unit.  Best candidates come
    first: fewest completed reviews, then fewest assigned reviews, then
    most recently created.  Results are user-independent.

    Args:
        unit_id: string. Id of the unit to restrict the query to.

    Returns:
        db.Query that will return [peer.ReviewSummary].
    """
    # db.Query.filter()/order() mutate the query and return it, so
    # building it statement-by-statement is equivalent to chaining.
    query = peer.ReviewSummary.all()
    query.filter(peer.ReviewSummary.unit_id.name, unit_id)
    query.order(peer.ReviewSummary.completed_count.name)
    query.order(peer.ReviewSummary.assigned_count.name)
    query.order(peer.ReviewSummary.create_date.name)
    return query
@classmethod
def get_expiry_query(
    cls, review_window_mins, unit_id, now_fn=datetime.datetime.utcnow):
    """Gets a db.Query that returns review steps to mark expired.

    Results are items that were assigned by machine, are currently assigned,
    are not removed, were last updated more than review_window_mins ago,
    and are ordered by change date ascending.

    Args:
        review_window_mins: int. Number of minutes before we expire reviews
            assigned by domain.ASSIGNER_KIND_AUTO.
        unit_id: string. Id of the unit to restrict the query to.
        now_fn: function that returns the current UTC datetime. Injectable
            for tests only.

    Returns:
        db.Query.
    """
    cutoff = now_fn() - datetime.timedelta(minutes=review_window_mins)
    # filter()/order() mutate the query in place and return it, so the
    # sequential form below matches the chained equivalent.
    query = peer.ReviewStep.all(keys_only=True)
    query.filter(peer.ReviewStep.unit_id.name, unit_id)
    query.filter(
        peer.ReviewStep.assigner_kind.name, domain.ASSIGNER_KIND_AUTO)
    query.filter(peer.ReviewStep.state.name, domain.REVIEW_STATE_ASSIGNED)
    query.filter(peer.ReviewStep.removed.name, False)
    query.filter('%s <=' % peer.ReviewStep.change_date.name, cutoff)
    query.order(peer.ReviewStep.change_date.name)
    return query
@classmethod
def get_new_review(
    cls, unit_id, reviewer_key, candidate_count=20, max_retries=5):
    """Attempts to assign a review to a reviewer.

    We prioritize possible reviews by querying review summary objects,
    finding those that best satisfy cls.get_assignment_candidates_query.

    To minimize write contention, we nontransactionally grab candidate_count
    candidates from the head of the query results. Post-query we filter out
    any candidates that are for the prospective reviewer's own work.
    Then we randomly select one. We transactionally attempt to assign that
    review. If assignment fails because the candidate is updated between
    selection and assignment or the assignment is for a submission the
    reviewer already has or has already done, we remove the candidate from
    the list. We then retry assignment up to max_retries times. If we run
    out of retries or candidates, we raise domain.NotAssignableError.

    This is a naive implementation because it scales only to relatively low
    new review assignments per second and because it can raise
    domain.NotAssignableError when there are in fact assignable reviews.

    Args:
        unit_id: string. The unit to assign work from.
        reviewer_key: db.Key of models.models.Student. The reviewer to
            attempt to assign the review to.
        candidate_count: int. The number of candidate keys to fetch and
            attempt to assign from. Increasing this decreases the chance
            that we will have write contention on reviews, but it costs 1 +
            num_results datastore reads and can get expensive for large
            courses.
        max_retries: int. Number of times to retry failed assignment
            attempts. Careful not to set this too high as a) datastore
            throughput is slow and latency from this method is user-facing,
            and b) if you encounter a few failures it is likely that all
            candidates are now failures, so each retry past the first few is
            of questionable value.

    Raises:
        domain.NotAssignableError: if no review can currently be assigned
            for the given unit_id.

    Returns:
        db.Key of peer.ReviewStep. The newly created assigned review step.
    """
    try:
        COUNTER_GET_NEW_REVIEW_START.inc()
        # Filter out candidates that are for submissions by the reviewer.
        raw_candidates = cls.get_assignment_candidates_query(unit_id).fetch(
            candidate_count)
        COUNTER_ASSIGNMENT_CANDIDATES_QUERY_RESULTS_RETURNED.inc(
            increment=len(raw_candidates))
        candidates = [
            candidate for candidate in raw_candidates
            if candidate.reviewee_key != reviewer_key]

        retries = 0
        while True:
            if not candidates or retries >= max_retries:
                COUNTER_GET_NEW_REVIEW_NOT_ASSIGNABLE.inc()
                raise domain.NotAssignableError(
                    'No reviews assignable for unit %s and reviewer %s' % (
                        unit_id, repr(reviewer_key)))
            candidate = cls._choose_assignment_candidate(candidates)
            candidates.remove(candidate)
            assigned_key = cls._attempt_review_assignment(
                candidate.key(), reviewer_key, candidate.change_date)

            if not assigned_key:
                retries += 1
            else:
                COUNTER_GET_NEW_REVIEW_SUCCESS.inc()
                return assigned_key

    # 'except Exception as e' / bare raise: matches the style used by the
    # other public methods here, and the deprecated 'except Exception, e'
    # comma form plus 'raise e' discarded the original traceback.
    except Exception:
        COUNTER_GET_NEW_REVIEW_FAILED.inc()
        raise
@classmethod
def _choose_assignment_candidate(cls, candidates):
    """Seam that allows different choice functions in tests."""
    # Uniform random pick spreads assignment load across equally-ranked
    # candidates and reduces write contention on any single summary.
    return random.choice(candidates)
@classmethod
@db.transactional(xg=True)
def _attempt_review_assignment(
    cls, review_summary_key, reviewer_key, last_change_date):
    """Transactionally tries to assign one candidate to the reviewer.

    Args:
        review_summary_key: db.Key of peer.ReviewSummary. The candidate.
        reviewer_key: db.Key of models.models.Student. Prospective reviewer.
        last_change_date: datetime. The summary's change_date as observed
            by the nontransactional candidate query; used as an optimistic
            concurrency check.

    Raises:
        KeyError: if the review summary no longer exists.

    Returns:
        db.Key of the written review step on success; None when the caller
        should skip this candidate and retry with another.
    """
    COUNTER_GET_NEW_REVIEW_ASSIGNMENT_ATTEMPTED.inc()
    summary = entities.get(review_summary_key)
    if not summary:
        raise KeyError('No review summary found with key %s' % repr(
            review_summary_key))
    if summary.change_date != last_change_date:
        # The summary has changed since we queried it. We cannot know for
        # sure what the edit was, but let's skip to the next one because it
        # was probably a review assignment.
        COUNTER_GET_NEW_REVIEW_SUMMARY_CHANGED.inc()
        return

    step = peer.ReviewStep.get_by_key_name(
        peer.ReviewStep.key_name(summary.submission_key, reviewer_key))

    if not step:
        # No prior relationship between this reviewer and submission:
        # create a fresh machine-assigned step.
        step = peer.ReviewStep(
            assigner_kind=domain.ASSIGNER_KIND_AUTO,
            review_summary_key=summary.key(),
            reviewee_key=summary.reviewee_key, reviewer_key=reviewer_key,
            state=domain.REVIEW_STATE_ASSIGNED,
            submission_key=summary.submission_key, unit_id=summary.unit_id)
    else:
        if step.state == domain.REVIEW_STATE_COMPLETED:
            # Reviewer has previously done this review and the review
            # has been deleted. Skip to the next one.
            COUNTER_GET_NEW_REVIEW_CANNOT_UNREMOVE_COMPLETED.inc()
            return

        if step.removed:
            # We can reassign the existing review step.
            COUNTER_GET_NEW_REVIEW_REASSIGN_EXISTING.inc()
            step.removed = False
            step.assigner_kind = domain.ASSIGNER_KIND_AUTO
            step.state = domain.REVIEW_STATE_ASSIGNED
        else:
            # Reviewee has already reviewed or is already assigned to review
            # this submission, so we cannot reassign the step.
            COUNTER_GET_NEW_REVIEW_ALREADY_ASSIGNED.inc()
            return

    summary.increment_count(domain.REVIEW_STATE_ASSIGNED)
    return entities.put([step, summary])[0]
@classmethod
def get_review_step_keys_by(cls, unit_id, reviewer_key):
    """Gets the keys of all review steps in a unit for a reviewer.

    Note that keys for review steps marked removed are included in the
    result set.

    Args:
        unit_id: string. Id of the unit to restrict the query to.
        reviewer_key: db.Key of models.models.Student. The author of the
            requested reviews.

    Returns:
        [db.Key of peer.ReviewStep].
    """
    COUNTER_GET_REVIEW_STEP_KEYS_BY_START.inc()

    try:
        query = peer.ReviewStep.all(keys_only=True).filter(
            peer.ReviewStep.reviewer_key.name, reviewer_key
        ).filter(
            peer.ReviewStep.unit_id.name, unit_id
        ).order(
            peer.ReviewStep.create_date.name,
        )
        # fetch() already returns a list; no need to copy it element-wise.
        keys = query.fetch(_REVIEW_STEP_QUERY_LIMIT)
    except Exception:
        COUNTER_GET_REVIEW_STEP_KEYS_BY_FAILED.inc()
        # Bare raise preserves the original traceback ('raise e' does not
        # under Python 2).
        raise

    COUNTER_GET_REVIEW_STEP_KEYS_BY_SUCCESS.inc()
    COUNTER_GET_REVIEW_STEP_KEYS_BY_KEYS_RETURNED.inc(increment=len(keys))
    return keys
@classmethod
def get_review_steps_by_keys(cls, keys):
    """Resolves review step keys into domain objects.

    Args:
        keys: [db.Key of peer.ReviewStep]. Keys to fetch.

    Returns:
        [domain.ReviewStep or None]. Missed keys return None in place in
        result list.
    """
    models = entities.get(keys)
    return [cls._make_domain_review_step(model) for model in models]
@classmethod
def _make_domain_review_step(cls, model):
    """Converts a peer.ReviewStep entity to its domain object; None-safe."""
    if model is None:
        return None
    return domain.ReviewStep(
        assigner_kind=model.assigner_kind, change_date=model.change_date,
        create_date=model.create_date, key=model.key(),
        removed=model.removed, review_key=model.review_key,
        review_summary_key=model.review_summary_key,
        reviewee_key=model.reviewee_key, reviewer_key=model.reviewer_key,
        state=model.state, submission_key=model.submission_key,
        unit_id=model.unit_id)
@classmethod
def get_reviews_by_keys(cls, keys):
    """Resolves review keys into domain objects.

    Args:
        keys: [db.Key of review.Review]. Keys to fetch.

    Returns:
        [domain.Review or None]. Missed keys return None in place in result
        list.
    """
    models = entities.get(keys)
    return [cls._make_domain_review(model) for model in models]
@classmethod
def _make_domain_review(cls, model):
if model is None:
return
return domain.Review(contents=model.contents, key=model.key())
@classmethod
def get_submission_and_review_step_keys(cls, unit_id, reviewee_key):
"""Gets the submission key/review step keys for the given pair.
Note that keys for review steps marked removed are included in the
result set.
Args:
unit_id: string. Id of the unit to restrict the query to.
reviewee_key: db.Key of models.models.Student. The student who
authored the submission.
Raises:
domain.ConstraintError: if multiple review summary keys were found
for the given unit_id, reviewee_key pair.
KeyError: if there is no review summary for the given unit_id,
reviewee pair.
Returns:
(db.Key of Submission, [db.Key of peer.ReviewStep]) if submission
found for given unit_id, reviewee_key pair; None otherwise.
"""
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_START.inc()
try:
submission_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(unit_id, reviewee_key))
submission = entities.get(submission_key)
if not submission:
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUBMISSION_MISS.inc(
)
return
step_keys_query = peer.ReviewStep.all(
keys_only=True
).filter(
peer.ReviewStep.submission_key.name, submission_key
)
step_keys = step_keys_query.fetch(_REVIEW_STEP_QUERY_LIMIT)
results = (submission_key, step_keys)
except Exception as e:
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_FAILED.inc()
raise e
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_SUCCESS.inc()
COUNTER_GET_SUBMISSION_AND_REVIEW_STEP_KEYS_RETURNED.inc(
increment=len(step_keys))
return results
@classmethod
def get_submissions_by_keys(cls, keys):
"""Gets submissions by their keys.
Args:
keys: [db.Key of review.Submission]. Keys to fetch.
Returns:
[domain.Submission or None]. Missed keys return None in place in
result list.
"""
return [
cls._make_domain_submission(model) for model in entities.get(keys)]
@classmethod
def _make_domain_submission(cls, model):
if model is None:
return
return domain.Submission(contents=model.contents, key=model.key())
@classmethod
def start_review_process_for(cls, unit_id, submission_key, reviewee_key):
"""Registers a new submission with the review subsystem.
Once registered, reviews can be assigned against a given submission,
either by humans or by machine. No reviews are assigned during
registration -- this method merely makes them assignable.
Args:
unit_id: string. Unique identifier for a unit.
submission_key: db.Key of models.student_work.Submission. The
submission being registered.
reviewee_key: db.Key of models.models.Student. The student who
authored the submission.
Raises:
db.BadValueError: if passed args are invalid.
domain.ReviewProcessAlreadyStartedError: if the review process has
already been started for this student's submission.
Returns:
db.Key of created ReviewSummary.
"""
try:
COUNTER_START_REVIEW_PROCESS_FOR_START.inc()
key = cls._create_review_summary(
reviewee_key, submission_key, unit_id)
COUNTER_START_REVIEW_PROCESS_FOR_SUCCESS.inc()
return key
except Exception as e:
COUNTER_START_REVIEW_PROCESS_FOR_FAILED.inc()
raise e
    @classmethod
    @db.transactional(xg=True)
    def _create_review_summary(cls, reviewee_key, submission_key, unit_id):
        """Transactionally creates a ReviewSummary for a submission.

        The summary's key_name is derived from submission_key, so the
        get_by_key_name probe inside the transaction guarantees at most one
        summary per submission.

        Raises:
            domain.ReviewProcessAlreadyStartedError: if a summary already
                exists (the review process was started twice).

        Returns:
            db.Key of the newly created peer.ReviewSummary.
        """
        collision = peer.ReviewSummary.get_by_key_name(
            peer.ReviewSummary.key_name(submission_key))

        if collision:
            COUNTER_START_REVIEW_PROCESS_FOR_ALREADY_STARTED.inc()
            raise domain.ReviewProcessAlreadyStartedError()

        # put() returns the key of the saved entity.
        return peer.ReviewSummary(
            reviewee_key=reviewee_key, submission_key=submission_key,
            unit_id=unit_id,
        ).put()
@classmethod
def write_review(
cls, review_step_key, review_payload, mark_completed=True):
"""Writes a review, updating associated internal state.
If the passed step already has a review, that review will be updated. If
it does not have a review, a new one will be created with the passed
payload.
Args:
review_step_key: db.Key of peer.ReviewStep. The key of the review
step to update.
review_payload: string. New contents of the review.
mark_completed: boolean. If True, set the state of the review to
domain.REVIEW_STATE_COMPLETED. If False, leave the state as it
was.
Raises:
domain.ConstraintError: if no review found for the review step.
domain.RemovedError: if the step for the review is removed.
domain.TransitionError: if mark_completed was True but the step was
already in domain.REVIEW_STATE_COMPLETED.
KeyError: if no review step was found with review_step_key.
Returns:
db.Key of peer.ReviewStep: key of the written review step.
"""
COUNTER_WRITE_REVIEW_START.inc()
try:
step_key = cls._update_review_contents_and_change_state(
review_step_key, review_payload, mark_completed)
except Exception as e:
COUNTER_WRITE_REVIEW_FAILED.inc()
raise e
COUNTER_WRITE_REVIEW_SUCCESS.inc()
return step_key
    @classmethod
    @db.transactional(xg=True)
    def _update_review_contents_and_change_state(
            cls, review_step_key, review_payload, mark_completed):
        """Transactionally writes review contents and updates related state.

        Creates the Review entity if the step has none yet, otherwise updates
        the existing one; optionally transitions the step to
        domain.REVIEW_STATE_COMPLETED, keeping the ReviewSummary counters in
        sync with the transition.

        Args:
            review_step_key: db.Key of peer.ReviewStep. Step to update.
            review_payload: string. New contents of the review.
            mark_completed: boolean. Whether to complete the step.

        Returns:
            db.Key of the written peer.ReviewStep.
        """
        # Record which counters to bump but only increment them at the very
        # end, once all datastore work has succeeded.
        should_increment_created_new_review = False
        should_increment_updated_existing_review = False
        should_increment_assigned_to_completed = False
        should_increment_expired_to_completed = False

        step = entities.get(review_step_key)

        if not step:
            COUNTER_WRITE_REVIEW_STEP_MISS.inc()
            raise KeyError(
                'No review step found with key %s' % repr(review_step_key))
        elif step.removed:
            raise domain.RemovedError(
                'Unable to process step %s' % repr(step.key()), step.removed)
        elif mark_completed and step.state == domain.REVIEW_STATE_COMPLETED:
            raise domain.TransitionError(
                'Unable to transition step %s' % repr(step.key()),
                step.state, domain.REVIEW_STATE_COMPLETED)

        if step.review_key:
            review_to_update = entities.get(step.review_key)
            if review_to_update:
                should_increment_updated_existing_review = True
        else:
            # First write for this step: create the Review and point the step
            # at its deterministic key before anything is saved.
            review_to_update = student_work.Review(
                contents=review_payload, reviewee_key=step.reviewee_key,
                reviewer_key=step.reviewer_key, unit_id=step.unit_id)
            step.review_key = db.Key.from_path(
                student_work.Review.kind(),
                student_work.Review.key_name(
                    step.unit_id, step.reviewee_key, step.reviewer_key))
            should_increment_created_new_review = True

        if not review_to_update:
            # step.review_key was set but the entity is missing: corruption.
            COUNTER_WRITE_REVIEW_REVIEW_MISS.inc()
            raise domain.ConstraintError(
                'No review found with key %s' % repr(step.review_key))

        summary = entities.get(step.review_summary_key)

        if not summary:
            COUNTER_WRITE_REVIEW_SUMMARY_MISS.inc()
            raise domain.ConstraintError(
                'No review summary found with key %s' % repr(
                    step.review_summary_key))

        review_to_update.contents = review_payload
        updated_step_key = None
        if not mark_completed:
            # pylint: disable=unbalanced-tuple-unpacking,unpacking-non-sequence
            _, updated_step_key = entities.put([review_to_update, step])
        else:
            if step.state == domain.REVIEW_STATE_ASSIGNED:
                should_increment_assigned_to_completed = True
            elif step.state == domain.REVIEW_STATE_EXPIRED:
                should_increment_expired_to_completed = True

            # Keep the summary's per-state counters consistent with the
            # step's state transition.
            summary.decrement_count(step.state)
            step.state = domain.REVIEW_STATE_COMPLETED
            summary.increment_count(step.state)
            # pylint: disable=unbalanced-tuple-unpacking,unpacking-non-sequence
            _, updated_step_key, _ = entities.put(
                [review_to_update, step, summary])

        if should_increment_created_new_review:
            COUNTER_WRITE_REVIEW_CREATED_NEW_REVIEW.inc()
        elif should_increment_updated_existing_review:
            COUNTER_WRITE_REVIEW_UPDATED_EXISTING_REVIEW.inc()

        if should_increment_assigned_to_completed:
            COUNTER_WRITE_REVIEW_COMPLETED_ASSIGNED_STEP.inc()
        elif should_increment_expired_to_completed:
            COUNTER_WRITE_REVIEW_COMPLETED_EXPIRED_STEP.inc()

        return updated_step_key
# Module registration handle; populated by register_module() at startup.
custom_module = None


def register_module():
    """Registers this module in the registry.

    Side effects: registers the peer-review analytic, installs Manager as the
    peer matcher for the reviews processor, and wires up the cron handler for
    expiring old assigned reviews.

    Returns:
        custom_modules.Module: the module handle (also stored in the
        module-level custom_module global).
    """

    # Avert circular dependency
    from modules.review import cron

    stats.register_analytic()

    # register this peer review implementation
    models.review.ReviewsProcessor.set_peer_matcher(Manager)

    # register cron handler
    cron_handlers = [(
        '/cron/expire_old_assigned_reviews',
        cron.ExpireOldAssignedReviewsHandler)]

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Peer Review Engine',
        'A set of classes for managing peer review process.',
        cron_handlers, [])
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects and constants for use by internal and external clients."""
__author__ = [
'johncox@google.com (John Cox)',
]
# Module-level constants shared by the review subsystem and its clients.

# Identifier for reviews that have been computer-assigned.
ASSIGNER_KIND_AUTO = 'AUTO'
# Identifier for reviews that have been assigned by a human.
ASSIGNER_KIND_HUMAN = 'HUMAN'
ASSIGNER_KINDS = (
    ASSIGNER_KIND_AUTO,
    ASSIGNER_KIND_HUMAN,
)

# Maximum number of ReviewSteps with removed = False, in any REVIEW_STATE, that
# can exist in the backend at a given time. Enforced by
# peer.ReviewSummary.increment_count().
MAX_UNREMOVED_REVIEW_STEPS = 100

# State of a review that is currently assigned, either by a human or by machine.
REVIEW_STATE_ASSIGNED = 'ASSIGNED'
# State of a review that is complete and may be shown to the reviewee, provided
# the reviewee is themself in a state to see their reviews.
REVIEW_STATE_COMPLETED = 'COMPLETED'
# State of a review that used to be assigned but the assignment has been
# expired. Only machine-assigned reviews can be expired.
REVIEW_STATE_EXPIRED = 'EXPIRED'
REVIEW_STATES = (
    REVIEW_STATE_ASSIGNED,
    REVIEW_STATE_COMPLETED,
    REVIEW_STATE_EXPIRED,
)
class Error(Exception):
    """Base error class."""


class ConstraintError(Error):
    """Raised when data is found indicating a constraint is violated."""


class NotAssignableError(Error):
    """Raised when review assignment is requested but cannot be satisfied."""


class RemovedError(Error):
    """Raised when an op cannot be performed on a step because it is removed."""

    def __init__(self, message, value):
        """Constructs a new RemovedError.

        Args:
            message: string. Exception message.
            value: value of the offending step's removed property.
        """
        super(RemovedError, self).__init__(message)
        # BaseException.message is deprecated in Python 2.6+ and gone in
        # Python 3; store the message explicitly so __str__ keeps working.
        self.message = message
        self.value = value

    def __str__(self):
        return '%s: removed is %s' % (self.message, self.value)


class ReviewProcessAlreadyStartedError(Error):
    """Raised when someone attempts to start a review process in progress."""


class TransitionError(Error):
    """Raised when an invalid state transition is attempted."""

    def __init__(self, message, before, after):
        """Constructs a new TransitionError.

        Args:
            message: string. Exception message.
            before: string in peer.ReviewStates (though this is unenforced).
                State we attempted to transition from.
            after: string in peer.ReviewStates (though this is unenforced).
                State we attempted to transition to.
        """
        super(TransitionError, self).__init__(message)
        # BaseException.message is deprecated in Python 2.6+ and gone in
        # Python 3; store the message explicitly so __str__ keeps working.
        self.message = message
        self.after = after
        self.before = before

    def __str__(self):
        return '%s: attempted to transition from %s to %s' % (
            self.message, self.before, self.after)
class Review(object):
    """Domain object for a single review of a student work submission.

    Read-only DTO: both attributes are fixed at construction time.
    """

    def __init__(self, contents=None, key=None):
        # Review payload; format is whatever the caller stored.
        self._contents = contents
        # db.Key of the backing student_work.Review entity (may be None).
        self._key = key

    @property
    def contents(self):
        return self._contents

    @property
    def key(self):
        return self._key
class ReviewStep(object):
    """Domain object for the status of a single review at a point in time.

    Read-only DTO mirroring peer.ReviewStep; all attributes are fixed at
    construction time.
    """

    def __init__(
        self, assigner_kind=None, change_date=None, create_date=None, key=None,
        removed=None, review_key=None, review_summary_key=None,
        reviewee_key=None, reviewer_key=None, state=None, submission_key=None,
        unit_id=None):
        self._assigner_kind = assigner_kind
        self._change_date = change_date
        self._create_date = create_date
        self._key = key
        self._removed = removed
        self._review_key = review_key
        self._review_summary_key = review_summary_key
        self._reviewee_key = reviewee_key
        self._reviewer_key = reviewer_key
        self._state = state
        self._submission_key = submission_key
        self._unit_id = unit_id

    @property
    def assigner_kind(self):
        # One of ASSIGNER_KINDS (AUTO or HUMAN).
        return self._assigner_kind

    @property
    def change_date(self):
        return self._change_date

    @property
    def create_date(self):
        return self._create_date

    @property
    def is_assigned(self):
        """Predicate for whether the step is in REVIEW_STATE_ASSIGNED."""
        return self.state == REVIEW_STATE_ASSIGNED

    @property
    def is_completed(self):
        """Predicate for whether the step is in REVIEW_STATE_COMPLETED."""
        return self.state == REVIEW_STATE_COMPLETED

    @property
    def is_expired(self):
        """Predicate for whether the step is in REVIEW_STATE_EXPIRED."""
        return self.state == REVIEW_STATE_EXPIRED

    @property
    def key(self):
        return self._key

    @property
    def removed(self):
        return self._removed

    @property
    def review_key(self):
        return self._review_key

    @property
    def review_summary_key(self):
        return self._review_summary_key

    @property
    def reviewee_key(self):
        return self._reviewee_key

    @property
    def reviewer_key(self):
        return self._reviewer_key

    @property
    def state(self):
        # One of REVIEW_STATES.
        return self._state

    @property
    def submission_key(self):
        return self._submission_key

    @property
    def unit_id(self):
        return self._unit_id
class ReviewSummary(object):
    """Domain object for review state aggregate entities.

    Read-only DTO. NOTE(review): unlike the peer.ReviewSummary entity, this
    DTO carries no expired_count -- confirm whether that omission is
    deliberate before relying on it.
    """

    def __init__(
        self, assigned_count=None, completed_count=None, change_date=None,
        create_date=None, key=None, reviewee_key=None, submission_key=None,
        unit_id=None):
        self._assigned_count = assigned_count
        self._completed_count = completed_count
        self._change_date = change_date
        self._create_date = create_date
        self._key = key
        self._reviewee_key = reviewee_key
        self._submission_key = submission_key
        self._unit_id = unit_id

    @property
    def assigned_count(self):
        return self._assigned_count

    @property
    def completed_count(self):
        return self._completed_count

    @property
    def change_date(self):
        return self._change_date

    @property
    def create_date(self):
        return self._create_date

    @property
    def key(self):
        return self._key

    @property
    def reviewee_key(self):
        return self._reviewee_key

    @property
    def submission_key(self):
        return self._submission_key

    @property
    def unit_id(self):
        return self._unit_id
class Submission(object):
    """Domain object for a student work submission.

    Read-only DTO: both attributes are fixed at construction time.
    """

    def __init__(self, contents=None, key=None):
        # Submission payload; format is whatever the caller stored.
        self._contents = contents
        # db.Key of the backing student_work.Submission entity (may be None).
        self._key = key

    @property
    def contents(self):
        return self._contents

    @property
    def key(self):
        return self._key
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal implementation details of the peer review subsystem.
Public classes, including domain objects, can be found in domain.py and
models/student_work.py. Entities declared here should not be used by external
clients.
"""
__author__ = [
'johncox@google.com (John Cox)',
]
from models import counters
from models import models
from models import student_work
from modules.review import domain
from google.appengine.ext import db
# Counts increment_count() rejections that would have pushed the aggregate of
# the per-state counts past domain.MAX_UNREMOVED_REVIEW_STEPS.
COUNTER_INCREMENT_COUNT_COUNT_AGGREGATE_EXCEEDED_MAX = counters.PerfCounter(
    'gcb-pr-increment-count-count-aggregate-exceeded-max',
    ('number of times increment_count() failed because the new aggregate of '
     'the counts would have exceeded domain.MAX_UNREMOVED_REVIEW_STEPS'))
class ReviewSummary(student_work.BaseEntity):
    """Object that tracks the aggregate state of reviews for a submission."""

    # UTC last modification timestamp.
    change_date = db.DateTimeProperty(auto_now=True, required=True)
    # UTC create date.
    create_date = db.DateTimeProperty(auto_now_add=True, required=True)

    # Strong counters. Callers should never manipulate these directly. Instead,
    # use decrement|increment_count.

    # Count of ReviewStep entities for this submission currently in state
    # STATE_ASSIGNED.
    assigned_count = db.IntegerProperty(default=0, required=True)
    # Count of ReviewStep entities for this submission currently in state
    # STATE_COMPLETED.
    completed_count = db.IntegerProperty(default=0, required=True)
    # Count of ReviewStep entities for this submission currently in state
    # STATE_EXPIRED.
    expired_count = db.IntegerProperty(default=0, required=True)

    # Key of the student who wrote the submission being reviewed.
    reviewee_key = student_work.KeyProperty(
        kind=models.Student.kind(), required=True)
    # Key of the submission being reviewed.
    submission_key = student_work.KeyProperty(
        kind=student_work.Submission.kind(), required=True)
    # Identifier of the unit this review is a part of.
    unit_id = db.StringProperty(required=True)

    def __init__(self, *args, **kwargs):
        """Constructs a new ReviewSummary.

        The key_name is derived from the required submission_key, so there is
        at most one summary per submission; callers may not set key_name.
        """
        assert not kwargs.get('key_name'), (
            'Setting key_name manually not supported')
        submission_key = kwargs.get('submission_key')
        assert submission_key, 'Missing required submission_key property'
        kwargs['key_name'] = self.key_name(submission_key)
        super(ReviewSummary, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, submission_key):
        """Creates a key_name string for datastore operations."""
        return '(review_summary:%s)' % submission_key.id_or_name()

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Rebuilds the key with student-identifying parts transformed.

        Used for data export so exported keys cannot be traced back to a
        student.
        """
        _, _, unit_id, unsafe_reviewee_key_name = cls._split_key(db_key.name())
        unsafe_reviewee_key = db.Key.from_path(
            models.Student.kind(), unsafe_reviewee_key_name)
        unsafe_submission_key = student_work.Submission.get_key(
            unit_id, unsafe_reviewee_key)
        safe_submission_key = student_work.Submission.safe_key(
            unsafe_submission_key, transform_fn)
        return db.Key.from_path(cls.kind(), cls.key_name(safe_submission_key))

    def _check_count(self):
        # Reject an increment once the aggregate has reached the cap; raising
        # db.BadValueError aborts the enclosing datastore transaction.
        count_sum = (
            self.assigned_count + self.completed_count + self.expired_count)
        if count_sum >= domain.MAX_UNREMOVED_REVIEW_STEPS:
            COUNTER_INCREMENT_COUNT_COUNT_AGGREGATE_EXCEEDED_MAX.inc()
            raise db.BadValueError(
                'Unable to increment %s to %s; max is %s' % (
                    self.kind(), count_sum, domain.MAX_UNREMOVED_REVIEW_STEPS))

    def decrement_count(self, state):
        """Decrements the count for the given state enum; does not save.

        NOTE(review): no lower-bound check -- a buggy caller could drive a
        counter negative; callers are expected to guard state transitions.

        Args:
            state: string. State indicating counter to decrement; must be one
                of domain.REVIEW_STATES.

        Raises:
            ValueError: if state not in domain.REVIEW_STATES.
        """
        if state == domain.REVIEW_STATE_ASSIGNED:
            self.assigned_count -= 1
        elif state == domain.REVIEW_STATE_COMPLETED:
            self.completed_count -= 1
        elif state == domain.REVIEW_STATE_EXPIRED:
            self.expired_count -= 1
        else:
            raise ValueError('%s not in %s' % (state, domain.REVIEW_STATES))

    def increment_count(self, state):
        """Increments the count for the given state enum; does not save.

        Args:
            state: string. State indicating counter to increment; must be one
                of domain.REVIEW_STATES.

        Raises:
            db.BadValueError: if incrementing the counter would cause the sum
                of all *_counts to exceed domain.MAX_UNREMOVED_REVIEW_STEPS.
            ValueError: if state not in domain.REVIEW_STATES
        """
        if state not in domain.REVIEW_STATES:
            raise ValueError('%s not in %s' % (state, domain.REVIEW_STATES))

        self._check_count()

        if state == domain.REVIEW_STATE_ASSIGNED:
            self.assigned_count += 1
        elif state == domain.REVIEW_STATE_COMPLETED:
            self.completed_count += 1
        elif state == domain.REVIEW_STATE_EXPIRED:
            self.expired_count += 1

    def for_export(self, transform_fn):
        """Returns a copy with student-identifying keys blinded for export."""
        model = super(ReviewSummary, self).for_export(transform_fn)
        model.reviewee_key = models.Student.safe_key(
            model.reviewee_key, transform_fn)
        model.submission_key = student_work.Submission.safe_key(
            model.submission_key, transform_fn)
        return model
class ReviewStep(student_work.BaseEntity):
    """Object that represents a single state of a review."""

    # Audit trail information.

    # Identifier for the kind of thing that did the assignment. Used to
    # distinguish between assignments done by humans and those done by the
    # review subsystem.
    assigner_kind = db.StringProperty(
        choices=domain.ASSIGNER_KINDS, required=True)
    # UTC last modification timestamp.
    change_date = db.DateTimeProperty(auto_now=True, required=True)
    # UTC create date.
    create_date = db.DateTimeProperty(auto_now_add=True, required=True)

    # Repeated data to allow filtering/ordering in queries.

    # Key of the submission being reviewed.
    submission_key = student_work.KeyProperty(
        kind=student_work.Submission.kind(), required=True)
    # Unit this review step is part of.
    unit_id = db.StringProperty(required=True)

    # State information.

    # State of this review step.
    state = db.StringProperty(choices=domain.REVIEW_STATES, required=True)
    # Whether or not the review has been removed. By default removed entities
    # are ignored for most queries.
    removed = db.BooleanProperty(default=False)

    # Pointers that tie the work and people involved together.

    # Key of the Review associated with this step.
    review_key = student_work.KeyProperty(kind=student_work.Review.kind())
    # Key of the associated ReviewSummary.
    review_summary_key = student_work.KeyProperty(kind=ReviewSummary.kind())
    # Key of the Student being reviewed.
    reviewee_key = student_work.KeyProperty(kind=models.Student.kind())
    # Key of the Student doing this review.
    reviewer_key = student_work.KeyProperty(kind=models.Student.kind())

    def __init__(self, *args, **kwargs):
        """Constructs a new ReviewStep.

        The key_name is derived from the required submission_key and
        reviewer_key, so a reviewer has at most one step per submission;
        callers may not set key_name.
        """
        assert not kwargs.get('key_name'), (
            'Setting key_name manually not supported')
        reviewer_key = kwargs.get('reviewer_key')
        submission_key = kwargs.get('submission_key')
        assert reviewer_key, 'Missing required reviewer_key property'
        assert submission_key, 'Missing required submission_key property'
        kwargs['key_name'] = self.key_name(submission_key, reviewer_key)
        super(ReviewStep, self).__init__(*args, **kwargs)

    @classmethod
    def key_name(cls, submission_key, reviewer_key):
        """Creates a key_name string for datastore operations."""
        return '(review_step:%s:%s)' % (
            submission_key.id_or_name(), reviewer_key.id_or_name())

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Constructs a version of the entity's key that is safe for export."""
        # Validate the key_name shape before hand-parsing it below.
        cls._split_key(db_key.name())
        name = db_key.name().strip('()')
        # key_name layout: (review_step:<submission_key_name>:<reviewer_id>).
        unsafe_submission_key_name, unsafe_reviewer_id_or_name = name.split(
            ':', 1)[1].rsplit(':', 1)
        unsafe_reviewer_key = db.Key.from_path(
            models.Student.kind(), unsafe_reviewer_id_or_name)
        safe_reviewer_key = models.Student.safe_key(
            unsafe_reviewer_key, transform_fn)

        # Treating as module-protected. pylint: disable=protected-access
        _, unit_id, unsafe_reviewee_key_name = (
            student_work.Submission._split_key(unsafe_submission_key_name))
        unsafe_reviewee_key = db.Key.from_path(
            models.Student.kind(), unsafe_reviewee_key_name)
        unsafe_submission_key = student_work.Submission.get_key(
            unit_id, unsafe_reviewee_key)
        safe_submission_key = student_work.Submission.safe_key(
            unsafe_submission_key, transform_fn)

        return db.Key.from_path(
            cls.kind(), cls.key_name(safe_submission_key, safe_reviewer_key))

    def for_export(self, transform_fn):
        """Creates a version of the entity that is safe for export."""
        model = super(ReviewStep, self).for_export(transform_fn)
        model.review_key = student_work.Review.safe_key(
            model.review_key, transform_fn)
        model.review_summary_key = ReviewSummary.safe_key(
            model.review_summary_key, transform_fn)
        model.reviewee_key = models.Student.safe_key(
            model.reviewee_key, transform_fn)
        model.reviewer_key = models.Student.safe_key(
            model.reviewer_key, transform_fn)
        model.submission_key = student_work.Submission.safe_key(
            model.submission_key, transform_fn)
        return model
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""External task balancer.
Overall architecture is:
1. Users interact with clients.
2. Clients make requests against the frontend's REST API.
3. The FE makes a REST call against a worker or worker pool identified by
gcb_external_task_balancer_worker_url. The FE provisions a unique token,
generates a Task instance, and dispatches a REST request to the worker or
worker pool.
4. The worker or worker pool exposes a REST API for use by the FE. Worker
responses contain the name of the worker so the FE can poll a specific worker
for results using the (ticket, name) combination. Workers are in charge both
of doing work and of cleaning up their results. Clients do not talk to
workers directly.
To enable, set up a pool of workers behind a single URL. For example, this might
be a set of machines behind a balancer on GCE or an AWS ELB. Next, set
gcb_external_task_balancer_rest_enabled to True and set
gcb_external_task_balancer_worker_url to the URL of your worker pool. Secure
communication if desired, and write a client against the REST API this module
exposes.
This implementation has the following big limitations:
1. It is insecure. Currently there is no token exchange/validation at the API
level, so anyone who gets a ticket (for example, by listening to HTTP
traffic between clients and the FE) can issue API calls.
2. There is no XSSI/XSRF protection. Note that exposed endpoints will 404 by
default because gcb_external_task_balancer_rest_enabled is False, so the
behavior without overrides does *not* expose unprotected REST endpoints.
3. Old task items hang around forever. Could implement garbage collection cron
to remove them past a TTL.
4. The REST api is missing ability to mark a single task for deletion and to
fetch a paginated list of results (without their payloads) for a given
user_id. Open issue: we do not expose the notion of a project in the REST
API, but we have it in the workers. Should we expose it to allow filtering at
the API level?
5. Add support for one balancer handling multiple pools of workers, not just
one.
6. Manager.mark* methods don't all check that the requested status transition is
valid. This means buggy handlers/workers/clients could cause invalid status
transitions. Fix is to have the Manager throw TransitionError in those cases
and modify the handlers to 400/500.
TODO(johncox): add URL of sample worker implementation once it's finished.
"""
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
import urllib
from controllers import utils
from models import config
from models import custom_modules
from models import entities
from models import transforms
from google.appengine.api import urlfetch
from google.appengine.ext import db
# Headers attached to responses that must never be cached by clients/proxies.
_DISABLE_CACHING_HEADERS = {
    'Cache-Control': 'max-age=0, must-revalidate',
    'Pragma': 'no-cache',
}
# Wire-format field names shared by the REST handlers.
_PAYLOAD = 'payload'
_TICKET = 'ticket'
_PROJECT_NAME = 'project'
_REST_URL_BASE = '/rest/balancer/v1'
_REST_URL_PROJECT = _REST_URL_BASE + '/project'
_REST_URL_TASK = _REST_URL_BASE
_STATUS = 'status'
_USER_ID = 'user_id'
# Deadline for urlfetch calls against the worker pool.
_WORKER_DEADLINE_SECONDS = 5
_WORKER_ID = 'worker_id'
_WORKER_LOCKED = 'Worker locked'
_WORKER_LOCKED_MAX_RETRIES = 3

_LOG = logging.getLogger('modules.balancer.balancer')
# NOTE(review): basicConfig() at import time is a module-level side effect;
# it configures the root logger for the whole process.
logging.basicConfig()

EXTERNAL_TASK_BALANCER_REST_ENABLED = config.ConfigProperty(
    'gcb_external_task_balancer_rest_enabled', bool,
    ('Whether or not to enable the REST endpoints for the external task '
     'balancer module. You must also set gcb_external_task_balancer_worker_url '
     'to use this feature.'), default_value=False)
EXTERNAL_TASK_BALANCER_WORKER_URL = config.ConfigProperty(
    'gcb_external_task_balancer_worker_url', str,
    'URL for the worker pool used by the external task balancer module.',
    default_value='')
class Error(Exception):
    """Base error class."""


class NotFoundError(Error):
    """Raised when an op that needs an entity is run with a missing entity."""


class TransitionError(Error):
    """Raised when an op attempts an invalid transition on a task.

    Both module errors derive from Error so callers can catch the whole
    family with a single `except Error` (previously they subclassed
    Exception directly, defeating the declared base class).
    """
def _from_json(json_str):
    """Turns json -> object (or None if json cannot be parsed)."""
    result = None
    try:
        result = transforms.loads(json_str)
    except:  # Deliberately catching everything. pylint: disable=bare-except
        pass
    return result
class Manager(object):
    """DAO for external tasks."""

    # Treating access as module-protected. pylint: disable=protected-access

    @classmethod
    def create(cls, user_id=None):
        """Creates task and returns ticket string.

        The ticket is the string form of the new entity's db.Key.
        """
        task = _ExternalTask(status=_ExternalTask.CREATED, user_id=user_id)
        return _ExternalTask.get_ticket_by_key(db.put(task))

    @classmethod
    def get(cls, ticket):
        """Gets task for ticket (or None if no matching task)."""
        external_task = db.get(_ExternalTask.get_key_by_ticket(ticket))
        if not external_task:
            return None

        return Task._from_external_task(external_task)

    @classmethod
    def list(cls, user_id):
        """Returns list of Task matching user_id, ordered by create date.

        NOTE(review): sorts in memory and is capped at 1000 results; tasks
        beyond the cap are silently dropped.
        """
        return [Task._from_external_task(et) for et in sorted(
            _ExternalTask.all().filter(
                '%s =' % _ExternalTask.user_id.name, user_id
            ).fetch(1000), key=lambda task: task.create_date)]

    @classmethod
    @db.transactional
    def mark_deleted(cls, ticket):
        """Transactionally sets status to DELETED; raises NotFoundError."""
        task = cls._get_or_raise_not_found_error(ticket)
        task.status = _ExternalTask.DELETED
        db.put(task)

    @classmethod
    @db.transactional
    def mark_done(cls, ticket, status, result):
        """Transactionally stores result and a terminal status.

        Raises:
            TransitionError: if status is not terminal.
            NotFoundError: if no task exists for ticket.
        """
        if status not in _ExternalTask._TERMINAL_STATUSES:
            raise TransitionError(
                'mark_done called with non-terminal status ' + status)

        task = cls._get_or_raise_not_found_error(ticket)
        task.result = result
        task.status = status
        db.put(task)

    @classmethod
    @db.transactional
    def mark_failed(cls, ticket):
        """Transactionally sets status to FAILED; raises NotFoundError."""
        task = cls._get_or_raise_not_found_error(ticket)
        task.status = _ExternalTask.FAILED
        db.put(task)

    @classmethod
    @db.transactional
    def mark_running(cls, ticket, worker_id):
        """Transactionally sets status to RUNNING and records the worker."""
        task = cls._get_or_raise_not_found_error(ticket)
        task.status = _ExternalTask.RUNNING
        task.worker_id = worker_id
        db.put(task)

    @classmethod
    def _delete(cls, ticket):
        # Physical delete; mark_deleted() is the soft-delete path.
        key = _ExternalTask.get_key_by_ticket(ticket)
        db.delete(key)

    @classmethod
    def _get_or_raise_not_found_error(cls, ticket):
        # Shared fetch helper for the transactional mark_* methods.
        key = _ExternalTask.get_key_by_ticket(ticket)
        task = db.get(key)

        if not task:
            raise NotFoundError

        return task
class Task(object):
    """DTO for external tasks."""

    def __init__(
            self, change_date, create_date, result, status, ticket, user_id,
            worker_id):
        self.change_date = change_date
        self.create_date = create_date
        self.result = result
        self.status = status
        self.ticket = ticket
        self.user_id = user_id
        self.worker_id = worker_id

    @classmethod
    def _from_external_task(cls, external_task):
        """Builds a Task DTO from an _ExternalTask entity."""
        return cls(
            external_task.change_date, external_task.create_date,
            external_task.result, external_task.status,
            external_task.get_ticket(), external_task.user_id,
            external_task.worker_id)

    def is_done(self):
        """True iff the task is in a terminal status."""
        return _ExternalTask.is_status_terminal(self.status)

    def for_json(self):
        """Returns a JSON-serializable dict representation of the task."""
        return {
            'change_date': self.change_date.strftime(
                transforms.ISO_8601_DATETIME_FORMAT),
            'create_date': self.create_date.strftime(
                transforms.ISO_8601_DATETIME_FORMAT),
            'result': self.result,
            'status': self.status,
            'ticket': self.ticket,
            'user_id': self.user_id,
            'worker_id': self.worker_id,
        }

    def __eq__(self, other):
        return (
            isinstance(other, Task) and
            self.change_date == other.change_date and
            self.create_date == other.create_date and
            self.result == other.result and
            self.status == other.status and
            self.ticket == other.ticket and
            self.user_id == other.user_id and
            self.worker_id == other.worker_id)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        # Bug fix: this used self.to_dict(), a method that does not exist on
        # Task, so str(task) raised AttributeError. Build the mapping from
        # attributes directly (for_json() is unsuitable here because it
        # requires datetime-valued dates).
        return (
            'Task - change_date: %(change_date)s, '
            'create_date: %(create_date)s, result: %(result)s, '
            'status: %(status)s, ticket: %(ticket)s, user_id: %(user_id)s, '
            'worker_id: %(worker_id)s' % {
                'change_date': self.change_date,
                'create_date': self.create_date,
                'result': self.result,
                'status': self.status,
                'ticket': self.ticket,
                'user_id': self.user_id,
                'worker_id': self.worker_id,
            })
class _ExternalTask(entities.BaseEntity):
    """Storage for external tasks."""

    # States a task may be in.
    COMPLETE = 'complete'  # Done running and in known success state.
    CREATED = 'created'  # Datastore entity created, but task not yet running.
    DELETED = 'deleted'  # Marked for deletion; could be deleted later.
    FAILED = 'failed'  # Done running and in known failure state.
    RUNNING = 'running'  # Currently running on a worker.
    _PENDING_STATUSES = frozenset([
        CREATED,
        RUNNING,
    ])
    _TERMINAL_STATUSES = frozenset([
        COMPLETE,
        DELETED,
        FAILED,
    ])
    STATUSES = _PENDING_STATUSES.union(_TERMINAL_STATUSES)

    # When the task was last edited.
    change_date = db.DateTimeProperty(required=True, auto_now=True)
    # When the task was created.
    create_date = db.DateTimeProperty(required=True, auto_now_add=True)
    # Output of the task in JSON.
    result = db.TextProperty()
    # Last observed status of the task. Can be inaccurate: for example, if a
    # user creates a new task but navigates away before the task completes and
    # their client never fetches the task when it's done, we'll still show it
    # running.
    status = db.StringProperty(required=True, choices=STATUSES)
    # Optional identifier for the user who owns the task. We impose no
    # restrictions beyond the identifier being a string <= 500B, per datastore.
    user_id = db.StringProperty()
    # Identifier for the worker.
    worker_id = db.StringProperty()

    @classmethod
    def get_key_by_ticket(cls, ticket_str):
        """Decodes a ticket string into a datastore key.

        Raises:
            ValueError: if the ticket cannot be decoded into a key.
        """
        try:
            return db.Key(encoded=ticket_str)
        # Fix: was a bare "except:", which would also swallow system-exiting
        # exceptions (KeyboardInterrupt, SystemExit); Exception is narrower
        # and still covers db's key-decoding errors.
        except Exception:
            raise ValueError(
                'Cannot make _ExternalTask key from ticket value: %s' % (
                    ticket_str))

    @classmethod
    def get_ticket_by_key(cls, key):
        """Encodes a datastore key as an opaque ticket string."""
        return str(key)

    @classmethod
    def is_status_terminal(cls, status):
        """True iff status is one of COMPLETE, DELETED, FAILED."""
        return status in cls._TERMINAL_STATUSES

    def get_ticket(self):
        """Returns string identifier for the task; raises NotSavedError."""
        return self.get_ticket_by_key(self.key())
class _Operation(object):
"""Base class for wire operation payloads."""
@classmethod
def from_str(cls, raw_str):
return cls._from_json(transforms.loads(raw_str))
@classmethod
def _from_json(cls, parsed):
# Parse and validate raw input, raising ValueError if necessary.
raise NotImplementedError
def ready(self):
"""True iff the operation has all data it needs to be issued."""
raise NotImplementedError
def to_json(self):
return transforms.dumps(self._to_dict())
def to_url(self):
return urllib.quote_plus(self.to_json())
def update(self, updates_dict):
for k, v in updates_dict.iteritems():
if not hasattr(self, k):
raise ValueError('Cannot set name ' + k)
setattr(self, k, v)
def _to_dict(self):
raise NotImplementedError
class _CreateTaskOperation(_Operation):
    """Operation for creating a task on a worker."""

    def __init__(self, payload, ticket, user_id):
        self.payload = payload
        self.ticket = ticket
        self.user_id = user_id

    @classmethod
    def _from_json(cls, parsed):
        # The ticket is allocated later, at datastore level; start with None.
        return cls(parsed, None, parsed.get(_USER_ID))

    def ready(self):
        # Issuable once both a payload and an allocated ticket are present.
        return not (self.payload is None or self.ticket is None)

    def _to_dict(self):
        return {
            _TICKET: self.ticket,
            _USER_ID: self.user_id,
            _PAYLOAD: self.payload,
        }
class _GetProjectOperation(_Operation):
    """Operation for fetching project metadata from the worker pool."""

    def __init__(self, payload):
        self.payload = payload

    @classmethod
    def _from_json(cls, parsed):
        # No fields beyond the raw payload are required.
        return cls(parsed)

    def ready(self):
        # Issuable as soon as a payload is present.
        return not (self.payload is None)

    def _to_dict(self):
        return {_PAYLOAD: self.payload}
class _GetTaskOperation(_Operation):
    """Operation for polling a specific worker for task state."""

    def __init__(self, payload, ticket, worker_id):
        self.payload = payload
        self.ticket = ticket
        self.worker_id = worker_id

    @classmethod
    def _from_json(cls, parsed):
        # A ticket is mandatory on the wire; the worker_id may arrive later.
        ticket = parsed.get(_TICKET)
        if not ticket:
            raise ValueError('%s not set' % _TICKET)
        return cls(parsed, ticket, parsed.get(_WORKER_ID))

    def ready(self):
        required = (self.payload, self.ticket, self.worker_id)
        return all(value is not None for value in required)

    def _to_dict(self):
        return {
            _PAYLOAD: self.payload,
            _TICKET: self.ticket,
            _WORKER_ID: self.worker_id,
        }
class _WorkerPool(object):
"""Interface for the pool of machines that do background work."""
@classmethod
def _check_response(cls, response):
return response.has_key(_PAYLOAD)
@classmethod
def _do_fetch(cls, url, method, operation):
try:
response = urlfetch.fetch(
cls._get_url(url, method, operation),
deadline=_WORKER_DEADLINE_SECONDS,
headers=_DISABLE_CACHING_HEADERS, method=method,
payload=cls._get_request_body(method, operation))
return (
response.status_code, cls._transform_response(response))
except urlfetch.DownloadError as e: # 4xx, 5xx, timeouts.
_LOG.error('Unable to dispatch request to pool; error: %s', e)
return 500, {_PAYLOAD: 'Unable to dispatch request'}
@classmethod
def _get_base_url(cls, worker_id=None):
base = (
worker_id if worker_id is not None else
EXTERNAL_TASK_BALANCER_WORKER_URL.value)
return base + '/rest/v1'
@classmethod
def _get_create_task_url(cls):
return cls._get_base_url()
@classmethod
def _get_get_project_url(cls):
return cls._get_base_url() + '/project'
@classmethod
def _get_get_task_url(cls, worker_id):
return cls._get_base_url(worker_id=worker_id)
@classmethod
def _get_request_body(cls, method, operation):
if method == 'GET':
return None
return operation.to_json()
@classmethod
def _get_url(cls, url, method, operation):
if method == 'GET':
return '%s?request=%s' % (url, operation.to_url())
return url
@classmethod
def _transform_response(cls, response):
"""Transforms worker success/error responses into a standard format."""
try:
parsed = transforms.loads(response.content)
if not cls._check_response(parsed):
raise ValueError
return {_PAYLOAD: parsed[_PAYLOAD]}
except: # Catch everything on purpose. pylint: disable=bare-except
_LOG.error(
'Unable to parse worker response: ' + response.content)
return {_PAYLOAD: 'Received invalid response'}
@classmethod
def create_task(cls, operation):
return cls._do_fetch(cls._get_create_task_url(), 'POST', operation)
@classmethod
def get_project(cls, operation):
return cls._do_fetch(cls._get_get_project_url(), 'GET', operation)
@classmethod
def get_task(cls, operation):
return cls._do_fetch(
cls._get_get_task_url(operation.worker_id), 'GET', operation)
class _BaseRestHandler(utils.BaseRESTHandler):
    """Shared plumbing for the balancer's REST endpoints."""

    def _send_json_response(self, code, response):
        """Writes response as JSON with anti-sniffing and CORS headers."""
        headers = self.response.headers
        headers['Content-Disposition'] = 'attachment'
        headers['Content-Type'] = 'application/javascript; charset=utf-8'
        headers['X-Content-Type-Options'] = 'nosniff'
        headers['Access-Control-Allow-Origin'] = '*'
        self.response.status_code = code
        self.response.write(transforms.dumps(response))

    def _check_config_or_send_error(self):
        """Returns True iff configured; otherwise sends an error and False."""
        if not EXTERNAL_TASK_BALANCER_REST_ENABLED.value:
            self._send_json_response(404, 'Not found.')
            return False
        if not EXTERNAL_TASK_BALANCER_WORKER_URL.value:
            self._send_json_response(500, 'No worker pool found.')
            return False
        return True
class _ProjectRestHandler(_BaseRestHandler):
    """REST handler exposing project metadata from the worker pool."""

    def get(self):
        if not self._check_config_or_send_error():
            return
        raw_request = self.request.get('request')
        try:
            operation = _GetProjectOperation.from_str(raw_request)
        except ValueError:
            self._send_json_response(400, 'Bad request')
            return
        code, response = _WorkerPool.get_project(operation)
        self._send_json_response(code, response)
class _TaskRestHandler(_BaseRestHandler):
    """REST handler that creates tasks on workers and polls their status."""

    def _get_payload(self, response):
        # Worker responses are an envelope of the form {_PAYLOAD: <data>}.
        return response.get(_PAYLOAD)

    def _get_status(self, response):
        # Status the worker reports inside the payload envelope.
        return self._get_payload(response).get(_STATUS)

    def _get_task_payload(self, response):
        # The task's own output is nested one level inside the envelope.
        return response.get(_PAYLOAD).get(_PAYLOAD)

    def _get_ticket(self, response):
        return self._get_payload(response).get(_TICKET)

    def _get_worker_id(self, response):
        return self._get_payload(response).get(_WORKER_ID)

    def _retry_create_task(self, response, op):
        # Re-issues create_task until the worker unlocks or retries run out.
        # NOTE(review): the 'response' parameter is never read, and if
        # _WORKER_LOCKED_MAX_RETRIES were 0 the 'code' in the final return
        # would be unbound -- confirm the constant is always >= 1.
        tries = 0
        while tries < _WORKER_LOCKED_MAX_RETRIES:
            tries += 1
            _LOG.info('Worker locked; retrying (tries: %s)', tries)
            code, response = _WorkerPool.create_task(op)
            if not self._worker_locked(response):
                return code, response
        return code, {_PAYLOAD: _WORKER_LOCKED}

    def _worker_locked(self, response):
        # Workers signal contention by echoing _WORKER_LOCKED as the payload.
        return response.get(_PAYLOAD) == _WORKER_LOCKED

    def get(self):
        """Returns the state of the task identified by the request's ticket.

        Terminal results are persisted via Manager.mark_done so later polls
        are served from the datastore without contacting the worker.
        """
        configured = self._check_config_or_send_error()
        if not configured:
            return
        try:
            op = _GetTaskOperation.from_str(self.request.get('request'))
        except:  # pylint: disable=bare-except
            self._send_json_response(400, 'Bad request')
            return
        task = None
        try:
            task = Manager.get(op.ticket)
        except ValueError:
            pass  # Invalid ticket; handle as 404.
        if not task:
            self._send_json_response(
                404, 'Task not found for ticket %s' % op.ticket)
            return
        if task.is_done():
            # Terminal state already recorded; no worker round-trip needed.
            self._send_json_response(200, task.for_json())
            return
        op.update({_WORKER_ID: task.worker_id})
        if not op.ready():
            # If the operation cannot be issued now, the most likely cause is
            # that a past response from a worker contained insufficient data
            # to dispatch requests to that worker (for example, it might not
            # have set the worker_id). We cannot recover; all we can do is
            # signal likely programmer error.
            self._send_json_response(
                500, 'Unable to compose request for worker')
            return
        code, response = _WorkerPool.get_task(op)
        if code != 200:
            self._send_json_response(code, response)
            return
        status = self._get_status(response)
        if status is None:
            self._send_json_response(500, 'Worker sent partial response')
            return
        elif _ExternalTask.is_status_terminal(status):
            try:
                payload = self._get_task_payload(response)
                Manager.mark_done(op.ticket, status, payload)
            except:  # Catch everything. pylint: disable=bare-except
                # TODO(johncox): could differentiate here and transition to a
                # failed state when the payload is too big so we don't force
                # unnecessary refetches against workers.
                self._send_json_response(
                    500, 'Invalid worker status or payload too big')
                return
        # NOTE(review): this issues a second fetch against the worker rather
        # than reusing the response obtained above -- presumably to return
        # the freshest state; confirm the double fetch is intentional.
        self._send_json_response(*_WorkerPool.get_task(op))

    def post(self):
        """Creates a task: allocates a ticket, then dispatches to a worker."""
        configured = self._check_config_or_send_error()
        if not configured:
            return
        try:
            op = _CreateTaskOperation.from_str(self.request.get('request'))
        except:  # pylint: disable=bare-except
            self._send_json_response(400, 'Bad request')
            return
        # Must allocate ticket at storage level for wire ops against worker, so
        # we cannot create the task in one datastore call.
        ticket = Manager.create(user_id=op.user_id)
        op.update({_TICKET: ticket})
        if not op.ready():
            self._send_json_response(
                500, 'Unable to compose request for worker')
            return
        code, response = _WorkerPool.create_task(op)
        if self._worker_locked(response):
            code, response = self._retry_create_task(response, op)
            if code != 200:
                # Could not hand the task to any worker; record the failure.
                Manager.mark_failed(ticket)
                self._send_json_response(500, self._get_payload(response))
                return
        request_failed = code != 200
        # The worker must echo back the ticket we allocated for the task.
        ticket_mismatch = self._get_ticket(response) != ticket
        if request_failed or ticket_mismatch:
            response = 'Ticket mismatch' if ticket_mismatch else 'Worker failed'
            Manager.mark_failed(ticket)
            self._send_json_response(500, response)
        else:  # Worker response indicates success.
            Manager.mark_running(ticket, self._get_worker_id(response))
            self._send_json_response(code, response)
# Module registration object; populated by register_module() at startup.
custom_module = None
def register_module():
    """Registers this module's global URL handlers with CourseBuilder."""
    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'External Task Balancer', 'External Task Balancer',
        [
            (_REST_URL_TASK, _TaskRestHandler),
            (_REST_URL_PROJECT, _ProjectRestHandler),
        ],
        [])
    return custom_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes providing REST data sources for common CourseBuilder items."""
__author__ = 'Mike Gainer (mgainer@google.com)'
from common import schema_fields
from common import utils
from models import courses
from models import data_sources
from models import entity_transforms
from models import jobs
from models import models
from models import transforms
from tools import verify
class AssessmentsDataSource(data_sources.AbstractSmallRestDataSource):
    """REST data source exposing a course's assessments."""

    @classmethod
    def get_name(cls):
        return 'assessments'

    @classmethod
    def get_title(cls):
        return 'Assessments'

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        """Builds the JSON schema describing one assessment record."""
        reg = schema_fields.FieldRegistry(
            'Analytics',
            description='Sets of questions determining student skill')
        field_specs = [
            ('unit_id', 'Unit ID', 'string',
             'Key uniquely identifying this particular assessment'),
            ('title', 'Title', 'string',
             'Human-readable title describing the assessment'),
            ('weight', 'Weight', 'number',
             'Scalar indicating how the results of this assessment '
             'are to be weighted versus the results of peer assessments.'),
            ('html_check_answers', 'Check Answers', 'boolean',
             'Whether students may check their answers before '
             'submitting the assessment.'),
            ('props', 'Properties', 'string',
             'JSON string containing key/value additional '
             'properties, not further defined.'),
        ]
        for name, label, field_type, description in field_specs:
            reg.add_property(schema_fields.SchemaField(
                name, label, field_type, description=description))
        return reg.get_json_schema_dict()['properties']

    @classmethod
    def fetch_values(cls, app_context, *args, **kwargs):
        """Returns (list of assessment dicts, page number 0)."""
        course = courses.Course(handler=None, app_context=app_context)
        rows = [
            {
                'unit_id': str(assessment.unit_id),
                'title': assessment.title,
                'weight': assessment.weight,
                'html_check_answers': assessment.html_check_answers,
                'props': transforms.dumps(assessment.properties),
            }
            for assessment in course.get_units_of_type(
                verify.UNIT_TYPE_ASSESSMENT)]
        return rows, 0
class UnitsDataSource(data_sources.AbstractSmallRestDataSource):
    """REST data source exposing a course's units."""

    @classmethod
    def get_name(cls):
        return 'units'

    @classmethod
    def get_title(cls):
        return 'Units'

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        """Builds the JSON schema describing one unit record."""
        reg = schema_fields.FieldRegistry(
            'Units',
            description='Sets of lessons providing course content')
        reg.add_property(schema_fields.SchemaField(
            'unit_id', 'Unit ID', 'string',
            description='Key uniquely identifying this particular unit'))
        reg.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            description='Human-readable title describing the unit'))
        # NOTE(review): the description below is passed positionally, unlike
        # the keyword form used above -- preserved verbatim; confirm the
        # fourth positional parameter of SchemaField is the description.
        reg.add_property(schema_fields.SchemaField(
            'props', 'Properties', 'object',
            'Set of key/value additional properties, not further defined.'))
        return reg.get_json_schema_dict()['properties']

    @classmethod
    def fetch_values(cls, app_context, *args, **kwargs):
        """Returns (list of unit dicts, page number 0)."""
        course = courses.Course(handler=None, app_context=app_context)
        rows = [
            {
                'unit_id': str(unit.unit_id),
                'title': unit.title,
                'props': unit.properties,
            }
            for unit in course.get_units_of_type(verify.UNIT_TYPE_UNIT)]
        return rows, 0
class LessonsDataSource(data_sources.AbstractSmallRestDataSource):
    """REST data source exposing a course's lessons."""

    @classmethod
    def get_name(cls):
        return 'lessons'

    @classmethod
    def get_title(cls):
        return 'Lessons'

    @classmethod
    def exportable(cls):
        return True

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        """Builds the JSON schema describing one lesson record."""
        reg = schema_fields.FieldRegistry(
            'Lessons',
            description='Sets of lessons providing course content')
        field_specs = [
            ('lesson_id', 'Unit ID', 'string',
             'Key uniquely identifying which lesson this is'),
            ('unit_id', 'Unit ID', 'string',
             'Key uniquely identifying unit lesson is in'),
            ('title', 'Title', 'string',
             'Human-readable title describing the unit'),
            ('scored', 'Scored', 'boolean',
             'Boolean: Whether questions in this lesson count '
             'for scoring.'),
            ('has_activity', 'Has Activity', 'boolean',
             'Boolean: does this lesson contain an activity?'),
            ('activity_title', 'Activity Title', 'string',
             'Title of the activity (if lesson has an activity)'),
        ]
        for name, label, field_type, description in field_specs:
            reg.add_property(schema_fields.SchemaField(
                name, label, field_type, description=description))
        return reg.get_json_schema_dict()['properties']

    @classmethod
    def fetch_values(cls, app_context, *args, **kwargs):
        """Returns (list of lesson dicts, page number 0)."""
        course = courses.Course(handler=None, app_context=app_context)
        rows = []
        for lesson in course.get_lessons_for_all_units():
            # NOTE(review): 'lesson_id' is populated from lesson.unit_id,
            # mirroring the original code -- confirm this is intentional.
            rows.append({
                'lesson_id': str(lesson.unit_id),
                'unit_id': str(lesson.unit_id),
                'title': lesson.title,
                'scored': lesson.scored,
                'has_activity': lesson.has_activity,
                'activity_title': lesson.activity_title,
            })
        return rows, 0
class StudentAssessmentScoresDataSource(
    data_sources.AbstractDbTableRestDataSource):
    """Unpack student assessment scores from student record.

    NOTE: Filtering/ordering, if present, will be done based on Student
    attributes, not scores.  (The scores are in an encoded string in a
    field which is not indexed anyhow.)  The only meaningful field to
    index or filter on is enrolled_on.
    """

    @classmethod
    def get_name(cls):
        return 'assessment_scores'

    @classmethod
    def get_title(cls):
        return 'Assessment Scores'

    @classmethod
    def get_context_class(cls):
        return data_sources.DbTableContext

    @classmethod
    def get_schema(cls, unused_app_context, unused_catch_and_log,
                   unused_source_context):
        """Describes one (student, assessment) score record."""
        reg = schema_fields.FieldRegistry('Unit',
            description='Course sub-components')
        reg.add_property(schema_fields.SchemaField(
            'user_id', 'User ID', 'string',
            description='Student ID encrypted with a session-specific key'))
        reg.add_property(schema_fields.SchemaField(
            'id', 'Unit ID', 'string',
            description='ID of assessment for this score.'))
        reg.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            description='Title of the assessment for this score.'))
        reg.add_property(schema_fields.SchemaField(
            'score', 'Score', 'integer',
            description='Value from 0 to 100 indicating % correct.'))
        reg.add_property(schema_fields.SchemaField(
            'weight', 'Weight', 'number',
            description='Weight applied to the score for computing total '
            'grade.'))
        reg.add_property(schema_fields.SchemaField(
            'attempted', 'Attempted', 'boolean',
            description='Whether the assessment was attempted.'))
        reg.add_property(schema_fields.SchemaField(
            'completed', 'Completed', 'boolean',
            description='Whether the assessment was completed.'))
        reg.add_property(schema_fields.SchemaField(
            'human_graded', 'Human Graded', 'boolean',
            description='Score is from a human (vs. automatic) grading.'))
        reg.add_property(schema_fields.SchemaField(
            'assessment_rank', 'Assessment Rank', 'integer',
            description='Rank of assessment from zero to number of assessments '
            '- 1, in order by total score achieved by all students taking that '
            'assessment.'))
        reg.add_property(schema_fields.SchemaField(
            'user_rank', 'User Rank', 'integer',
            description='Rank of student from zero to number of students '
            '- 1, in order by total score achieved on all assessments taken '
            'by that student.'))
        return reg.get_json_schema_dict()['properties']

    @classmethod
    def get_entity_class(cls):
        return models.Student

    @classmethod
    def _postprocess_rows(cls, app_context, source_context,
                          unused_schema, unused_log, unused_page_number,
                          students):
        """Explodes Student rows into per-assessment score dicts with ranks."""
        # transform_fn encodes PII (user IDs) with a session-specific key.
        transform_fn = cls._build_transform_fn(source_context)
        with utils.Namespace(app_context.get_namespace_name()):
            course = courses.Course(handler=None, app_context=app_context)
            # Only students who have any recorded scores contribute rows.
            students_with_scores = [s for s in students if s.scores]
            student_scores = []
            for student in students_with_scores:
                scores = course.get_all_scores(student)
                for score in scores:
                    if not score['attempted']:
                        continue
                    # user_id is PII and must be encoded to obscure its value.
                    score['user_id'] = transform_fn(student.user_id)
                    student_scores.append(score)
            # Provide a ranking by student, 0 ... #students, low to high.
            # Totals are weighted sums of all of a student's scores.
            scored_students = {}
            for score in student_scores:
                current_score = scored_students.get(score['user_id'], 0)
                scored_students[score['user_id']] = current_score + (
                    score['weight'] * score['score'])
            # Python 2 sorted() with a positional cmp function: orders
            # (user_id, total) pairs by total, ascending.
            ranked_students = {kv[0]: rank for rank, kv in
                               enumerate(
                                   sorted(scored_students.items(),
                                          lambda i1, i2: cmp(i1[1], i2[1])))}
            # Provide a ranking by assessment, 0 ... #assessments, low to high,
            # ordered by the average weighted score across all takers.
            scored_assessments = {}
            for score in student_scores:
                title = score['title']
                if title not in scored_assessments:
                    scored_assessments[title] = []
                scored_assessments[title].append(
                    score['weight'] * score['score'])
            for title in scored_assessments:
                # * 1.0 forces float division under Python 2 integer math.
                avg = (sum(scored_assessments[title]) * 1.0 /
                       len(scored_assessments[title]))
                scored_assessments[title] = avg
            ranked_assessments = {kv[0]: rank for rank, kv in
                                  enumerate(
                                      sorted(scored_assessments.items(),
                                             lambda i1, i2: cmp(i1[1], i2[1])))}
            # Annotate each score row with both rankings.
            for score in student_scores:
                score['user_rank'] = ranked_students[score['user_id']]
                score['assessment_rank'] = ranked_assessments[score['title']]
            return student_scores
class LabelsDataSource(data_sources.AbstractSmallRestDataSource):
    """REST data source exporting all labels used in a course."""

    @classmethod
    def get_name(cls):
        return 'labels'

    @classmethod
    def get_title(cls):
        return 'Labels'

    @classmethod
    def exportable(cls):
        return True

    @classmethod
    def get_schema(cls, app_context, log, source_context):
        """Describes one exported label record."""
        reg = schema_fields.FieldRegistry(
            'Labels',
            description='All labels used in course')
        reg.add_property(schema_fields.SchemaField(
            'label_id', 'Label ID', 'string',
            description='Key uniquely identifying this particular label'))
        reg.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            description='Human-readable title for the label'))
        reg.add_property(schema_fields.SchemaField(
            'description', 'Description', 'string',
            description='Description for the label.'))
        reg.add_property(schema_fields.SchemaField(
            'type', 'Type', 'string',
            description='Sub-type of label indicating what this kind of '
            'label is used for. E.g., setting track through a course or '
            'selecting a display language.'))
        reg.add_property(schema_fields.SchemaField(
            'user_editable', 'User Editable', 'boolean',
            description='Set to true if regular users are permitted to '
            'set/remove labels of this type.'))
        reg.add_property(schema_fields.SchemaField(
            'system_editable', 'System Editable', 'boolean',
            description='Set to true if only admin users are permitted to '
            'set/remove labels of this type.'))
        return reg.get_json_schema_dict()['properties']

    @classmethod
    def fetch_values(cls, app_context, source_context, schema, log,
                     page_number):
        """Returns (list of label dicts, page number 0)."""
        ret = []
        for label in models.LabelDAO.get_all_iter():
            # Find the label-type descriptor matching this label's type.
            # Bug fix: this previously read models.LableDTO (typo), which
            # would raise AttributeError at runtime.
            label_type = None
            for label_type in models.LabelDTO.LABEL_TYPES:
                if label_type.type == label.type:
                    break
            # NOTE(review): if no type matches, label_type is left as the
            # *last* entry in LABEL_TYPES (or None for an empty list) --
            # original behavior preserved; confirm types always match.
            user_editable = (
                label_type in models.LabelDTO.USER_EDITABLE_LABEL_TYPES)
            system_editable = (
                label_type in models.LabelDTO.SYSTEM_EDITABLE_LABEL_TYPES)
            ret.append({
                'label_id': str(label.id),
                'title': label.title,
                'description': label.description,
                'type': label_type.name,
                'user_editable': user_editable,
                'system_editable': system_editable,
            })
        return ret, 0
class StudentsDataSource(data_sources.AbstractDbTableRestDataSource):
    """Paginated data source exporting Student records."""

    @classmethod
    def get_entity_class(cls):
        return models.Student

    @classmethod
    def get_name(cls):
        return 'students'

    @classmethod
    def get_title(cls):
        return 'Students'

    @classmethod
    def exportable(cls):
        return True

    @classmethod
    def get_default_chunk_size(cls):
        return 100

    @classmethod
    def get_schema(cls, app_context, log, source_context):
        """Override default entity-based schema to reflect our upgrades.

        In the entity, labels are stored as a single string property,
        rather than an arraylist of string or integer for backward
        compatibility. Current (2014-12-05) usage is that the 'labels'
        property is a stringified representation of a list of IDs
        to LabelEntity. On export, we convert the string to an array
        of string to permit easier correlation from student labels to
        exported LabelEntity items.

        We provide external references to labels in preference to simply
        resolving the labels, because of potential data bloat (minor) and
        to avoid any future complications due to expansion of the role
        of labels (as was seen when labels-as-language-indicator was
        added).

        Args:
            app_context: Standard CB application context object
            log: a catch_and_log object for reporting any exceptions.
                Not used here, but useful for data source types that are
                dynamically generated, rather than statically coded.
        Returns:
            A JSON schema describing contents. A dict as produced by
            FieldRegistry.get_json_schema_dict().
        """
        clazz = cls.get_entity_class()
        if source_context.send_uncensored_pii_data:
            registry = entity_transforms.get_schema_for_entity_unsafe(clazz)
            # email lives in the entity's key, not its properties, so it
            # must be declared explicitly (see _postprocess_rows).
            registry.add_property(schema_fields.SchemaField(
                'email', 'Email', 'string',
                optional=True,
                description='Email address for this Student.'))
        else:
            registry = entity_transforms.get_schema_for_entity(clazz)
        ret = registry.get_json_schema_dict()['properties']
        # Scores are deprecated now that regularized scores are available
        # in StudentAggregation data source.
        if 'scores' in ret:
            del ret['scores']
        # We are replacing the labels string with a version that shows
        # labels as separate items so that the amount of insanity
        # required in BigQuery SQL is minimized.
        ret['labels'] = schema_fields.FieldArray(
            'labels', 'Labels',
            description='Labels on students',
            item_type=schema_fields.SchemaField(
                'label', 'Label', 'string',
                description='ID of a LabelEntity applied to a student')
            ).get_json_schema_dict()
        # If a course owner has allowed some or all portions of
        # 'additional_fields', convert from a flat string into an array
        # of name/value pairs
        if 'additional_fields' in ret:
            additional_field = schema_fields.FieldRegistry('additional_field')
            additional_field.add_property(schema_fields.SchemaField(
                'name', 'Name', 'string',
                description='HTML form field name. Not necessarily unique.'))
            additional_field.add_property(schema_fields.SchemaField(
                'value', 'Value', 'string',
                description='HTML form field value.'))
            ret['additional_fields'] = schema_fields.FieldArray(
                'additional_fields', 'Additional Fields',
                item_type=additional_field,
                description='List of name/value pairs entered on the '
                'course registration form. Note that for names are not '
                'necessarily unique. E.g., a group of checkboxes for '
                '"select all reasons you are taking this course" may well '
                'all have the same name.').get_json_schema_dict()
        return ret

    @classmethod
    def _postprocess_rows(cls, app_context, source_context, schema,
                          log, page_number, rows):
        """Strips bulky keys, explodes labels/additional_fields, adds email."""
        ret = super(StudentsDataSource, cls)._postprocess_rows(
            app_context, source_context, schema, log, page_number, rows)
        # These don't add any value, and do add substantially to data volume.
        # (The user_id field is what's valuable for matching to other items
        # such as StudentAnswersEntity records.)
        for item in ret:
            for unwanted in ('key', 'key_by_user_id', 'safe_key', 'scores'):
                if unwanted in item:
                    del item[unwanted]
            # Bug fix: a leftover "if 'scores' in ret:" here checked the
            # *list* of rows for the string 'scores' (always False) and was
            # redundant with the deletion above; removed as dead code.
            item['labels'] = list(utils.text_to_list(item['labels']))
            if item.get('additional_fields'):
                additional_fields = transforms.loads(item['additional_fields'])
                item['additional_fields'] = [
                    {'name': l[0], 'value': l[1]} for l in additional_fields]
        # Here, run through the Student entities to pick up the email address.
        # Since the email is not stored as an actual property in the entity, but
        # instead is just part of the key, we have to manually extract it. Note
        # that here we are making the entirely reasonable assumption that the
        # cardinality of the list of Student entity and the dict-of-properties
        # list in 'ret' is the same.
        if source_context.send_uncensored_pii_data:
            for student, output_dict in zip(rows, ret):
                output_dict['email'] = student.email
        return ret
class LabelsOnStudentsGenerator(jobs.AbstractCountingMapReduceJob):
    """Map/reduce job counting how many students carry each label."""

    @staticmethod
    def get_description():
        return 'labels on students'

    @staticmethod
    def entity_class():
        return models.Student

    @staticmethod
    def map(student):
        # Emit one count per label ID applied to this student.
        for label_id in utils.text_to_list(student.labels):
            yield label_id, 1
class LabelsOnStudentsDataSource(data_sources.AbstractRestDataSource):
    """REST data source reporting per-label student counts."""

    @staticmethod
    def required_generators():
        return [LabelsOnStudentsGenerator]

    @classmethod
    def get_name(cls):
        return 'labels_on_students'

    @classmethod
    def get_title(cls):
        return 'Labels on Students'

    @classmethod
    def get_default_chunk_size(cls):
        return 0  # Meaning we don't need pagination

    @classmethod
    def get_context_class(cls):
        return data_sources.NullContextManager

    @classmethod
    def get_schema(cls, app_context, log, source_context):
        """Builds the JSON schema describing one per-label count record."""
        reg = schema_fields.FieldRegistry(
            'Students By Label',
            description='Count of students marked with each label')
        field_specs = [
            ('title', 'Title', 'string', 'Name for this label'),
            ('description', 'Description', 'string',
             'Human-readable text describing the label'),
            ('type', 'Type', 'string',
             'Title of label group to which this label belongs.'),
            ('count', 'Count', 'integer',
             'Number of students with this label applied'),
        ]
        for name, label, field_type, description in field_specs:
            reg.add_property(schema_fields.SchemaField(
                name, label, field_type, description=description))
        return reg.get_json_schema_dict()['properties']

    @classmethod
    def fetch_values(cls, app_context, source_context, schema, log, page_number,
                     labels_on_students_job):
        """Joins map/reduce label counts against the label catalog."""
        raw_counts = jobs.MapReduceJob.get_results(labels_on_students_job)
        counts = {int(pair[0]): int(pair[1]) for pair in raw_counts}
        titles_by_type = {
            lt.type: lt.title for lt in models.LabelDTO.LABEL_TYPES}
        rows = [
            {
                'title': label.title,
                'description': label.description,
                'type': titles_by_type[label.type],
                'count': counts.get(label.id, 0),
            }
            for label in models.LabelDAO.get_all()]
        return rows, 0
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide data sources of common CourseBuilder items.
If you are adding an extension module to CourseBuilder and you wish to include
a data source as part of that extension, you should add that class in the
directory specific to your module, rather than registering it here. That way,
if your module needs to be disabled, that can be done all at once.
This module provides data sources for common CourseBuilder entites using the
framework defined in models/data_sources. The name of this module differs
both to reflect the concrete/abstract disctintion as well as to avoid module
naming conflicts.
"""
__author__ = 'Mike Gainer (mgainer@google.com)'
from models import custom_modules
from models import data_sources
from modules.data_source_providers import rest_providers
from modules.data_source_providers import synchronous_providers
# Module registration object; populated by register_module() at startup.
custom_module = None
# Display name under which this module registers with CourseBuilder.
MODULE_NAME = 'Data Source Providers'
def _notify_module_enabled():
    """Registers every data source this module provides."""
    providers = (
        synchronous_providers.QuestionStatsSource,
        synchronous_providers.StudentEnrollmentAndScoresSource,
        synchronous_providers.StudentProgressStatsSource,
        rest_providers.AssessmentsDataSource,
        rest_providers.UnitsDataSource,
        rest_providers.LessonsDataSource,
        rest_providers.StudentAssessmentScoresDataSource,
        rest_providers.LabelsDataSource,
        rest_providers.StudentsDataSource,
        rest_providers.LabelsOnStudentsDataSource,
    )
    for provider in providers:
        data_sources.Registry.register(provider)
def _notify_module_disabled():
raise NotImplementedError(
'Data sources may not be un-registered; disabling this module '
'is not supported.')
def register_module():
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
MODULE_NAME,
'Implementations of specific data sources.',
[], [], _notify_module_enabled, _notify_module_disabled)
return custom_module
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide data sources performing synchronous (non-paginated) reads."""
__author__ = 'Sean Lip (sll@google.com)'
import logging
import urlparse
from common import safe_dom
from models import courses
from models import data_sources
from models import jobs
from models import progress
from models import transforms
from models import utils as models_utils
from models.models import EventEntity
from models.models import Student
from models.models import StudentPropertyEntity
class StudentEnrollmentAndScoresGenerator(jobs.DurableJob):
"""A job that computes student statistics."""
@staticmethod
def get_description():
return 'student enrollment and scores'
class ScoresAggregator(object):
"""Aggregates scores statistics."""
def __init__(self):
# We store all data as tuples keyed by the assessment type name.
# Each tuple keeps:
# (student_count, sum(score))
self.name_to_tuple = {}
def visit(self, student):
if student.scores:
scores = transforms.loads(student.scores)
for key in scores.keys():
if key in self.name_to_tuple:
count = self.name_to_tuple[key][0]
score_sum = self.name_to_tuple[key][1]
else:
count = 0
score_sum = 0
self.name_to_tuple[key] = (
count + 1, score_sum + float(scores[key]))
class EnrollmentAggregator(object):
"""Aggregates enrollment statistics."""
def __init__(self):
self.enrolled = 0
self.unenrolled = 0
def visit(self, student):
if student.is_enrolled:
self.enrolled += 1
else:
self.unenrolled += 1
def run(self):
"""Computes student statistics."""
enrollment = self.EnrollmentAggregator()
scores = self.ScoresAggregator()
mapper = models_utils.QueryMapper(
Student.all(), batch_size=500, report_every=1000)
def map_fn(student):
enrollment.visit(student)
scores.visit(student)
mapper.run(map_fn)
data = {
'enrollment': {
'enrolled': enrollment.enrolled,
'unenrolled': enrollment.unenrolled},
'scores': scores.name_to_tuple}
return data
class StudentEnrollmentAndScoresSource(data_sources.SynchronousQuery):
"""Shows student enrollment analytics on the dashboard."""
@staticmethod
def required_generators():
return [StudentEnrollmentAndScoresGenerator]
@staticmethod
def fill_values(app_context, template_values, job):
stats = transforms.loads(job.output)
template_values['enrolled'] = stats['enrollment']['enrolled']
template_values['unenrolled'] = stats['enrollment']['unenrolled']
scores = []
total_records = 0
for key, value in stats['scores'].items():
total_records += value[0]
avg = round(value[1] / value[0], 1) if value[0] else 0
scores.append({'key': key, 'completed': value[0],
'avg': avg})
template_values['scores'] = scores
template_values['total_records'] = total_records
class StudentProgressStatsGenerator(jobs.DurableJob):
"""A job that computes student progress statistics."""
@staticmethod
def get_description():
return 'student progress'
class ProgressAggregator(object):
"""Aggregates student progress statistics."""
def __init__(self, course):
self.progress_data = {}
self._tracker = progress.UnitLessonCompletionTracker(course)
def visit(self, student_property):
if (student_property.value and
student_property.name ==
progress.UnitLessonCompletionTracker.PROPERTY_KEY):
entity_scores = transforms.loads(student_property.value)
for entity in entity_scores:
entity_score = self.progress_data.get(
entity, {'progress': 0, 'completed': 0})
if self._tracker.determine_if_composite_entity(entity):
if (entity_scores[entity] ==
self._tracker.IN_PROGRESS_STATE):
entity_score['progress'] += 1
elif (entity_scores[entity] ==
self._tracker.COMPLETED_STATE):
entity_score['completed'] += 1
else:
if entity_scores[entity] != 0:
entity_score['completed'] += 1
self.progress_data[entity] = entity_score
def __init__(self, app_context):
super(StudentProgressStatsGenerator, self).__init__(app_context)
self._course = courses.Course(None, app_context)
def run(self):
"""Computes student progress statistics."""
student_progress = self.ProgressAggregator(self._course)
mapper = models_utils.QueryMapper(
StudentPropertyEntity.all(), batch_size=500, report_every=1000)
mapper.run(student_progress.visit)
return student_progress.progress_data
class StudentProgressStatsSource(data_sources.SynchronousQuery):
"""Shows student progress analytics on the dashboard."""
@staticmethod
def required_generators():
return [StudentProgressStatsGenerator]
@staticmethod
def fill_values(app_context, template_values, job):
course = courses.Course(None, app_context=app_context)
template_values['entity_codes'] = transforms.dumps(
progress.UnitLessonCompletionTracker.EVENT_CODE_MAPPING.values())
value = transforms.loads(job.output)
if value:
value = transforms.dumps(value)
else:
value = None
template_values['progress'] = value
try:
template_values['content'] = transforms.dumps(
progress.ProgressStats(course).compute_entity_dict(
'course', []))
except IOError:
template_values['update_message'] = safe_dom.Text(
'This feature is supported by CB 1.3 and up.')
class QuestionStatsGenerator(jobs.DurableJob):
"""A job that computes stats for student submissions to questions."""
@staticmethod
def get_description():
return 'question analysis'
class MultipleChoiceQuestionAggregator(object):
"""Class that aggregates submissions for multiple-choice questions."""
ATTEMPT_ACTIVITY = 'attempt-activity'
TAG_ASSESSMENT = 'tag-assessment'
ATTEMPT_LESSON = 'attempt-lesson'
SUBMIT_ASSESSMENT = 'submit-assessment'
ATTEMPT_ASSESSMENT = 'attempt-assessment'
MC_QUESTION = 'McQuestion'
QUESTION_GROUP = 'QuestionGroup'
ACTIVITY_CHOICE = 'activity-choice'
ACTIVITY_GROUP = 'activity-group'
def __init__(self, course):
self._course = course
self.id_to_questions_dict = progress.UnitLessonCompletionTracker(
course).get_id_to_questions_dict()
self.id_to_assessments_dict = progress.UnitLessonCompletionTracker(
course).get_id_to_assessments_dict()
def _get_course(self):
return self._course
def _append_data(self, summarized_question, dict_to_update):
# Validate the structure and content of summarized_question dict.
if set(summarized_question.keys()) != {'id', 'score', 'answers'}:
return
if not isinstance(summarized_question['score'], (int, float)):
return
if not isinstance(summarized_question['answers'], list):
return
if any(not isinstance(answer, int) for answer in (
summarized_question['answers'])):
return
if summarized_question['id'] not in dict_to_update:
return
if max(summarized_question['answers']) >= len(
dict_to_update[summarized_question['id']]['answer_counts']):
return
# Add the summarized_question to the aggregating dict.
q_dict = dict_to_update[summarized_question['id']]
q_dict['score'] += summarized_question['score']
q_dict['num_attempts'] += 1
for choice_index in summarized_question['answers']:
q_dict['answer_counts'][choice_index] += 1
def _get_unit_and_lesson_id_from_url(self, url):
url_components = urlparse.urlparse(url)
query_dict = urlparse.parse_qs(url_components.query)
if 'unit' not in query_dict:
return None, None
unit_id = query_dict['unit'][0]
lesson_id = None
if 'lesson' in query_dict:
lesson_id = query_dict['lesson'][0]
else:
lessons = self._get_course().get_lessons(unit_id)
lesson_id = lessons[0].lesson_id if lessons else None
return unit_id, lesson_id
def _summarize_multiple_questions(self, data, id_prefix):
"""Helper method that summarizes events from a list of questions.
Args:
data: data dict from event_entity['data'].
id_prefix: str. Questions in lessons have 'u.#.l.#' formatted
prefix representing the unit and lesson id, and questions
in assessments have 's.#' formatted prefix representing
the assessment id.
Returns:
A list of dicts. Each of the dicts in the output list has the
following keys: ['id', 'score', 'answers'].
"""
type_info_dict = data['containedTypes']
questions_list = []
for instanceid, type_info in type_info_dict.iteritems():
if isinstance(type_info, list):
# This is a question group.
mc_indices = [i for i in xrange(len(type_info))
if type_info[i] == self.MC_QUESTION]
questions_list += [{
'id': '%s.c.%s.i.%s' % (id_prefix, instanceid, index),
'score': data['individualScores'][instanceid][index],
'answers': data['answers'][instanceid][index]
} for index in mc_indices if (
data['answers'][instanceid][index])]
elif (type_info == self.MC_QUESTION and
data['answers'][instanceid]):
# This is an individual multiple-choice question.
questions_list += [{
'id': '%s.c.%s' % (id_prefix, instanceid),
'score': data['individualScores'][instanceid],
'answers': data['answers'][instanceid]
}]
return questions_list
def _get_questions_from_attempt_activity(self, event_data):
"""Summarizes activity event data into a list of dicts.
Args:
event_data: data dict from event_entity['data'].
Returns:
List of dicts. Each of the dicts in the output list has the
following keys: ['id', 'score', 'answers'].
"""
unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(
event_data['location'])
if unit_id is None or lesson_id is None:
return []
if (event_data['type'] == self.ACTIVITY_CHOICE and
event_data['value'] is not None):
return [{
'id': 'u.%s.l.%s.b.%s' % (
unit_id, lesson_id, event_data['index']),
'score': 1.0 if event_data['correct'] else 0.0,
'answers': [event_data['value']]
}]
elif event_data['type'] == self.ACTIVITY_GROUP:
block_id = event_data['index']
return [{
'id': 'u.%s.l.%s.b.%s.i.%s' % (
unit_id, lesson_id, block_id, answer['index']),
'score': 1.0 if answer['correct'] else 0.0,
'answers': answer['value']
} for answer in event_data['values'] if answer['value']]
else:
return []
def _get_questions_from_tag_assessment(self, event_data):
"""Summarizes assessment tag event data into a list of dicts.
Args:
event_data: data dict from event_entity['data'].
Returns:
List of dicts. Each of the dicts in the output list has the
following keys: ['id', 'score', 'answers'].
"""
unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(
event_data['location'])
if unit_id is None or lesson_id is None:
return []
if event_data['type'] == self.QUESTION_GROUP:
mc_indices = [
i for i in xrange(len(event_data['containedTypes']))
if event_data['containedTypes'][i] == self.MC_QUESTION]
return [{
'id': 'u.%s.l.%s.c.%s.i.%s' % (
unit_id, lesson_id, event_data['instanceid'], index),
'score': event_data['individualScores'][index],
'answers': event_data['answer'][index]
} for index in mc_indices if event_data['answer'][index]]
elif (event_data['type'] == self.MC_QUESTION and
event_data['answer']):
# This is a single multiple-choice question.
return [{
'id': 'u.%s.l.%s.c.%s' % (
unit_id, lesson_id, event_data['instanceid']),
'score': event_data['score'],
'answers': event_data['answer']
}]
else:
return []
def _get_questions_from_attempt_lesson(self, event_data):
"""Summarizes lesson attempt event data into a list of dicts.
Args:
event_data: data dict from event_entity['data'].
Returns:
List of dicts. Each of the dicts in the output list has the
following keys: ['id', 'score', 'answers'].
"""
unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(
event_data['location'])
if unit_id is None or lesson_id is None:
return []
return self._summarize_multiple_questions(
event_data, 'u.%s.l.%s' % (unit_id, lesson_id))
def _get_questions_from_submit_and_attempt_assessment(self, event_data):
"""Summarizes assessment submission event data into a list of dicts.
Args:
event_data: data dict from event_entity['data'].
Returns:
List of dicts. Each of the dicts in the output list has the
following keys: ['id', 'score', 'answers'].
"""
if not event_data['type'].startswith('assessment-'):
return []
assessment_id = event_data['type'][len('assessment-'):]
values = event_data['values']
if isinstance(values, list):
# This is a v1.4 (or older) assessment.
mc_indices = [i for i in xrange(len(values))
if values[i]['type'] == 'choices']
return [{
'id': 's.%s.i.%s' % (assessment_id, index),
'score': 1.0 if values[index]['correct'] else 0.0,
'answers': [values[index]['value']]
} for index in mc_indices if values[index]['value'] is not None]
elif isinstance(values, dict):
# This is a v1.5 assessment.
return self._summarize_multiple_questions(
values, 's.%s' % assessment_id)
else:
return []
def _process_event(self, source, data):
"""Returns a list of questions that correspond to the event."""
question_list = []
try:
if source == self.ATTEMPT_ACTIVITY:
question_list = self._get_questions_from_attempt_activity(
data)
elif source == self.TAG_ASSESSMENT:
question_list = self._get_questions_from_tag_assessment(
data)
elif source == self.ATTEMPT_LESSON:
question_list = self._get_questions_from_attempt_lesson(
data)
elif (source == self.SUBMIT_ASSESSMENT or
source == self.ATTEMPT_ASSESSMENT):
question_list = (
self._get_questions_from_submit_and_attempt_assessment(
data))
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to process question analytics event: '
'source %s, data %s, error %s', source, data, e)
return question_list
def visit(self, event_entity):
"""Records question data from given event_entity."""
if not event_entity or not event_entity.source:
return
try:
data = transforms.loads(event_entity.data)
except Exception: # pylint: disable=broad-except
return
# A list of dicts. Each dict represents a question instance and has
# the following keys: ['id', 'score', 'answers']. Note that a
# single event may correspond to multiple question instance dicts.
question_list = self._process_event(event_entity.source, data)
# Update the correct dict according to the event source.
if (event_entity.source == self.SUBMIT_ASSESSMENT or
event_entity.source == self.ATTEMPT_ASSESSMENT):
dict_to_update = self.id_to_assessments_dict
else:
dict_to_update = self.id_to_questions_dict
for summarized_question in question_list:
self._append_data(summarized_question, dict_to_update)
def __init__(self, app_context):
super(QuestionStatsGenerator, self).__init__(app_context)
self._course = courses.Course(None, app_context)
def run(self):
"""Computes submitted question answers statistics."""
question_stats = self.MultipleChoiceQuestionAggregator(self._course)
mapper = models_utils.QueryMapper(
EventEntity.all(), batch_size=500, report_every=1000)
mapper.run(question_stats.visit)
return (question_stats.id_to_questions_dict,
question_stats.id_to_assessments_dict)
class QuestionStatsSource(data_sources.SynchronousQuery):
"""Shows statistics on the dashboard for students' answers to questions."""
@staticmethod
def required_generators():
return [QuestionStatsGenerator]
@staticmethod
def fill_values(app_context, template_values, job):
# pylint: disable=unpacking-non-sequence
accumulated_question_answers, accumulated_assessment_answers = (
transforms.loads(job.output))
template_values['accumulated_question_answers'] = transforms.dumps(
accumulated_question_answers)
template_values['accumulated_assessment_answers'] = transforms.dumps(
accumulated_assessment_answers)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting updates to basic course settings."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
import cgi
import urllib
from common import crypto
from common import schema_fields
from controllers import utils as controllers_utils
from models import courses
from models import models
from models import roles
from models import transforms
from modules.dashboard import filer
from modules.dashboard import messages
from modules.oeditor import oeditor
class CourseSettingsRights(object):
"""Manages view/edit rights for files."""
@classmethod
def can_view(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_edit(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_delete(cls, handler):
return cls.can_edit(handler)
@classmethod
def can_add(cls, handler):
return cls.can_edit(handler)
class CourseSettingsHandler(controllers_utils.ApplicationHandler):
"""Course settings handler."""
EXTRA_CSS_FILES = []
EXTRA_JS_FILES = []
ADDITIONAL_DIRS = []
def post_course_availability(self):
course = self.get_course()
settings = course.get_environ(self.app_context)
availability = self.request.get('availability') == 'True'
settings['course']['now_available'] = availability
course.save_settings(settings)
self.redirect('/dashboard')
def post_course_browsability(self):
course = self.get_course()
settings = course.get_environ(self.app_context)
browsability = self.request.get('browsability') == 'True'
settings['course']['browsable'] = browsability
course.save_settings(settings)
self.redirect('/dashboard')
def post_edit_course_settings(self):
"""Handles editing of course.yaml."""
filer.create_course_file_if_not_exists(self)
extra_args = {}
for name in ('section_names', 'tab', 'tab_title', 'exit_url'):
value = self.request.get(name)
if value:
extra_args[name] = value
self.redirect(self.get_action_url(
'edit_basic_settings', key='/course.yaml', extra_args=extra_args))
def get_edit_basic_settings(self):
"""Shows editor for course.yaml."""
key = self.request.get('key')
tab = self.request.get('tab')
tab_title = self.request.get('tab_title')
section_names = urllib.unquote(self.request.get('section_names'))
exit_url = (
self.request.get('exit_url') or
self.canonicalize_url('/dashboard?action=settings&tab=%s' % tab))
template_values = {}
self._show_edit_settings_section(
template_values, key, tab, tab_title, section_names, exit_url)
self.render_page(template_values, in_action='settings')
def _show_edit_settings_section(self, template_values, key, tab, tab_title,
section_names=None, exit_url=''):
# The editor for all course settings is getting rather large. Here,
# prune out all sections except the one named. Names can name either
# entire sub-registries, or a single item. E.g., "course" selects all
# items under the 'course' sub-registry, while
# "base.before_head_tag_ends" selects just that one field.
registry = self.get_course().create_settings_schema()
if section_names:
registry = registry.clone_only_items_named(section_names.split(','))
rest_url = self.canonicalize_url(CourseSettingsRESTHandler.URI)
form_html = oeditor.ObjectEditor.get_html_for(
self, registry.get_json_schema(), registry.get_schema_dict(),
key, rest_url, exit_url, extra_css_files=self.EXTRA_CSS_FILES,
extra_js_files=self.EXTRA_JS_FILES,
additional_dirs=self.ADDITIONAL_DIRS,
required_modules=CourseSettingsRESTHandler.REQUIRED_MODULES)
template_values.update({
'page_title': self.format_title(
'Settings > %s' %
urllib.unquote(tab_title)),
'page_description': messages.EDIT_SETTINGS_DESCRIPTION,
'main_content': form_html,
})
class CourseYamlRESTHandler(controllers_utils.BaseRESTHandler):
"""Common base for REST handlers in this file."""
def get_course_dict(self):
return self.get_course().get_environ(self.app_context)
def get(self):
"""Handles REST GET verb and returns an object as JSON payload."""
assert self.app_context.is_editable_fs()
key = self.request.get('key')
if not CourseSettingsRights.can_view(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
# Load data if possible.
fs = self.app_context.fs.impl
filename = fs.physical_to_logical('/course.yaml')
try:
stream = fs.get(filename)
except: # pylint: disable=bare-except
stream = None
if not stream:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
# Prepare data.
json_payload = self.process_get()
transforms.send_json_response(
self, 200, 'Success.',
payload_dict=json_payload,
xsrf_token=crypto.XsrfTokenManager.create_xsrf_token(
self.XSRF_ACTION))
def put(self):
"""Handles REST PUT verb with JSON payload."""
assert self.app_context.is_editable_fs()
request_param = self.request.get('request')
if not request_param:
transforms.send_json_response(
self, 400, 'Missing "request" parameter.')
return
try:
request = transforms.loads(request_param)
except ValueError:
transforms.send_json_response(
self, 400, 'Malformed "request" parameter.')
return
key = request.get('key')
if not key:
transforms.send_json_response(
self, 400, 'Request missing "key" parameter.')
return
payload_param = request.get('payload')
if not payload_param:
transforms.send_json_response(
self, 400, 'Request missing "payload" parameter.')
return
try:
payload = transforms.loads(payload_param)
except ValueError:
transforms.send_json_response(
self, 400, 'Malformed "payload" parameter.')
return
if not self.assert_xsrf_token_or_fail(
request, self.XSRF_ACTION, {'key': key}):
return
if not CourseSettingsRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
request_data = self.process_put(request, payload)
if request_data:
course_settings = courses.deep_dict_merge(
request_data, self.get_course_dict())
self.postprocess_put(course_settings, request)
if not self.get_course().save_settings(course_settings):
transforms.send_json_response(self, 412, 'Validation error.')
transforms.send_json_response(self, 200, 'Saved.')
def postprocess_put(self, course_settings, request):
pass
def delete(self):
"""Handles REST DELETE verb with JSON payload."""
key = self.request.get('key')
if not self.assert_xsrf_token_or_fail(
self.request, self.XSRF_ACTION, {'key': key}):
return
if (not CourseSettingsRights.can_delete(self) or
not self.is_deletion_allowed()):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
entity = self.process_delete()
if self.get_course().save_settings(entity):
transforms.send_json_response(self, 200, 'Deleted.')
class CourseSettingsRESTHandler(CourseYamlRESTHandler):
"""Provides REST API for a file."""
REQUIRED_MODULES = [
'inputex-date', 'inputex-string', 'inputex-textarea', 'inputex-url',
'inputex-checkbox', 'inputex-select', 'inputex-uneditable', 'gcb-rte',
'inputex-hidden',
]
URI = '/rest/course/settings'
XSRF_ACTION = 'basic-course-settings-put'
def get_group_id(self, email):
if not email or '@googlegroups.com' not in email:
return None
return email.split('@')[0]
def get_groups_web_url(self, email):
group_id = self.get_group_id(email)
if not group_id:
return None
return 'https://groups.google.com/group/' + group_id
def get_groups_embed_url(self, email):
group_id = self.get_group_id(email)
if not group_id:
return None
return 'https://groups.google.com/forum/embed/?place=forum/' + group_id
def process_get(self):
entity = {}
schema = self.get_course().create_settings_schema()
schema.convert_entity_to_json_entity(
self.get_course_dict(), entity)
json_payload = transforms.dict_to_json(
entity, schema.get_json_schema_dict())
return json_payload
def _process_course_data(self, course_data):
if 'forum_email' in course_data:
forum_email = course_data['forum_email']
forum_web_url = self.get_groups_web_url(forum_email)
if forum_web_url:
course_data['forum_url'] = forum_web_url
forum_web_url = self.get_groups_embed_url(forum_email)
if forum_web_url:
course_data['forum_embed_url'] = forum_web_url
if 'announcement_list_email' in course_data:
announcement_email = course_data['announcement_list_email']
announcement_web_url = self.get_groups_web_url(
announcement_email)
if announcement_web_url:
course_data['announcement_list_url'] = announcement_web_url
def _process_extra_locales(self, extra_locales):
"""Make sure each locale has a label to go along."""
existing = set([
label.title for label in models.LabelDAO.get_all_of_type(
models.LabelDTO.LABEL_TYPE_LOCALE)])
course_locale = self.app_context.default_locale
for extra_locale in extra_locales + [{'locale': course_locale}]:
locale = extra_locale['locale']
if locale in existing:
continue
models.LabelDAO.save(models.LabelDTO(
None, {'title': locale,
'version': '1.0',
'description': '[%s] locale' % locale,
'type': models.LabelDTO.LABEL_TYPE_LOCALE}))
def process_put(self, request, payload):
errors = []
request_data = {}
schema = self.get_course().create_settings_schema()
schema.convert_json_to_entity(payload, request_data)
schema.validate(request_data, errors)
if errors:
transforms.send_json_response(
self, 400, 'Invalid data: \n' + '\n'.join(errors))
return
if 'extra_locales' in request_data:
self._process_extra_locales(request_data['extra_locales'])
if 'course' in request_data:
self._process_course_data(request_data['course'])
return request_data
def is_deletion_allowed(self):
return False
class HtmlHookHandler(controllers_utils.ApplicationHandler):
"""Set up for OEditor manipulation of HTML hook contents.
A separate handler and REST handler is required for hook contents,
since the set of hooks is not statically known. Users are free to add
whatever hooks they want where-ever they want with fairly arbitrary
names. This class and its companion REST class deal with persisting the
hook values into the course.yaml settings.
"""
def post_edit_html_hook(self):
filer.create_course_file_if_not_exists(self)
self.redirect(self.get_action_url(
'edit_html_hook', key=self.request.get('html_hook')))
def get_edit_html_hook(self):
key = self.request.get('key')
registry = HtmlHookRESTHandler.REGISTRY
exit_url = self.canonicalize_url(self.request.referer)
rest_url = self.canonicalize_url(HtmlHookRESTHandler.URI)
delete_url = '%s?%s' % (
self.canonicalize_url(HtmlHookRESTHandler.URI),
urllib.urlencode({
'key': key,
'xsrf_token': cgi.escape(
self.create_xsrf_token(HtmlHookRESTHandler.XSRF_ACTION))
}))
form_html = oeditor.ObjectEditor.get_html_for(
self, registry.get_json_schema(), registry.get_schema_dict(),
key, rest_url, exit_url,
delete_url=delete_url, delete_method='delete',
required_modules=HtmlHookRESTHandler.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Edit Hook HTML')
template_values['page_description'] = (
messages.EDIT_HTML_HOOK_DESCRIPTION)
template_values['main_content'] = form_html
self.render_page(template_values)
def _create_hook_registry():
reg = schema_fields.FieldRegistry('Html Hook', description='Html Hook')
reg.add_property(schema_fields.SchemaField(
'hook_content', 'HTML Hook Content', 'html',
optional=True))
return reg
class HtmlHookRESTHandler(CourseYamlRESTHandler):
"""REST API for individual HTML hook entries in course.yaml."""
REGISTRY = _create_hook_registry()
REQUIRED_MODULES = [
'inputex-textarea', 'inputex-uneditable', 'gcb-rte', 'inputex-hidden']
URI = '/rest/course/html_hook'
XSRF_ACTION = 'html-hook-put'
def process_get(self):
html_hook = self.request.get('key')
item = controllers_utils.HtmlHooks.get_content(
self.get_course(), html_hook)
return {'hook_content': item}
def process_put(self, request, payload):
request_data = {}
HtmlHookRESTHandler.REGISTRY.convert_json_to_entity(
payload, request_data)
if 'hook_content' not in request_data:
transforms.send_json_response(
self, 400, 'Payload missing "hook_content" parameter.')
return None
key = request.get('key')
if not key:
transforms.send_json_response(
self, 400, 'Blank or missing "key" parameter.')
return None
# Walk from bottom to top of hook element name building up
# dict-in-dict until we are at outermost level, which is
# the course_dict we will return.
course_dict = request_data['hook_content']
for element in reversed(
key.split(controllers_utils.HtmlHooks.SEPARATOR)):
course_dict = {element: course_dict}
return {controllers_utils.HtmlHooks.HTML_HOOKS: course_dict}
def postprocess_put(self, course_settings, request):
# We may have HTML hooks that appear starting from the root of the
# course config dict hierarchy, rather than within the 'html_hooks'
# top-level dict. If so, remove the old version so it does not
# hang around being confusing. (Note that we only do this as a
# post-step after process_put(), so we will only delete old items
# as they are updated by the admin)
key = request.get('key')
if key:
self._process_delete_internal(course_settings, key)
def is_deletion_allowed(self):
return True
def process_delete(self):
key = self.request.get('key')
course_dict = self.get_course_dict()
# Remove from html_hooks sub-dict
self._process_delete_internal(
course_dict.get(controllers_utils.HtmlHooks.HTML_HOOKS, {}), key)
# Also remove from top-level, just in case we have an old course.
self._process_delete_internal(course_dict, key)
return course_dict
def _process_delete_internal(self, course_dict, key):
pruned_dict = course_dict
for element in key.split(controllers_utils.HtmlHooks.SEPARATOR):
if element in pruned_dict:
if type(pruned_dict[element]) == dict:
pruned_dict = pruned_dict[element]
else:
del pruned_dict[element]
return course_dict
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and editing of questions."""
__author__ = 'Mike Gainer (mgainer@google.com)'
from common import schema_fields
from models import models
from models import roles
from modules.dashboard import dto_editor
from google.appengine.api import users
class AdminPreferencesEditor(dto_editor.BaseDatastoreAssetEditor):
"""An editor for editing and managing course admin preferences.
Note that this editor operates on StudentPreferencesDAO instances.
This is intentional; that type stores per-human, per-course prefs.
This editor exposes only the admin-specific settings, and is
available only in contexts where the user is a course admin.
(I.e, the dashboard.)
"""
def post_edit_admin_preferences(self):
template_values = {}
self._edit_admin_preferences(
template_values,
'/dashboard?action=settings&tab=admin_prefs')
self.render_page(template_values, 'settings')
def _edit_admin_preferences(self, template_values, exit_url):
if not roles.Roles.is_course_admin(self.app_context):
self.error(401)
return
template_values.update({
'page_title': self.format_title('Edit Preferences'),
'main_content': self.get_form(
AdminPreferencesRESTHandler,
users.get_current_user().user_id(),
exit_url, deletable=False)
})
class AdminPreferencesRESTHandler(dto_editor.BaseDatastoreRestHandler):
    """REST endpoint backing the admin-preferences editor form."""

    URI = '/rest/admin_prefs'
    REQUIRED_MODULES = ['inputex-hidden', 'inputex-checkbox']
    EXTRA_JS_FILES = []
    XSRF_TOKEN = 'admin-prefs-edit'
    SCHEMA_VERSIONS = [models.StudentPreferencesDAO.CURRENT_VERSION]
    DAO = models.StudentPreferencesDAO

    @classmethod
    def get_schema(cls):
        """Build the editor form schema for admin preferences.

        Returns:
            schema_fields.FieldRegistry describing the version/id bookkeeping
            fields plus the two admin-visible boolean toggles.
        """
        ret = schema_fields.FieldRegistry(
            'Admin Prefs', description='Administrator preferences',
            extra_schema_dict_values={
                'className': 'inputEx-Group new-form-layout'})
        # Hidden bookkeeping fields round-tripped through the form.
        ret.add_property(schema_fields.SchemaField(
            'version', '', 'string', optional=True, hidden=True))
        ret.add_property(schema_fields.SchemaField(
            'id', '', 'string', optional=True, hidden=True))
        ret.add_property(schema_fields.SchemaField(
            'show_hooks', 'Show Hook Edit Buttons', 'boolean',
            # Bug fix: adjacent string literals are concatenated, so each
            # fragment must end with a space ('permit ' was 'permit', which
            # rendered as "permitediting" in the UI).
            description='Whether to show controls on course pages to permit '
            'editing of HTML inclusions (hook points) at that location on '
            'the page. Turn this setting off to see the course as the '
            'student would see it, and on to enable the edit controls.',
            optional=True, hidden=False))
        ret.add_property(schema_fields.SchemaField(
            'show_jinja_context', 'Show Jinja Context', 'boolean',
            description='Whether to show a dump of Jinja context contents '
            'at the bottom of course pages (Only for admins, and only '
            'available on development server.)',
            optional=True, hidden=False))
        return ret

    def get_default_content(self):
        """Initial values for a user who has no stored preferences yet."""
        return {
            'version': self.SCHEMA_VERSIONS[0],
            'show_hooks': False
        }

    def validate(self, prefs_dict, key, schema_version, errors):
        """No extra validation needed; the form constrains all values."""
        pass
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting online file editing."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import base64
import cgi
import os
import urllib
import messages
import yaml
import appengine_config
from common import schema_fields
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from models import vfs
from modules.dashboard import utils as dashboard_utils
from modules.oeditor import oeditor
from google.appengine.api import users
# Set of string. The relative, normalized path bases we allow uploading of
# binary data into.
ALLOWED_ASSET_BINARY_BASES = frozenset([
    'assets/img',
])

# Set of string. The relative, normalized path bases we allow uploading of text
# data into.
ALLOWED_ASSET_TEXT_BASES = frozenset([
    'assets/css',
    'assets/html',
    'assets/lib',
    'views'
])

# Set of string. Path bases whose assets can be previewed inline in the
# asset-management editor forms.
DISPLAYABLE_ASSET_BASES = frozenset([
    'assets/img',
])

# Maximum accepted size, in kilobytes, of a single uploaded asset.
MAX_ASSET_UPLOAD_SIZE_K = 500
def allowed_asset_upload_bases():
    """The relative, normalized path bases we allow uploading into.

    Returns:
        Set of string.
    """
    # Union of the binary and text upload roots.
    return ALLOWED_ASSET_BINARY_BASES | ALLOWED_ASSET_TEXT_BASES
def is_text_payload(payload):
    """Whether payload counts as text (i.e., transforms can serialize it)."""
    try:
        transforms.dumps(payload)
    except:  # Any failure means "not text". pylint: disable=bare-except
        return False
    return True
def is_readonly_asset(asset):
    """An asset is read-only when it has no (truthy) metadata attribute."""
    metadata = getattr(asset, 'metadata', None)
    return not metadata
class FilesRights(object):
    """Manages view/edit rights for files."""

    @classmethod
    def can_view(cls, handler):
        # Viewing file contents is restricted to course admins.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_edit(cls, handler):
        # Editing is likewise restricted to course admins.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        # Deletion follows the same policy as editing.
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        # Adding new files follows the same policy as editing.
        return cls.can_edit(handler)
class FileManagerAndEditor(ApplicationHandler):
    """An editor for editing and managing files."""

    # Read-only view of the files shipped with the application, rooted at
    # '/'. Used to tell whether a datastore asset shadows an original
    # on-disk file (in which case "delete" really means "restore").
    local_fs = vfs.LocalReadOnlyFileSystem(logical_home_folder='/')

    def _get_delete_url(self, base_url, key, xsrf_token_name):
        # Build a delete URL carrying the target key and a fresh XSRF token.
        return '%s?%s' % (
            self.canonicalize_url(base_url),
            urllib.urlencode({
                'key': key,
                'xsrf_token': cgi.escape(
                    self.create_xsrf_token(xsrf_token_name)),
            }))

    def post_create_or_edit_settings(self):
        """Handles creation or/and editing of course.yaml."""
        create_course_file_if_not_exists(self)
        # Carry the dashboard tab selection through the redirect.
        extra_args = {}
        for name in ('tab', 'tab_title'):
            value = self.request.get(name)
            if value:
                extra_args[name] = value
        self.redirect(self.get_action_url('edit_settings', key='/course.yaml',
                                          extra_args=extra_args))

    def get_edit_settings(self):
        """Shows editor for course.yaml."""
        key = self.request.get('key')
        tab = self.request.get('tab')
        exit_url = self.canonicalize_url('/dashboard?action=settings&tab=%s' %
                                         tab)
        rest_url = self.canonicalize_url('/rest/files/item')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            FilesItemRESTHandler.SCHEMA_JSON,
            FilesItemRESTHandler.SCHEMA_ANNOTATIONS_DICT,
            key, rest_url, exit_url,
            required_modules=FilesItemRESTHandler.REQUIRED_MODULES)
        template_values = {}
        template_values['page_title'] = self.format_title('Edit Settings')
        template_values['page_description'] = messages.EDIT_SETTINGS_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values, in_action='settings')

    def _is_displayable_asset(self, path):
        # True when the asset lives under a base we can preview inline.
        return any([path.startswith(name) for name in DISPLAYABLE_ASSET_BASES])

    def get_manage_asset(self):
        """Show an upload/delete dialog for assets."""
        key = self.request.get('key').lstrip('/').rstrip('/')
        if not _is_asset_in_allowed_bases(key):
            raise ValueError('Cannot add/edit asset with key "%s" ' % key +
                             'which is not under a valid asset path')
        fs = self.app_context.fs.impl
        delete_url = None
        delete_method = None
        delete_message = None
        auto_return = False
        if fs.isfile(fs.physical_to_logical(key)):
            # Existing file: offer deletion.
            delete_url = self._get_delete_url(
                FilesItemRESTHandler.URI, key, 'delete-asset')
            delete_method = 'delete'
        else:
            # Sadly, since we don't know the name of the asset when we build
            # the form, the form can't update itself to show the uploaded
            # asset when the upload completes. Rather than continue to
            # show a blank form, bring the user back to the assets list.
            auto_return = True
        # Pick a schema with or without the inline preview field.
        if self._is_displayable_asset(key):
            json = AssetItemRESTHandler.DISPLAYABLE_SCHEMA_JSON
            ann = AssetItemRESTHandler.DISPLAYABLE_SCHEMA_ANNOTATIONS_DICT
        else:
            json = AssetItemRESTHandler.UNDISPLAYABLE_SCHEMA_JSON
            ann = AssetItemRESTHandler.UNDISPLAYABLE_SCHEMA_ANNOTATIONS_DICT
        tab_name = self.request.get('tab')
        exit_url = self.canonicalize_url(
            dashboard_utils.build_assets_url(tab_name))
        rest_url = self.canonicalize_url(AssetItemRESTHandler.URI)
        form_html = oeditor.ObjectEditor.get_html_for(
            self, json, ann, key, rest_url, exit_url, save_method='upload',
            save_button_caption='Upload', auto_return=auto_return,
            delete_url=delete_url, delete_method=delete_method,
            delete_message=delete_message,
            required_modules=AssetItemRESTHandler.REQUIRED_MODULES,
            additional_dirs=[os.path.join(dashboard_utils.RESOURCES_DIR, 'js')])
        template_values = {}
        template_values['page_title'] = self.format_title('Manage Asset')
        template_values['page_description'] = messages.UPLOAD_ASSET_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values, 'assets', tab_name)

    def get_manage_text_asset(self):
        """Show an edit/save/delete/revert form for a text asset."""
        assert self.app_context.is_editable_fs()
        uri = self.request.get('uri')
        assert uri
        tab_name = self.request.get('tab')
        asset = self.app_context.fs.impl.get(
            os.path.join(appengine_config.BUNDLE_ROOT, uri))
        assert asset
        # In the datastore => user-customized; also on local disk => an
        # original exists underneath, so "delete" becomes "restore".
        asset_in_datastore_fs = not is_readonly_asset(asset)
        try:
            asset_in_local_fs = bool(self.local_fs.get(uri))
        except IOError:
            asset_in_local_fs = False
        exit_url = self.canonicalize_url(
            dashboard_utils.build_assets_url(tab_name))
        rest_url = self.canonicalize_url(TextAssetRESTHandler.URI)
        delete_button_caption = 'Delete'
        delete_message = None
        delete_url = None
        if asset_in_datastore_fs:
            delete_message = 'Are you sure you want to delete %s?' % uri
            delete_url = self._get_delete_url(
                TextAssetRESTHandler.URI, uri,
                TextAssetRESTHandler.XSRF_TOKEN_NAME)
            if asset_in_local_fs:
                delete_message = (
                    'Are you sure you want to restore %s to the original version? '
                    'All your customizations will be lost.' % uri)
                delete_button_caption = 'Restore original'
        # Disable the save button if the payload is not text by setting method
        # to ''.
        save_method = 'put' if is_text_payload(asset.read()) else ''
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            TextAssetRESTHandler.SCHEMA.get_json_schema(),
            TextAssetRESTHandler.SCHEMA.get_schema_dict(),
            uri,
            rest_url,
            exit_url,
            delete_button_caption=delete_button_caption,
            delete_method='delete',
            delete_message=delete_message,
            delete_url=delete_url,
            required_modules=TextAssetRESTHandler.REQUIRED_MODULES,
            save_method=save_method,
        )
        self.render_page({
            'page_title': self.format_title('Edit ' + uri),
            'main_content': form_html,
        }, 'assets', tab_name)
def create_course_file_if_not_exists(handler):
    """Seed an empty course.yaml for the course if one is not yet present."""
    assert handler.app_context.is_editable_fs()

    fs = handler.app_context.fs.impl
    course_yaml_path = fs.physical_to_logical('/course.yaml')
    if fs.isfile(course_yaml_path):
        return  # Already exists; nothing to do.
    default_yaml = courses.EMPTY_COURSE_YAML % users.get_current_user().email()
    fs.put(course_yaml_path, vfs.string_to_stream(default_yaml))
def _match_allowed_bases(filename,
                         allowed_bases=allowed_asset_upload_bases()):
    """Return the allowed base that filename falls under, or None."""
    for base in allowed_bases:
        # Match either the base directory itself or a path strictly below
        # it (i.e., base followed by a '/' separator).
        if filename == base or filename.startswith(base + '/'):
            return base
    return None
def _is_asset_in_allowed_bases(filename,
                               allowed_bases=allowed_asset_upload_bases()):
    """Whether filename lies under one of the allowed upload bases.

    Args:
        filename: str. Relative, normalized path of the asset.
        allowed_bases: iterable of str. Candidate base directories.

    Returns:
        bool.
    """
    # bool(...) replaces the redundant 'True if x else False' form.
    return bool(_match_allowed_bases(filename, allowed_bases))
class TextAssetRESTHandler(BaseRESTHandler):
    """REST endpoints for text assets."""

    # Shown in place of the contents when the asset's bytes are not
    # serializable as text (see is_text_payload).
    ERROR_MESSAGE_UNEDITABLE = (
        'Error: contents are not text and cannot be edited.')
    REQUIRED_MODULES = [
        'inputex-hidden',
        'inputex-textarea',
    ]
    SCHEMA = schema_fields.FieldRegistry('Edit asset', description='Text Asset')
    SCHEMA.add_property(schema_fields.SchemaField(
        'contents', 'Contents', 'text',
    ))
    SCHEMA.add_property(schema_fields.SchemaField(
        'is_text', 'Is Text', 'boolean', hidden=True,
    ))
    SCHEMA.add_property(schema_fields.SchemaField(
        'readonly', 'ReadOnly', 'boolean', hidden=True,
    ))
    URI = '/rest/assets/text'
    XSRF_TOKEN_NAME = 'manage-text-asset'

    def delete(self):
        """Handles the delete verb."""
        assert self.app_context.is_editable_fs()
        filename = self.request.get('key')
        if not (filename and self.assert_xsrf_token_or_fail(
                self.request, self.XSRF_TOKEN_NAME, {'key': filename})):
            return
        if not FilesRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': filename})
            return
        # Refuse to delete anything outside the sanctioned asset bases.
        if not _is_asset_in_allowed_bases(filename):
            transforms.send_json_response(
                self, 400, 'Malformed request.', {'key': filename})
            return
        self.app_context.fs.impl.delete(
            os.path.join(appengine_config.BUNDLE_ROOT, filename))
        transforms.send_json_response(self, 200, 'Done.')

    def get(self):
        """Handles the get verb."""
        assert FilesRights.can_edit(self)
        filename = self.request.get('key')
        assert filename
        asset = self.app_context.fs.impl.get(
            os.path.join(appengine_config.BUNDLE_ROOT, filename))
        assert asset
        contents = asset.read()
        is_text = is_text_payload(contents)
        if not is_text:
            # Don't ship non-text bytes to the form; substitute a message.
            contents = self.ERROR_MESSAGE_UNEDITABLE
        json_message = 'Success.' if is_text else self.ERROR_MESSAGE_UNEDITABLE
        json_payload = {
            'contents': contents,
            'is_text': is_text,
            'readonly': is_readonly_asset(asset),
        }
        transforms.send_json_response(
            self, 200, json_message, payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN_NAME))

    def put(self):
        """Handles the put verb."""
        assert self.app_context.is_editable_fs()
        request = self.request.get('request')
        assert request
        request = transforms.loads(request)
        payload = transforms.loads(request.get('payload'))
        filename = request.get('key')
        if not (filename and self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN_NAME, {'key': filename})):
            return
        if not FilesRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': filename})
            return
        # Refuse to write anything outside the sanctioned asset bases.
        if not _is_asset_in_allowed_bases(filename):
            transforms.send_json_response(
                self, 400, 'Malformed request.', {'key': filename})
            return
        self.app_context.fs.impl.put(
            os.path.join(appengine_config.BUNDLE_ROOT, filename),
            vfs.string_to_stream(unicode(payload.get('contents'))))
        transforms.send_json_response(self, 200, 'Saved.')
class FilesItemRESTHandler(BaseRESTHandler):
    """Provides REST API for a file."""

    # Schema for a single file: its key (path), the transfer encoding of
    # its contents, and the contents themselves.
    SCHEMA_JSON = """
        {
            "id": "Text File",
            "type": "object",
            "description": "Text File",
            "properties": {
                "key" : {"type": "string"},
                "encoding" : {"type": "string"},
                "content": {"type": "text"}
            }
        }
        """
    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)
    SCHEMA_ANNOTATIONS_DICT = [
        (['title'], 'Text File'),
        (['properties', 'key', '_inputex'], {
            'label': 'ID', '_type': 'uneditable'}),
        (['properties', 'encoding', '_inputex'], {
            'label': 'Encoding', '_type': 'uneditable'}),
        (['properties', 'content', '_inputex'], {
            'label': 'Content', '_type': 'text'})]
    REQUIRED_MODULES = [
        'inputex-string', 'inputex-textarea', 'inputex-select',
        'inputex-uneditable']
    URI = '/rest/files/item'

    # Transfer encodings: text files travel as UTF-8 strings; anything else
    # travels as base64-encoded binary.
    FILE_ENCODING_TEXT = 'text/utf-8'
    FILE_ENCODING_BINARY = 'binary/base64'
    FILE_EXTENTION_TEXT = ['.js', '.css', '.yaml', '.html', '.csv']

    @classmethod
    def is_text_file(cls, filename):
        """Whether filename's extension marks it as an editable text file."""
        # str.endswith accepts a tuple of suffixes, replacing the former
        # hand-rolled linear search (and resolving its TODO) with one call.
        return filename.endswith(tuple(cls.FILE_EXTENTION_TEXT))

    def validate_content(self, filename, content):
        """Raise an exception if content is not valid for this filename."""
        # TODO(psimakov): handle more file types here
        if filename == '/course.yaml':
            courses.Course.validate_course_yaml(content, self.get_course())
        elif filename.endswith('.yaml'):
            yaml.safe_load(content)

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        assert self.app_context.is_editable_fs()

        key = self.request.get('key')
        if not FilesRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        # Load data if possible.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        try:
            stream = fs.get(filename)
        except:  # pylint: disable=bare-except
            stream = None
        if not stream:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        # Prepare data, choosing the encoding by file extension.
        entity = {'key': key}
        if self.is_text_file(key):
            entity['encoding'] = self.FILE_ENCODING_TEXT
            entity['content'] = vfs.stream_to_string(stream)
        else:
            entity['encoding'] = self.FILE_ENCODING_BINARY
            entity['content'] = base64.b64encode(stream.read())

        # Render JSON response.
        json_payload = transforms.dict_to_json(
            entity,
            FilesItemRESTHandler.SCHEMA_DICT)
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'file-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        assert self.app_context.is_editable_fs()

        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'file-put', {'key': key}):
            return

        # TODO(psimakov): we don't allow editing of all files; restrict further
        if not FilesRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        payload = request.get('payload')
        entity = transforms.loads(payload)
        encoding = entity['encoding']
        content = entity['content']

        # Validate the file content; any exception becomes a 412 response.
        errors = []
        try:
            if encoding == self.FILE_ENCODING_TEXT:
                content_stream = vfs.string_to_stream(content)
            elif encoding == self.FILE_ENCODING_BINARY:
                content_stream = base64.b64decode(content)
            else:
                errors.append('Unknown encoding: %s.' % encoding)
            self.validate_content(key, content)
        except Exception as e:  # pylint: disable=W0703
            errors.append('Validation error: %s' % e)
        if errors:
            transforms.send_json_response(self, 412, ''.join(errors))
            return

        # Store new file content.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        fs.put(filename, content_stream)

        # Send reply.
        transforms.send_json_response(self, 200, 'Saved.')

    def delete(self):
        """Handles REST DELETE verb."""
        key = self.request.get('key')

        if not self.assert_xsrf_token_or_fail(
                self.request, 'delete-asset', {'key': key}):
            return

        if not FilesRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        fs = self.app_context.fs.impl
        path = fs.physical_to_logical(key)
        if not fs.isfile(path):
            # NOTE(review): 403 for a missing file is unconventional (404
            # would be usual), but clients may rely on it; left unchanged.
            transforms.send_json_response(
                self, 403, 'File does not exist.', None)
            return
        fs.delete(path)

        transforms.send_json_response(self, 200, 'Deleted.')
def add_asset_handler_base_fields(schema):
    """Helper function for building schemas of asset-handling OEditor UIs."""
    # Visible upload control.
    schema.add_property(schema_fields.SchemaField(
        'file', 'Upload New File', 'file', optional=True,
        description='You may upload a file to set or replace the content '
        'of the asset.'))
    # Hidden bookkeeping fields carried through the form round-trip.
    for field_name, label in (('key', 'Key'), ('base', 'Base')):
        schema.add_property(schema_fields.SchemaField(
            field_name, label, 'string', editable=False, hidden=True))
def add_asset_handler_display_field(schema):
    """Helper function for building schemas of asset-handling OEditor UIs."""
    # 'visu' instructs OEditor to render the value via the client-side
    # renderAsset function instead of as a plain string.
    render_hint = {
        'visu': {
            'visuType': 'funcName',
            'funcName': 'renderAsset'
        }
    }
    schema.add_property(schema_fields.SchemaField(
        'asset_url', 'Asset', 'string', editable=False, optional=True,
        description='This is the asset for the native language for the course.',
        extra_schema_dict_values=render_hint))
def generate_asset_rest_handler_schema():
    """Schema for assets that cannot be previewed inline."""
    registry = schema_fields.FieldRegistry('Asset', description='Asset')
    add_asset_handler_base_fields(registry)
    return registry
def generate_displayable_asset_rest_handler_schema():
    """Schema for assets that can also be previewed inline in the form."""
    registry = schema_fields.FieldRegistry('Asset', description='Asset')
    # Preview field first, then the shared upload/bookkeeping fields.
    add_asset_handler_display_field(registry)
    add_asset_handler_base_fields(registry)
    return registry
class AssetItemRESTHandler(BaseRESTHandler):
    """Provides REST API for managing assets."""

    URI = '/rest/assets/item'
    # Two schema flavors: with and without the inline preview field.
    UNDISPLAYABLE_SCHEMA = generate_asset_rest_handler_schema()
    UNDISPLAYABLE_SCHEMA_JSON = UNDISPLAYABLE_SCHEMA.get_json_schema()
    UNDISPLAYABLE_SCHEMA_ANNOTATIONS_DICT = (
        UNDISPLAYABLE_SCHEMA.get_schema_dict())
    DISPLAYABLE_SCHEMA = generate_displayable_asset_rest_handler_schema()
    DISPLAYABLE_SCHEMA_JSON = DISPLAYABLE_SCHEMA.get_json_schema()
    DISPLAYABLE_SCHEMA_ANNOTATIONS_DICT = DISPLAYABLE_SCHEMA.get_schema_dict()
    REQUIRED_MODULES = [
        'inputex-string', 'inputex-uneditable', 'inputex-file',
        'inputex-hidden', 'io-upload-iframe']
    XSRF_TOKEN_NAME = 'asset-upload'

    def _can_write_payload_to_base(self, payload, base):
        """Determine if a given payload type can be put in a base directory."""
        # Binary data can go in images; text data can go anywhere else.
        if _is_asset_in_allowed_bases(base, ALLOWED_ASSET_BINARY_BASES):
            return True
        else:
            return is_text_payload(payload) and _is_asset_in_allowed_bases(
                base, ALLOWED_ASSET_TEXT_BASES)

    def get(self):
        """Provides empty initial content for asset upload editor."""
        # TODO(jorr): Pass base URI through as request param when generalized.
        key = self.request.get('key')
        base = _match_allowed_bases(key)
        if not base:
            transforms.send_json_response(
                self, 400, 'Malformed request.', {'key': key})
            return
        json_payload = {
            'key': key,
            'base': base,
        }
        # If the file already exists, expose its URL for inline preview.
        fs = self.app_context.fs.impl
        if fs.isfile(fs.physical_to_logical(key)):
            json_payload['asset_url'] = key
        transforms.send_json_response(
            self, 200, 'Success.', payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN_NAME))

    def post(self):
        # Validate first; _validate_post has already emitted an error
        # response when is_valid is False.
        is_valid, payload, upload = self._validate_post()
        if is_valid:
            key = payload['key']
            base = payload['base']
            if key == base:
                # File name not given on setup; we are uploading a new file.
                filename = os.path.split(self.request.POST['file'].filename)[1]
                physical_path = os.path.join(base, filename)
                is_overwrite_allowed = False
            else:
                # File name already established on setup; use existing
                # file's name and uploaded file's data.
                physical_path = key
                is_overwrite_allowed = True
            self._handle_post(physical_path, is_overwrite_allowed, upload)

    def _validate_post(self):
        """Handles asset uploads."""
        assert self.app_context.is_editable_fs()
        if not FilesRights.can_add(self):
            transforms.send_file_upload_response(
                self, 401, 'Access denied.')
            return False, None, None
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(request, self.XSRF_TOKEN_NAME,
                                              None):
            return False, None, None
        upload = self.request.POST['file']
        # When no file was chosen, the browser posts a plain string here
        # rather than a FieldStorage.
        if not isinstance(upload, cgi.FieldStorage):
            transforms.send_file_upload_response(
                self, 403, 'No file specified.')
            return False, None, None
        payload = transforms.loads(request['payload'])
        base = payload['base']
        if not _is_asset_in_allowed_bases(base):
            transforms.send_file_upload_response(
                self, 400, 'Malformed request.', {'key': base})
            return False, None, None
        content = upload.file.read()
        if not self._can_write_payload_to_base(content, base):
            transforms.send_file_upload_response(
                self, 403, 'Cannot write binary data to %s.' % base)
            return False, None, None
        if len(content) > MAX_ASSET_UPLOAD_SIZE_K * 1024:
            transforms.send_file_upload_response(
                self, 403,
                'Max allowed file upload size is %dK' % MAX_ASSET_UPLOAD_SIZE_K)
            return False, None, None
        return True, payload, upload

    def _handle_post(self, physical_path, is_overwrite_allowed, upload):
        # Write the uploaded bytes to physical_path, replacing any existing
        # file only when overwrite is permitted.
        fs = self.app_context.fs.impl
        path = fs.physical_to_logical(physical_path)
        if fs.isfile(path):
            if not is_overwrite_allowed:
                transforms.send_file_upload_response(
                    self, 403, 'Cannot overwrite existing file.')
                return
            else:
                fs.delete(path)
        # _validate_post consumed the stream; rewind before storing.
        upload.file.seek(0)
        fs.put(path, upload.file)
        transforms.send_file_upload_response(self, 200, 'Saved.')
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting editing of DAO/DTO-managed models."""
__author__ = [
'John Orr (jorr@google.com)',
'Mike Gainer (mgainer@googe.com)'
]
import cgi
import copy
import urllib
from common.crypto import XsrfTokenManager
from controllers import utils
from models import roles
from models import transforms
from modules.oeditor import oeditor
class BaseDatastoreAssetEditor(utils.ApplicationHandler):

    def get_form(
        self, rest_handler, key, exit_url, deletable=True,
        auto_return=False, app_context=None):
        """Build the Jinja template for the editor form."""
        rest_url = self.canonicalize_url(rest_handler.URI)
        if exit_url:
            exit_url = self.canonicalize_url(exit_url)

        # Only existing, deletable items get a delete URL; it carries the
        # item key plus a fresh XSRF token for the handler's token scope.
        delete_url = None
        if key and deletable:
            delete_query = urllib.urlencode({
                'key': key,
                'xsrf_token': cgi.escape(
                    self.create_xsrf_token(rest_handler.XSRF_TOKEN))
            })
            delete_url = '%s?%s' % (
                self.canonicalize_url(rest_handler.URI), delete_query)

        # Some handlers need the app context to build their schema.
        schema = (
            rest_handler.get_schema(app_context) if app_context
            else rest_handler.get_schema())

        return oeditor.ObjectEditor.get_html_for(
            self,
            schema.get_json_schema(),
            schema.get_schema_dict(),
            key, rest_url, exit_url,
            auto_return=auto_return,
            delete_url=delete_url, delete_method='delete',
            required_modules=rest_handler.REQUIRED_MODULES,
            extra_js_files=rest_handler.EXTRA_JS_FILES,
            extra_css_files=getattr(rest_handler, 'EXTRA_CSS_FILES', None),
            additional_dirs=getattr(rest_handler, 'ADDITIONAL_DIRS', None),)
class BaseDatastoreRestHandler(utils.BaseRESTHandler):
    """Basic REST operations for DTO objects.

    Provides REST functionality for derived classes based on Entity/DAO/DTO
    pattern (see models/models.py).  Subclasses are expected to provide
    the following:

    DAO: Subclasses should have a class-level variable named "DAO".
        This should name the DAO type corresponding to the entity
        being handled.  DAO must have a member "DTO", which names
        the DTO type.
    XSRF_TOKEN: A short string of the form 'foobar-edit', where foobar
        is a short, lowercased version of the name of the entity type.
    SCHEMA_VERSIONS: A list of supported version numbers of schemas
        of items.  The 0th element of the list must be the preferred
        version number for newly-created items.

    Hook method overrides.  Other than the basic 'put', 'delete', and
    'get' methods, there are a number of hook functions you may need
    to override.  The only mandatory function is 'get_default_version()'.
    """

    def sanitize_input_dict(self, json_dict):
        """Give subclasses a hook to clean up incoming data before storage.

        Args:
            json_dict: This is the raw dict containing a parse of the JSON
                object as returned by the form editor.  In particular, it
                has not been converted into a DTO yet.  Modify the dict
                in place to clean up values.  (E.g., remove leading/trailing
                whitespace, fix up string/int conversions, etc.)
        """
        pass

    def validate(self, item_dict, key, schema_version, errors):
        """Allow subclasses to do validations that the form cannot.

        Args:
            item_dict: A Python dict that will be used to populate
                the saved version of the item.  Modify this in place as
                necessary.
            key: The key for the item, if available.  New items will not
                yet have a key when this function is called.
            schema_version: This version has already been checked against
                the SCHEMA_VERSIONS declared in your class; it is provided
                to facilitate dispatch to a version-specific validation
                function.
            errors: A list of strings.  These will be displayed
                on the editor page when there is a problem.  The save
                operation will be prevented if there are any entries in
                the errors list.
        """
        pass

    def pre_save_hook(self, dto):
        """Give subclasses a hook to modify the DTO before saving."""
        pass

    def after_save_hook(self):
        """Give subclasses a hook to perform an action after saving."""
        pass

    def is_deletion_allowed(self, dto):
        """Allow subclasses to check referential integrity before delete.

        If deletion is not allowed, the subclass should:
        - Return False.
        - Return an appropriate message to the REST client; the base
          class will just return without taking any further action.

        Args:
            dto: A DTO of the type specified by the subclass' DAO.DTO variable.
        Returns:
            True: The base class may proceed with deletion.
            False: Deletion is prohibited; derived class has emitted a response.
        """
        return True

    def transform_for_editor_hook(self, item_dict):
        """Allow subclasses to modify dict before it goes to the edit form."""
        return item_dict

    def transform_after_editor_hook(self, item_dict):
        """Allow subclasses to modify dict returned from editor form."""
        return item_dict

    def get_default_content(self):
        """Subclass provides default values to initialize editor form."""
        raise NotImplementedError('Subclasses must override this function.')

    def put(self):
        """Store a DTO in the datastore in response to a PUT."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN, {'key': key}):
            return
        if not roles.Roles.is_course_admin(self.app_context):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        payload = request.get('payload')
        json_dict = transforms.loads(payload)
        self.sanitize_input_dict(json_dict)

        # Validate: version must be supported, then subclass hooks run.
        errors = []
        try:
            python_dict = transforms.json_to_dict(
                json_dict, self.get_schema().get_json_schema_dict())
            version = python_dict.get('version')
            if version not in self.SCHEMA_VERSIONS:
                errors.append('Version %s not supported.' % version)
            else:
                python_dict = self.transform_after_editor_hook(python_dict)
                self.validate(python_dict, key, version, errors)
        except ValueError as err:
            errors.append(str(err))
        if errors:
            self.validation_error('\n'.join(errors), key=key)
            return

        # A missing key means "create new"; DAO.save assigns the key.
        if key:
            item = self.DAO.DTO(key, python_dict)
        else:
            item = self.DAO.DTO(None, python_dict)

        self.pre_save_hook(item)
        key_after_save = self.DAO.save(item)
        self.after_save_hook()

        transforms.send_json_response(
            self, 200, 'Saved.', payload_dict={'key': key_after_save})

    def delete(self):
        """Delete the Entity in response to REST request."""
        key = self.request.get('key')

        if not self.assert_xsrf_token_or_fail(
                self.request, self.XSRF_TOKEN, {'key': key}):
            return
        if not roles.Roles.is_course_admin(self.app_context):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        item = self.DAO.load(key)
        if not item:
            transforms.send_json_response(
                self, 404, 'Not found.', {'key': key})
            return

        if self.is_deletion_allowed(item):
            self.DAO.delete(item)
            transforms.send_json_response(self, 200, 'Deleted.')

    def get(self):
        """Respond to the REST GET verb with the contents of the item."""
        key = self.request.get('key')
        if not roles.Roles.is_course_admin(self.app_context):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        if key:
            item = self.DAO.load(key)
            # Bug fix: previously a stale/unknown key crashed with an
            # AttributeError on item.dict; answer 404 as delete() does.
            if not item:
                transforms.send_json_response(
                    self, 404, 'Not found.', {'key': key})
                return
            version = item.dict.get('version')
            if version not in self.SCHEMA_VERSIONS:
                transforms.send_json_response(
                    self, 403, 'Version %s not supported.' % version,
                    {'key': key})
                return
            display_dict = copy.copy(item.dict)
            display_dict['id'] = item.id
            payload_dict = self.transform_for_editor_hook(display_dict)
        else:
            # No key: seed the editor form with defaults for a new item.
            payload_dict = self.get_default_content()

        transforms.send_json_response(
            self, 200, 'Success',
            payload_dict=payload_dict,
            xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_TOKEN))
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Messages used in the dashboard."""
__author__ = 'John Orr (jorr@google.com)'
from common import safe_dom
# Each message pairs optional body text with an optional documentation
# link; see common.safe_dom.assemble_text_message. These strings are
# shown verbatim in the dashboard UI.

ABOUT_THE_COURSE_DESCRIPTION = safe_dom.assemble_text_message("""
This information is configured by an administrator from the Admin pages.
""", None)

ADMIN_PREFERENCES_DESCRIPTION = safe_dom.assemble_text_message("""
Preferences settings for individual course admins.
""", None)

ADMINISTERED_COURSES_DESCRIPTION = safe_dom.assemble_text_message("""
Courses for which you have administrator privileges
""", None)

ASSESSMENT_EDITOR_DESCRIPTION = safe_dom.assemble_text_message(
    None, 'https://code.google.com/p/course-builder/wiki/CreateAssessments')

ASSETS_DESCRIPTION = safe_dom.assemble_text_message("""
These are all the assets for your course. You can upload new images and
documents here, after which you can use them in your lessons and activities.
You may create, edit, and delete activities and assessments from the Outline
page. All other assets must be edited by an administrator.
""", None)

ASSIGNMENTS_MENU_DESCRIPTION = safe_dom.assemble_text_message("""
Select a peer-reviewed assignment and enter a student's email address to view
their assignment submission and any associated reviews.
""", None)

CONTENTS_OF_THE_COURSE_DESCRIPTION = safe_dom.assemble_text_message("""
The course.yaml file contains all course-level settings. It can be
modified from other settings sub-tabs, or directly edited in its
raw form here.
""", 'https://code.google.com/p/course-builder/wiki/CourseSettings')

COURSE_ADMIN_DESCRIPTION = safe_dom.assemble_text_message("""
Admin settings for users who are course authors but not
site administrators.
""", None)

COURSE_OUTLINE_DESCRIPTION = safe_dom.assemble_text_message(
    'Build, organize and preview your course here.',
    'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

COURSE_OUTLINE_EDITOR_DESCRIPTION = safe_dom.assemble_text_message("""
Click up/down arrows to re-order units, or lessons within units. To move a
lesson between units, edit that lesson from the outline page and change its
parent unit.
""", None)

COURSE_TEMPLATE_DESCRIPTION = safe_dom.assemble_text_message("""
The course_template.yaml file provides default values for course settings.
These values are not dynamically editable, but you can override them
by editing your course.yaml file directly, or by changing settings in
the other Settings sub-tabs.
You can also change the default settings for all courses by editing
the course_template.yaml file on disk and re-pushing CourseBuilder to
AppEngine. Changing the defaults in the file will not erase or
override any course-specific settings you may have made.
""", None)

DATA_FILES_DESCRIPTION = safe_dom.assemble_text_message("""
The lesson.csv file contains the contents of your lesson. The unit.csv file
contains the course related content shown on the homepage. These files are
located in your Course Builder installation. Edit them directly with an editor
like Notepad++. Be careful, some editors will add extra characters, which may
prevent the uploading of these files.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

EDIT_SETTINGS_DESCRIPTION = safe_dom.assemble_text_message("""
The course.yaml file contains many course settings.
""", 'https://code.google.com/p/course-builder/wiki/CourseSettings')

EDIT_HTML_HOOK_DESCRIPTION = safe_dom.assemble_text_message("""
HTML hooks are snippets of HTML code that are inserted at different points on
the pages of a course. Editing these snippets here permits you to make
global changes to these items.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

IMPORT_COURSE_DESCRIPTION = safe_dom.assemble_text_message("""
Import the contents of another course into this course. Both courses must be on
the same Google App Engine instance.
""", None)

LINK_EDITOR_DESCRIPTION = safe_dom.assemble_text_message("""
Links will appear in your outline and will take students directly to the URL.
""", None)

PAGES_DESCRIPTION = safe_dom.assemble_text_message(
    None, 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

ROLES_DESCRIPTION = """
Manage the different roles associated with your course.
A role binds a set of permissions to a set of users. The role editor allows you
to assign any of the permissions currently registered by the enabled modules.
"""

SETTINGS_DESCRIPTION = safe_dom.assemble_text_message(
    None, 'https://code.google.com/p/course-builder/wiki/Dashboard#Settings')

# Fixed user-facing typo: "acitivities" -> "activities".
UNIT_EDITOR_DESCRIPTION = safe_dom.assemble_text_message("""
Units contain lessons and activities.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Outline')

UPLOAD_ASSET_DESCRIPTION = safe_dom.assemble_text_message("""
Choose a file to upload to this Google App Engine instance. Learn more about
file storage and hosting.
""", 'https://code.google.com/p/course-builder/wiki/Dashboard#Assets')
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting unit and lesson editing."""
__author__ = 'John Orr (jorr@google.com)'
import cgi
import logging
import urllib
import messages
from common import utils as common_utils
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import resources_display
from models import custom_units
from models import roles
from models import transforms
from modules.oeditor import oeditor
from tools import verify
class CourseOutlineRights(object):
    """Manages view/edit rights for course outline."""

    @classmethod
    def can_edit(cls, handler):
        """Only course admins may modify the outline."""
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_view(cls, handler):
        """Viewing the outline editor requires the same rights as editing."""
        return cls.can_edit(handler)

    @classmethod
    def can_delete(cls, handler):
        """Deleting outline items requires edit rights."""
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        """Adding outline items requires edit rights."""
        return cls.can_edit(handler)
class UnitLessonEditor(ApplicationHandler):
    """An editor for the unit and lesson titles."""

    # Schema annotations that hide the legacy activity fields on the
    # lesson editor form when a lesson has no old-style activity.
    HIDE_ACTIVITY_ANNOTATIONS = [
        (['properties', 'activity_title', '_inputex'], {'_type': 'hidden'}),
        (['properties', 'activity_listed', '_inputex'], {'_type': 'hidden'}),
        (['properties', 'activity', '_inputex'], {'_type': 'hidden'}),
    ]

    def get_import_course(self):
        """Shows setup form for course import."""
        template_values = {}
        template_values['page_title'] = self.format_title('Import Course')
        annotations = ImportCourseRESTHandler.SCHEMA_ANNOTATIONS_DICT()
        if not annotations:
            # SCHEMA_ANNOTATIONS_DICT() returns None when there is no
            # other course the current user could import from.
            template_values['main_content'] = 'No courses to import from.'
            self.render_page(template_values)
            return
        exit_url = self.canonicalize_url('/dashboard')
        rest_url = self.canonicalize_url(ImportCourseRESTHandler.URI)
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            ImportCourseRESTHandler.SCHEMA_JSON,
            annotations,
            None, rest_url, exit_url,
            auto_return=True,
            save_button_caption='Import',
            required_modules=ImportCourseRESTHandler.REQUIRED_MODULES)
        template_values = {}
        template_values['page_title'] = self.format_title('Import Course')
        template_values['page_description'] = messages.IMPORT_COURSE_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def post_add_lesson(self):
        """Adds new lesson to a first unit of the course."""
        course = courses.Course(self)
        target_unit = None
        if self.request.get('unit_id'):
            target_unit = course.find_unit_by_id(self.request.get('unit_id'))
        else:
            # No explicit unit requested: fall back to the first unit.
            for unit in course.get_units():
                if unit.type == verify.UNIT_TYPE_UNIT:
                    target_unit = unit
                    break
        if target_unit:
            lesson = course.add_lesson(target_unit)
            course.save()
            # TODO(psimakov): complete 'edit_lesson' view
            self.redirect(self.get_action_url(
                'edit_lesson', key=lesson.lesson_id,
                extra_args={'is_newly_created': 1}))
        else:
            self.redirect('/dashboard')

    def post_add_unit(self):
        """Adds new unit to a course."""
        course = courses.Course(self)
        unit = course.add_unit()
        course.save()
        self.redirect(self.get_action_url(
            'edit_unit', key=unit.unit_id, extra_args={'is_newly_created': 1}))

    def post_add_link(self):
        """Adds new link to a course."""
        course = courses.Course(self)
        link = course.add_link()
        link.href = ''
        course.save()
        self.redirect(self.get_action_url(
            'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1}))

    def post_add_assessment(self):
        """Adds new assessment to a course."""
        course = courses.Course(self)
        assessment = course.add_assessment()
        course.save()
        self.redirect(self.get_action_url(
            'edit_assessment', key=assessment.unit_id,
            extra_args={'is_newly_created': 1}))

    def post_add_custom_unit(self):
        """Adds a custom unit to a course."""
        course = courses.Course(self)
        custom_unit_type = self.request.get('unit_type')
        custom_unit = course.add_custom_unit(custom_unit_type)
        course.save()
        self.redirect(self.get_action_url(
            'edit_custom_unit', key=custom_unit.unit_id,
            extra_args={'is_newly_created': 1,
                        'unit_type': custom_unit_type}))

    def post_set_draft_status(self):
        """Sets the draft status of a course component.

        Only works with CourseModel13 courses, but the REST handler
        is only called with this type of courses.
        """
        key = self.request.get('key')
        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        course = courses.Course(self)
        component_type = self.request.get('type')
        if component_type == 'unit':
            course_component = course.find_unit_by_id(key)
        elif component_type == 'lesson':
            course_component = course.find_lesson_by_id(None, key)
        else:
            transforms.send_json_response(
                self, 401, 'Invalid key.', {'key': key})
            return
        if not course_component:
            # Bug fix: an unknown id used to fall through and crash with
            # AttributeError on now_available below, surfacing as a 500.
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        set_draft = self.request.get('set_draft')
        if set_draft == '1':
            set_draft = True
        elif set_draft == '0':
            set_draft = False
        else:
            transforms.send_json_response(
                self, 401, 'Invalid set_draft value, expected 0 or 1.',
                {'set_draft': set_draft}
            )
            return
        # A component is "available" exactly when it is not a draft.
        course_component.now_available = not set_draft
        course.save()
        transforms.send_json_response(
            self,
            200,
            'Draft status set to %s.' % (
                resources_display.DRAFT_TEXT if set_draft else
                resources_display.PUBLISHED_TEXT
            ), {
                'is_draft': set_draft
            }
        )
        return

    def _render_edit_form_for(
        self, rest_handler_cls, title, schema=None, annotations_dict=None,
        delete_xsrf_token='delete-unit', page_description=None,
        additional_dirs=None, extra_js_files=None, extra_css_files=None):
        """Renders an editor form for a given REST handler class.

        Args:
            rest_handler_cls: REST handler class backing the form.
            title: str. Rendered as 'Edit <title>' in the page header.
            schema: optional FieldRegistry. When omitted, the handler's
                static SCHEMA_JSON/SCHEMA_ANNOTATIONS_DICT are used.
            annotations_dict: optional extra schema annotations.
            delete_xsrf_token: XSRF token name for the delete action.
            page_description: optional text shown under the page title.
            additional_dirs: optional list merged with ADDITIONAL_DIRS.
            extra_js_files: optional list merged with EXTRA_JS_FILES.
            extra_css_files: optional list merged with EXTRA_CSS_FILES.
        """
        annotations_dict = annotations_dict or []
        if schema:
            schema_json = schema.get_json_schema()
            annotations_dict = schema.get_schema_dict() + annotations_dict
        else:
            schema_json = rest_handler_cls.SCHEMA_JSON
            if not annotations_dict:
                annotations_dict = rest_handler_cls.SCHEMA_ANNOTATIONS_DICT
        key = self.request.get('key')
        extra_args = {}
        if self.request.get('is_newly_created'):
            extra_args['is_newly_created'] = 1
        exit_url = self.canonicalize_url('/dashboard')
        rest_url = self.canonicalize_url(rest_handler_cls.URI)
        delete_url = '%s?%s' % (
            self.canonicalize_url(rest_handler_cls.URI),
            urllib.urlencode({
                'key': key,
                'xsrf_token': cgi.escape(
                    self.create_xsrf_token(delete_xsrf_token))
            }))

        def extend_list(target_list, ext_name):
            # Extend the optional arg lists such as extra_js_files by an
            # optional list field on the REST handler class. Used to provide
            # seams for modules to add js files, etc. See LessonRESTHandler
            if hasattr(rest_handler_cls, ext_name):
                target_list = target_list or []
                return (target_list or []) + getattr(rest_handler_cls, ext_name)
            return target_list

        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            schema_json,
            annotations_dict,
            key, rest_url, exit_url,
            extra_args=extra_args,
            delete_url=delete_url, delete_method='delete',
            read_only=not self.app_context.is_editable_fs(),
            required_modules=rest_handler_cls.REQUIRED_MODULES,
            additional_dirs=extend_list(additional_dirs, 'ADDITIONAL_DIRS'),
            extra_css_files=extend_list(extra_css_files, 'EXTRA_CSS_FILES'),
            extra_js_files=extend_list(extra_js_files, 'EXTRA_JS_FILES'))
        template_values = {}
        template_values['page_title'] = self.format_title('Edit %s' % title)
        if page_description:
            template_values['page_description'] = page_description
        template_values['main_content'] = form_html
        self.render_page(template_values)

    def get_edit_unit(self):
        """Shows unit editor."""
        self._render_edit_form_for(
            UnitRESTHandler, 'Unit',
            page_description=messages.UNIT_EDITOR_DESCRIPTION,
            annotations_dict=UnitRESTHandler.get_annotations_dict(
                courses.Course(self), int(self.request.get('key'))))

    def get_edit_custom_unit(self):
        """Shows custom_unit_editor."""
        custom_unit_type = self.request.get('unit_type')
        custom_unit = custom_units.UnitTypeRegistry.get(custom_unit_type)
        rest_handler = custom_unit.rest_handler
        self._render_edit_form_for(
            rest_handler,
            custom_unit.name,
            page_description=rest_handler.DESCRIPTION,
            annotations_dict=rest_handler.get_schema_annotations_dict(
                courses.Course(self)))

    def get_edit_link(self):
        """Shows link editor."""
        self._render_edit_form_for(
            LinkRESTHandler, 'Link',
            page_description=messages.LINK_EDITOR_DESCRIPTION)

    def get_edit_assessment(self):
        """Shows assessment editor."""
        self._render_edit_form_for(
            AssessmentRESTHandler, 'Assessment',
            page_description=messages.ASSESSMENT_EDITOR_DESCRIPTION,
            extra_js_files=['assessment_editor_lib.js', 'assessment_editor.js'])

    def get_edit_lesson(self):
        """Shows the lesson/activity editor."""
        key = self.request.get('key')
        course = courses.Course(self)
        lesson = course.find_lesson_by_id(None, key)
        # Hide legacy activity fields unless this lesson still has one.
        annotations_dict = (
            None if lesson.has_activity
            else UnitLessonEditor.HIDE_ACTIVITY_ANNOTATIONS)
        schema = LessonRESTHandler.get_schema(course, key)
        if courses.has_only_new_style_activities(course):
            schema.get_property('objectives').extra_schema_dict_values[
                'excludedCustomTags'] = set(['gcb-activity'])
        self._render_edit_form_for(
            LessonRESTHandler, 'Lessons and Activities',
            schema=schema,
            annotations_dict=annotations_dict,
            delete_xsrf_token='delete-lesson')
class CommonUnitRESTHandler(BaseRESTHandler):
    """A common super class for all unit REST handlers."""

    # These functions are called with an updated unit object whenever a
    # change is saved.
    POST_SAVE_HOOKS = []

    def unit_to_dict(self, unit):
        """Converts a unit to a dictionary representation."""
        return resources_display.UnitTools(self.get_course()).unit_to_dict(unit)

    def apply_updates(self, unit, updated_unit_dict, errors):
        """Applies changes to a unit; modifies unit input argument."""
        resources_display.UnitTools(courses.Course(self)).apply_updates(
            unit, updated_unit_dict, errors)

    def get(self):
        """A GET REST method shared by all unit types.

        Responds with the unit serialized via unit_to_dict() plus an
        XSRF token for the subsequent PUT.
        """
        key = self.request.get('key')
        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        unit = courses.Course(self).find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        message = ['Success.']
        if self.request.get('is_newly_created'):
            # Confirm creation in the status message shown by the editor.
            unit_type = verify.UNIT_TYPE_NAMES[unit.type].lower()
            message.append(
                'New %s has been created and saved.' % unit_type)
        transforms.send_json_response(
            self, 200, '\n'.join(message),
            payload_dict=self.unit_to_dict(unit),
            xsrf_token=XsrfTokenManager.create_xsrf_token('put-unit'))

    def put(self):
        """A PUT REST method shared by all unit types.

        Order of checks matters: XSRF first, then rights, then existence.
        Validation failures from apply_updates() yield a 412 with the
        accumulated error text.
        """
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        if not self.assert_xsrf_token_or_fail(
            request, 'put-unit', {'key': key}):
            return
        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        unit = courses.Course(self).find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        payload = request.get('payload')
        # SCHEMA_DICT is supplied by the concrete subclass.
        updated_unit_dict = transforms.json_to_dict(
            transforms.loads(payload), self.SCHEMA_DICT)
        errors = []
        self.apply_updates(unit, updated_unit_dict, errors)
        if not errors:
            course = courses.Course(self)
            assert course.update_unit(unit)
            course.save()
            # Let registered modules react to the saved unit.
            common_utils.run_hooks(self.POST_SAVE_HOOKS, unit)
            transforms.send_json_response(self, 200, 'Saved.')
        else:
            transforms.send_json_response(self, 412, '\n'.join(errors))

    def delete(self):
        """Handles REST DELETE verb with JSON payload."""
        key = self.request.get('key')
        if not self.assert_xsrf_token_or_fail(
            self.request, 'delete-unit', {'key': key}):
            return
        if not CourseOutlineRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        course = courses.Course(self)
        unit = course.find_unit_by_id(key)
        if not unit:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        course.delete_unit(unit)
        course.save()
        transforms.send_json_response(self, 200, 'Deleted.')
class UnitRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to unit."""

    URI = '/rest/course/unit'
    SCHEMA = resources_display.ResourceUnit.get_schema(course=None, key=None)
    SCHEMA_JSON = SCHEMA.get_json_schema()
    SCHEMA_DICT = SCHEMA.get_json_schema_dict()
    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-uneditable',
        'inputex-list', 'inputex-hidden', 'inputex-number', 'inputex-integer',
        'inputex-checkbox', 'gcb-rte']

    @classmethod
    def get_annotations_dict(cls, course, this_unit_id):
        """Build schema annotations with dynamic pre/post assessment choices.

        The dropdowns offer only assessments that are not already claimed
        as pre/post assessments by some other unit, excluding old-style
        (1.4 model) assessments -- which cannot be rendered inside a Unit
        page -- and assessments restricted to labels/tracks.
        """
        selectable = {}
        claimed = set()
        for unit in course.get_units():
            if unit.type == verify.UNIT_TYPE_ASSESSMENT:
                model_version = course.get_assessment_model_version(unit)
                track_labels = course.get_unit_track_labels(unit)
                if (model_version != courses.ASSESSMENT_MODEL_VERSION_1_4 and
                    not track_labels):
                    selectable[unit.unit_id] = unit
            elif (unit.type == verify.UNIT_TYPE_UNIT and
                  this_unit_id != unit.unit_id):
                # Another unit already owns these assessments.
                if unit.pre_assessment:
                    claimed.add(unit.pre_assessment)
                if unit.post_assessment:
                    claimed.add(unit.post_assessment)
        for assessment_id in claimed:
            selectable.pop(assessment_id, None)
        schema = resources_display.ResourceUnit.get_schema(course, this_unit_id)
        choices = [(-1, '-- None --')]
        for assessment_id in sorted(selectable):
            choices.append((assessment_id, selectable[assessment_id].title))
        schema.get_property('pre_assessment').set_select_data(choices)
        schema.get_property('post_assessment').set_select_data(choices)
        return schema.get_schema_dict()
class LinkRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to link."""

    # All CRUD behavior is inherited from CommonUnitRESTHandler; this
    # subclass only binds the URI and the link-specific schema.
    URI = '/rest/course/link'
    SCHEMA = resources_display.ResourceLink.get_schema(course=None, key=None)
    SCHEMA_JSON = SCHEMA.get_json_schema()
    SCHEMA_DICT = SCHEMA.get_json_schema_dict()
    SCHEMA_ANNOTATIONS_DICT = SCHEMA.get_schema_dict()
    # InputEx widget modules the editor form requires.
    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-uneditable',
        'inputex-list', 'inputex-hidden', 'inputex-number', 'inputex-checkbox']
class ImportCourseRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to course import."""

    URI = '/rest/course/import'

    SCHEMA_JSON = """
    {
        "id": "Import Course Entity",
        "type": "object",
        "description": "Import Course",
        "properties": {
            "course" : {"type": "string"}
            }
    }
    """
    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    REQUIRED_MODULES = [
        'inputex-string', 'inputex-select', 'inputex-uneditable']

    @classmethod
    def _get_course_list(cls):
        """List importable courses: admin rights required, self excluded."""
        course_list = []
        for acourse in sites.get_all_courses():
            if not roles.Roles.is_course_admin(acourse):
                continue
            if acourse == sites.get_course_for_current_request():
                continue
            atitle = '%s (%s)' % (acourse.get_title(), acourse.get_slug())
            course_list.append({
                'value': acourse.raw, 'label': cgi.escape(atitle)})
        return course_list

    @classmethod
    def SCHEMA_ANNOTATIONS_DICT(cls):
        """Schema annotations are dynamic and include a list of courses.

        Returns:
            None when no course is available to import from; otherwise a
            list of oeditor annotations with the course dropdown choices.
        """
        course_list = cls._get_course_list()
        if not course_list:
            return None

        # Format annotations.
        return [
            (['title'], 'Import Course'),
            (
                ['properties', 'course', '_inputex'],
                {
                    'label': 'Available Courses',
                    '_type': 'select',
                    'choices': course_list})]

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return
        course_list = self._get_course_list()
        if not course_list:
            # Bug fix: indexing [0] on an empty list used to raise
            # IndexError (a 500) when nothing is importable; reply
            # cleanly instead.
            transforms.send_json_response(
                self, 404, 'No courses to import from.', {})
            return
        first_course_in_dropdown = course_list[0]['value']
        transforms.send_json_response(
            self, 200, None,
            payload_dict={'course': first_course_in_dropdown},
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'import-course'))

    def put(self):
        """Handles REST PUT verb with JSON payload: performs the import."""
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(
            request, 'import-course', {'key': None}):
            return
        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return
        payload = request.get('payload')
        course_raw = transforms.json_to_dict(
            transforms.loads(payload), self.SCHEMA_DICT)['course']
        # Resolve the opaque 'raw' course descriptor back to a course.
        source = None
        for acourse in sites.get_all_courses():
            if acourse.raw == course_raw:
                source = acourse
                break
        if not source:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'raw': course_raw})
            return
        course = courses.Course(self)
        errors = []
        try:
            course.import_from(source, errors)
        except Exception as e:  # pylint: disable=broad-except
            # Surface the failure to the user rather than a raw 500.
            logging.exception(e)
            errors.append('Import failed: %s' % e)
        if errors:
            transforms.send_json_response(self, 412, '\n'.join(errors))
            return
        course.save()
        transforms.send_json_response(self, 200, 'Imported.')
class AssessmentRESTHandler(CommonUnitRESTHandler):
    """Provides REST API to assessment."""

    # All CRUD behavior is inherited from CommonUnitRESTHandler; this
    # subclass only binds the URI and the assessment-specific schema.
    URI = '/rest/course/assessment'

    SCHEMA = resources_display.ResourceAssessment.get_schema(
        course=None, key=None)
    SCHEMA_JSON = SCHEMA.get_json_schema()
    SCHEMA_DICT = SCHEMA.get_json_schema_dict()
    SCHEMA_ANNOTATIONS_DICT = SCHEMA.get_schema_dict()

    # InputEx widget modules the editor form requires.
    REQUIRED_MODULES = [
        'gcb-rte', 'inputex-select', 'inputex-string', 'inputex-textarea',
        'inputex-uneditable', 'inputex-integer', 'inputex-hidden',
        'inputex-checkbox', 'inputex-list']
class UnitLessonTitleRESTHandler(BaseRESTHandler):
    """Provides REST API to reorder unit and lesson titles."""

    URI = '/rest/course/outline'
    XSRF_TOKEN = 'unit-lesson-reorder'

    # JSON schema of the PUT payload: an ordered 'outline' array of
    # units, each carrying its ordered 'lessons'. Parsed at runtime;
    # keep the literal unchanged.
    SCHEMA_JSON = """
        {
            "type": "object",
            "description": "Course Outline",
            "properties": {
                "outline": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "id": {"type": "string"},
                            "title": {"type": "string"},
                            "lessons": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "id": {"type": "string"},
                                        "title": {"type": "string"}
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
        }
        """

    SCHEMA_DICT = transforms.loads(SCHEMA_JSON)

    def put(self):
        """Handles REST PUT verb with JSON payload.

        Applies the submitted ordering to the course and saves it.
        """
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(
            request, self.XSRF_TOKEN, {'key': None}):
            return
        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return
        payload = request.get('payload')
        payload_dict = transforms.json_to_dict(
            transforms.loads(payload), self.SCHEMA_DICT)
        course = courses.Course(self)
        course.reorder_units(payload_dict['outline'])
        course.save()
        transforms.send_json_response(self, 200, 'Saved.')
class LessonRESTHandler(BaseRESTHandler):
    """Provides REST API to handle lessons and activities."""

    URI = '/rest/course/lesson'

    # InputEx widget modules the editor form requires.
    REQUIRED_MODULES = [
        'inputex-string', 'gcb-rte', 'inputex-select', 'inputex-textarea',
        'inputex-uneditable', 'inputex-checkbox', 'inputex-hidden']

    # Enable modules to specify locations to load JS and CSS files
    ADDITIONAL_DIRS = []
    # Enable modules to add css files to be shown in the editor page.
    EXTRA_CSS_FILES = []
    # Enable modules to add js files to be shown in the editor page.
    EXTRA_JS_FILES = []

    # Enable other modules to add transformations to the schema.Each member must
    # be a function of the form:
    #     callback(lesson_field_registry)
    # where the argument is the root FieldRegistry for the schema
    SCHEMA_LOAD_HOOKS = []

    # Enable other modules to add transformations to the load. Each member must
    # be a function of the form:
    #     callback(lesson, lesson_dict)
    # and the callback should update fields of the lesson_dict, which will be
    # returned to the caller of a GET request.
    PRE_LOAD_HOOKS = []

    # Enable other modules to add transformations to the save. Each member must
    # be a function of the form:
    #     callback(lesson, lesson_dict)
    # and the callback should update fields of the lesson with values read from
    # the dict which was the payload of a PUT request.
    PRE_SAVE_HOOKS = []

    # These functions are called with an updated lesson object whenever a
    # change is saved.
    POST_SAVE_HOOKS = []

    @classmethod
    def get_schema(cls, course, key):
        """Return the lesson schema after applying SCHEMA_LOAD_HOOKS."""
        lesson_schema = resources_display.ResourceLesson.get_schema(course, key)
        common_utils.run_hooks(cls.SCHEMA_LOAD_HOOKS, lesson_schema)
        return lesson_schema

    @classmethod
    def get_lesson_dict(cls, course, lesson):
        return cls.get_lesson_dict_for(course, lesson)

    @classmethod
    def get_lesson_dict_for(cls, course, lesson):
        """Serialize a lesson, letting PRE_LOAD_HOOKS amend the dict."""
        lesson_dict = resources_display.ResourceLesson.get_data_dict(
            course, lesson.lesson_id)
        common_utils.run_hooks(cls.PRE_LOAD_HOOKS, lesson, lesson_dict)
        return lesson_dict

    def get(self):
        """Handles GET REST verb and returns lesson object as JSON payload."""
        if not CourseOutlineRights.can_view(self):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return

        key = self.request.get('key')
        course = courses.Course(self)
        lesson = course.find_lesson_by_id(None, key)
        # NOTE(review): an unknown key makes this assert fire and surface
        # as a 500 rather than a 404 -- presumably keys always come from
        # dashboard-generated links; confirm before tightening.
        assert lesson
        payload_dict = self.get_lesson_dict(course, lesson)

        message = ['Success.']
        if self.request.get('is_newly_created'):
            message.append('New lesson has been created and saved.')

        transforms.send_json_response(
            self, 200, '\n'.join(message),
            payload_dict=payload_dict,
            xsrf_token=XsrfTokenManager.create_xsrf_token('lesson-edit'))

    def put(self):
        """Handles PUT REST verb to save lesson and associated activity."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        if not self.assert_xsrf_token_or_fail(
            request, 'lesson-edit', {'key': key}):
            return
        if not CourseOutlineRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        course = courses.Course(self)
        lesson = course.find_lesson_by_id(None, key)
        if not lesson:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        payload = request.get('payload')
        updates_dict = transforms.json_to_dict(
            transforms.loads(payload),
            self.get_schema(course, key).get_json_schema_dict())

        # Copy the editable fields from the form payload onto the lesson.
        lesson.title = updates_dict['title']
        lesson.unit_id = updates_dict['unit_id']
        lesson.scored = (updates_dict['scored'] == 'scored')
        lesson.objectives = updates_dict['objectives']
        lesson.video = updates_dict['video']
        lesson.notes = updates_dict['notes']
        lesson.auto_index = updates_dict['auto_index']
        lesson.activity_title = updates_dict['activity_title']
        lesson.activity_listed = updates_dict['activity_listed']
        lesson.manual_progress = updates_dict['manual_progress']
        lesson.now_available = not updates_dict['is_draft']

        activity = updates_dict.get('activity', '').strip()
        errors = []
        if activity:
            if lesson.has_activity:
                course.set_activity_content(lesson, activity, errors=errors)
            else:
                errors.append('Old-style activities are not supported.')
        else:
            # Activity text cleared: drop the flag and any stored file.
            lesson.has_activity = False
            fs = self.app_context.fs
            path = fs.impl.physical_to_logical(course.get_activity_filename(
                lesson.unit_id, lesson.lesson_id))
            if fs.isfile(path):
                fs.delete(path)

        if not errors:
            common_utils.run_hooks(self.PRE_SAVE_HOOKS, lesson, updates_dict)
            assert course.update_lesson(lesson)
            course.save()
            common_utils.run_hooks(self.POST_SAVE_HOOKS, lesson)
            transforms.send_json_response(self, 200, 'Saved.')
        else:
            transforms.send_json_response(self, 412, '\n'.join(errors))

    def delete(self):
        """Handles REST DELETE verb with JSON payload."""
        key = self.request.get('key')
        if not self.assert_xsrf_token_or_fail(
            self.request, 'delete-lesson', {'key': key}):
            return
        if not CourseOutlineRights.can_delete(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        course = courses.Course(self)
        lesson = course.find_lesson_by_id(None, key)
        if not lesson:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return
        assert course.delete_lesson(lesson)
        course.save()
        transforms.send_json_response(self, 200, 'Deleted.')
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dashboard module. Separated here to break include loops."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import os
import appengine_config
from controllers import sites
from models import vfs
# URL path under which dashboard static resources are served.
RESOURCES_PATH = '/modules/dashboard/resources'

# Absolute on-disk location of those same resources inside the bundle.
RESOURCES_DIR = os.path.join(appengine_config.BUNDLE_ROOT,
                             RESOURCES_PATH.lstrip('/'))
def build_assets_url(tab_name):
    """Return the dashboard URL for the given assets tab."""
    return '/dashboard?action=assets&tab={}'.format(tab_name)
def list_files(handler, subfolder, merge_local_files=False, all_paths=None):
    """Makes a list of files in a subfolder.

    Args:
        handler: webapp request handler.
        subfolder: string. Relative path of the subfolder to list.
        merge_local_files: boolean. If True, the returned list will
            contain files found on either the datastore filesystem or the
            read-only local filesystem. If a file is found on both, its
            datastore filesystem version will trump its local filesystem
            version.
        all_paths: list. A list of all file paths in the underlying file
            system.
    Returns:
        List of relative, normalized file path strings.
    """
    home_folder = handler.app_context.get_home_folder()
    home = sites.abspath(home_folder, '/')
    # Perf fix: this prefix is loop-invariant -- previously it was
    # recomputed once per path inside the filter loop.
    subfolder_prefix = sites.abspath(home_folder, subfolder)
    if all_paths is not None:
        _paths = set(
            _path for _path in all_paths
            if _path.startswith(subfolder_prefix))
    else:
        _paths = set(handler.app_context.fs.list(subfolder_prefix))

    if merge_local_files:
        # Union in bundled read-only files; datastore entries win later
        # only in the sense that duplicates collapse to one path here.
        local_fs = vfs.LocalReadOnlyFileSystem(logical_home_folder='/')
        _paths = _paths.union(set([
            os.path.join(appengine_config.BUNDLE_ROOT, path) for path in
            local_fs.list(subfolder[1:])]))

    result = []
    for abs_filename in _paths:
        filename = os.path.relpath(abs_filename, home)
        result.append(vfs.AbstractFileSystem.normpath(filename))
    return sorted(result)
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and editing of question_groups."""
__author__ = 'John Orr (jorr@google.com)'
from models import transforms
from models import models
from models import resources_display
from modules.dashboard import dto_editor
from modules.dashboard import utils as dashboard_utils
class QuestionGroupManagerAndEditor(dto_editor.BaseDatastoreAssetEditor):
    """An editor for editing and managing question_groups."""

    def qgmae_prepare_template(self, key):
        """Builds template values for the question group editor form.

        Args:
            key: string. Datastore key of the question group to edit, or ''
                to create a new one.

        Returns:
            Dict of template values consumed by render_page.
        """
        template_values = {}
        template_values['page_title'] = self.format_title('Edit Question Group')
        template_values['main_content'] = self.get_form(
            QuestionGroupRESTHandler, key,
            dashboard_utils.build_assets_url('questions'))

        return template_values

    def get_add_question_group(self):
        """Renders the editor form for a new question group."""
        self.render_page(self.qgmae_prepare_template(''), 'assets', 'questions')

    def get_edit_question_group(self):
        """Renders the editor form for an existing question group."""
        self.render_page(self.qgmae_prepare_template(self.request.get('key')),
                         'assets', 'questions')

    def post_add_to_question_group(self):
        """Adds an existing question to a question group.

        Reads 'question_id', 'group_id' and 'weight' request arguments and
        replies with a JSON response: 500 with a specific message for any
        invalid argument, 200 on success.
        """
        try:
            question_id = long(self.request.get('question_id'))
            question_dto = models.QuestionDAO.load(question_id)
            if question_dto is None:
                raise ValueError()
        except ValueError:
            transforms.send_json_response(
                self, 500, 'Invalid question id.',
                {'question-id': self.request.get('question_id')}
            )
            return

        try:
            group_id = long(self.request.get('group_id'))
            group_dto = models.QuestionGroupDAO.load(group_id)
            if group_dto is None:
                raise ValueError()
        except ValueError:
            transforms.send_json_response(
                self, 500, 'Invalid question group id.',
                {'group-id': self.request.get('group_id')}
            )
            return

        weight = self.request.get('weight')
        try:
            # Coerce the request string to a float so the stored item weight
            # is numeric (previously the raw string was stored even though
            # it was validated as a float), consistent with how
            # McQuestionRESTHandler stores numeric choice scores.
            weight = float(weight)
        except ValueError:
            transforms.send_json_response(
                self, 500, 'Invalid weight. Must be a numeric value.', {
                    'weight': weight})
            return

        group_dto.add_question(question_id, weight)
        models.QuestionGroupDAO.save(group_dto)
        transforms.send_json_response(
            self,
            200,
            '%s added to %s.' % (
                question_dto.description, group_dto.description
            ),
            {
                'group-id': group_dto.id,
                'question-id': question_dto.id
            }
        )
        return
class QuestionGroupRESTHandler(dto_editor.BaseDatastoreRestHandler):
    """REST handler for editing question_groups."""

    URI = '/rest/question_group'

    REQUIRED_MODULES = [
        'gcb-rte', 'inputex-hidden', 'inputex-select', 'inputex-string',
        'inputex-list']
    EXTRA_CSS_FILES = ['question_group_editor.css']
    EXTRA_JS_FILES = ['question_group_editor.js']

    XSRF_TOKEN = 'question-group-edit'

    SCHEMA_VERSIONS = ['1.5']

    DAO = models.QuestionGroupDAO

    @classmethod
    def get_schema(cls):
        """Returns the InputEx schema for the question group editor."""
        return resources_display.ResourceQuestionGroup.get_schema(
            course=None, key=None)

    def get_default_content(self):
        """Returns the initial payload for a brand-new question group."""
        return {'version': self.SCHEMA_VERSIONS[0]}

    def validate(self, question_group_dict, key, schema_version, errors):
        """Validate the question group data sent from the form.

        Args:
            question_group_dict: dict. The form payload to validate.
            key: string. Key of the group being edited; falsy for new groups.
            schema_version: string. Payload version (validity already checked
                by the base handler).
            errors: list. Human-readable problem descriptions are appended.
        """
        if not question_group_dict['description'].strip():
            errors.append('The question group must have a description.')

        descriptions = {question_group.description for question_group
                        in models.QuestionGroupDAO.get_all()
                        if not key or question_group.id != long(key)}
        if question_group_dict['description'] in descriptions:
            errors.append('The description must be different '
                          'from existing question groups.')

        items = question_group_dict['items']
        for index in range(0, len(items)):
            item = items[index]
            try:
                # Coerce and store the weight as a float (previously the
                # coerced value was discarded), consistent with how
                # McQuestionRESTHandler._validate15 stores choice scores.
                item['weight'] = float(item['weight'])
            except ValueError:
                errors.append(
                    'Item %s must have a numeric weight.' % (index + 1))
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and editing of questions."""
__author__ = 'John Orr (jorr@google.com)'
import copy
from common import schema_fields
from models import roles
from models import transforms
from models import models
from models import resources_display
from modules.assessment_tags import gift
from modules.dashboard import dto_editor
from modules.dashboard import utils as dashboard_utils
class QuestionManagerAndEditor(dto_editor.BaseDatastoreAssetEditor):
    """An editor for editing and managing questions."""

    def qmae_prepare_template(self, rest_handler, key='', auto_return=False):
        """Build the Jinja template for adding a question."""
        return {
            'page_title': self.format_title('Edit Question'),
            'main_content': self.get_form(
                rest_handler, key,
                dashboard_utils.build_assets_url('questions'),
                auto_return=auto_return),
        }

    def get_add_mc_question(self):
        """Shows the editor form for a new multiple choice question."""
        self.render_page(
            self.qmae_prepare_template(McQuestionRESTHandler),
            'assets', 'questions')

    def get_add_sa_question(self):
        """Shows the editor form for a new short answer question."""
        self.render_page(
            self.qmae_prepare_template(SaQuestionRESTHandler),
            'assets', 'questions')

    def get_import_gift_questions(self):
        """Shows the GIFT import form, returning to the list when done."""
        self.render_page(
            self.qmae_prepare_template(
                GiftQuestionRESTHandler, auto_return=True),
            'assets', 'questions')

    def get_edit_question(self):
        """Shows the editor form matching the type of an existing question."""
        key = self.request.get('key')
        question = models.QuestionDAO.load(key)
        if not question:
            raise Exception('No question found')

        if question.type == models.QuestionDTO.MULTIPLE_CHOICE:
            rest_handler = McQuestionRESTHandler
        elif question.type == models.QuestionDTO.SHORT_ANSWER:
            rest_handler = SaQuestionRESTHandler
        else:
            raise Exception('Unknown question type: %s' % question.type)
        self.render_page(
            self.qmae_prepare_template(rest_handler, key=key),
            'assets', 'questions')

    def post_clone_question(self):
        """Saves a copy of a question, marking the copy's description."""
        source_question = models.QuestionDAO.load(self.request.get('key'))
        cloned_question = models.QuestionDAO.clone(source_question)
        cloned_question.description += ' (clone)'
        models.QuestionDAO.save(cloned_question)
class BaseQuestionRESTHandler(dto_editor.BaseDatastoreRestHandler):
    """Common methods for handling REST end points with questions."""

    def sanitize_input_dict(self, json_dict):
        """Normalizes incoming form data before validation."""
        json_dict['description'] = json_dict['description'].strip()

    def is_deletion_allowed(self, question):
        """Checks whether a question may be deleted.

        A question still referenced by question groups may not be deleted;
        in that case a 403 JSON response is sent to the client.

        Args:
            question: QuestionDTO. The question being deleted.

        Returns:
            True if deletion may proceed; False if a refusal was sent.
        """
        used_by = models.QuestionDAO.used_by(question.id)
        if not used_by:
            return True

        group_names = sorted('"%s"' % group.description for group in used_by)
        transforms.send_json_response(
            self, 403,
            ('Question in use by question groups:\n%s.\nPlease delete it '
             'from those groups and try again.') % ',\n'.join(group_names),
            {'key': question.id})
        return False

    def validate_no_description_collision(self, description, key, errors):
        """Appends an error if the description duplicates another question's."""
        other_descriptions = {
            question.description for question in models.QuestionDAO.get_all()
            if not key or question.id != long(key)}
        if description in other_descriptions:
            errors.append(
                'The description must be different from existing questions.')
class McQuestionRESTHandler(BaseQuestionRESTHandler):
    """REST handler for editing multiple choice questions."""

    URI = '/rest/question/mc'

    REQUIRED_MODULES = [
        'array-extras', 'gcb-rte', 'inputex-radio', 'inputex-select',
        'inputex-string', 'inputex-list', 'inputex-number', 'inputex-hidden']
    EXTRA_JS_FILES = ['mc_question_editor_lib.js', 'mc_question_editor.js']

    XSRF_TOKEN = 'mc-question-edit'

    SCHEMA_VERSIONS = ['1.5']

    DAO = models.QuestionDAO

    @classmethod
    def get_schema(cls):
        """Returns the InputEx schema for the multiple choice editor."""
        return resources_display.ResourceMCQuestion.get_schema(
            course=None, key=None)

    def pre_save_hook(self, question):
        """Stamps the DTO with the multiple choice question type."""
        question.type = models.QuestionDTO.MULTIPLE_CHOICE

    def transform_for_editor_hook(self, q_dict):
        """Prepares a stored question dict for display in the editor."""
        editor_dict = copy.deepcopy(q_dict)
        # InputEx does not correctly roundtrip booleans, so pass strings
        if q_dict.get('multiple_selections'):
            editor_dict['multiple_selections'] = 'true'
        else:
            editor_dict['multiple_selections'] = 'false'
        return editor_dict

    def get_default_content(self):
        """Returns the initial payload for a brand-new MC question: one
        correct choice followed by three incorrect ones."""
        return {
            'version': self.SCHEMA_VERSIONS[0],
            'question': '',
            'description': '',
            'multiple_selections': 'false',
            'choices': [
                {'score': '1', 'text': '', 'feedback': ''},
                {'score': '0', 'text': '', 'feedback': ''},
                {'score': '0', 'text': '', 'feedback': ''},
                {'score': '0', 'text': '', 'feedback': ''}
            ]}

    def validate(self, question_dict, key, version, errors):
        # Currently only one version supported; version validity has already
        # been checked.
        self._validate15(question_dict, key, errors)

    def _validate15(self, question_dict, key, errors):
        """Validates a version 1.5 multiple choice question dict in place.

        Choice scores are coerced to floats; problems are appended to
        errors as human-readable strings.
        """
        if not question_dict['question'].strip():
            errors.append('The question must have a non-empty body.')

        if not question_dict['description']:
            errors.append('The description must be non-empty.')
        self.validate_no_description_collision(
            question_dict['description'], key, errors)

        choices = question_dict['choices']
        if not choices:
            errors.append('The question must have at least one choice.')
        for index, choice in enumerate(choices):
            if not choice['text'].strip():
                errors.append('Choice %s has no response text.' % (index + 1))
            try:
                # Coerce the score attrib into a python float
                choice['score'] = float(choice['score'])
            except ValueError:
                errors.append(
                    'Choice %s must have a numeric score.' % (index + 1))
class SaQuestionRESTHandler(BaseQuestionRESTHandler):
    """REST handler for editing short answer questions."""

    URI = '/rest/question/sa'

    REQUIRED_MODULES = [
        'gcb-rte', 'inputex-select', 'inputex-string', 'inputex-list',
        'inputex-hidden', 'inputex-integer']
    EXTRA_JS_FILES = []

    XSRF_TOKEN = 'sa-question-edit'

    SCHEMA_VERSIONS = ['1.5']

    DAO = models.QuestionDAO

    @classmethod
    def get_schema(cls):
        """Returns the InputEx schema for the short answer editor."""
        return resources_display.ResourceSAQuestion.get_schema(
            course=None, key=None)

    def pre_save_hook(self, question):
        """Stamps the DTO with the short answer question type."""
        question.type = models.QuestionDTO.SHORT_ANSWER

    def get_default_content(self):
        """Returns the initial payload for a brand-new SA question."""
        return {
            'version': self.SCHEMA_VERSIONS[0],
            'question': '',
            'description': '',
            'graders': [{
                'score': '1.0',
                'matcher': 'case_insensitive',
                'response': '',
                'feedback': ''}]}

    def validate(self, question_dict, key, version, errors):
        # Currently only one version supported; version validity has already
        # been checked.
        self._validate15(question_dict, key, errors)

    def _validate15(self, question_dict, key, errors):
        """Validates a version 1.5 short answer question dict in place.

        Rows/columns are coerced to ints; problems are appended to errors
        as human-readable strings.
        """
        if not question_dict['question'].strip():
            errors.append('The question must have a non-empty body.')

        if not question_dict['description']:
            errors.append('The description must be non-empty.')
        self.validate_no_description_collision(
            question_dict['description'], key, errors)

        try:
            # Coerce the rows attrib into a python int. TypeError is caught
            # as well as ValueError so a None value is reported as a
            # validation error instead of crashing the request.
            question_dict['rows'] = int(question_dict['rows'])
            if question_dict['rows'] <= 0:
                errors.append('Rows must be a positive whole number')
        except (ValueError, TypeError):
            errors.append('Rows must be a whole number')

        try:
            # Coerce the cols attrib into a python int
            question_dict['columns'] = int(question_dict['columns'])
            if question_dict['columns'] <= 0:
                errors.append('Columns must be a positive whole number')
        except (ValueError, TypeError):
            errors.append('Columns must be a whole number')

        if not question_dict['graders']:
            errors.append('The question must have at least one answer.')

        graders = question_dict['graders']
        valid_matchers = [
            matcher for (matcher, unused_text)
            in resources_display.ResourceSAQuestion.GRADER_TYPES]
        for index in range(0, len(graders)):
            grader = graders[index]
            # Report an unknown matcher as a validation error rather than
            # asserting: assert statements are stripped under python -O and
            # would otherwise crash the request instead of informing the user.
            if grader['matcher'] not in valid_matchers:
                errors.append(
                    'Answer %s has an invalid matcher.' % (index + 1))
            if not grader['response'].strip():
                errors.append('Answer %s has no response text.' % (index + 1))
            try:
                float(grader['score'])
            except ValueError:
                errors.append(
                    'Answer %s must have a numeric score.' % (index + 1))
class GiftQuestionRESTHandler(dto_editor.BaseDatastoreRestHandler):
    """REST handler for importing gift questions."""

    URI = '/rest/question/gift'

    REQUIRED_MODULES = [
        'inputex-string', 'inputex-hidden', 'inputex-textarea']
    EXTRA_JS_FILES = []

    XSRF_TOKEN = 'import-gift-questions'

    @classmethod
    def get_schema(cls):
        """Get the InputEx schema for the GIFT question import editor."""
        gift_questions = schema_fields.FieldRegistry(
            'GIFT Questions',
            description='One or more GIFT-formatted questions',
            extra_schema_dict_values={'className': 'gift-container'})
        gift_questions.add_property(schema_fields.SchemaField(
            'version', '', 'string', optional=True, hidden=True))
        gift_questions.add_property(schema_fields.SchemaField(
            'description', 'Description', 'string', optional=True,
            extra_schema_dict_values={'className': 'gift-description'}))
        gift_questions.add_property(schema_fields.SchemaField(
            'questions', 'Questions', 'text', optional=True,
            description=(
                'List of <a href="https://docs.moodle.org/23/en/GIFT_format" '
                'target="_blank"> GIFT question-types</a> supported by Course '
                'Builder: Multiple choice, True-false, Short answer, and '
                'Numerical.'),
            extra_schema_dict_values={'className': 'gift-questions'}))
        return gift_questions

    def validate_question_descriptions(self, questions, errors):
        """Appends an error for each question whose description already exists."""
        descriptions = [q.description for q in models.QuestionDAO.get_all()]
        for question in questions:
            if question['description'] in descriptions:
                errors.append(
                    ('The description must be different '
                     'from existing questions.'))

    def validate_group_description(self, group_description, errors):
        """Appends an error if the target group description already exists."""
        descriptions = [gr.description
                        for gr in models.QuestionGroupDAO.get_all()]
        if group_description in descriptions:
            errors.append('Non-unique group description.')

    def get_default_content(self):
        """Returns the initial (empty) payload for the import form."""
        return {
            'questions': '',
            'description': ''}

    def convert_to_dtos(self, questions):
        """Converts parsed GIFT question dicts to unsaved QuestionDTOs.

        Args:
            questions: list of dict. Parsed GIFT questions.

        Returns:
            List of QuestionDTO objects with no datastore ids assigned.
        """
        dtos = []
        for question in questions:
            question['version'] = models.QuestionDAO.VERSION
            dto = models.QuestionDTO(None, question)
            # NOTE(review): anything other than 'multi_choice' becomes a
            # short answer question -- presumably the GIFT parser only emits
            # these two families; confirm against gift.GiftParser.
            if dto.type == 'multi_choice':
                dto.type = models.QuestionDTO.MULTIPLE_CHOICE
            else:
                dto.type = models.QuestionDTO.SHORT_ANSWER
            dtos.append(dto)
        return dtos

    def create_group(self, description, question_ids):
        """Creates a question group containing all the imported questions.

        Every question is given a weight of 1.0 within the group.
        """
        group = {
            'version': models.QuestionDAO.VERSION,
            'description': description,
            'introduction': '',
            'items': [{
                'question': str(x),
                'weight': 1.0} for x in question_ids]}
        return models.QuestionGroupDAO.create_question_group(group)

    def put(self):
        """Store a QuestionGroupDTO and QuestionDTO in the datastore."""
        request = transforms.loads(self.request.get('request'))
        # The XSRF check must pass before anything else is examined.
        if not self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN, {'key': None}):
            return
        if not roles.Roles.is_course_admin(self.app_context):
            transforms.send_json_response(self, 401, 'Access denied.')
            return

        payload = request.get('payload')
        json_dict = transforms.loads(payload)

        errors = []
        try:
            python_dict = transforms.json_to_dict(
                json_dict, self.get_schema().get_json_schema_dict())
            questions = gift.GiftParser.parse_questions(
                python_dict['questions'])
            self.validate_question_descriptions(questions, errors)
            self.validate_group_description(
                python_dict['description'], errors)
            # Only persist when every validation above passed.
            if not errors:
                dtos = self.convert_to_dtos(questions)
                question_ids = models.QuestionDAO.save_all(dtos)
                self.create_group(python_dict['description'], question_ids)
        # Each failure mode becomes a user-visible validation message.
        except ValueError as e:
            errors.append(str(e))
        except gift.ParseError as e:
            errors.append(str(e))
        except models.CollisionError as e:
            errors.append(str(e))

        if errors:
            self.validation_error('\n'.join(errors))
            return

        msg = 'Saved: %s.' % python_dict['description']
        transforms.send_json_response(self, 200, msg)
        return
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage Courses."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import collections
import copy
import datetime
import jinja2
import logging
import os
import urllib
import appengine_config
from admin_preferences_editor import AdminPreferencesEditor
from admin_preferences_editor import AdminPreferencesRESTHandler
from course_settings import CourseSettingsHandler
from course_settings import CourseSettingsRESTHandler
from course_settings import HtmlHookHandler
from course_settings import HtmlHookRESTHandler
from filer import AssetItemRESTHandler
from filer import FileManagerAndEditor
from filer import FilesItemRESTHandler
from filer import TextAssetRESTHandler
from label_editor import LabelManagerAndEditor
from label_editor import LabelRestHandler
import messages
from peer_review import AssignmentManager
from question_editor import GiftQuestionRESTHandler
from question_editor import McQuestionRESTHandler
from question_editor import QuestionManagerAndEditor
from question_editor import SaQuestionRESTHandler
from question_group_editor import QuestionGroupManagerAndEditor
from question_group_editor import QuestionGroupRESTHandler
from role_editor import RoleManagerAndEditor
from role_editor import RoleRESTHandler
import student_answers_analytics
from unit_lesson_editor import AssessmentRESTHandler
from unit_lesson_editor import ImportCourseRESTHandler
from unit_lesson_editor import LessonRESTHandler
from unit_lesson_editor import LinkRESTHandler
from unit_lesson_editor import UnitLessonEditor
from unit_lesson_editor import UnitLessonTitleRESTHandler
from unit_lesson_editor import UnitRESTHandler
import utils as dashboard_utils
from common import crypto
from common import jinja_utils
from common import safe_dom
from common import tags
from common.utils import Namespace
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import CourseHandler
from controllers.utils import ReflectiveRequestHandler
from models import analytics
from models import config
from models import courses
from models import resources_display
from models import custom_modules
from models import custom_units
from models import data_sources
from models import models
from models import roles
from models import transforms
from models import vfs
from models.models import LabelDAO
from models.models import QuestionDAO
from models.models import QuestionDTO
from models.models import QuestionGroupDAO
from models.models import RoleDAO
from modules.dashboard import tabs
from modules.data_source_providers import rest_providers
from modules.data_source_providers import synchronous_providers
from modules.oeditor import oeditor
from modules.search.search import SearchDashboardHandler
from tools import verify
from google.appengine.api import app_identity
from google.appengine.api import users
# Reference to this module's custom_modules.Module; read by can_view() for
# permission checks. Presumably assigned during module registration, which
# is outside this excerpt -- confirm against register_module().
custom_module = None
class DashboardHandler(
AdminPreferencesEditor, AssignmentManager, CourseHandler,
CourseSettingsHandler, FileManagerAndEditor, HtmlHookHandler,
LabelManagerAndEditor, QuestionGroupManagerAndEditor,
QuestionManagerAndEditor, ReflectiveRequestHandler, RoleManagerAndEditor,
SearchDashboardHandler, UnitLessonEditor):
"""Handles all pages and actions required for managing a course."""
default_tab_action = 'outline'
# This dictionary allows the dashboard module to optionally nominate a
# specific sub-tab within each major tab group as the default sub-tab to
# open when first navigating to that major tab. The default may be
# explicitly specified here so that sub-tab registrations from other
# modules do not inadvertently take over the first position due to order
# of module registration.
default_subtab_action = collections.defaultdict(
lambda: None,
{'analytics': 'students'})
get_actions = [
default_tab_action, 'assets', 'settings', 'analytics', 'search',
'edit_basic_settings', 'edit_settings', 'edit_unit_lesson',
'edit_unit', 'edit_link', 'edit_lesson', 'edit_assessment',
'manage_asset', 'manage_text_asset', 'import_course',
'edit_assignment', 'add_mc_question', 'add_sa_question',
'edit_question', 'add_question_group', 'edit_question_group',
'add_label', 'edit_label', 'edit_html_hook', 'question_preview',
'roles', 'add_role', 'edit_role', 'edit_custom_unit',
'import_gift_questions']
# Requests to these handlers automatically go through an XSRF token check
# that is implemented in ReflectiveRequestHandler.
post_actions = [
'create_or_edit_settings', 'add_unit',
'add_link', 'add_assessment', 'add_lesson', 'index_course',
'clear_index', 'edit_course_settings', 'add_reviewer',
'delete_reviewer', 'edit_admin_preferences', 'set_draft_status',
'add_to_question_group', 'course_availability', 'course_browsability',
'clone_question', 'add_custom_unit']
_nav_mappings = collections.OrderedDict([
('outline', 'Outline'),
('assets', 'Assets'),
('settings', 'Settings'),
('roles', 'Roles'),
('analytics', 'Analytics'),
('search', 'Search'),
('edit_assignment', 'Peer Review')])
child_routes = [
(AdminPreferencesRESTHandler.URI, AdminPreferencesRESTHandler),
(AssessmentRESTHandler.URI, AssessmentRESTHandler),
(AssetItemRESTHandler.URI, AssetItemRESTHandler),
(CourseSettingsRESTHandler.URI, CourseSettingsRESTHandler),
(HtmlHookRESTHandler.URI, HtmlHookRESTHandler),
(FilesItemRESTHandler.URI, FilesItemRESTHandler),
(ImportCourseRESTHandler.URI, ImportCourseRESTHandler),
(LabelRestHandler.URI, LabelRestHandler),
(LessonRESTHandler.URI, LessonRESTHandler),
(LinkRESTHandler.URI, LinkRESTHandler),
(UnitLessonTitleRESTHandler.URI, UnitLessonTitleRESTHandler),
(UnitRESTHandler.URI, UnitRESTHandler),
(McQuestionRESTHandler.URI, McQuestionRESTHandler),
(GiftQuestionRESTHandler.URI, GiftQuestionRESTHandler),
(SaQuestionRESTHandler.URI, SaQuestionRESTHandler),
(TextAssetRESTHandler.URI, TextAssetRESTHandler),
(QuestionGroupRESTHandler.URI, QuestionGroupRESTHandler),
(RoleRESTHandler.URI, RoleRESTHandler)]
# List of functions which are used to generate content displayed at the top
# of every dashboard page. Use this with caution, as it is extremely
# invasive of the UX. Each function receives the handler as arg and returns
# an object to be inserted into a Jinja template (e.g. a string, a safe_dom
# Node or NodeList, or a jinja2.Markup).
PAGE_HEADER_HOOKS = []
# A list of functions which are used to generate extra info about a lesson
# or unit in the course outline view. Modules which can provide extra info
# should add a function to this list which accepts a course and a lesson or
# unit as argument and returns a safe_dom NodeList or Node.
COURSE_OUTLINE_EXTRA_INFO_ANNOTATORS = []
# Modules adding extra info annotators (above) may also add a string to this
# list which will be displayed at a heading in the course outline table.
COURSE_OUTLINE_EXTRA_INFO_TITLES = []
# A list of hrefs for extra CSS files to be included in dashboard pages.
# Files listed here by URL will be available on every Dashboard page.
EXTRA_CSS_HREF_LIST = []
# A list of hrefs for extra JS files to be included in dashboard pages.
# Files listed here by URL will be available on every Dashboard page.
EXTRA_JS_HREF_LIST = []
# Dictionary that maps external permissions to their descriptions
_external_permissions = {}
# Dictionary that maps actions to permissions
_action_to_permission = {}
# Other modules which manage editable assets can add functions here to
# list their assets on the Assets tab. The function will receive an instance
# of DashboardHandler as an argument.
contrib_asset_listers = []
_custom_nav_mappings = collections.OrderedDict()
_custom_get_actions = {}
_custom_post_actions = {}
    @classmethod
    def add_nav_mapping(cls, action, nav_title):
        """Add a Nav mapping for Dashboard.

        Args:
            action: string. Action name used in the dashboard URL.
            nav_title: string. Title displayed in the navigation bar.
        """
        cls._custom_nav_mappings[action] = nav_title
    @classmethod
    def remove_nav_mapping(cls, action):
        """Remove a previously added custom Nav mapping, if present."""
        if action in cls._custom_nav_mappings:
            cls._custom_nav_mappings.pop(action)
@classmethod
def get_nav_mappings(cls):
return (cls._nav_mappings.items() +
sorted(cls._custom_nav_mappings.items()))
@classmethod
def get_nav_title(cls, action):
if action in cls._nav_mappings:
return cls._nav_mappings[action]
if action in cls._custom_nav_mappings:
return cls._custom_nav_mappings[action]
return None
@classmethod
def add_custom_get_action(cls, action, handler, in_action=None,
overwrite=False):
if not action:
logging.critical('Action not specified. Ignoring.')
return
if not handler:
tab_list = tabs.Registry.get_tab_group(action)
if not tab_list:
logging.critical('For action : ' + action +
' handler can not be null.')
return
if ((action in cls._custom_get_actions or action in cls.get_actions)
and not overwrite):
logging.critical('action : ' + action +
' already exists. Ignoring the custom get action.')
return
cls._custom_get_actions[action] = (handler, in_action)
@classmethod
def remove_custom_get_action(cls, action):
if action in cls._custom_get_actions:
cls._custom_get_actions.pop(action)
@classmethod
def add_custom_post_action(cls, action, handler, overwrite=False):
if not handler or not action:
logging.critical('Action or handler can not be null.')
return
if ((action in cls._custom_post_actions or action in cls.post_actions)
and not overwrite):
logging.critical('action : ' + action +
' already exists. Ignoring the custom get action.')
return
cls._custom_post_actions[action] = handler
@classmethod
def remove_custom_post_action(cls, action):
if action in cls._custom_post_actions:
cls._custom_post_actions.pop(action)
    @classmethod
    def get_child_routes(cls):
        """Add child handlers for REST.

        Returns:
            List of (URI, handler) pairs registered in child_routes.
        """
        return cls.child_routes
def can_view(self, action):
"""Checks if current user has viewing rights."""
return roles.Roles.is_user_allowed(
self.app_context, custom_module,
self._action_to_permission.get('get_%s' % action, '')
)
    def can_edit(self):
        """Checks if current user has editing rights (course admin)."""
        return roles.Roles.is_course_admin(self.app_context)
def _default_action_for_current_permissions(self):
"""Set the default or first active navigation tab as default action."""
if self.can_view(self.default_tab_action):
return self.default_tab_action
for nav in self.get_nav_mappings():
if self.can_view(nav[0]):
return nav[0]
return ''
    def get(self):
        """Enforces rights to all GET operations."""
        action = self.request.get('action')
        if not action:
            # No explicit action: fall back to the first tab this user may
            # view.
            self.default_action = self._default_action_for_current_permissions()
            action = self.default_action
        if not self.can_view(action):
            self.redirect(self.app_context.get_slug())
            return
        # Actions registered via add_custom_get_action bypass the default
        # dispatch below.
        if action in self._custom_get_actions:
            return self.custom_get_handler()

        # Force reload of properties. It is expensive, but admin deserves it!
        config.Registry.get_overrides(force_update=True)
        return super(DashboardHandler, self).get()
    def post(self):
        """Enforces rights to all POST operations."""
        if not self.can_edit():
            self.redirect(self.app_context.get_slug())
            return
        action = self.request.get('action')
        if action in self._custom_post_actions:
            # Each POST request must have valid XSRF token.
            xsrf_token = self.request.get('xsrf_token')
            if not crypto.XsrfTokenManager.is_xsrf_token_valid(
                xsrf_token, action):
                self.error(403)
                return
            self.custom_post_handler()
            return

        # Built-in post_actions get their XSRF check in the superclass
        # (ReflectiveRequestHandler).
        return super(DashboardHandler, self).post()
def get_template(self, template_name, dirs):
"""Sets up an environment and Gets jinja template."""
return jinja_utils.get_template(
template_name, dirs + [os.path.dirname(__file__)], handler=self)
def _get_alerts(self):
alerts = []
if not self.app_context.is_editable_fs():
alerts.append('Read-only course.')
if not self.app_context.now_available:
alerts.append('The course is not publicly available.')
return '\n'.join(alerts)
    def _get_top_nav(self, in_action, in_tab):
        """Builds the top navigation bar(s) for a dashboard page.

        Args:
            in_action: string or None. Action to highlight instead of the
                one taken from the request.
            in_tab: string or None. Sub-tab to highlight instead of the one
                taken from the request.

        Returns:
            List of safe_dom.NodeList: the main nav bar, plus a second
            sub-nav bar when the current action has a registered tab group.
        """
        current_action = in_action or self.request.get(
            'action') or self.default_action
        nav_bars = []
        nav = safe_dom.NodeList()
        for action, title in self.get_nav_mappings():
            # Only show tabs the current user is permitted to view.
            if not self.can_view(action):
                continue
            class_name = 'selected' if action == current_action else ''
            action_href = 'dashboard?action=%s' % action
            nav.append(safe_dom.Element(
                'a', href=action_href, className=class_name).add_text(
                    title))

        if roles.Roles.is_super_admin():
            nav.append(safe_dom.Element(
                'a', href='admin?action=admin',
                className=('selected' if current_action == 'admin' else '')
            ).add_text('Site Admin'))

        nav.append(safe_dom.Element(
            'a',
            href='https://code.google.com/p/course-builder/wiki/Dashboard',
            target='_blank'
        ).add_text('Help'))

        nav.append(safe_dom.Element(
            'a',
            href=(
                'https://groups.google.com/forum/?fromgroups#!categories/'
                'course-builder-forum/general-troubleshooting'),
            target='_blank'
        ).add_text('Support'))

        nav_bars.append(nav)

        tab_group = tabs.Registry.get_tab_group(current_action)
        if tab_group:
            if current_action == 'assets':
                # Hide asset sub-tabs that only apply to old-style courses.
                exclude_tabs = []
                course = self.get_course()
                if courses.has_only_new_style_assessments(course):
                    exclude_tabs.append('Assessments')
                if courses.has_only_new_style_activities(course):
                    exclude_tabs.append('Activities')
                tab_group = [
                    t for t in tab_group if t.title not in exclude_tabs]
            # Pick the requested sub-tab, else the module-declared default,
            # else the first tab in the group.
            tab_name = (in_tab or self.request.get('tab') or
                        self.default_subtab_action[current_action]
                        or tab_group[0].name)
            sub_nav = safe_dom.NodeList()
            for tab in tab_group:
                href = tab.href or 'dashboard?action=%s&tab=%s' % (
                    current_action, tab.name)
                target = tab.target or '_self'
                sub_nav.append(
                    safe_dom.A(
                        href,
                        className=('selected' if tab.name == tab_name else ''),
                        target=target)
                    .add_text(tab.title))
            nav_bars.append(sub_nav)
        return nav_bars
    def render_page(self, template_values, in_action=None, in_tab=None):
        """Renders a page using provided template values.

        Args:
            template_values: dict. Values consumed by view.html; must
                already contain 'page_title'.
            in_action: string or None. Nav action to highlight.
            in_tab: string or None. Sub-tab to highlight.
        """
        template_values['header_title'] = template_values['page_title']
        template_values['page_headers'] = [
            hook(self) for hook in self.PAGE_HEADER_HOOKS]
        template_values['course_picker'] = self.get_course_picker()
        template_values['course_title'] = self.app_context.get_title()
        template_values['top_nav'] = self._get_top_nav(in_action, in_tab)
        template_values['gcb_course_base'] = self.get_base_href(self)
        # Current user's email plus a logout link.
        template_values['user_nav'] = safe_dom.NodeList().append(
            safe_dom.Text('%s | ' % users.get_current_user().email())
        ).append(
            safe_dom.Element(
                'a', href=users.create_logout_url(self.request.uri)
            ).add_text('Logout'))
        template_values[
            'page_footer'] = 'Page created on: %s' % datetime.datetime.now()
        template_values['coursebuilder_version'] = (
            os.environ['GCB_PRODUCT_VERSION'])
        template_values['application_id'] = app_identity.get_application_id()
        template_values['application_version'] = (
            os.environ['CURRENT_VERSION_ID'])
        template_values['can_highlight_code'] = oeditor.CAN_HIGHLIGHT_CODE.value
        template_values['extra_css_href_list'] = self.EXTRA_CSS_HREF_LIST
        template_values['extra_js_href_list'] = self.EXTRA_JS_HREF_LIST
        if not template_values.get('sections'):
            template_values['sections'] = []

        self.response.write(
            self.get_template('view.html', []).render(template_values))
    def get_course_picker(self, destination=None):
        """Builds the course picker menu listing all accessible courses.

        Args:
            destination: string or None. Base path each course link points
                at; defaults to '/dashboard'.

        Returns:
            safe_dom.Element: an <ol> menu (initially hidden) with one entry
            per course the current user can access.
        """
        destination = destination or '/dashboard'
        action = self.request.get('action') or self.default_action

        # Disable the picker unless we are on a well-known page; we don't
        # want the picker on pages where edits or creation of new objects
        # can get triggered.
        safe_action = action and action in [
            a for a, _ in self.get_nav_mappings()] + ['admin']

        # Preserve the current sub-tab in the destination when it is valid.
        tab = self.request.get('tab')
        if action in self.get_actions:
            tab_group = tabs.Registry.get_tab_group(action)
            if tab_group and tab in tab_group:
                tab = '&tab=%s' % tab
            else:
                tab = ''
            destination = '%s?action=%s%s' % (destination, action, tab)

        current_course = sites.get_course_for_current_request()
        options = []
        for course in sorted(sites.get_all_courses()):
            # Access checks run inside each course's own namespace.
            with Namespace(course.namespace):
                if self.current_user_has_access(course):
                    # Unsafe pages get a no-op link instead of a real URL.
                    url = (
                        course.canonicalize_url(destination) if safe_action
                        else 'javascript:void(0)')
                    title = '%s (%s)' % (course.get_title(), course.get_slug())
                    option = safe_dom.Element('li')
                    link = safe_dom.A(url).add_text(title)
                    if current_course == course:
                        link.set_attribute('class', 'selected')
                    option.add_child(link)
                    options.append((course.get_title(), option))

        picker_class_name = 'hidden'
        if not safe_action:
            picker_class_name += ' disabled'

        picker = safe_dom.Element(
            'ol', id='gcb-course-picker-menu', className=picker_class_name)

        # Sort entries case-insensitively by course title.
        for title, option in sorted(
            options, key=lambda item: item[0].lower()):
            picker.append(option)
        return picker
def format_title(self, text):
    """Formats standard title with or without course picker."""
    # Breadcrumb: Course Builder > <course title> > Dashboard > <text>
    crumbs = [
        safe_dom.Text('Course Builder '),
        safe_dom.Entity('>'),
        safe_dom.Text(' %s ' % self.app_context.get_title()),
        safe_dom.Entity('>'),
        safe_dom.Text(' Dashboard '),
        safe_dom.Entity('>'),
        safe_dom.Text(' %s' % text),
    ]
    node_list = safe_dom.NodeList()
    for crumb in crumbs:
        node_list.append(crumb)
    return node_list
def _render_course_outline_to_html(self, course):
    """Renders course outline to HTML.

    Args:
        course: the Course whose top-level units are rendered.

    Returns:
        jinja2.Markup containing the rendered course_outline.html template.

    Raises:
        Exception: if a unit has an unrecognized type.
    """
    units = []
    for unit in course.get_units():
        if course.get_parent_unit(unit.unit_id):
            continue  # Will be rendered as part of containing element.
        # Dispatch on unit type to the matching outline renderer.
        if unit.type == verify.UNIT_TYPE_ASSESSMENT:
            units.append(self._render_assessment_outline(unit))
        elif unit.type == verify.UNIT_TYPE_LINK:
            units.append(self._render_link_outline(unit))
        elif unit.type == verify.UNIT_TYPE_UNIT:
            units.append(self._render_unit_outline(course, unit))
        elif unit.type == verify.UNIT_TYPE_CUSTOM:
            units.append(self._render_custom_unit_outline(course, unit))
        else:
            raise Exception('Unknown unit type: %s.' % unit.type)
    template_values = {
        'course': {
            'title': course.title,
            'is_editable': self.app_context.is_editable_fs(),
            # 'param' carries the value POSTed to toggle availability;
            # the icon class reflects the course's current state.
            'availability': {
                'url': self.get_action_url('course_availability'),
                'xsrf_token': self.create_xsrf_token('course_availability'),
                'param': not self.app_context.now_available,
                'class': (
                    'reveal-on-hover icon md md-lock-open'
                    if self.app_context.now_available else
                    'reveal-on-hover icon md md-lock')
            }
        },
        'units': units,
        'add_lesson_xsrf_token': self.create_xsrf_token('add_lesson'),
        'status_xsrf_token': self.create_xsrf_token('set_draft_status'),
        'unit_lesson_title_xsrf_token': self.create_xsrf_token(
            UnitLessonTitleRESTHandler.XSRF_TOKEN),
        'unit_title_template': resources_display.get_unit_title_template(
            course.app_context),
        'extra_info_title': ', '.join(self.COURSE_OUTLINE_EXTRA_INFO_TITLES)
    }
    return jinja2.Markup(
        self.get_template(
            'course_outline.html', []).render(template_values))
def _render_status_icon(self, resource, key, component_type):
    """Build the published/draft lock icon for a unit or lesson.

    Args:
        resource: unit or lesson object; must expose 'now_available' for
            an icon to be produced.
        key: identifier stored in the icon's data-key attribute.
        component_type: 'unit' or 'lesson' (data-component-type attribute).

    Returns:
        safe_dom.Element for the icon, or None (implicit) when the
        resource has no 'now_available' attribute.
    """
    if not hasattr(resource, 'now_available'):
        return
    icon = safe_dom.Element(
        'div', data_key=str(key), data_component_type=component_type)
    common_classes = 'reveal-on-hover icon icon-draft-status md'
    # On a read-only file system the status cannot be toggled.
    if not self.app_context.is_editable_fs():
        common_classes += ' inactive'
    if resource.now_available:
        icon.add_attribute(
            alt=resources_display.PUBLISHED_TEXT,
            title=resources_display.PUBLISHED_TEXT,
            className=common_classes + ' md-lock-open',
        )
    else:
        icon.add_attribute(
            alt=resources_display.DRAFT_TEXT,
            title=resources_display.DRAFT_TEXT,
            className=common_classes + ' md-lock'
        )
    return icon
def _render_assessment_outline(self, unit):
    """Build the outline-view dict describing one assessment unit."""
    actions = [self._render_status_icon(unit, unit.unit_id, 'unit')]
    if self.app_context.is_editable_fs():
        query = urllib.urlencode({
            'action': 'edit_assessment',
            'key': unit.unit_id})
        edit_url = self.canonicalize_url('/dashboard?%s') % query
        actions.append(self._create_edit_button_for_course_outline(edit_url))
    return {
        'title': unit.title,
        'class': 'assessment',
        'href': 'assessment?name=%s' % unit.unit_id,
        'unit_id': unit.unit_id,
        'actions': actions
    }
def _render_link_outline(self, unit):
    """Build the outline-view dict describing one link unit."""
    actions = [self._render_status_icon(unit, unit.unit_id, 'unit')]
    if self.app_context.is_editable_fs():
        query = urllib.urlencode({
            'action': 'edit_link',
            'key': unit.unit_id})
        edit_url = self.canonicalize_url('/dashboard?%s') % query
        actions.append(self._create_edit_button_for_course_outline(edit_url))
    return {
        'title': unit.title,
        'class': 'link',
        'href': unit.href or '',
        'unit_id': unit.unit_id,
        'actions': actions
    }
def _render_custom_unit_outline(self, course, unit):
    """Build the outline-view dict describing one custom unit.

    Bug fix: the only caller (_render_course_outline_to_html) invokes this
    as _render_custom_unit_outline(course, unit), but the method only
    accepted (self, unit), so rendering any custom unit raised TypeError.
    The signature now matches the caller (and mirrors
    _render_unit_outline); 'course' is currently unused but kept for
    signature parity.
    """
    actions = []
    unit_data = {
        'title': unit.title,
        'class': 'custom-unit',
        'href': unit.custom_unit_url,
        'unit_id': unit.unit_id,
        'actions': actions
    }
    actions.append(self._render_status_icon(unit, unit.unit_id, 'unit'))
    if self.app_context.is_editable_fs():
        url = self.canonicalize_url(
            '/dashboard?%s') % urllib.urlencode({
                'action': 'edit_custom_unit',
                'key': unit.unit_id,
                'unit_type': unit.custom_unit_type})
        actions.append(self._create_edit_button_for_course_outline(url))
    return unit_data
def _render_unit_outline(self, course, unit):
    """Build the outline-view dict for a unit, its lessons and any
    pre/post assessments.

    Args:
        course: the Course containing the unit.
        unit: the unit to render.

    Returns:
        dict consumed by course_outline.html.
    """
    is_editable = self.app_context.is_editable_fs()
    actions = []
    unit_data = {
        'title': unit.title,
        'class': 'unit',
        'href': 'unit?unit=%s' % unit.unit_id,
        'unit_id': unit.unit_id,
        'actions': actions
    }
    actions.append(self._render_status_icon(unit, unit.unit_id, 'unit'))
    if is_editable:
        url = self.canonicalize_url(
            '/dashboard?%s') % urllib.urlencode({
                'action': 'edit_unit',
                'key': unit.unit_id})
        actions.append(self._create_edit_button_for_course_outline(url))

    # Optional assessment shown before the unit body.
    if unit.pre_assessment:
        assessment = course.find_unit_by_id(unit.pre_assessment)
        if assessment:
            assessment_outline = self._render_assessment_outline(assessment)
            assessment_outline['class'] = 'pre-assessment'
            unit_data['pre_assessment'] = assessment_outline

    lessons = []
    for lesson in course.get_lessons(unit.unit_id):
        # Note: 'actions' is intentionally rebound per lesson here.
        actions = []
        actions.append(
            self._render_status_icon(lesson, lesson.lesson_id, 'lesson'))
        if is_editable:
            url = self.get_action_url(
                'edit_lesson', key=lesson.lesson_id)
            actions.append(self._create_edit_button_for_course_outline(url))

        # Registered annotators contribute extra per-lesson display info.
        extras = []
        for annotator in self.COURSE_OUTLINE_EXTRA_INFO_ANNOTATORS:
            extra_info = annotator(course, lesson)
            if extra_info:
                extras.append(extra_info)

        lessons.append({
            'title': lesson.title,
            'class': 'lesson',
            'href': 'unit?unit=%s&lesson=%s' % (
                unit.unit_id, lesson.lesson_id),
            'lesson_id': lesson.lesson_id,
            'actions': actions,
            'auto_index': lesson.auto_index,
            'extras': extras})

    unit_data['lessons'] = lessons

    # Optional assessment shown after the unit body.
    if unit.post_assessment:
        assessment = course.find_unit_by_id(unit.post_assessment)
        if assessment:
            assessment_outline = self._render_assessment_outline(assessment)
            assessment_outline['class'] = 'post-assessment'
            unit_data['post_assessment'] = assessment_outline

    return unit_data
def get_question_preview(self):
    """Render a preview of the question whose id arrives as 'quid'."""
    quid = self.request.get('quid')
    template_values = {
        'gcb_course_base': self.get_base_href(self),
        'question': tags.html_to_safe_dom(
            '<question quid="%s">' % quid, self),
    }
    template = self.get_template('question_preview.html', [])
    self.response.write(template.render(template_values))
def _get_about_course(self, template_values, tab):
    """Populate template_values with the 'About the Course' section.

    Args:
        template_values: dict mutated in place ('alerts', 'sections').
        tab: the settings tab being rendered (unused here).
    """
    # Basic course info.
    course_info = []
    course_actions = []

    if not self.app_context.is_editable_fs():
        course_info.append('The course is read-only.')
    else:
        if self.app_context.now_available:
            if self.app_context.get_environ()['course']['browsable']:
                browsable = True
                course_browsability_caption = (
                    'Hide Course From Unregistered Users')
                # Bug fix: message read "The course is is browsable by
                # un-registered users" (doubled word, missing period).
                course_info.append('The course is browsable by '
                                   'un-registered users.')
            else:
                browsable = False
                course_browsability_caption = (
                    'Allow Unregistered Users to Browse Course')
                course_info.append('The course is not visible to '
                                   'un-registered users.')
            # 'browsability' POSTs the opposite of the current state.
            course_actions.append({
                'id': 'course_browsability',
                'caption': course_browsability_caption,
                'action': self.get_action_url('course_browsability'),
                'xsrf_token': self.create_xsrf_token('course_browsability'),
                'params': {'browsability': not browsable},
                })

    currentCourse = courses.Course(self)
    course_info.append('Schema Version: %s' % currentCourse.version)
    course_info.append('Context Path: %s' % self.app_context.get_slug())
    course_info.append('Datastore Namespace: %s' %
                       self.app_context.get_namespace_name())

    # Course file system.
    fs = self.app_context.fs.impl
    course_info.append(('File System: %s' % fs.__class__.__name__))
    if fs.__class__ == vfs.LocalReadOnlyFileSystem:
        course_info.append(('Home Folder: %s' % sites.abspath(
            self.app_context.get_home_folder(), '/')))

    data_info = dashboard_utils.list_files(self, '/data/')

    sections = [
        {
            'title': 'About the Course',
            'description': messages.ABOUT_THE_COURSE_DESCRIPTION,
            'actions': course_actions,
            'children': course_info},]

    # Raw data files only exist for 1.2-format (CSV-based) courses.
    if currentCourse.version == courses.COURSE_MODEL_VERSION_1_2:
        sections.append({
            'title': 'Data Files',
            'description': messages.DATA_FILES_DESCRIPTION,
            'children': data_info})

    template_values['alerts'] = self._get_alerts()
    template_values['sections'] = sections
def get_outline(self):
    """Renders course outline view."""
    currentCourse = courses.Course(self)

    outline_actions = []
    if self.app_context.is_editable_fs():
        outline_actions.append({
            'id': 'add_unit',
            'caption': 'Add Unit',
            'action': self.get_action_url('add_unit'),
            'xsrf_token': self.create_xsrf_token('add_unit')})
        outline_actions.append({
            'id': 'add_link',
            'caption': 'Add Link',
            'action': self.get_action_url('add_link'),
            'xsrf_token': self.create_xsrf_token('add_link')})
        outline_actions.append({
            'id': 'add_assessment',
            'caption': 'Add Assessment',
            'action': self.get_action_url('add_assessment'),
            'xsrf_token': self.create_xsrf_token('add_assessment')})
        # One "Add <name>" action per registered custom unit type.
        for custom_type in custom_units.UnitTypeRegistry.list():
            outline_actions.append({
                'id': 'add_custom_unit_%s' % custom_type.identifier,
                'caption': 'Add %s' % custom_type.name,
                'action': self.get_action_url(
                    'add_custom_unit',
                    extra_args={'unit_type': custom_type.identifier}),
                'xsrf_token': self.create_xsrf_token('add_custom_unit')})
        # Importing another course is only offered while this one is empty.
        if not currentCourse.get_units():
            outline_actions.append({
                'id': 'import_course',
                'caption': 'Import',
                'href': self.get_action_url('import_course')
                })

    sections = [
        {
            'title': 'Course Outline',
            'description': messages.COURSE_OUTLINE_DESCRIPTION,
            'actions': outline_actions,
            'pre': self._render_course_outline_to_html(currentCourse)}]

    template_values = {
        'page_title': self.format_title('Outline'),
        'alerts': self._get_alerts(),
        'sections': sections,
        }
    self.render_page(template_values)
def custom_get_handler(self):
    """Renders Enabled Custom Units view.

    Dispatches a GET to the handler registered in _custom_get_actions for
    the requested action; entries are (handler, in_action) pairs, where
    in_action names the nav item to highlight.
    """
    action = self.request.get('action')
    in_action = self._custom_get_actions[action][1]
    tab = tabs.Registry.get_tab(action, self.request.get('tab'))
    if not tab:
        tab_list = tabs.Registry.get_tab_group(action)
        if not tab_list:
            # No tabs registered for this action: delegate entirely to
            # the registered handler.
            self._custom_get_actions[action][0](self)
            return
        # Default to the first tab of the group.
        tab = tab_list[0]
    template_values = {
        'page_title': self.format_title('Custom Modules > %s' % tab.title),
        'main_content': tab.contents.display_html(self),
    }
    self.render_page(template_values, in_action=in_action)
def custom_post_handler(self):
    """Dispatch a POST to the handler registered for the request action."""
    handler = self._custom_post_actions[self.request.get('action')]
    handler(self)
def get_action_url(self, action, key=None, extra_args=None, fragment=None):
    """Build a canonicalized /dashboard URL for the given action."""
    params = {'action': action}
    if key:
        params['key'] = key
    if extra_args:
        params.update(extra_args)
    url = '/dashboard?%s' % urllib.urlencode(params)
    if fragment:
        url = '%s#%s' % (url, fragment)
    return self.canonicalize_url(url)
def get_settings(self):
    """Render the settings view for the tab named in the request.

    'admin_prefs', 'advanced' and 'about' have dedicated renderers; any
    other tab is edited as a schema section of /course.yaml.
    """
    tab = tabs.Registry.get_tab(
        'settings', self.request.get('tab') or 'course')
    template_values = {
        'page_title': self.format_title('Settings > %s' % tab.title),
        'page_description': messages.SETTINGS_DESCRIPTION,
    }
    exit_url = self.request.get('exit_url')
    if tab.name == 'admin_prefs':
        self._edit_admin_preferences(template_values, '')
    elif tab.name == 'advanced':
        self._get_settings_advanced(template_values, tab)
    elif tab.name == 'about':
        self._get_about_course(template_values, tab)
    else:
        self._show_edit_settings_section(
            template_values, '/course.yaml', tab.name, tab.title,
            tab.contents, exit_url)
    self.render_page(template_values)
def text_file_to_safe_dom(self, reader, content_if_empty):
    """Load text file and convert it to safe_dom tree for display."""
    if not reader:
        return [content_if_empty]
    nodes = []
    # One <pre> element per non-empty line.
    for line in reader.read().decode('utf-8').split('\n'):
        if not line:
            continue
        pre = safe_dom.Element('pre')
        pre.add_text(line)
        nodes.append(pre)
    return nodes
def text_file_to_string(self, reader, content_if_empty):
    """Load text file and convert it to string for display."""
    # Falsy reader (None / missing file) yields the placeholder text.
    return reader.read().decode('utf-8') if reader else content_if_empty
def _get_settings_advanced(self, template_values, tab):
    """Renders course settings view (raw course.yaml / template contents).

    Args:
        template_values: dict mutated in place ('sections').
        tab: the settings tab being rendered (name/title used for URLs).
    """
    actions = []
    if self.app_context.is_editable_fs():
        actions.append({
            'id': 'edit_course_yaml',
            'caption': 'Advanced Edit',
            'action': self.get_action_url(
                'create_or_edit_settings',
                extra_args={
                    'tab': tab.name,
                    'tab_title': tab.title,
                }),
            'xsrf_token': self.create_xsrf_token(
                'create_or_edit_settings')})

    # course.yaml file content. NOTE(review): fs.open is called twice so
    # each helper consumes a fresh stream; whether vfs readers expose
    # close() is not visible here, so they are left as-is.
    yaml_reader = self.app_context.fs.open(
        self.app_context.get_config_filename())
    yaml_info = self.text_file_to_safe_dom(yaml_reader, '< empty file >')
    yaml_reader = self.app_context.fs.open(
        self.app_context.get_config_filename())
    yaml_lines = self.text_file_to_string(yaml_reader, '< empty file >')

    # course_template.yaml file contents. Bug fix: the original opened
    # this file twice and never closed either handle; use context
    # managers so both are closed deterministically.
    course_template_path = os.path.join(
        os.path.dirname(__file__), '../../course_template.yaml')
    with open(course_template_path, 'r') as course_template_reader:
        course_template_info = self.text_file_to_safe_dom(
            course_template_reader, '< empty file >')
    with open(course_template_path, 'r') as course_template_reader:
        course_template_lines = self.text_file_to_string(
            course_template_reader, '< empty file >')

    template_values['sections'] = [
        {
            'title': 'Contents of course.yaml file',
            'description': messages.CONTENTS_OF_THE_COURSE_DESCRIPTION,
            'actions': actions,
            'children': yaml_info,
            'code': yaml_lines,
            'mode': 'yaml'
        },
        {
            'title': 'Contents of course_template.yaml file',
            'description': messages.COURSE_TEMPLATE_DESCRIPTION,
            'children': course_template_info,
            'code': course_template_lines,
            'mode': 'yaml'
        }
    ]
def list_and_format_file_list(
    self, title, subfolder, tab_name,
    links=False, upload=False, prefix=None, caption_if_empty='< none >',
    edit_url_template=None, merge_local_files=False, sub_title=None,
    all_paths=None):
    """Walks files in folders and renders their names in a section.

    Args:
        title: section heading text; the matched-file count is appended.
        subfolder: folder to list, e.g. '/assets/img/'.
        tab_name: dashboard tab used when building edit/upload URLs.
        links: if True, render each filename as a hyperlink to the file.
        upload: if True, show an 'Upload to ...' button on editable fs.
        prefix: if set, only include files whose names start with it.
        caption_if_empty: placeholder text when no files matched.
        edit_url_template: %-template taking (tab_name, quoted filename).
        merge_local_files: also list files inherited from the local fs.
        sub_title: optional blockquote shown under the heading.
        all_paths: precomputed list of all course file paths.

    Returns:
        safe_dom.NodeList with the rendered section.
    """
    # keep a list of files without merging (used below to detect which
    # merged entries are actual datastore overrides)
    unmerged_files = {}
    if merge_local_files:
        unmerged_files = dashboard_utils.list_files(
            self, subfolder, merge_local_files=False, all_paths=all_paths)

    items = safe_dom.NodeList()
    count = 0
    for filename in dashboard_utils.list_files(
            self, subfolder, merge_local_files=merge_local_files,
            all_paths=all_paths):
        if prefix and not filename.startswith(prefix):
            continue

        # make a <li> item
        li = safe_dom.Element('li')
        if links:
            url = urllib.quote(filename)
            li.add_child(safe_dom.Element(
                'a', href=url).add_text(filename))
        else:
            li.add_text(filename)

        # add actions if available
        if (edit_url_template and
            self.app_context.fs.impl.is_read_write()):

            li.add_child(safe_dom.Entity(' '))
            edit_url = edit_url_template % (
                tab_name, urllib.quote(filename))
            # show [overridden] + edit button if override exists
            if (filename in unmerged_files) or (not merge_local_files):
                li.add_text('[Overridden]').add_child(
                    self._create_edit_button(edit_url))
            # show an [override] link otherwise
            else:
                li.add_child(safe_dom.A(edit_url).add_text('[Override]'))

        count += 1
        items.append(li)

    output = safe_dom.NodeList()

    if self.app_context.is_editable_fs() and upload:
        output.append(
            safe_dom.Element(
                'a', className='gcb-button gcb-pull-right',
                href='dashboard?%s' % urllib.urlencode(
                    {'action': 'manage_asset',
                     'tab': tab_name,
                     'key': subfolder})
            ).add_text(
                'Upload to ' + subfolder.lstrip('/').rstrip('/'))
        ).append(
            safe_dom.Element(
                'div', style='clear: both; padding-top: 2px;'
            )
        )
    if title:
        h3 = safe_dom.Element('h3')
        if count:
            h3.add_text('%s (%s)' % (title, count))
        else:
            h3.add_text(title)
        output.append(h3)
    if sub_title:
        output.append(safe_dom.Element('blockquote').add_text(sub_title))
    if items:
        output.append(safe_dom.Element('ol').add_children(items))
    else:
        if caption_if_empty:
            output.append(
                safe_dom.Element('blockquote').add_text(caption_if_empty))
    return output
def _attach_filter_data(self, element):
    """Attach JSON data attributes consumed by the client-side question
    filter to the given safe_dom element.

    Args:
        element: safe_dom.Element (the question bank table).
    """
    course = courses.Course(self)
    unit_list = []
    assessment_list = []
    # Bug fix: iterate the course's units. 'course' was constructed above
    # and used for get_lessons(), but units were read via
    # self.get_units(), which is not a handler method; course.get_units()
    # is the accessor used elsewhere in this file.
    for unit in course.get_units():
        if verify.UNIT_TYPE_UNIT == unit.type:
            unit_list.append((unit.unit_id, unit.title))
        if unit.is_assessment():
            assessment_list.append((unit.unit_id, unit.title))

    lessons_map = {}
    for (unit_id, unused_title) in unit_list:
        lessons_map[unit_id] = [
            (l.lesson_id, l.title) for l in course.get_lessons(unit_id)]

    element.add_attribute(
        data_units=transforms.dumps(unit_list + assessment_list),
        data_lessons_map=transforms.dumps(lessons_map),
        data_questions=transforms.dumps(
            [(question.id, question.description) for question in sorted(
                QuestionDAO.get_all(), key=lambda q: q.description)]
        ),
        data_groups=transforms.dumps(
            [(group.id, group.description) for group in sorted(
                QuestionGroupDAO.get_all(), key=lambda g: g.description)]
        ),
        data_types=transforms.dumps([
            (QuestionDTO.MULTIPLE_CHOICE, 'Multiple Choice'),
            (QuestionDTO.SHORT_ANSWER, 'Short Answer')])
    )
def _create_location_link(self, text, url, loc_id, count):
    """Build an <li> linking to a course location, with a usage count."""
    # Counts of 1 are shown without the ' (n)' suffix.
    count_suffix = ' (%s)' % count if count > 1 else ''
    item = safe_dom.Element(
        'li', data_count=str(count), data_id=str(loc_id))
    item.add_child(safe_dom.Element('a', href=url).add_text(text))
    item.add_child(
        safe_dom.Element('span', className='count').add_text(count_suffix))
    return item
def _create_locations_cell(self, locations):
    """Build a <td> listing every assessment and lesson using a component."""
    listing = safe_dom.Element('ul')
    # 'assessments' maps assessment unit -> use count.
    for (assessment, count) in locations.get('assessments', {}).iteritems():
        listing.add_child(self._create_location_link(
            assessment.title,
            'assessment?name=%s' % assessment.unit_id,
            assessment.unit_id,
            count))
    # 'lessons' maps (lesson, unit) pairs -> use count.
    for ((lesson, unit), count) in locations.get('lessons', {}).iteritems():
        listing.add_child(self._create_location_link(
            '%s: %s' % (unit.title, lesson.title),
            'unit?unit=%s&lesson=%s' % (unit.unit_id, lesson.lesson_id),
            lesson.lesson_id,
            count))
    return safe_dom.Element('td', className='locations').add_child(listing)
def _create_list(self, list_items):
    """Wrap each given node in an <li> and collect them under a <ul>."""
    container = safe_dom.Element('ul')
    for entry in list_items:
        container.add_child(safe_dom.Element('li').add_child(entry))
    return container
def _create_list_cell(self, list_items):
    """Build a <td> containing a <ul> of the given items."""
    cell = safe_dom.Element('td')
    return cell.add_child(self._create_list(list_items))
def _create_edit_button(self, edit_url):
    """Build a pencil-icon anchor pointing at edit_url."""
    return safe_dom.A(
        alt='Edit',
        title='Edit',
        className='icon md-mode-edit',
        href=edit_url,
    )
def _create_edit_button_for_course_outline(self, edit_url):
    """Build the hover-revealed pencil-icon anchor for the outline view."""
    # TODO(jorr): Replace _create_edit_button with this
    return safe_dom.A(
        alt='Edit',
        title='Edit',
        className='icon md md-mode-edit reveal-on-hover',
        href=edit_url,
    )
def _create_add_to_group_button(self):
    """Build the '+' icon used to add a question to a question group."""
    return safe_dom.Element(
        'div',
        alt='Add to question group',
        title='Add to question group',
        className='icon md md-add-circle gcb-pull-right',
    )
def _create_preview_button(self):
    """Build the eye icon that previews a question."""
    return safe_dom.Element(
        'div',
        alt='Preview',
        title='Preview',
        className='icon md md-visibility',
    )
def _create_clone_button(self, question_id):
    """Build the copy icon that clones the question with the given id."""
    return safe_dom.A(
        alt='Clone',
        title='Clone',
        className='icon md md-content-copy',
        href='#',
        data_key=str(question_id),
    )
def _add_assets_table(self, output, table_id, columns):
    """Creates an assets table with the specified columns.

    Args:
        output: safe_dom.NodeList to which the table should be appended.
        table_id: string specifying the id for the table
        columns: list of tuples that specifies column name and width.
            For example ("Description", 35) would create a column with a
            width of 35% and the header would be Description.

    Returns:
        The table safe_dom.Element of the created table.
    """
    container = safe_dom.Element('div', className='assets-table-container')
    output.append(container)
    table = safe_dom.Element('table', className='assets-table', id=table_id)
    container.add_child(table)
    thead = safe_dom.Element('thead')
    table.add_child(thead)
    tr = safe_dom.Element('tr')
    thead.add_child(tr)
    ths = safe_dom.NodeList()
    for (title, width) in columns:
        # Each header carries up/down arrow spans used by the client-side
        # column sorter.
        ths.append(safe_dom.Element(
            'th', style=('width: %s%%' % width)).add_text(title).add_child(
                safe_dom.Element(
                    'span', className='md md-arrow-drop-up')).add_child(
                        safe_dom.Element(
                            'span', className='md md-arrow-drop-down')))
    tr.add_children(ths)
    return table
def _create_filter(self):
    """Build the floating 'Filter' control for the question bank."""
    button = safe_dom.Element(
        'button', className='gcb-button gcb-pull-right filter-button')
    button.add_text('Filter')
    container = safe_dom.Element(
        'div', className='gcb-pull-right filter-container',
        id='question-filter')
    return container.add_child(button)
def _create_empty_footer(self, text, colspan, set_hidden=False):
    """Creates a <tfoot> that will be visible when the table is empty."""
    cell = safe_dom.Element(
        'td', colspan=str(colspan), style='text-align: center')
    cell.add_text(text)
    footer = safe_dom.Element('tfoot')
    # Callers pass set_hidden truthy when the table already has rows.
    if set_hidden:
        footer.add_attribute(style='display: none')
    return footer.add_child(safe_dom.Element('tr').add_child(cell))
def _get_question_locations(self, quid, location_maps, used_by_groups):
"""Calculates the locations of a question and its containing groups."""
(qulocations_map, qglocations_map) = location_maps
locations = qulocations_map.get(quid, None)
if locations is None:
locations = {'lessons': {}, 'assessments': {}}
else:
locations = copy.deepcopy(locations)
# At this point locations holds counts of the number of times quid
# appears in each lesson and assessment. Now adjust the counts by
# counting the number of times quid appears in a question group in that
# lesson or assessment.
lessons = locations['lessons']
assessments = locations['assessments']
for group in used_by_groups:
qglocations = qglocations_map.get(group.id, None)
if not qglocations:
continue
for lesson in qglocations['lessons']:
lessons[lesson] = lessons.get(lesson, 0) + 1
for assessment in qglocations['assessments']:
assessments[assessment] = assessments.get(assessment, 0) + 1
return locations
def list_questions(self, all_questions, all_question_groups, location_maps):
    """Prepare a list of the question bank contents.

    Args:
        all_questions: all question DTOs in the course.
        all_question_groups: all question group DTOs in the course.
        location_maps: (question locations, group locations) pair as
            produced by get_component_locations().

    Returns:
        safe_dom.NodeList with the rendered question table; empty when
        the course file system is read-only.
    """
    if not self.app_context.is_editable_fs():
        return safe_dom.NodeList()
    output = safe_dom.NodeList().append(
        safe_dom.Element(
            'a', className='gcb-button gcb-pull-right',
            href='dashboard?action=add_mc_question'
        ).add_text('Add Multiple Choice')
    ).append(
        safe_dom.Element(
            'a', className='gcb-button gcb-pull-right',
            href='dashboard?action=add_sa_question'
        ).add_text('Add Short Answer')
    ).append(
        safe_dom.Element(
            'a', className='gcb-button gcb-pull-right',
            href='dashboard?action=import_gift_questions'
        ).add_text('Import GIFT Questions')
    ).append(self._create_filter()).append(
        safe_dom.Element('div', style='clear: both; padding-top: 2px;')
    ).append(safe_dom.Element('h3').add_text(
        'Questions (%s)' % len(all_questions)
    ))

    # Create questions table
    table = self._add_assets_table(
        output, 'question-table', [
            ('Description', 25), ('Question Groups', 25),
            ('Course Locations', 25), ('Last Modified', 16), ('Type', 9)]
    )
    self._attach_filter_data(table)
    table.add_attribute(
        data_clone_question_token=self.create_xsrf_token('clone_question'))
    table.add_attribute(
        data_qg_xsrf_token=self.create_xsrf_token('add_to_question_group'))
    tbody = safe_dom.Element('tbody')
    table.add_child(tbody)

    # Empty-table footer; set_hidden receives the (truthy) question list,
    # so it starts hidden whenever there are questions.
    table.add_child(self._create_empty_footer(
        'No questions available', 5, all_questions))

    # Map question id -> groups containing that question.
    question_to_group = {}
    for group in all_question_groups:
        for quid in group.question_ids:
            question_to_group.setdefault(long(quid), []).append(group)

    for question in all_questions:
        tr = safe_dom.Element('tr', data_quid=str(question.id))
        # Add description including action icons
        td = safe_dom.Element('td', className='description')
        tr.add_child(td)
        td.add_child(self._create_edit_button(
            'dashboard?action=edit_question&key=%s' % question.id))
        td.add_child(self._create_preview_button())
        td.add_child(self._create_clone_button(question.id))
        td.add_text(question.description)

        # Add containing question groups
        used_by_groups = question_to_group.get(question.id, [])
        cell = safe_dom.Element('td', className='groups')
        if all_question_groups:
            cell.add_child(self._create_add_to_group_button())
        cell.add_child(self._create_list(
            [safe_dom.Text(group.description) for group in sorted(
                used_by_groups, key=lambda g: g.description)]
        ))
        tr.add_child(cell)

        # Add locations
        locations = self._get_question_locations(
            question.id, location_maps, used_by_groups)
        tr.add_child(self._create_locations_cell(locations))

        # Add last modified timestamp
        tr.add_child(safe_dom.Element(
            'td',
            data_timestamp=str(question.last_modified),
            className='timestamp'
        ))

        # Add question type
        tr.add_child(safe_dom.Element('td').add_text(
            'MC' if question.type == QuestionDTO.MULTIPLE_CHOICE else (
                'SA' if question.type == QuestionDTO.SHORT_ANSWER else (
                    'Unknown Type'))
        ).add_attribute(style='text-align: center'))

        # Add filter information (consumed client-side as JSON).
        filter_info = {}
        filter_info['description'] = question.description
        filter_info['type'] = question.type
        filter_info['lessons'] = []
        unit_ids = set()
        # 'lessons' keys are (lesson, unit) pairs.
        for (lesson, unit) in locations.get('lessons', ()):
            unit_ids.add(unit.unit_id)
            filter_info['lessons'].append(lesson.lesson_id)
        filter_info['units'] = list(unit_ids) + [
            a.unit_id for a in locations.get('assessments', ())]
        filter_info['groups'] = [qg.id for qg in used_by_groups]
        filter_info['unused'] = 0 if locations else 1
        tr.add_attribute(data_filter=transforms.dumps(filter_info))
        tbody.add_child(tr)
    return output
def list_question_groups(
    self, all_questions, all_question_groups, locations_map):
    """Prepare a list of question groups.

    Args:
        all_questions: all question DTOs (used to resolve member
            descriptions).
        all_question_groups: question group DTOs to render.
        locations_map: group id -> locations dict.

    Returns:
        safe_dom.NodeList; empty when the course fs is read-only.
    """
    if not self.app_context.is_editable_fs():
        return safe_dom.NodeList()
    output = safe_dom.NodeList()
    output.append(
        safe_dom.Element(
            'a', className='gcb-button gcb-pull-right',
            href='dashboard?action=add_question_group'
        ).add_text('Add Question Group')
    ).append(
        safe_dom.Element(
            'div', style='clear: both; padding-top: 2px;'
        )
    )
    output.append(safe_dom.Element('h3').add_text(
        'Question Groups (%s)' % len(all_question_groups)
    ))

    # Create question groups table
    table = self._add_assets_table(
        output, 'question-group-table', [
            ('Description', 25), ('Questions', 25), ('Course Locations', 25),
            ('Last Modified', 25)]
    )
    tbody = safe_dom.Element('tbody')
    table.add_child(tbody)

    if not all_question_groups:
        table.add_child(self._create_empty_footer(
            'No question groups available', 4))

    # Resolve member question descriptions by id.
    quid_to_question = {long(qu.id): qu for qu in all_questions}
    for question_group in all_question_groups:
        tr = safe_dom.Element('tr', data_qgid=str(question_group.id))
        # Add description including action icons
        td = safe_dom.Element('td', className='description')
        tr.add_child(td)
        td.add_child(self._create_edit_button(
            'dashboard?action=edit_question_group&key=%s' % (
                question_group.id)))
        td.add_text(question_group.description)

        # Add questions
        tr.add_child(self._create_list_cell([
            safe_dom.Text(descr) for descr in sorted([
                quid_to_question[long(quid)].description
                for quid in question_group.question_ids])
        ]).add_attribute(className='questions'))

        # Add locations
        tr.add_child(self._create_locations_cell(
            locations_map.get(question_group.id, {})))

        # Add last modified timestamp
        tr.add_child(safe_dom.Element(
            'td',
            data_timestamp=str(question_group.last_modified),
            className='timestamp'
        ))
        tbody.add_child(tr)
    return output
def list_labels(self):
    """Prepare a list of labels for use on the Assets page.

    Labels are grouped under their type; types are ordered by menu_order
    and labels by title (both via py2 cmp-style sort functions).

    Returns:
        safe_dom.NodeList; empty when the course fs is read-only.
    """
    output = safe_dom.NodeList()
    if not self.app_context.is_editable_fs():
        return output
    output.append(
        safe_dom.A('dashboard?action=add_label',
                   className='gcb-button gcb-pull-right'
                   ).add_text('Add Label')
    ).append(
        safe_dom.Element(
            'div', style='clear: both; padding-top: 2px;'
        )
    )
    output.append(
        safe_dom.Element('h3').add_text('Labels')
    )
    labels = LabelDAO.get_all()
    if labels:
        all_labels_ul = safe_dom.Element('ul')
        output.append(all_labels_ul)
        for label_type in sorted(
                models.LabelDTO.LABEL_TYPES,
                lambda a, b: cmp(a.menu_order, b.menu_order)):
            type_li = safe_dom.Element('li').add_text(label_type.title)
            all_labels_ul.add_child(type_li)
            labels_of_type_ul = safe_dom.Element('ul')
            type_li.add_child(labels_of_type_ul)
            for label in sorted(
                    labels, lambda a, b: cmp(a.title, b.title)):
                if label.type == label_type.type:
                    li = safe_dom.Element('li')
                    labels_of_type_ul.add_child(li)
                    # NOTE(review): interpolating label_type here renders
                    # the type object's repr; label_type.title may have
                    # been intended -- confirm before changing.
                    li.add_text(
                        label.title
                    ).add_attribute(
                        title='id: %s, type: %s' % (label.id, label_type))
                    # Only non-system-managed label types get an edit
                    # button.
                    if label_type not in (
                        models.LabelDTO.SYSTEM_EDITABLE_LABEL_TYPES):
                        li.add_child(
                            self._create_edit_button(
                                'dashboard?action=edit_label&key=%s' %
                                label.id,
                            ).add_attribute(
                                id='label_%s' % label.title))
    else:
        output.append(safe_dom.Element('blockquote').add_text('< none >'))
    return output
def get_assets(self):
    """Renders course assets view.

    Each assets tab registers a contents callback that appends its
    sections onto the shared NodeList.
    """
    all_paths = self.app_context.fs.list(
        sites.abspath(self.app_context.get_home_folder(), '/'))
    tab = tabs.Registry.get_tab(
        'assets', self.request.get('tab') or 'questions')

    items = safe_dom.NodeList()
    tab.contents(self, items, tab, all_paths)
    title_text = 'Assets > %s' % tab.title
    template_values = {
        'page_title': self.format_title(title_text),
        'page_description': messages.ASSETS_DESCRIPTION,
        'main_content': items,
    }
    self.render_page(template_values)
def filer_url_template(self):
    """URL %-template for the text-asset editor (tab, uri placeholders)."""
    return 'dashboard?action=manage_text_asset&tab=%s&uri=%s'
def get_assets_contrib(self, items, tab, all_paths):
    """Append asset sections contributed by registered extensions."""
    if not self.contrib_asset_listers:
        items.append(safe_dom.Text(
            'No assets extensions have been registered'))
        return
    for asset_lister in self.contrib_asset_listers:
        items.append(asset_lister(self))
def get_assets_questions(self, items, tab, all_paths):
    """Append the question bank and question group listings."""
    questions = QuestionDAO.get_all()
    groups = QuestionGroupDAO.get_all()
    # (question locations, group locations) pair.
    locations = courses.Course(self).get_component_locations()
    items.append(self.list_questions(questions, groups, locations))
    items.append(self.list_question_groups(questions, groups, locations[1]))
def get_assets_labels(self, items, tab, all_paths):
    """Append the label listing to the assets page."""
    items.append(self.list_labels())
def get_assets_assessments(self, items, tab, all_paths):
    """Append the listing of assessment JS files."""
    section = self.list_and_format_file_list(
        'Assessments', '/assets/js/', tab.name,
        links=True, prefix='assets/js/assessment-', all_paths=all_paths)
    items.append(section)
def get_assets_activities(self, items, tab, all_paths):
    """Append the listing of activity JS files."""
    section = self.list_and_format_file_list(
        'Activities', '/assets/js/', tab.name,
        links=True, prefix='assets/js/activity-', all_paths=all_paths)
    items.append(section)
def get_assets_images(self, items, tab, all_paths):
    """Append the listing of image and document assets."""
    section = self.list_and_format_file_list(
        'Images & Documents', '/assets/img/', tab.name,
        links=True, upload=True, merge_local_files=True,
        edit_url_template=(
            'dashboard?action=manage_asset&tab=%s&key=%s'),
        caption_if_empty='< inherited from /assets/img/ >',
        all_paths=all_paths)
    items.append(section)
def get_assets_css(self, items, tab, all_paths):
    """Append the listing of CSS assets."""
    section = self.list_and_format_file_list(
        'CSS', '/assets/css/', tab.name,
        links=True, upload=True, merge_local_files=True,
        edit_url_template=self.filer_url_template(),
        caption_if_empty='< inherited from /assets/css/ >',
        all_paths=all_paths)
    items.append(section)
def get_assets_js(self, items, tab, all_paths):
    """Append the listing of JavaScript assets."""
    section = self.list_and_format_file_list(
        'JavaScript', '/assets/lib/', tab.name,
        links=True, upload=True, merge_local_files=True,
        edit_url_template=self.filer_url_template(),
        caption_if_empty='< inherited from /assets/lib/ >',
        all_paths=all_paths)
    items.append(section)
def get_assets_html(self, items, tab, all_paths):
    """Append the listing of HTML assets."""
    section = self.list_and_format_file_list(
        'HTML', '/assets/html/', tab.name,
        links=True, upload=True, merge_local_files=True,
        edit_url_template=self.filer_url_template(),
        caption_if_empty='< inherited from /assets/html/ >',
        all_paths=all_paths)
    items.append(section)
def get_assets_templates(self, items, tab, all_paths):
    """Append the listing of view template files."""
    section = self.list_and_format_file_list(
        'View Templates', '/views/', tab.name,
        upload=True, merge_local_files=True,
        edit_url_template=self.filer_url_template(),
        caption_if_empty='< inherited from /views/ >',
        all_paths=all_paths)
    items.append(section)
def get_analytics(self):
    """Renders course analytics view."""
    requested_tab = (
        self.request.get('tab') or self.default_subtab_action['analytics'])
    tab = tabs.Registry.get_tab('analytics', requested_tab)
    template_values = {
        'page_title': self.format_title('Analytics > %s' % tab.title),
        'main_content': analytics.generate_display_html(
            self, crypto.XsrfTokenManager, tab.contents),
    }
    self.render_page(template_values)
def _render_roles_list(self):
    """Render roles list to HTML."""
    all_roles = RoleDAO.get_all()
    if not all_roles:
        return safe_dom.Element('blockquote').add_text('< none >')
    role_list = safe_dom.Element('ul')
    for role in sorted(all_roles, key=lambda r: r.name):
        item = safe_dom.Element('li')
        role_list.add_child(item)
        item.add_text(role.name).add_child(self._create_edit_button(
            'dashboard?action=edit_role&key=%s' % (role.id)
        ))
    return role_list
def get_roles(self):
    """Renders course roles view."""
    add_role_action = {
        'id': 'add_role',
        'caption': 'Add Role',
        'href': self.get_action_url('add_role')}
    roles_section = {
        'title': 'Roles',
        'description': messages.ROLES_DESCRIPTION,
        'actions': [add_role_action],
        'pre': self._render_roles_list()
    }
    self.render_page({
        'page_title': self.format_title('Roles'),
        'sections': [roles_section],
    })
@classmethod
def map_action_to_permission(cls, action, permission):
    """Maps an action to a permission.

    Map a GET or POST action that goes through the dashboard to a
    permission to control which users have access. GET actions start with
    'get_' while post actions start with 'post_'.

    Example:
        The i18n module maps both the actions 'get_i18n_dashboard' and
        'get_i18_console' to the permission 'access_i18n_dashboard'.
        Users who have a role assigned with this permission are then allowed
        to perform these actions and thus access the translation tools.

    Args:
        action: a string specifying the action to map.
        permission: a string specifying to which permission the action maps.
    """
    # A later registration for the same action silently overwrites the
    # earlier one.
    cls._action_to_permission[action] = permission
@classmethod
def unmap_action_to_permission(cls, action):
    """Removes a previously-registered action-to-permission mapping.

    Raises:
        KeyError: if the action was never mapped.
    """
    del cls._action_to_permission[action]
@classmethod
def add_external_permission(cls, permission_name, permission_description):
    """Adds extra permissions that will be registered by the Dashboard.

    Args:
        permission_name: a string naming the permission.
        permission_description: a human-readable description shown in the
            role editor.
    """
    cls._external_permissions[permission_name] = permission_description
@classmethod
def remove_external_permission(cls, permission_name):
    """Removes an externally-added permission.

    Raises:
        KeyError: if the permission was never added.
    """
    del cls._external_permissions[permission_name]
@classmethod
def permissions_callback(cls, unused_app_context):
    """Yields (name, description) pairs for all external permissions."""
    # Python 2 iterator view; callers are expected to consume it once.
    return cls._external_permissions.iteritems()
@classmethod
def current_user_has_access(cls, app_context):
    """True iff the user may perform at least one registered nav action."""
    # any() short-circuits on the first allowed action, matching the
    # early-return behavior of an explicit loop.
    return any(
        roles.Roles.is_user_allowed(
            app_context, custom_module,
            cls._action_to_permission.get('get_%s' % action, ''))
        for action, _ in cls.get_nav_mappings())
@classmethod
def generate_dashboard_link(cls, app_context):
    """Returns the dashboard nav link list, or [] if the user lacks access."""
    has_access = cls.current_user_has_access(app_context)
    return [('dashboard', 'Dashboard')] if has_access else []
def register_module():
    """Registers this module in the registry.

    Returns:
        The custom_modules.Module instance for the Course Dashboard.
    """

    def on_module_enabled():
        # Publish this module's permissions and add the Dashboard link to
        # the right-hand navigation.
        roles.Roles.register_permissions(
            custom_module, DashboardHandler.permissions_callback)
        ApplicationHandler.RIGHT_LINKS.append(
            DashboardHandler.generate_dashboard_link)

    def on_module_disabled():
        # Undo exactly what on_module_enabled did.
        roles.Roles.unregister_permissions(custom_module)
        ApplicationHandler.RIGHT_LINKS.remove(
            DashboardHandler.generate_dashboard_link)

    # Data sources backing the analytics visualizations registered below.
    data_sources.Registry.register(
        student_answers_analytics.QuestionAnswersDataSource)
    data_sources.Registry.register(
        student_answers_analytics.CourseQuestionsDataSource)
    data_sources.Registry.register(
        student_answers_analytics.CourseUnitsDataSource)
    data_sources.Registry.register(
        student_answers_analytics.AnswersDataSource)
    data_sources.Registry.register(
        student_answers_analytics.RawAnswersDataSource)
    data_sources.Registry.register(
        student_answers_analytics.OrderedQuestionsDataSource)

    # Visualizations shown on the Dashboard > Analytics sub-tabs.
    multiple_choice_question = analytics.Visualization(
        'multiple_choice_question',
        'Multiple Choice Question',
        'multiple_choice_question.html',
        data_source_classes=[
            synchronous_providers.QuestionStatsSource])
    student_progress = analytics.Visualization(
        'student_progress',
        'Student Progress',
        'student_progress.html',
        data_source_classes=[
            synchronous_providers.StudentProgressStatsSource])
    enrollment_assessment = analytics.Visualization(
        'enrollment_assessment',
        'Enrollment/Assessment',
        'enrollment_assessment.html',
        data_source_classes=[
            synchronous_providers.StudentEnrollmentAndScoresSource])
    assessment_difficulty = analytics.Visualization(
        'assessment_difficulty',
        'Assessment Difficulty',
        'assessment_difficulty.html',
        data_source_classes=[
            rest_providers.StudentAssessmentScoresDataSource])
    labels_on_students = analytics.Visualization(
        'labels_on_students',
        'Labels on Students',
        'labels_on_students.html',
        data_source_classes=[rest_providers.LabelsOnStudentsDataSource])
    question_answers = analytics.Visualization(
        'question_answers',
        'Question Answers',
        'question_answers.html',
        data_source_classes=[
            student_answers_analytics.QuestionAnswersDataSource,
            student_answers_analytics.CourseQuestionsDataSource,
            student_answers_analytics.CourseUnitsDataSource])
    gradebook = analytics.Visualization(
        'gradebook',
        'Gradebook',
        'gradebook.html',
        data_source_classes=[
            student_answers_analytics.RawAnswersDataSource,
            student_answers_analytics.OrderedQuestionsDataSource,
        ])

    # Sub-tabs under Dashboard > Analytics.
    tabs.Registry.register('analytics', 'students', 'Students',
                           [labels_on_students,
                            student_progress,
                            enrollment_assessment],
                           placement=tabs.Placement.BEGINNING)
    tabs.Registry.register('analytics', 'questions', 'Questions',
                           [multiple_choice_question, question_answers],
                           placement=tabs.Placement.BEGINNING)
    tabs.Registry.register('analytics', 'assessments', 'Assessments',
                           [assessment_difficulty])
    tabs.Registry.register('analytics', 'gradebook', 'Gradebook',
                           [gradebook])

    # Sub-tabs under Dashboard > Assets; contents are bound handler methods.
    tabs.Registry.register('assets', 'questions', 'Questions',
                           DashboardHandler.get_assets_questions)
    tabs.Registry.register('assets', 'labels', 'Labels',
                           DashboardHandler.get_assets_labels)
    tabs.Registry.register('assets', 'assessments', 'Assessments',
                           DashboardHandler.get_assets_assessments)
    tabs.Registry.register('assets', 'activities', 'Activities',
                           DashboardHandler.get_assets_activities)
    tabs.Registry.register('assets', 'images', 'Images & Documents',
                           DashboardHandler.get_assets_images)
    tabs.Registry.register('assets', 'css', 'CSS',
                           DashboardHandler.get_assets_css)
    tabs.Registry.register('assets', 'js', 'JavaScript',
                           DashboardHandler.get_assets_js)
    tabs.Registry.register('assets', 'html', 'HTML',
                           DashboardHandler.get_assets_html)
    tabs.Registry.register('assets', 'templates', 'Templates',
                           DashboardHandler.get_assets_templates)
    tabs.Registry.register('assets', 'contrib', 'Extensions',
                           DashboardHandler.get_assets_contrib)

    # Default item in tab group should be listed first for good UX.
    tabs.Registry.register('settings', 'course', 'Course', 'course',
                           placement=tabs.Placement.BEGINNING)
    tabs.Registry.register('settings', 'homepage', 'Homepage', 'homepage')
    # TODO(jorr): Remove the dependency on the invitations module in this line
    tabs.Registry.register('settings', 'registration', 'Registration',
                           'registration,invitation')
    tabs.Registry.register('settings', 'units', 'Units and Lessons',
                           'unit,assessment')
    tabs.Registry.register('settings', 'i18n', 'I18N', 'i18n')
    # Keep [Admin] Preferences, About, Advanced at very end of list.
    tabs.Registry.register('settings', 'admin_prefs', 'Preferences',
                           placement=tabs.Placement.END)
    tabs.Registry.register('settings', 'about', 'About',
                           placement=tabs.Placement.END)
    tabs.Registry.register('settings', 'advanced', 'Advanced',
                           placement=tabs.Placement.END)

    global_routes = [
        (
            # NOTE(review): the route path says 'material-design-icons' but
            # the zip bundle is 'material-design-iconic-font' — presumably
            # intentional aliasing; confirm before renaming either side.
            dashboard_utils.RESOURCES_PATH + '/material-design-icons/(.*)',
            sites.make_zip_handler(os.path.join(
                appengine_config.BUNDLE_ROOT, 'lib',
                'material-design-iconic-font-1.1.1.zip'))),
        (dashboard_utils.RESOURCES_PATH + '/js/.*', tags.JQueryHandler),
        (dashboard_utils.RESOURCES_PATH + '/.*', tags.ResourcesHandler)]

    dashboard_handlers = [
        ('/dashboard', DashboardHandler),
    ]
    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Course Dashboard',
        'A set of pages for managing Course Builder course.',
        global_routes, dashboard_handlers,
        notify_module_enabled=on_module_enabled,
        notify_module_disabled=on_module_disabled)
    return custom_module
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and editing of roles."""
__author__ = 'Glenn De Jonghe (gdejonghe@google.com)'
from common import schema_fields
from common import utils
from models.models import RoleDAO
from models.roles import Roles
from modules.dashboard import dto_editor
class RoleManagerAndEditor(dto_editor.BaseDatastoreAssetEditor):
    """An editor for editing and managing roles."""

    def _prepare_template(self, key):
        # Values consumed by the shared dashboard page template.
        return {
            'page_title': self.format_title('Edit Role'),
            'main_content': self.get_form(
                RoleRESTHandler, key,
                self.get_action_url('roles')),
        }

    def get_add_role(self):
        """Shows an empty editor form for creating a new role."""
        self.render_page(self._prepare_template(''), 'roles')

    def get_edit_role(self):
        """Shows the editor form for the role identified by the 'key' param."""
        self.render_page(
            self._prepare_template(self.request.get('key')), 'roles')
class RoleRESTHandler(dto_editor.BaseDatastoreRestHandler):
    """REST handler for editing roles."""

    URI = '/rest/role'

    # InputEx widget modules the editor form requires on the client side.
    REQUIRED_MODULES = [
        'inputex-hidden', 'inputex-string', 'inputex-checkbox',
        'inputex-textarea', 'inputex-list', 'inputex-uneditable']
    EXTRA_JS_FILES = ['resources/js/role_editor.js']

    XSRF_TOKEN = 'role-edit'

    SCHEMA_VERSIONS = ['1.5']

    DAO = RoleDAO

    # Pseudo-module name grouping permissions whose owning module is not
    # currently registered.
    INACTIVE_MODULES = 'Inactive Modules'

    @classmethod
    def _add_module_permissions_schema(cls, subschema, module_name):
        """Adds an array-of-permission-checkbox field for one module.

        Args:
            subschema: the 'modules' sub-registry to extend.
            module_name: display/field name for the module's permission list.
        """
        item_type = schema_fields.FieldRegistry(
            'Permission',
            extra_schema_dict_values={'className': 'permission-item'})
        # 'assigned' is the only editable field; name/description are
        # displayed read-only.
        item_type.add_property(schema_fields.SchemaField(
            'assigned', 'Assigned', 'boolean', optional=True,
            extra_schema_dict_values={'className': 'permission-assigned'}))
        item_type.add_property(schema_fields.SchemaField(
            'name', 'Name', 'string', editable=False, optional=True,
            extra_schema_dict_values={'className': 'permission-name'}))
        item_type.add_property(schema_fields.SchemaField(
            'description', 'Description', 'string', optional=True,
            editable=False,
            extra_schema_dict_values={'className': 'inputEx-description'}))
        item_array = schema_fields.FieldArray(
            module_name, module_name, item_type=item_type,
            extra_schema_dict_values={'className': 'permission-module'})
        subschema.add_property(item_array)

    @classmethod
    def get_schema(cls):
        """Return the InputEx schema for the roles editor."""
        schema = schema_fields.FieldRegistry(
            'Role', description='role')
        schema.add_property(schema_fields.SchemaField(
            'version', '', 'string', optional=True, hidden=True))
        schema.add_property(schema_fields.SchemaField(
            'name', 'Name', 'string', optional=True))
        schema.add_property(schema_fields.SchemaField(
            'description', 'Description', 'text', optional=True))
        # TODO(gdejonghe) Use user.id instead of user.email
        schema.add_property(schema_fields.SchemaField(
            'users', 'User Emails', 'text', optional=True))
        # One permission array per registered module, plus a catch-all
        # section for permissions from unregistered modules.
        subschema = schema.add_sub_registry('modules', 'Permission Modules')
        for module in Roles.get_modules():
            cls._add_module_permissions_schema(subschema, module.name)
        cls._add_module_permissions_schema(subschema, cls.INACTIVE_MODULES)
        return schema

    def _generate_permission(self, name, description, assigned):
        """Builds one permission dict as consumed by the editor form."""
        return {
            'name': name,
            'description': description,
            'assigned': assigned
        }

    def _generate_inactive_permission(self, permission, module=None):
        """Builds a permission entry for an unregistered permission/module.

        Inactive permissions are always shown as assigned so they are not
        silently dropped when the role is re-saved.
        """
        if module is None:
            return self._generate_permission(
                permission, 'This permission is currently not registered.',
                True)
        return self._generate_permission(
            permission, 'This permission was set by the module "%s" which is '
            'currently not registered.' % (module), True)

    def _update_dict_with_permissions(self, dictionary):
        """Populates dictionary['modules'] with all registered permissions.

        Every permission starts out unassigned; callers overlay the role's
        actual assignments afterwards.
        """
        app_context = self.get_course().app_context
        modules = {}
        for (module, callback) in Roles.get_permissions():
            modules[module.name] = []
            for (permission, description) in callback(app_context):
                modules[module.name].append(
                    self._generate_permission(permission, description, False))
        dictionary['modules'] = modules
        return dictionary

    def get_default_content(self):
        """Returns the editor payload for a brand-new, empty role."""
        return self._update_dict_with_permissions(
            {'version': self.SCHEMA_VERSIONS[0]})

    def validate(self, role_dict, key, unused_schema_version, errors):
        """Validate the role data sent from the form."""
        # Names of every other role; exclude the role being edited (key)
        # so saving without renaming does not trip the uniqueness check.
        role_names = {role.name for role in RoleDAO.get_all() if (
            not key or role.id != long(key))}
        if not role_dict['name'] or role_dict['name'] in role_names:
            errors.append('The role must have a unique non-empty name.')

    def transform_after_editor_hook(self, role_dict):
        """Edit the dict generated by the role editor."""
        role_dict['name'] = role_dict['name'].strip()
        role_dict['users'] = utils.text_to_list(
            role_dict['users'], utils.SPLITTER)
        # Create new entry for the dict to store formatted information
        role_dict['permissions'] = {}
        # Collapse per-module checkbox arrays into a simple
        # {module_name: [assigned permission names]} mapping for storage.
        for (module_name, permissions) in role_dict['modules'].iteritems():
            assigned_permissions = []
            for permission in permissions:
                if permission['assigned']:
                    assigned_permissions.append(permission['name'])
            if assigned_permissions:
                role_dict['permissions'][module_name] = assigned_permissions
        # Remove obsolete entry
        del role_dict['modules']
        return role_dict

    def transform_for_editor_hook(self, role_dict):
        """Modify dict from datastore before it goes to the role editor."""
        # Inverse of transform_after_editor_hook: expand the stored
        # {module: [names]} mapping back into checkbox arrays.
        self._update_dict_with_permissions(role_dict)
        for (module_name, permissions) in role_dict['modules'].iteritems():
            assigned_permissions = role_dict['permissions'].get(
                module_name, [])
            # First set the checkboxes for the active permissions
            for permission in permissions:
                if permission['name'] in assigned_permissions:
                    assigned_permissions.remove(permission['name'])
                    permission['assigned'] = True
            # Now generate fields for the assigned inactive permissions
            permissions.extend([self._generate_inactive_permission(inactive)
                                for inactive in assigned_permissions])
            # Pop active module
            role_dict['permissions'].pop(module_name, None)
        # Iterate over all the modules that are left over
        role_dict['modules'][self.INACTIVE_MODULES] = []
        for (module, permissions) in role_dict['permissions'].iteritems():
            # Add all the permissions to the INACTIVE MODULES section
            role_dict['modules'][self.INACTIVE_MODULES].extend([
                self._generate_inactive_permission(inactive, module)
                for inactive in permissions
            ])
        role_dict['users'] = ', '.join(role_dict['users'])
        del role_dict['permissions']
        return role_dict

    def after_save_hook(self):
        # Refresh the in-memory permission cache so the saved role takes
        # effect immediately.
        Roles.update_permissions_map()
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registration of sub-tabs for under Dashboard > Analytics."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import collections
import re
class Placement(object):
    """Namespace of coarse ordering buckets for sub-tabs in a tab group."""

    # If needed, can add "FIRST_HALF" and "LAST_HALF" to further subdivide.
    BEGINNING = 'beginning'
    MIDDLE = 'middle'
    END = 'end'

    # Bucket order used by cmp(); earlier in this list sorts earlier.
    _ORDERED_PLACEMENTS = [
        BEGINNING,
        MIDDLE,
        END,
    ]

    @staticmethod
    def cmp(a, b):
        """Python-2-style comparator ordering two tabs by placement bucket."""
        # pylint: disable=protected-access
        return cmp(Placement._ORDERED_PLACEMENTS.index(a._placement),
                   Placement._ORDERED_PLACEMENTS.index(b._placement))

    def __init__(self, *args, **kwargs):
        # Not really a class; just a namespace.
        raise NotImplementedError()
class Registry(object):
    """Process-wide registry of sub-tabs, grouped by dashboard tab group."""

    class _Tab(object):
        """Immutable-ish record describing one registered sub-tab."""

        def __init__(
                self, group, name, title, contents, href=None, target=None,
                placement=None):
            # Names appear in URLs and action strings, so restrict charset.
            if not re.match('^[a-z0-9_]+$', name):
                raise ValueError('Sub-tabs under Dashboard must '
                                 'have names consisting only of lowercase '
                                 'letters, numbers, and underscore.')
            self._group = group
            self._name = name
            self._title = title
            self._contents = contents
            self._href = href
            self._target = target
            self._placement = placement or Placement.MIDDLE

        @property
        def group(self):
            return self._group

        @property
        def name(self):
            return self._name

        @property
        def title(self):
            return self._title

        @property
        def contents(self):
            return self._contents

        @contents.setter
        def contents(self, contents):
            # Contents is the only mutable attribute; modules may replace
            # a tab's payload after registration.
            self._contents = contents

        @property
        def href(self):
            return self._href

        @property
        def target(self):
            return self._target

        @property
        def placement(self):
            return self._placement

    # Class-level mutable state shared by all callers in this process.
    _tabs_by_group = collections.defaultdict(list)

    @classmethod
    def register(
            cls, group_name, tab_name, tab_title, contents=None, href=None,
            target=None, placement=None):
        """Adds a sub-tab; raises ValueError on duplicate (group, name)."""
        if cls.get_tab(group_name, tab_name):
            raise ValueError(
                'There is already a sub-tab named "%s" ' % tab_name +
                'registered in group %s.' % group_name)
        cls._tabs_by_group[group_name].append(
            Registry._Tab(group_name, tab_name, tab_title, contents, href,
                          target, placement))
        # Keep the group ordered by placement bucket (Python 2 cmp= sort).
        cls._tabs_by_group[group_name].sort(cmp=Placement.cmp)

    @classmethod
    def unregister_group(cls, group_name):
        # This method is deprecated.
        if group_name in cls._tabs_by_group:
            del cls._tabs_by_group[group_name]

    @classmethod
    def get_tab(cls, group_name, tab_name):
        """Returns the named tab in the group, or None if absent."""
        matches = [tab for tab in cls._tabs_by_group.get(group_name, [])
                   if tab.name == tab_name]
        return matches[0] if matches else None

    @classmethod
    def get_tab_group(cls, group_name):
        """Returns the group's tab list (placement-ordered), or None."""
        return cls._tabs_by_group.get(group_name, None)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for management of individual students' peer review assignments."""
__author__ = 'Sean Lip (sll@google.com)'
import os
import urllib
import messages
from controllers.lessons import create_readonly_assessment_params
from controllers.utils import ApplicationHandler
from models import courses
from models import models
from models import review
from models import roles
from models import student_work
from models import transforms
from modules.review import domain
class AssignmentsRights(object):
    """Manages view/edit rights for assignments and reviews."""

    @classmethod
    def can_view(cls, handler):
        # Only course admins may view peer-review assignment data.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_edit(cls, handler):
        # Editing requires the same course-admin right as viewing.
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        # Deletion is allowed exactly when editing is.
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        # Adding is allowed exactly when editing is.
        return cls.can_edit(handler)
class AssignmentManager(ApplicationHandler):
    """A view for managing human-reviewed assignments."""

    def get_assignment_html(
            self, peer_reviewed_units, unit_id=None, reviewee_id=None,
            error_msg=None, readonly_assessment=None, review_steps=None,
            reviewers=None, reviews_params=None, model_version=None):
        """Renders a template allowing an admin to select an assignment.

        Returns:
            HTML for the assignments menu; most kwargs are optional and
            simply passed through to the template when present.
        """
        edit_url = self.canonicalize_url('/dashboard')

        return self.render_template_to_html({
            'REVIEW_STATE_COMPLETED': domain.REVIEW_STATE_COMPLETED,
            'add_reviewer_action': self.get_action_url('add_reviewer'),
            'add_reviewer_xsrf_token': self.create_xsrf_token('add_reviewer'),
            'delete_reviewer_action': self.get_action_url('delete_reviewer'),
            'delete_reviewer_xsrf_token': self.create_xsrf_token(
                'delete_reviewer'),
            'edit_assignment_action': 'edit_assignment',
            'edit_url': edit_url,
            'error_msg': error_msg,
            'peer_reviewed_units': peer_reviewed_units,
            'readonly_student_assessment': readonly_assessment,
            'reviewee_id': reviewee_id or '',
            'reviewers': reviewers,
            'reviews_params': reviews_params,
            'review_steps': review_steps,
            'unit_id': unit_id,
            'model_version': model_version
        }, 'assignments_menu.html', [os.path.dirname(__file__)])

    def parse_request(self, course, unit_id, reviewee_id, reviewer_id=None):
        """Parses request parameters in a GET or POST request.

        Args:
            course: Course. A course object.
            unit_id: str. The id of the unit.
            reviewee_id: str. The email address of the reviewee.
            reviewer_id: str. The email address of the reviewer.

        Returns:
            - a dict containing some subset of the following keys: unit,
                reviewee, reviewer.
            - if necessary, an error message to be passed to the frontend.
        """
        request_params = {}

        # Check unit validity.
        # An empty unit_id is not an error: it means no unit selected yet.
        if not unit_id:
            return request_params, ''

        unit = course.find_unit_by_id(unit_id)
        if not unit:
            return request_params, '404: Unit not found.'
        if (unit.workflow.get_grader() != courses.HUMAN_GRADER or
            unit.workflow.get_matcher() != review.PEER_MATCHER):
            return request_params, '412: This unit is not peer-graded.'
        request_params['unit'] = unit

        # Check reviewee validity.
        if not reviewee_id:
            return request_params, '412: No student email supplied.'
        reviewee = models.Student.get_enrolled_student_by_email(reviewee_id)
        if not reviewee:
            return (request_params,
                    '412: No student with this email address exists.')
        request_params['reviewee'] = reviewee

        # Check reviewer validity, if applicable.
        # reviewer_id=None means "not part of this request"; an empty
        # string means "supplied but blank", which is an error.
        if reviewer_id is not None:
            if not reviewer_id:
                return request_params, '412: No reviewer email supplied.'
            reviewer = models.Student.get_enrolled_student_by_email(reviewer_id)
            if not reviewer:
                return (request_params,
                        '412: No reviewer with this email address exists.')
            request_params['reviewer'] = reviewer

        return request_params, ''

    def get_edit_assignment(self):
        """Shows interface for selecting and viewing a student assignment."""
        if not AssignmentsRights.can_view(self):
            self.error(401)
            return

        course = courses.Course(self)
        peer_reviewed_units = course.get_peer_reviewed_units()

        page_title = 'Peer Review'

        template_values = {}
        template_values['page_title'] = self.format_title(page_title)
        template_values['page_description'] = (
            messages.ASSIGNMENTS_MENU_DESCRIPTION)

        unit_id = self.request.get('unit_id')
        if not unit_id:
            # No unit has been set yet, so display an empty form.
            template_values['main_content'] = self.get_assignment_html(
                peer_reviewed_units)
            self.render_page(template_values)
            return
        reviewee_id = self.request.get('reviewee_id')
        # This field may be populated due to a redirect from a POST method.
        post_error_msg = self.request.get('post_error_msg')

        request_params, error_msg = self.parse_request(
            course, unit_id, reviewee_id)
        unit = request_params.get('unit')
        reviewee = request_params.get('reviewee')

        if error_msg:
            template_values['main_content'] = self.get_assignment_html(
                peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
                error_msg=error_msg)
            self.render_page(template_values)
            return

        # Pick version-specific formatters for the assessment and reviews.
        model_version = course.get_assessment_model_version(unit)
        assert model_version in courses.SUPPORTED_ASSESSMENT_MODEL_VERSIONS
        if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:
            get_readonly_assessment = self.get_readonly_assessment_1_4
            get_readonly_review = self.get_readonly_review_1_4
        elif model_version == courses.ASSESSMENT_MODEL_VERSION_1_5:
            get_readonly_assessment = self.get_readonly_assessment_1_5
            get_readonly_review = self.get_readonly_review_1_5
        else:
            raise ValueError('Bad assessment model version: %s' % model_version)

        # Render content.
        rp = course.get_reviews_processor()

        submission_and_review_steps = rp.get_submission_and_review_steps(
            unit.unit_id, reviewee.get_key())
        if not submission_and_review_steps:
            template_values['main_content'] = self.get_assignment_html(
                peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
                error_msg='412: This student hasn\'t submitted the assignment.'
            )
            self.render_page(template_values)
            return

        readonly_assessment = get_readonly_assessment(
            unit, submission_and_review_steps[0])

        review_steps = submission_and_review_steps[1]
        reviews = rp.get_reviews_by_keys(
            unit.unit_id,
            [review_step.review_key for review_step in review_steps],
            handle_empty_keys=True)

        reviews_params = []
        reviewers = []
        for idx, review_step in enumerate(review_steps):
            params = get_readonly_review(unit, reviews[idx])
            reviews_params.append(params)
            # Resolve the reviewer's email (key name) from the user id
            # stored in the review step.
            reviewer = models.Student.get_student_by_user_id(
                review_step.reviewer_key.name()).key().name()
            reviewers.append(reviewer)
        assert len(reviewers) == len(review_steps)
        assert len(reviews_params) == len(review_steps)

        template_values['main_content'] = self.get_assignment_html(
            peer_reviewed_units, unit_id=unit_id, reviewee_id=reviewee_id,
            readonly_assessment=readonly_assessment, review_steps=review_steps,
            error_msg=post_error_msg, reviewers=reviewers,
            reviews_params=reviews_params,
            model_version=model_version)
        self.render_page(template_values)

    def get_readonly_assessment_1_4(self, unit, submission_content):
        # v1.4 assessments are rendered from a flat answer list.
        return create_readonly_assessment_params(
            courses.Course(self).get_assessment_content(unit),
            student_work.StudentWorkUtils.get_answer_list(submission_content))

    def get_readonly_assessment_1_5(self, unit, submission_content):
        # v1.5 assessments embed content on the unit and pass answers as JSON.
        return {
            'content': unit.html_content,
            'saved_answers': transforms.dumps(submission_content)
        }

    def get_readonly_review_1_4(self, unit, review_content):
        return create_readonly_assessment_params(
            courses.Course(self).get_review_content(unit),
            student_work.StudentWorkUtils.get_answer_list(review_content))

    def get_readonly_review_1_5(self, unit, review_content):
        return {
            'content': unit.html_review_form,
            'saved_answers': transforms.dumps(review_content)
        }

    def post_add_reviewer(self):
        """Adds a new reviewer to a human-reviewed assignment."""
        if not AssignmentsRights.can_edit(self):
            self.error(401)
            return

        course = courses.Course(self)
        unit_id = self.request.get('unit_id')
        reviewee_id = self.request.get('reviewee_id')
        reviewer_id = self.request.get('reviewer_id')

        request_params, post_error_msg = self.parse_request(
            course, unit_id, reviewee_id, reviewer_id=reviewer_id)

        # Errors are reported via redirect back to the GET view (PRG
        # pattern), carrying the message in the query string.
        redirect_params = {
            'action': 'edit_assignment',
            'reviewee_id': reviewee_id,
            'reviewer_id': reviewer_id,
            'unit_id': unit_id,
        }
        if post_error_msg:
            redirect_params['post_error_msg'] = post_error_msg
            self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
            return

        unit = request_params.get('unit')
        reviewee = request_params.get('reviewee')
        reviewer = request_params.get('reviewer')

        rp = course.get_reviews_processor()
        reviewee_key = reviewee.get_key()
        reviewer_key = reviewer.get_key()
        try:
            rp.add_reviewer(unit.unit_id, reviewee_key, reviewer_key)
        except domain.TransitionError:
            redirect_params['post_error_msg'] = (
                '412: The reviewer is already assigned to this submission.')
        self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))

    def post_delete_reviewer(self):
        """Deletes a reviewer from a human-reviewed assignment."""
        if not AssignmentsRights.can_edit(self):
            self.error(401)
            return

        course = courses.Course(self)
        unit_id = self.request.get('unit_id')
        reviewee_id = self.request.get('reviewee_id')
        review_step_key = self.request.get('key')

        request_params, post_error_msg = self.parse_request(
            course, unit_id, reviewee_id)

        redirect_params = {
            'action': 'edit_assignment',
            'reviewee_id': reviewee_id,
            'unit_id': unit_id,
        }
        if post_error_msg:
            redirect_params['post_error_msg'] = post_error_msg
            self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
            return

        rp = course.get_reviews_processor()
        unit = request_params.get('unit')
        rp.delete_reviewer(unit.unit_id, review_step_key)

        self.redirect('/dashboard?%s' % urllib.urlencode(redirect_params))
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and editing of labels."""
__author__ = 'Mike Gainer (mgainer@google.com)'
from common import schema_fields
from models import models
from modules.dashboard import dto_editor
from modules.dashboard import utils as dashboard_utils
class LabelManagerAndEditor(dto_editor.BaseDatastoreAssetEditor):
    """Dashboard pages for creating and editing labels."""

    def lme_prepare_template(self, key):
        # Values consumed by the shared dashboard page template.
        return {
            'page_title': self.format_title('Edit Label'),
            'main_content': self.get_form(
                LabelRestHandler, key,
                dashboard_utils.build_assets_url('labels'))
        }

    def get_add_label(self):
        """Shows an empty editor form for creating a new label."""
        self.render_page(self.lme_prepare_template(''),
                         'assets', 'labels')

    def get_edit_label(self):
        """Shows the editor form for the label identified by 'key'."""
        key = self.request.get('key')
        # Existence check only; the REST handler re-loads the label itself.
        label = models.LabelDAO.load(key)

        if not label:
            raise Exception('No label found')
        self.render_page(self.lme_prepare_template(key=key),
                         'assets', 'labels')
class LabelRestHandler(dto_editor.BaseDatastoreRestHandler):
    """REST handler backing the label editor form."""

    URI = '/rest/label'

    # InputEx widget modules the editor form requires on the client side.
    REQUIRED_MODULES = [
        'gcb-rte', 'inputex-radio', 'inputex-string', 'inputex-number',
        'inputex-hidden', 'inputex-uneditable']
    EXTRA_JS_FILES = []

    XSRF_TOKEN = 'label-edit'

    SCHEMA_VERSIONS = ['1.0']

    DAO = models.LabelDAO

    @classmethod
    def get_schema(cls):
        """Returns the InputEx schema for the label editor form."""
        schema = schema_fields.FieldRegistry('Label', 'label')
        schema.add_property(schema_fields.SchemaField(
            'version', '', 'string', optional=True, hidden=True))
        schema.add_property(schema_fields.SchemaField(
            'id', 'ID', 'string', optional=True, editable=False))
        schema.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string'))
        schema.add_property(schema_fields.SchemaField(
            'description', 'Description', 'string', optional=True,
            description='A brief statement outlining similarities among '
            'items marked with this label.'))
        # Only user-editable types are offered; system-managed types are
        # rejected in _validate_10.
        schema.add_property(schema_fields.SchemaField(
            'type', 'Type', 'integer',
            description='The purpose for which this label will be used. '
            'E.g., <b>Course Track</b> labels are used to match to labels on '
            'students to select which units the student will see when '
            'taking the course. <b>Locale</b> labels are automatically '
            'created by the system and are used to select content applicable '
            'to a specific language and/or country. More types of label will '
            'be added as more features are added.',
            select_data=[
                (lt.type, lt.title) for lt in (
                    models.LabelDTO.USER_EDITABLE_LABEL_TYPES)],
            extra_schema_dict_values={
                '_type': 'radio',
                'className': 'label-selection'}))
        return schema

    def sanitize_input_dict(self, json_dict):
        """Normalizes raw form input in place before validation."""
        # 'id' is assigned by the datastore; never trust a client value.
        json_dict['id'] = None
        json_dict['title'] = json_dict['title'].strip()
        json_dict['description'] = json_dict['description'].strip()
        json_dict['type'] = int(json_dict['type'])

    def validate(self, label_dict, key, version, errors):
        # Only one version currently supported, and version has already
        # been checked, so no need for dispatch.
        self._validate_10(label_dict, key, errors)

    def _validate_10(self, label_dict, key, errors):
        """Schema-1.0 validation: rejects system types and duplicate titles."""
        existing_label = models.LabelDAO.load(key)
        for label in models.LabelDTO.SYSTEM_EDITABLE_LABEL_TYPES:
            # prevent adding
            if label.type == label_dict['type']:
                errors.append(
                    'Unable to add system-managed label '
                    'type %s.' % label.type)
            # prevent edit
            if existing_label:
                if label.type == existing_label.type:
                    errors.append(
                        'Unable to edit system-managed label '
                        'type %s.' % label.type)
        # Title must be unique among all other labels (exclude self by key).
        for label in models.LabelDAO.get_all():
            if (label.title == label_dict['title'] and
                (not key or label.id != long(key))):
                errors.append('There is already a label with this title!')

    def is_deletion_allowed(self, label):
        # TODO(mgainer): When labels on course units get modified to be
        # IDs of labels rather than strings, enforce non-deletion of
        # labels until they are removed from units.
        #
        # No integrity checks against Student objects; there will be
        # too many to economically check.  We will handle this by
        # simply gracefully handling (and removing, when convenient)
        # broken Label references on students.  (This is morally
        # equivalent to having an admin actually delete the label from
        # all students, but here, we're just paying an amortized cost,
        # rather than taking it all up front.)
        return True

    def get_default_content(self):
        """Returns the editor payload for a brand-new label."""
        return {
            'version': self.SCHEMA_VERSIONS[0],
            'title': 'New Label',
            'description': '',
            'type': models.LabelDTO.LABEL_TYPE_GENERAL,
        }
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analytics for extracting facts based on StudentAnswerEntity entries."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import ast
import datetime
from mapreduce import context
from common import crypto
from common import schema_fields
from common import tags
from models import courses
from models import data_sources
from models import entities
from models import event_transforms
from models import jobs
from models import models
from models import transforms
from tools import verify
from google.appengine.ext import db
# Cap on the number of distinct wrong answers reported per question; the
# remainder are aggregated into a single "Other Incorrect Answers" item.
MAX_INCORRECT_REPORT = 5
class QuestionAnswersEntity(entities.BaseEntity):
  """Student answers to individual questions.
  Keyed by student user_id (see RawAnswersGenerator.reduce); *data* holds
  a JSON-encoded list of answer tuples.
  """
  # JSON blob of the student's answers; never queried, so not indexed.
  data = db.TextProperty(indexed=False)
  @classmethod
  def safe_key(cls, db_key, transform_fn):
    # Re-key using the transformed (e.g. PII-obscured) user id.
    return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class RawAnswersGenerator(jobs.MapReduceJob):
  """Extract answers from all event types into QuestionAnswersEntity table."""

  @staticmethod
  def get_description():
    return 'raw question answers'

  @staticmethod
  def entity_class():
    return models.EventEntity

  def build_additional_mapper_params(self, app_context):
    """Precomputes course-wide lookup tables shared by every map() call."""
    return {
        'questions_by_usage_id': (
            event_transforms.get_questions_by_usage_id(app_context)),
        'valid_question_ids': (
            event_transforms.get_valid_question_ids()),
        'group_to_questions': (
            event_transforms.get_group_to_questions()),
        'assessment_weights':
            event_transforms.get_assessment_weights(app_context),
        'unscored_lesson_ids':
            event_transforms.get_unscored_lesson_ids(app_context),
    }

  @staticmethod
  def map(event):
    """Extract question responses from all event types providing them.

    Yields:
      (user_id, list-of-answer-lists) pairs for events containing
      parseable question answers.
    """
    if event.source not in (
        'submit-assessment',
        'attempt-lesson',
        'tag-assessment'):
      return

    # Fetch global params set up in build_additional_mapper_params(), above.
    params = context.get().mapreduce_spec.mapper.params
    questions_info = params['questions_by_usage_id']
    valid_question_ids = params['valid_question_ids']
    group_to_questions = params['group_to_questions']
    assessment_weights = params['assessment_weights']

    timestamp = int(
        (event.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
    content = transforms.loads(event.data)

    # BUG FIX: previously, a payload whose version was not '1.5' left
    # 'answers' unbound, and the trailing yield raised NameError when the
    # mapreduce framework iterated this generator.  Initialize to None and
    # bail out below if no branch produced answers.
    answers = None
    if event.source == 'submit-assessment':
      answer_data = content.get('values', {})
      # TODO(mgainer): handle assessment-as-form submissions.  Current
      # implementation only understands Question and QuestionGroup;
      # forms are simply submitted as lists of fields.
      # TODO(mgainer): Handle peer-review scoring
      if not isinstance(answer_data, dict):
        return
      version = answer_data.get('version')
      if version == '1.5':
        answers = event_transforms.unpack_student_answer_1_5(
            questions_info, valid_question_ids, assessment_weights,
            group_to_questions, answer_data, timestamp)
    elif event.source == 'attempt-lesson':
      # Very odd that the version should be in the answers map....
      version = content.get('answers', {}).get('version')
      if version == '1.5':
        answers = event_transforms.unpack_student_answer_1_5(
            questions_info, valid_question_ids, assessment_weights,
            group_to_questions, content, timestamp)
    elif event.source == 'tag-assessment':
      answers = event_transforms.unpack_check_answers(
          content, questions_info, valid_question_ids, assessment_weights,
          group_to_questions, timestamp)
    if answers is None:
      return  # Unrecognized payload version; nothing to emit.
    yield (event.user_id, [list(answer) for answer in answers])

  @staticmethod
  def reduce(key, answers_lists):
    """Does not produce output to Job. Instead, stores values to DB."""
    answers = []
    for data in answers_lists:
      # Mapreduce hands reduce() stringified values; parse them back.
      answers += ast.literal_eval(data)
    data = transforms.dumps(answers)
    QuestionAnswersEntity(key_name=key, data=data).put()
class RawAnswersDataSource(data_sources.AbstractDbTableRestDataSource):
  """Make raw answers from QuestionAnswersEntity available via REST.

  Each entity holds all of one student's answers; _postprocess_rows()
  unpacks these into one REST item per individual answer.
  """

  @staticmethod
  def required_generators():
    return [RawAnswersGenerator]

  @classmethod
  def get_entity_class(cls):
    return QuestionAnswersEntity

  @classmethod
  def get_name(cls):
    return 'raw_student_answers'

  @classmethod
  def get_title(cls):
    return 'Raw Student Answers'

  @classmethod
  def get_default_chunk_size(cls):
    # Selecting answers by student turns into a where-in clause, which
    # in turn turns into N different '==' filters, and AppEngine supports
    # at most 30.
    # TODO(mgainer): Do something clever so that the students who have
    # non-blank data here are returned in the earlier pages.
    # TODO(mgainer): For students with no data, return blank items so
    # we at least see rows for them in the UI, even if there are no scores.
    return 25

  @classmethod
  def get_schema(cls, unused_app_context, unused_catch_and_log,
                 unused_source_context):
    """Declares the fields of one unpacked answer record."""
    reg = schema_fields.FieldRegistry(
        'Raw Student Answers',
        description='Raw data of answers to all uses of all graded '
        'questions (excludes self-check non-graded questions in lessons) '
        'in the course.')
    reg.add_property(schema_fields.SchemaField(
        'user_id', 'User ID', 'string',
        description='ID of the student providing this answer.'))
    reg.add_property(schema_fields.SchemaField(
        'user_name', 'User Name', 'string',
        description='Name of the student providing this answer.'))
    reg.add_property(schema_fields.SchemaField(
        'unit_id', 'Unit ID', 'string',
        description='ID of unit or assessment for this score.'))
    reg.add_property(schema_fields.SchemaField(
        'lesson_id', 'Lesson ID', 'string', optional=True,
        description='ID of lesson for this score.'))
    reg.add_property(schema_fields.SchemaField(
        'sequence', 'Sequence', 'integer',
        description='0-based order within containing assessment/lesson.'))
    reg.add_property(schema_fields.SchemaField(
        'question_id', 'Question ID', 'string',
        description='ID of question. Key to models.QuestionDAO'))
    reg.add_property(schema_fields.SchemaField(
        'question_type', 'Question Type', 'string',
        description='Kind of question. E.g., "SaQuestion" or "McQuestion" '
        'for single-answer and multiple-choice, respectively.'))
    # BUG FIX: display title was copy-pasted as 'Question ID'.
    reg.add_property(schema_fields.SchemaField(
        'timestamp', 'Timestamp', 'integer',
        description='Seconds since 1970-01-01 in GMT when answer given.'))
    choice_type = schema_fields.SchemaField(
        'answer', 'Answer', 'string',
        description='An answer to the question')
    reg.add_property(schema_fields.FieldArray(
        'answers', 'Answers', item_type=choice_type,
        description='The answer from the student. Note that '
        'this may be an array for questions permitting multiple answers.'))
    reg.add_property(schema_fields.SchemaField(
        'score', 'Score', 'number',
        description='Value from the Question indicating the score for '
        'this answer or set of answers.'))
    reg.add_property(schema_fields.SchemaField(
        'tallied', 'Tallied', 'boolean',
        description='Whether the score counts towards the overall grade. '
        'Lessons by default do not contribute to course score, but may '
        'be marked as graded.'))
    return reg.get_json_schema_dict()['properties']

  @classmethod
  def _postprocess_rows(cls, app_context, source_context, schema, log,
                        page_number, rows):
    """Unpack all responses from single student into separate rows."""
    # Fill in responses with actual student name, not just ID.
    student_ids = []
    for entity in rows:
      student_ids.append(entity.key().id_or_name())
    students = (models.Student
                .all()
                .filter('user_id in', student_ids)
                .fetch(len(student_ids)))
    # BUG FIX: an 'IN' query does not guarantee that results come back in
    # the order of the requested IDs, so the previous zip(rows, students)
    # could pair one student's answers with another student's identity
    # (and silently dropped trailing rows when a Student record was
    # missing).  Index the students by user_id instead.
    students_by_id = {}
    for student in students:
      students_by_id[student.user_id] = student

    # Prepare to convert multiple-choice question indices to answer strings.
    mc_choices = {}
    for question in models.QuestionDAO.get_all():
      if 'choices' in question.dict:
        mc_choices[str(question.id)] = [
            choice['text'] for choice in question.dict['choices']]

    ret = []
    for entity in rows:
      student = students_by_id.get(entity.key().id_or_name())
      if student is None:
        continue  # Student record no longer exists; nothing to attribute.
      raw_answers = transforms.loads(entity.data)
      answers = [event_transforms.QuestionAnswerInfo(*parts)
                 for parts in raw_answers]
      for answer in answers:
        if answer.question_id in mc_choices:
          # Translate choice indices into their display text.
          choices = mc_choices[answer.question_id]
          given_answers = [choices[i] for i in answer.answers]
        else:
          given_answers = answer.answers
          if not isinstance(given_answers, list):
            given_answers = [given_answers]
        ret.append({
            'user_id': student.user_id,
            'user_name': student.name,
            'unit_id': str(answer.unit_id),
            'lesson_id': str(answer.lesson_id),
            'sequence': answer.sequence,
            'question_id': str(answer.question_id),
            'question_type': answer.question_type,
            'timestamp': answer.timestamp,
            'answers': given_answers,
            'score': float(answer.score),
            'tallied': answer.tallied,
        })
    return ret
class AnswersDataSource(RawAnswersDataSource):
  """Exposes user-ID-obscured versions of all answers to all questions.

  This data source is meant to be used for aggregation or export to
  BigQuery (in contrast to RawAnswersDataSource, which should only ever
  be used within CourseBuilder, as that class exposes un-obscured user
  IDs and names).
  """

  @classmethod
  def get_name(cls):
    return 'answers'

  @classmethod
  def get_title(cls):
    return 'Answers'

  @classmethod
  def get_default_chunk_size(cls):
    return 1000

  @classmethod
  def get_schema(cls, app_context, log, source_context):
    # Same as the parent schema, minus the PII 'user_name' field.
    parent_schema = super(AnswersDataSource, cls).get_schema(
        app_context, log, source_context)
    parent_schema.pop('user_name')
    return parent_schema

  @classmethod
  def _postprocess_rows(cls, app_context, source_context, schema, log,
                        page_number, rows):
    # Strip names and HMAC the user IDs so exported records cannot be
    # traced back to individual students outside CourseBuilder.
    processed = super(AnswersDataSource, cls)._postprocess_rows(
        app_context, source_context, schema, log, page_number, rows)
    for record in processed:
      record.pop('user_name')
      record['user_id'] = crypto.hmac_sha_2_256_transform(
          source_context.pii_secret, record['user_id'])
    return processed
class OrderedQuestionsDataSource(data_sources.SynchronousQuery):
  """Simple "analytic" giving names of each question, in course order.

  This class cooperates with the Jinja template in gradebook.html to
  generate the header for the Gradebook analytics sub-tab.  It also
  generates the expected list of questions, in course order.  This
  set of questions sets the order for the question responses
  provided by RawAnswersDataSource (above).
  """

  @staticmethod
  def fill_values(app_context, template_values):
    """Sets values into the dict used to fill out the Jinja template."""

    def _find_q_ids(html, groups):
      """Returns the list of question IDs referenced from rich HTML."""
      question_ids = []
      for component in tags.get_components_from_html(html):
        if component['cpt_name'] == 'question':
          question_ids.append(int(component['quid']))
        elif component['cpt_name'] == 'question-group':
          qgid = int(component['qgid'])
          if qgid in groups:
            for question_id in groups[qgid]:
              question_ids.append(int(question_id))
      return question_ids

    def _look_up_questions(questions, question_ids):
      """Build a dict used to build HTML for one column for one question.

      Args:
        questions: Map from question ID to QuestionDAO
        question_ids: Set of IDS for which we want to build helper dicts.
      Returns:
        An array of dicts, one per question named in question_ids.
      """
      ret = []
      # NOTE: question_ids is intentionally pruned in place; IDs that no
      # longer resolve to an existing question are removed.
      for qid in list(question_ids):
        if qid not in questions:
          question_ids.remove(qid)
          continue
        ret.append({
            'id': qid,
            'description': questions[qid],
            'href': 'dashboard?action=edit_question&key=%s' % qid,
        })
      return ret

    def _q_key(unit_id, lesson_id, question_id):
      # Unique key naming one usage of a question within the course.
      return '%s.%s.%s' % (unit_id, lesson_id, question_id)

    def _add_assessment(unit):
      # One section for a top-level assessment's own content.
      q_ids = _find_q_ids(unit.html_content, groups)
      return (
          [_q_key(unit.unit_id, None, q_id) for q_id in q_ids],
          {
              'unit_id': None,
              'title': None,
              'questions': _look_up_questions(questions, q_ids)
          })

    def _add_sub_assessment(unit, assessment):
      # One section for a pre/post assessment contained in a unit.
      q_ids = _find_q_ids(assessment.html_content, groups)
      return (
          [_q_key(assessment.unit_id, None, q_id) for q_id in q_ids],
          {
              'href': 'unit?unit=%s&assessment=%s' % (
                  unit.unit_id, assessment.unit_id),
              'unit_id': assessment.unit_id,
              'title': assessment.title,
              'questions': _look_up_questions(questions, q_ids),
              'tallied': True,
          })

    def _add_lesson(unit, lesson):
      # One section for a lesson within a unit.
      q_ids = _find_q_ids(lesson.objectives, groups)
      return (
          [_q_key(unit.unit_id, lesson.lesson_id, qid) for qid in q_ids],
          {
              'href': 'unit?unit=%s&lesson=%s' % (
                  unit.unit_id, lesson.lesson_id),
              'lesson_id': lesson.lesson_id,
              'title': lesson.title,
              'questions': _look_up_questions(questions, q_ids),
              'tallied': lesson.scored,
          })

    def _count_colspans(units):
      """Computes HTML colspan values for unit/sub-unit header cells."""
      for unit in units:
        unit_colspan = 0
        for item in unit['contents']:
          # answer/score for each question, plus subtotal for section.
          item['colspan'] = len(item['questions']) * 2
          unit_colspan += item['colspan']
        # If a unit contains more than one sub-unit, we need a subtotal
        # column.
        if len(unit['contents']) > 1:
          for item in unit['contents']:
            if item['tallied']:
              item['colspan'] += 1
              unit_colspan += 1
        # +1 for unit total column
        unit['colspan'] = unit_colspan + 1

    course = courses.Course(None, app_context)
    questions = {q.id: q.description for q in models.QuestionDAO.get_all()}
    groups = {
        g.id: g.question_ids for g in models.QuestionGroupDAO.get_all()}
    units = []
    question_keys = []

    # Walk through the course in display order, gathering all items
    # that may contain questions.  This is used to build up the HTML
    # table headers for display.
    for unit in course.get_units():
      # Skip contained pre/post assessments; these will be done in their
      # containing unit.
      if course.get_parent_unit(unit.unit_id):
        continue

      # Only deal with known unit types
      if unit.type == verify.UNIT_TYPE_ASSESSMENT:
        href = 'assessment?name=%s' % unit.unit_id
      elif unit.type == verify.UNIT_TYPE_UNIT:
        # BUG FIX: a stray trailing comma here previously made 'href' a
        # one-element tuple, which rendered as "('unit?unit=...',)" in
        # the gradebook header link.
        href = 'unit?unit=%s' % unit.unit_id
      else:
        continue

      unit_contents = []
      if unit.type == verify.UNIT_TYPE_ASSESSMENT:
        q_keys, contents = _add_assessment(unit)
        if q_keys:
          question_keys += q_keys + ['subtotal']
          unit_contents.append(contents)
      if unit.pre_assessment:
        assessment = course.find_unit_by_id(unit.pre_assessment)
        if assessment:
          q_keys, contents = _add_sub_assessment(unit, assessment)
          if q_keys:
            question_keys += q_keys + ['subtotal']
            unit_contents.append(contents)
      for lesson in course.get_lessons(unit.unit_id):
        q_keys, contents = _add_lesson(unit, lesson)
        if q_keys:
          question_keys += q_keys
          if lesson.scored:
            question_keys += ['subtotal']
          unit_contents.append(contents)
      if unit.post_assessment:
        assessment = course.find_unit_by_id(unit.post_assessment)
        if assessment:
          q_keys, contents = _add_sub_assessment(unit, assessment)
          if q_keys:
            question_keys += q_keys + ['subtotal']
            unit_contents.append(contents)

      if unit_contents:
        units.append({
            'href': href,
            'unit_id': unit.unit_id,
            'title': unit.title,
            'contents': unit_contents,
        })
        # If there is only one sub-component within the unit, pop off
        # the 'subtotal' column.
        # NOTE(review): when that single sub-component is an unscored
        # lesson, no 'subtotal' was appended above, so this pops a
        # question key instead -- confirm whether that is intended.
        if len(unit_contents) == 1:
          question_keys.pop()
    question_keys.append('total')
    _count_colspans(units)
    template_values['units'] = units
    template_values['gradebook_js_vars'] = transforms.dumps(
        {'question_keys': question_keys})
class StudentAnswersStatsGenerator(jobs.MapReduceJob):
  """Map/reduce job tallying answer frequencies per question usage.
  Maps over StudentAnswersEntity emitting one (key, (answers, score))
  pair per question usage, then reduces each key to a series of
  {answer, count, is_valid} dicts consumed by QuestionAnswersDataSource.
  """
  @staticmethod
  def get_description():
    return 'student answers'
  @staticmethod
  def entity_class():
    return models.StudentAnswersEntity
  def build_additional_mapper_params(self, app_context):
    # Course-wide lookup tables computed once and shared by all map() calls.
    return {
        'questions_by_usage_id': (
            event_transforms.get_questions_by_usage_id(app_context)),
        'valid_question_ids': (
            event_transforms.get_valid_question_ids()),
        'group_to_questions': (
            event_transforms.get_group_to_questions()),
        'assessment_weights':
            event_transforms.get_assessment_weights(app_context),
        'unscored_lesson_ids':
            event_transforms.get_unscored_lesson_ids(app_context),
    }
  @staticmethod
  def build_key(unit, sequence, question_id, question_type):
    # Composite reduce key; parse_key() below must stay in sync.
    return '%s_%d_%s_%s' % (unit, sequence, question_id, question_type)
  @staticmethod
  def parse_key(key):
    # Inverse of build_key().
    unit, sequence, question_id, question_type = key.split('_')
    return unit, int(sequence), question_id, question_type
  @staticmethod
  def map(student_answers):
    """Emits one keyed (answers, score) pair per parseable question answer."""
    params = context.get().mapreduce_spec.mapper.params
    questions_by_usage_id = params['questions_by_usage_id']
    valid_question_ids = params['valid_question_ids']
    group_to_questions = params['group_to_questions']
    assessment_weights = params['assessment_weights']
    all_answers = transforms.loads(student_answers.data)
    for unit_id, unit_responses in all_answers.items():
      # Is this a CourseBuilder Question/QuestionGroup set of answers?
      if ('containedTypes' in unit_responses and
          unit_responses['version'] == '1.5'):
        for answer in event_transforms.unpack_student_answer_1_5(
            questions_by_usage_id, valid_question_ids,
            assessment_weights, group_to_questions, unit_responses,
            timestamp=0):
          yield (StudentAnswersStatsGenerator.build_key(
              unit_id, answer.sequence, answer.question_id,
              answer.question_type), (answer.answers, answer.score))
      # TODO(mgainer): Emit warning counter here if we don't grok
      # the response type. We will need to cope with Oppia and
      # XBlocks responses. Do that in a follow-on CL.
  @staticmethod
  def reduce(key, answers_and_score_list):
    """Tallies correct/incorrect answer counts for one question usage."""
    correct_answers = {}
    incorrect_answers = {}
    unit_id, sequence, question_id, question_type = (
        StudentAnswersStatsGenerator.parse_key(key))
    unit_id = int(unit_id)
    question_id = long(question_id)
    for packed_data in answers_and_score_list:
      # Mapreduce hands reduce() stringified values; parse them back.
      answers, score = ast.literal_eval(packed_data)
      if question_type == 'SaQuestion':
        if score > 0:
          # Note: 'answers' only contains one item (not a list) for
          # SaQuestion.
          correct_answers.setdefault(answers, 0)
          correct_answers[answers] += 1
        else:
          incorrect_answers.setdefault(answers, 0)
          incorrect_answers[answers] += 1
      elif question_type == 'McQuestion':
        # For multiple-choice questions, we only get one overall score
        # for the question as a whole. This means that some choices
        # may be incorrect. Happily, though, the only reason we care
        # about the distinction between correct/incorrect is to limit
        # the quantity of output for incorrect answers. Since
        # multiple-choice questions are inherently limited, just
        # call all of the answers 'correct'.
        for sub_answer in answers:
          correct_answers.setdefault(sub_answer, 0)
          correct_answers[sub_answer] += 1
    def build_reduce_dict(unit_id, sequence, question_id, is_valid,
                          answer, count):
      # NOTE: maintain members in parallel with get_schema() below.
      if not isinstance(answer, basestring):
        answer = str(answer) # Convert numbers to strings.
      return ({'unit_id': str(unit_id),
               'sequence': sequence,
               'question_id': str(question_id),
               'is_valid': is_valid,
               'answer': answer,
               'count': count})
    # Emit tuples for each of the correct answers.
    for answer, count in correct_answers.items():
      yield(build_reduce_dict(unit_id, sequence, question_id, True,
                              answer, count))
    # Emit tuples for incorrect answers. Free-form answer fields can have
    # a lot of wrong answers. Only report the most-commonly-occuring N
    # answers, and report a total for the rest.
    if incorrect_answers:
      sorted_incorrect = [(v, k) for k, v in incorrect_answers.items()]
      sorted_incorrect.sort()
      sorted_incorrect.reverse()
      for count, answer in sorted_incorrect[0:MAX_INCORRECT_REPORT]:
        yield(build_reduce_dict(unit_id, sequence, question_id, False,
                                answer, count))
      total_other_incorrect = 0
      for count, _ in sorted_incorrect[MAX_INCORRECT_REPORT:]:
        total_other_incorrect += count
      if total_other_incorrect:
        yield(build_reduce_dict(unit_id, sequence, question_id, False,
                                'Other Incorrect Answers',
                                total_other_incorrect))
class QuestionAnswersDataSource(data_sources.AbstractSmallRestDataSource):
  """REST data source exposing StudentAnswersStatsGenerator results."""

  @staticmethod
  def required_generators():
    return [StudentAnswersStatsGenerator]

  @classmethod
  def get_name(cls):
    return 'question_answers'

  @classmethod
  def get_title(cls):
    return 'Question Answers'

  @classmethod
  def get_schema(cls, unused_app_context, unused_catch_and_log,
                 unused_source_context):
    """Declares the fields of one summarized-answer record."""
    # NOTE: maintain members in parallel with build_reduce_dict() above.
    reg = schema_fields.FieldRegistry(
        'Question Answers',
        description='Summarized results for each use of each question')
    field_specs = (
        ('unit_id', 'Unit ID', 'string',
         'ID of unit in which question appears. Key to Unit'),
        ('sequence', 'Sequence', 'integer',
         'Ordering within course for question.'),
        ('question_id', 'Question ID', 'string',
         'ID of question. Key to models.QuestionDAO'),
        ('is_valid', 'Is Valid', 'boolean',
         'Whether the answer is "valid". An answer is '
         'valid if it is one of the defined answers to the question. '
         'All answers to multiple-choice questions, correct or incorrect '
         'are considered valid. Answers to single-answer questions '
         '(i.e., type-in-an-answer) questions are only considered valid '
         'if they earned a positive score. The most-commonly guessed '
         'wrong answers are also reported with this field set to False. '
         'The count of the rest of the wrong answers is lumped into a '
         'single item, "Other Incorrect Answers".'),
        ('answer', 'Answer', 'string',
         'The actually-selected answer'),
        ('count', 'Count', 'integer',
         'The number of times this answer was given.'),
    )
    for field_name, field_title, field_type, field_description in field_specs:
      reg.add_property(schema_fields.SchemaField(
          field_name, field_title, field_type,
          description=field_description))
    return reg.get_json_schema_dict()['properties']

  @classmethod
  def fetch_values(cls, app_context, unused_source_context, unused_schema,
                   unused_catch_and_log, unused_page_number,
                   student_answers_job):
    """Returns all summarized answers sorted for display."""
    # Order: by unit, then course sequence; within one question usage,
    # valid answers before invalid, then alphabetically by answer text.
    results = list(jobs.MapReduceJob.get_results(student_answers_job))
    results.sort(key=lambda item: (
        item['unit_id'], item['sequence'],
        not item['is_valid'], item['answer']))
    return results, 0
class CourseQuestionsDataSource(data_sources.AbstractSmallRestDataSource):
  """REST data source describing every question defined in the course."""

  @classmethod
  def get_name(cls):
    return 'course_questions'

  @classmethod
  def get_title(cls):
    return 'Course Questions'

  @classmethod
  def exportable(cls):
    return True

  @classmethod
  def get_schema(cls, unused_app_context, unused_catch_and_log,
                 unused_source_context):
    """Declares the fields describing one question."""
    reg = schema_fields.FieldRegistry(
        'Course Questions',
        description='Facts about each usage of each question in a course.')
    reg.add_property(schema_fields.SchemaField(
        'question_id', 'Question ID', 'string',
        description='ID of question. Key to models.QuestionDAO'))
    reg.add_property(schema_fields.SchemaField(
        'description', 'Description', 'string',
        description='User-entered description of question.'))
    reg.add_property(schema_fields.SchemaField(
        'text', 'Text', 'string',
        description='Text of the question.'))
    option_field = schema_fields.SchemaField(
        'option_text', 'Option Text', 'string',
        description='Text of the multiple-choice option')
    reg.add_property(schema_fields.FieldArray(
        'choices', 'Choices', item_type=option_field,
        description='Multiple-choice question options'))
    return reg.get_json_schema_dict()['properties']

  @classmethod
  def fetch_values(cls, app_context, unused_source_context, unused_schema,
                   unused_catch_and_log, unused_page_number):
    """Builds one record per question from the datastore."""
    records = []
    for question in models.QuestionDAO.get_all():
      if 'choices' in question.dict:
        option_texts = [c['text'] for c in question.dict['choices']]
      else:
        option_texts = []
      records.append({
          'question_id': str(question.id),
          'description': question.dict['description'],
          'text': question.dict['question'],
          'choices': option_texts,
      })
    return records, 0
class CourseUnitsDataSource(data_sources.AbstractSmallRestDataSource):
  """REST data source listing every unit/assessment/link in the course."""

  @classmethod
  def get_name(cls):
    return 'course_units'

  @classmethod
  def get_title(cls):
    return 'Course Units'

  @classmethod
  def exportable(cls):
    return True

  @classmethod
  def get_schema(cls, unused_app_context, unused_catch_and_log,
                 unused_source_context):
    """Declares the fields describing one course unit."""
    # NOTE: maintain members in parallel with build_reduce_dict() above.
    reg = schema_fields.FieldRegistry(
        'Units',
        description='Units (units, assessments, links) in a course')
    field_specs = (
        ('unit_id', 'Unit ID', 'string',
         'ID of unit in which question appears. Key to Unit'),
        ('now_available', 'Now Available', 'boolean',
         'Whether the unit is publicly available'),
        ('type', 'Type', 'string',
         'Type of unit. "U":unit, "A":assessment, "L":link'),
        ('title', 'Title', 'string',
         'Display title of the unit'),
        ('release_date', 'Release Date', 'string',
         'Date the unit is to be made publicly available'),
        ('props', 'Properties', 'string',
         'Site-specific additional properties added to unit'),
        ('weight', 'Weight', 'number',
         'Weight to give to the unit when scoring.'),
    )
    for field_name, field_title, field_type, field_description in field_specs:
      reg.add_property(schema_fields.SchemaField(
          field_name, field_title, field_type,
          description=field_description))
    return reg.get_json_schema_dict()['properties']

  @classmethod
  def fetch_values(cls, app_context, unused_source_context, unused_schema,
                   unused_catch_and_log, unused_page_number):
    """Builds one record per unit, in course order."""
    course = courses.Course(None, app_context)
    records = [{
        'unit_id': str(unit.unit_id),
        'type': unit.type,
        'title': unit.title,
        'release_date': unit.release_date,
        'now_available': unit.now_available,
        'props': str(unit.properties),
        'weight': float(unit.weight),
    } for unit in course.get_units()]
    return records, 0
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core custom tags."""
__author__ = 'John Orr (jorr@google.com)'
import os
import re
import urllib
import urlparse
from xml.etree import cElementTree
import markdown
import appengine_config
from common import crypto
from common import jinja_utils
from common import schema_fields
from common import tags
from common import utils as common_utils
from controllers import utils
from models import courses
from models import custom_modules
from models import models
from models import roles
from models import transforms
from modules.oeditor import oeditor
# URL roots under which this module's static resources are served.
_RESOURCE_PREFIX = '/modules/core_tags'
RESOURCE_FOLDER = _RESOURCE_PREFIX + '/resources/'
_OEDITOR_RESOURCE_FOLDER = '/modules/oeditor/resources/'
# Client scripts supporting the Google Drive tag and iframe resizing.
_DRIVE_TAG_REFRESH_SCRIPT = RESOURCE_FOLDER + 'drive_tag_refresh.js'
_IFRAME_RESIZE_SCRIPT = _OEDITOR_RESOURCE_FOLDER + 'resize_iframes.js'
_PARENT_FRAME_SCRIPT = RESOURCE_FOLDER + 'drive_tag_parent_frame.js'
_SCRIPT_MANAGER_SCRIPT = RESOURCE_FOLDER + 'drive_tag_script_manager.js'
# Filesystem locations of this module's resources and Jinja templates.
_RESOURCE_ABSPATH = os.path.join(os.path.dirname(__file__), 'resources')
_TEMPLATES_ABSPATH = os.path.join(os.path.dirname(__file__), 'templates')
# Handler paths registered for the Google Drive tag and its renderer.
_GOOGLE_DRIVE_TAG_PATH = _RESOURCE_PREFIX + '/googledrivetag'
_GOOGLE_DRIVE_TAG_RENDERER_PATH = _RESOURCE_PREFIX + '/googledrivetagrenderer'
def _escape_url(url, force_https=True):
"""Escapes/quotes url parts to sane user input."""
scheme, netloc, path, query, unused_fragment = urlparse.urlsplit(url)
if force_https:
scheme = 'https'
path = urllib.quote(path)
query = urllib.quote_plus(query, '=?&;')
return urlparse.urlunsplit((scheme, netloc, path, query, unused_fragment))
def _replace_url_query(url, new_query):
"""Replaces the query part of a URL with a new one."""
scheme, netloc, path, _, fragment = urlparse.urlsplit(url)
return urlparse.urlunsplit((scheme, netloc, path, new_query, fragment))
class _Runtime(object):
  """Derives runtime configuration state from CB application context."""
  def __init__(self, app_context):
    self._app_context = app_context
    # Snapshot of the course environment settings dict.
    self._environ = self._app_context.get_environ()
  def can_edit(self):
    # True iff the current user administers this course.
    return roles.Roles.is_course_admin(self._app_context)
  def courses_can_use_google_apis(self):
    # Global (cross-course) switch enabling Google API integration.
    return courses.COURSES_CAN_USE_GOOGLE_APIS.value
  def configured(self):
    """True iff Google APIs are enabled and both credentials are set."""
    return (
        self.courses_can_use_google_apis() and
        bool(self.get_api_key()) and
        bool(self.get_client_id()))
  def get_api_key(self):
    """Returns the course's Google API key setting, or '' if unset."""
    # The setting name is a colon-separated path into the environ dict.
    course, google, api_key = courses.CONFIG_KEY_GOOGLE_API_KEY.split(':')
    return self._environ.get(course, {}).get(google, {}).get(api_key, '')
  def get_client_id(self):
    """Returns the course's Google client ID setting, or '' if unset."""
    course, google, client_id = courses.CONFIG_KEY_GOOGLE_CLIENT_ID.split(
        ':')
    return self._environ.get(
        course, {}
    ).get(
        google, {}
    ).get(
        client_id, '')
  def get_slug(self):
    # URL prefix under which this course is mounted.
    return self._app_context.get_slug()
class CoreTag(tags.BaseTag):
  """All core custom tags derive from this class."""
  @classmethod
  def vendor(cls):
    # Namespace prefix under which all core tags are registered.
    return 'gcb'
  @classmethod
  def create_icon_url(cls, name):
    """Creates a URL for an icon with a specific name."""
    return os.path.join(RESOURCE_FOLDER, name)
class GoogleDoc(CoreTag):
  """Custom tag for a Google Doc."""
  @classmethod
  def name(cls):
    return 'Google Doc'
  def render(self, node, unused_handler):
    """Renders the tag as an iframe embedding the published document."""
    height = node.attrib.get('height') or '300'
    link = node.attrib.get('link')
    # Published docs accept 'embedded=true' to render without Docs chrome.
    url = _escape_url(_replace_url_query(link, 'embedded=true'))
    iframe = cElementTree.XML("""
    <iframe class="google-doc" title="Google Doc" type="text/html" frameborder="0">
    </iframe>""")
    iframe.set('src', url)
    # Width is fixed at 700px; only height is user-configurable.
    iframe.set('style', 'width: %spx; height: %spx' % (700, height))
    return iframe
  def get_icon_url(self):
    return self.create_icon_url('docs.png')
  def get_schema(self, unused_handler):
    """Declares the attributes editable in the tag editor."""
    reg = schema_fields.FieldRegistry(GoogleDoc.name())
    reg.add_property(
        # To get this value, users do File > Publish to the web..., click
        # 'Start publishing', and then copy and paste the Document link.
        # Changes to the publication status of a document or to its
        # contents do not appear instantly.
        schema_fields.SchemaField(
            'link', 'Document Link', 'string',
            optional=True,
            description=('Provide the "Document Link" from the Google Docs '
                         '"Publish to the web" dialog')))
    reg.add_property(
        schema_fields.SchemaField(
            'height', 'Height', 'string', i18n=False,
            optional=True,
            extra_schema_dict_values={'value': '300'},
            description=('Height of the document, in pixels. Width will be '
                         'set automatically')))
    return reg
class GoogleDrive(CoreTag, tags.ContextAwareTag):
  """Custom tag for Google Drive items."""
  # Content-chunk type id used to key stored Drive documents.
  CONTENT_CHUNK_TYPE = 'google-drive'
  @classmethod
  def additional_dirs(cls):
    return [_RESOURCE_ABSPATH]
  @classmethod
  def extra_css_files(cls):
    return ['google_drive_tag.css']
  @classmethod
  def extra_js_files(cls):
    return ['drive_tag_child_frame.js', 'google_drive_tag_lightbox.js']
  @classmethod
  def name(cls):
    return 'Google Drive'
  @classmethod
  def on_register(cls):
    # Hook our scripts into the object editor; on_unregister must remove
    # the same callable.
    oeditor.ObjectEditor.EXTRA_SCRIPT_TAG_URLS.append(
        cls._oeditor_extra_script_tags_urls)
  @classmethod
  def on_unregister(cls):
    oeditor.ObjectEditor.EXTRA_SCRIPT_TAG_URLS.remove(
        cls._oeditor_extra_script_tags_urls)
  @classmethod
  def _oeditor_extra_script_tags_urls(cls):
    """Returns editor script URLs, empty when Google APIs are disabled."""
    script_urls = []
    if courses.COURSES_CAN_USE_GOOGLE_APIS.value:
      # Order matters here because scripts are inserted in the order they
      # are found in this list, and later ones may refer to symbols from
      # earlier ones.
      script_urls.append(_SCRIPT_MANAGER_SCRIPT)
      script_urls.append(_PARENT_FRAME_SCRIPT)
    return script_urls
  def get_icon_url(self):
    return self.create_icon_url('drive.png')
  def get_schema(self, handler):
    """Declares the editable attributes, or an unavailable-schema notice."""
    api_key = None
    client_id = None
    if handler:
      runtime = _Runtime(handler.app_context)
      if not runtime.configured():
        return self.unavailable_schema(
            'Google Drive is not available. Please make sure the '
            'global gcb_courses_can_use_google_apis setting is True '
            'and set the Google API Key and Google Client ID in course '
            'settings in order to use this tag.')
      api_key = runtime.get_api_key()
      client_id = runtime.get_client_id()
    reg = schema_fields.FieldRegistry(GoogleDrive.name())
    reg.add_property(
        schema_fields.SchemaField(
            'document-id', 'Document ID', 'string',
            optional=True,  # Validation enforced by JS code.
            description='The ID of the Google Drive item you want to '
            'use', i18n=False, extra_schema_dict_values={
                'api-key': api_key,
                'client-id': client_id,
                'type-id': self.CONTENT_CHUNK_TYPE,
                'xsrf-token': GoogleDriveRESTHandler.get_xsrf_token(),
            }))
    return reg
  def render(self, node, context):
    """Renders the tag: admin-only refresh controls plus a content iframe."""
    runtime = _Runtime(context.handler.app_context)
    resource_id = node.attrib.get('document-id')
    src = self._get_tag_renderer_url(
        runtime.get_slug(), self.CONTENT_CHUNK_TYPE, resource_id)
    tag = cElementTree.Element('div')
    tag.set('class', 'google-drive google-drive-container')
    if runtime.can_edit():
      # Admins additionally get a controls div carrying the credentials
      # and XSRF token needed by the refresh script.
      controls = cElementTree.Element('div')
      controls.set('class', 'google-drive google-drive-controls')
      controls.set('data-api-key', runtime.get_api_key())
      controls.set('data-client-id', runtime.get_client_id())
      controls.set('data-document-id', resource_id)
      controls.set(
          'data-xsrf-token', GoogleDriveRESTHandler.get_xsrf_token())
      tag.append(controls)
    iframe = cElementTree.Element('iframe')
    iframe.set(
        'class',
        'google-drive google-drive-content-iframe gcb-needs-resizing')
    iframe.set('frameborder', '0')
    iframe.set('scrolling', 'no')
    iframe.set('src', src)
    iframe.set('title', 'Google Drive')
    iframe.set('width', '100%')
    tag.append(iframe)
    return tag
  def rollup_header_footer(self, context):
    """Returns (header, footer) script containers for the rendered page."""
    runtime = _Runtime(context.handler.app_context)
    can_edit = runtime.can_edit()
    srcs = [_IFRAME_RESIZE_SCRIPT]
    if can_edit:  # Harmless but wasteful to give to non-admins.
      srcs = [_SCRIPT_MANAGER_SCRIPT] + srcs
    header = cElementTree.Element('div')
    for src in srcs:
      script = cElementTree.Element('script')
      script.set('src', src)
      header.append(script)
    # Put in footer so other scripts will already be loaded when our main
    # fires. Give script to admins only (though note that even if non-admins
    # grab the script we won't give them the XSRF tokens they need to issue
    # CB AJAX ops).
    footer = cElementTree.Element('div')
    if can_edit:
      script = cElementTree.Element('script')
      script.set('src', _DRIVE_TAG_REFRESH_SCRIPT)
      footer.append(script)
    return (header, footer)
  def _get_tag_renderer_url(self, slug, type_id, resource_id):
    # URL of the handler that serves the stored Drive content chunk.
    args = urllib.urlencode(
        {'type_id': type_id, 'resource_id': resource_id})
    return '%s%s?%s' % (slug, _GOOGLE_DRIVE_TAG_RENDERER_PATH, args)
class GoogleDriveRESTHandler(utils.BaseRESTHandler):
    """REST endpoint that saves a copy of a Drive item as a content chunk."""

    _XSRF_TOKEN_NAME = 'modules-core-tags-google-drive'
    XSRF_TOKEN_REQUEST_KEY = 'xsrf_token'

    @classmethod
    def get_xsrf_token(cls):
        """Mint an XSRF token scoped to this handler's action."""
        return crypto.XsrfTokenManager.create_xsrf_token(cls._XSRF_TOKEN_NAME)

    def put(self):
        """Save posted Drive item contents; reply with a JSON status."""
        # Respond 404 (rather than 403) when the feature is globally off.
        if not courses.COURSES_CAN_USE_GOOGLE_APIS.value:
            self.error(404)
            return
        request = transforms.loads(self.request.get('request', ''))
        if not self.assert_xsrf_token_or_fail(
            request, self._XSRF_TOKEN_NAME, {}):
            return
        contents = request.get('contents')
        document_id = request.get('document_id')
        type_id = request.get('type_id')
        if not (contents and document_id):
            transforms.send_json_response(
                self, 400, 'Save failed; no Google Drive item chosen.')
            return
        if not type_id:
            transforms.send_json_response(
                self, 400, 'Save failed; type_id not set')
            return
        key = None
        try:
            key = self._save_content_chunk(contents, type_id, document_id)
        except Exception, e:  # On purpose. pylint: disable=broad-except
            # Surface storage failures to the client as JSON rather than
            # letting the framework 500 with no body.
            transforms.send_json_response(
                self, 500, 'Error when saving: %s' % e)
            return
        transforms.send_json_response(
            self, 200, 'Success.', payload_dict={'key': str(key)})

    def _save_content_chunk(self, contents, type_id, resource_id):
        """Insert or update the chunk keyed by (type_id, resource_id)."""
        key = None
        uid = models.ContentChunkDAO.make_uid(type_id, resource_id)
        matches = models.ContentChunkDAO.get_by_uid(uid)
        if not matches:
            key = models.ContentChunkDAO.save(models.ContentChunkDTO({
                'content_type': 'text/html',
                'contents': contents,
                'resource_id': resource_id,
                'type_id': type_id,
            }))
        else:
            # There is a data race in the DAO -- it's possible to create two
            # entries at the same time with the same UID. If that happened,
            # use the first one saved.
            dto = matches[0]
            dto.contents = contents
            dto.content_type = 'text/html'
            key = models.ContentChunkDAO.save(dto)
        return key
class GoogleDriveTagRenderer(utils.BaseHandler):
    """Serves stored Google Drive content chunks as embeddable HTML pages."""

    def get(self):
        """Render the contents of a previously-saved content chunk."""
        if not courses.COURSES_CAN_USE_GOOGLE_APIS.value:
            self.error(404)
            return
        type_id = self.request.get('type_id')
        resource_id = self.request.get('resource_id')
        if not (resource_id and type_id):
            self._handle_error(400, 'Bad request')
            return
        uid = models.ContentChunkDAO.make_uid(type_id, resource_id)
        chunks = models.ContentChunkDAO.get_by_uid(uid)
        if not chunks:
            self._handle_error(404, 'Content chunk not found')
            return
        # A DAO data race can yield duplicate entries for one UID; prefer
        # the first one saved.
        template = jinja_utils.get_template(
            'drive_item.html', [_TEMPLATES_ABSPATH])
        self.response.out.write(
            template.render({'contents': chunks[0].contents}))

    def _handle_error(self, code, message):
        """Emit the error page with the given HTTP status and message."""
        self.error(code)
        template = jinja_utils.get_template(
            'drive_error.html', [_TEMPLATES_ABSPATH])
        self.response.out.write(template.render({
            'code': code,
            'message': message,
        }))
class GoogleSpreadsheet(CoreTag):
    """Custom tag embedding a published Google Spreadsheet in an iframe."""

    @classmethod
    def name(cls):
        return 'Google Spreadsheet'

    def get_icon_url(self):
        return self.create_icon_url('spreadsheets.png')

    def render(self, node, unused_handler):
        """Build an iframe pointing at the chromeless published sheet."""
        link = node.attrib.get('link')
        height = node.attrib.get('height') or '300'
        # Drop any '&output=...' suffix from the published link, then ask
        # for the chromeless view.
        base_link = link.split('&output')[0]
        url = _escape_url('%s&chrome=false' % base_link)
        iframe = cElementTree.XML("""
<iframe class="google-spreadsheet" title="Google Spreadsheet" type="text/html"
    frameborder="0">
</iframe>""")
        iframe.set('src', url)
        iframe.set('style', 'width: %spx; height: %spx' % (700, height))
        return iframe

    def get_schema(self, unused_handler):
        """Editor schema: publication link plus an optional pixel height."""
        reg = schema_fields.FieldRegistry(GoogleSpreadsheet.name())
        # To get this value, users do File > Publish to the web..., click
        # 'Start publishing', and then copy and paste the link above 'Copy
        # and paste the link above'. Changes to the publication status of a
        # document or to its contents do not appear instantly.
        link_field = schema_fields.SchemaField(
            'link', 'Link', 'string',
            optional=True,
            description=('Provide the link from the Google Spreadsheets '
                         '"Publish to the web" dialog'))
        height_field = schema_fields.SchemaField(
            'height', 'Height', 'string', i18n=False,
            optional=True,
            extra_schema_dict_values={'value': '300'},
            description=('Height of the spreadsheet, in pixels. Width will '
                         'be set automatically'))
        reg.add_property(link_field)
        reg.add_property(height_field)
        return reg
class YouTube(CoreTag):
    """Custom tag embedding a YouTube video, with optional event tracking."""

    @classmethod
    def name(cls):
        return 'YouTube Video'

    def get_icon_url(self):
        return self.create_icon_url('youtube.png')

    def render(self, node, unused_handler):
        """Dispatch to the tracking or plain embed based on settings."""
        video_id = node.attrib.get('videoid')
        if not utils.CAN_PERSIST_TAG_EVENTS.value:
            return self._render_no_tracking(video_id)
        return self._render_with_tracking(video_id)

    def _render_no_tracking(self, video_id):
        """Embed video without event tracking support."""
        embed_url = (
            'https://www.youtube.com/embed/%s'
            '?feature=player_embedded&rel=0') % video_id
        container = cElementTree.XML("""
<div class="gcb-video-container">
  <iframe class="youtube-player" title="YouTube Video Player"
    type="text/html" frameborder="0" allowfullscreen="allowfullscreen">
  </iframe>
</div>""")
        container[0].set('src', embed_url)
        return container

    def _render_with_tracking(self, video_id):
        """Embed video and enable event tracking."""
        safe_video_id = jinja_utils.js_string_raw(video_id)
        instance_id = common_utils.generate_instance_id()
        wrapper = cElementTree.XML("""
<p>
  <script></script>
  <script></script>
</p>""")
        wrapper.attrib['id'] = instance_id
        # First script loads the player glue; second enqueues this video.
        wrapper[0].attrib['src'] = os.path.join(
            RESOURCE_FOLDER, 'youtube_video.js')
        wrapper[1].text = 'gcbTagYoutubeEnqueueVideo("%s", "%s");' % (
            safe_video_id, instance_id)
        return wrapper

    def get_schema(self, unused_handler):
        """Editor schema: just the YouTube video id."""
        reg = schema_fields.FieldRegistry(YouTube.name())
        video_field = schema_fields.SchemaField(
            'videoid', 'Video Id', 'string',
            optional=True,
            description='Provide YouTube video ID (e.g. Kdg2drcUjYI)')
        reg.add_property(video_field)
        return reg
class Html5Video(CoreTag):
    """Custom tag embedding an HTML5 <video> element.

    Fix: the multi-line 'url' field description was built from adjacent
    string literals missing separating spaces, rendering as
    "a videofrom Google Docs ... E.g.,https://..." in the editor.
    """

    @classmethod
    def name(cls):
        return 'HTML5 Video'

    def render(self, node, unused_handler):
        """Build the <video> element, plus tracking scripts if enabled."""
        if utils.CAN_PERSIST_TAG_EVENTS.value:
            tracking_text = (
                '<script src="' + os.path.join(
                    RESOURCE_FOLDER, 'html5_video.js') + '">' +
                '</script>' +
                '<script>' +
                '  gcbTagHtml5TrackVideo("%s");' % (
                    jinja_utils.js_string_raw(node.attrib.get('instanceid'))) +
                '</script>')
        else:
            tracking_text = ''
        video_text = (
            '<div>' +
            '  <video></video>'
            '%s' % tracking_text +
            '</div>')
        video = cElementTree.XML(video_text)
        # video[0] is the <video> child of the wrapping <div>.
        video[0].set('id', node.attrib.get('instanceid'))
        video[0].set('src', node.attrib.get('url'))
        if node.attrib.get('width'):
            video[0].set('width', node.attrib.get('width'))
        if node.attrib.get('height'):
            video[0].set('height', node.attrib.get('height'))
        video[0].set('controls', 'true')
        return video

    def get_icon_url(self):
        return self.create_icon_url('html5-badge-h-solo.png')

    def get_schema(self, unused_handler):
        """Editor schema: video URL plus optional pixel dimensions."""
        reg = schema_fields.FieldRegistry(Html5Video.name())
        reg.add_property(
            schema_fields.SchemaField(
                'url', 'Video URL', 'url',
                optional=False,
                # Spaces added at literal boundaries so the concatenated
                # description reads correctly in the editor UI.
                description='URL of the video. Note that playing a video '
                'from Google Docs is supported; add "&export=download". E.g., '
                'https://docs.google.com/a/google.com/uc?authuser=0'
                '&id=0B82t9jeypLokMERMQ1g5Q3NFU1E&export=download'))
        reg.add_property(schema_fields.SchemaField(
            'width', 'Width', 'integer',
            optional=True,
            description='Width, in pixels.'))
        reg.add_property(schema_fields.SchemaField(
            'height', 'Height', 'integer',
            optional=True,
            description='Height, in pixels.'))
        return reg
class GoogleGroup(CoreTag):
    """Custom tag embedding a Google Groups forum category in an iframe."""

    @classmethod
    def name(cls):
        return 'Google Group'

    def render(self, node, handler):
        """Build the embedded-forum iframe for the configured group/category.

        Note: in Firefox, this component requires a full hostname to work.
        If you are working in the development environment and are accessing
        this component at localhost, please replace 'localhost' with
        '127.0.0.1' instead.
        """
        _, netloc, _, _, _ = urlparse.urlsplit(handler.request.uri)
        parent_url_suffix = ''
        # Only skip the parenturl query arg when serving from localhost in
        # a non-production environment.
        if (appengine_config.PRODUCTION_MODE or
            not netloc.startswith('localhost')):
            parent_url_suffix = (
                '?parenturl=%s' % urllib.quote(handler.request.uri, safe=''))
        group_name = node.attrib.get('group')
        category_name = node.attrib.get('category')
        # NOTE(review): both attributes are optional in the schema; if
        # either is missing, urllib.quote(None) below will raise -- confirm
        # the editor always supplies them.
        embedded_forum_url = (
            'https://groups.google.com/forum/embed/%s#!categories/%s/%s' % (
                parent_url_suffix,
                urllib.quote(group_name),
                urllib.quote(category_name)
            ))
        iframe = cElementTree.XML("""
<p>
  <iframe class="forum-embed" title="Google Group Embed"
    type="text/html" width="700" height="300" frameborder="0">
  </iframe>
</p>""")
        iframe[0].set('src', embedded_forum_url)
        return iframe

    def get_icon_url(self):
        return self.create_icon_url('forumembed.png')

    def get_schema(self, unused_handler):
        """Editor schema: group name and category name."""
        reg = schema_fields.FieldRegistry(GoogleGroup.name())
        reg.add_property(schema_fields.SchemaField(
            'group', 'Group Name', 'string', optional=True, i18n=False,
            description='Name of the Google Group (e.g. mapping-with-google)'))
        reg.add_property(schema_fields.SchemaField(
            'category', 'Category Name', 'string', optional=True, i18n=False,
            description='Name of the Category (e.g. unit5-2-annotation)'))
        return reg
class IFrame(CoreTag):
    """Custom tag embedding an arbitrary URL in an iframe."""

    def render(self, node, unused_handler):
        """Build the iframe element from the tag's attributes.

        All schema fields are optional, so default missing 'src'/'title'
        to '' rather than passing None along: ElementTree raises a
        TypeError at serialization time for a None attribute value, which
        would break rendering of any page containing an incomplete tag.
        """
        src = node.attrib.get('src') or ''
        title = node.attrib.get('title') or ''
        height = node.attrib.get('height') or '400'
        width = node.attrib.get('width') or '650'
        iframe = cElementTree.XML(
            '<iframe style="border: 0;"></iframe>'
        )
        iframe.set('src', _escape_url(src, force_https=False))
        iframe.set('title', title)
        iframe.set('width', width)
        iframe.set('height', height)
        return iframe

    def get_icon_url(self):
        return self.create_icon_url('iframe.png')

    def get_schema(self, unused_handler):
        """Editor schema: URL, title, and pixel dimensions."""
        reg = schema_fields.FieldRegistry(IFrame.name())
        reg.add_property(schema_fields.SchemaField(
            'src', 'Source URL', 'string',
            optional=True,
            description='Provide source URL for iframe (including http/https)'))
        reg.add_property(schema_fields.SchemaField(
            'title', 'Title', 'string',
            optional=True,
            description='Provide title of iframe'))
        reg.add_property(schema_fields.SchemaField(
            'height', 'Height', 'string', i18n=False,
            optional=True,
            extra_schema_dict_values={'value': '400'},
            description=('Height of the iframe')))
        reg.add_property(schema_fields.SchemaField(
            'width', 'Width', 'string', i18n=False,
            optional=True,
            extra_schema_dict_values={'value': '650'},
            description=('Width of the iframe')))
        return reg
class Include(CoreTag):
    """Custom tag inlining the contents of an HTML file under assets/html."""

    def render(self, node, handler):
        """Render the referenced template file and inline the result."""
        # Strip leading slashes so the path is relative to BUNDLE_ROOT.
        template_path = re.sub('^/+', '', node.attrib.get('path'))
        base_path = os.path.dirname(template_path)
        base_file = os.path.basename(template_path)
        handler.init_template_values(handler.app_context.get_environ())
        handler.template_value['base_path'] = base_path
        html_text = handler.render_template_to_html(
            handler.template_value, base_file,
            additional_dirs=[
                os.path.join(appengine_config.BUNDLE_ROOT, 'views'),
                appengine_config.BUNDLE_ROOT,
                os.path.join(appengine_config.BUNDLE_ROOT, base_path),
            ])
        return tags.html_string_to_element_tree(html_text)

    def get_icon_url(self):
        return self.create_icon_url('include.png')

    def get_schema(self, handler):
        """Editor schema whose selector lists the files under assets/html."""
        expected_prefix = os.path.join(appengine_config.BUNDLE_ROOT,
                                       'assets/html')
        select_data = []
        if handler:
            all_files = handler.app_context.fs.list(expected_prefix,
                                                    include_inherited=True)
            for name in all_files:
                if name.startswith(expected_prefix):
                    # Display names are relative to /assets/html/.
                    name = name.replace(appengine_config.BUNDLE_ROOT, '')
                    select_data.append(
                        (name, name.replace('/assets/html/', '')))
        reg = schema_fields.FieldRegistry(Include.name())
        reg.add_property(schema_fields.SchemaField(
            'path', 'File Path', 'string', optional=False,
            select_data=select_data,
            description='Select a file from within assets/html. '
            'The contents of this file will be inserted verbatim '
            'at this point. Note: HTML files for inclusion may '
            'also be uploaded as assets.'))
        return reg
class Markdown(tags.ContextAwareTag, CoreTag):
    """Custom tag that renders Markdown text (from the tag body) as HTML."""

    @classmethod
    def name(cls):
        return 'Markdown'

    @classmethod
    def additional_dirs(cls):
        return [os.path.join(
            appengine_config.BUNDLE_ROOT, 'modules', 'core_tags', 'resources')]

    @classmethod
    def extra_js_files(cls):
        """Returns a list of JS files to be loaded in the editor lightbox."""
        if not oeditor.CAN_HIGHLIGHT_CODE.value:
            return []
        return ['markdown_popup.js']

    def get_icon_url(self):
        return self.create_icon_url('markdown.png')

    def render(self, node, context):
        # The markdown is "text" type in the schema and so is presented in
        # the tag's body.
        rendered = markdown.markdown(node.text) if node.text else ''
        return tags.html_string_to_element_tree(
            '<div class="gcb-markdown">%s</div>' % rendered)

    def rollup_header_footer(self, context):
        """Include markdown css only when markdown tag is present."""
        header = tags.html_string_to_element_tree(
            '<link href="%s/markdown.css" rel="stylesheet" '
            'type="text/css">' % RESOURCE_FOLDER)
        return (header, tags.html_string_to_element_tree(''))

    def get_schema(self, unused_handler):
        """Editor schema: a single text field holding the markdown source."""
        reg = schema_fields.FieldRegistry(Markdown.name())
        markdown_field = schema_fields.SchemaField(
            'markdown', 'Markdown', 'text', optional=False,
            description='Provide '
            '<a target="_blank" '
            'href="http://daringfireball.net/projects/markdown/syntax">'
            'markdown</a> text')
        reg.add_property(markdown_field)
        return reg
custom_module = None  # Set by register_module(); queried by the framework.


def register_module():
    """Registers this module in the registry."""
    custom_tags = [
        GoogleDoc, GoogleDrive, GoogleSpreadsheet, YouTube, Html5Video,
        GoogleGroup, IFrame, Include, Markdown]

    def make_binding_name(custom_tag):
        # Tag binding names look like e.g. 'gcb-googledrive'.
        return 'gcb-%s' % custom_tag.__name__.lower()

    def on_module_disable():
        for custom_tag in custom_tags:
            tags.Registry.remove_tag_binding(make_binding_name(custom_tag))
        # Unregister extra libraries required by GoogleDrive
        GoogleDrive.on_unregister()

    def on_module_enable():
        for custom_tag in custom_tags:
            tags.Registry.add_tag_binding(
                make_binding_name(custom_tag), custom_tag)
        # Register extra libraries required by GoogleDrive
        GoogleDrive.on_register()

    global custom_module  # pylint: disable=global-statement
    # Static resources are served globally; the Drive REST/renderer
    # handlers are per-course (namespaced).
    global_routes = [(
        os.path.join(RESOURCE_FOLDER, '.*'), tags.ResourcesHandler)]
    namespaced_routes = [
        (_GOOGLE_DRIVE_TAG_PATH, GoogleDriveRESTHandler),
        (_GOOGLE_DRIVE_TAG_RENDERER_PATH, GoogleDriveTagRenderer),
    ]
    custom_module = custom_modules.Module(
        'Core Custom Tags Module',
        'A module that provides core custom tags.',
        global_routes, namespaced_routes,
        notify_module_enabled=on_module_enable,
        notify_module_disabled=on_module_disable)
    return custom_module
# ---- boundary artifact: a second source file (data_pump module) follows ----
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enable periodic transmission of DB and job-produced content to BigQuery."""
__author__ = [
'Michael Gainer (mgainer@google.com)',
]
import base64
import collections
import copy
import datetime
import logging
import os
import random
import re
import time
import urllib
import apiclient
import apiclient.discovery
import httplib2
import oauth2client
import oauth2client.client
from common import catch_and_log
from common import crypto
from common import schema_fields
from common import utils as common_utils
from controllers import sites
from controllers import utils
from models import analytics
from models import courses
from models import custom_modules
from models import data_sources
from models import jobs
from models import roles
from models import transforms
from modules.dashboard import dashboard
from modules.dashboard import tabs
from google.appengine.ext import db
from google.appengine.ext import deferred
# CourseBuilder setup strings
XSRF_ACTION_NAME = 'data_pump'
DASHBOARD_ACTION = 'data_pump'

# Separate permission to be able to push user data delegable to non-super-users
ACCESS_PERMISSION = 'push_data'
ACCESS_PERMISSION_DESCRIPTION = 'Can push user data outside CourseBuilder.'

# Connection parameters for discovering and auth to BigQuery.
BIGQUERY_RW_SCOPE = 'https://www.googleapis.com/auth/bigquery'
BIGQUERY_API_NAME = 'bigquery'
BIGQUERY_API_VERSION = 'v2'

# API endpoint for initiating a retryable upload.
BIGQUERY_API_UPLOAD_URL_PREFIX = (
    'https://www.googleapis.com/upload/bigquery/v2/projects/')

# UI for BigQuery interactive queries
BIGQUERY_UI_URL_PREFIX = 'https://bigquery.cloud.google.com/table/'

# Max of about 20 min of retries (random exponential backoff from 2^1...2^MAX)
MAX_CONSECUTIVE_FAILURES = 10
MAX_RETRY_BACKOFF_SECONDS = 600

# Config for secret
PII_SECRET_LENGTH = 20  # Length in characters of the base64-encoded secret.
PII_SECRET_DEFAULT_LIFETIME = '30 days'

# Constants for accessing job context settings map
UPLOAD_URL = 'upload_url'
LAST_START_OFFSET = 'last_start_offset'
LAST_END_OFFSET = 'last_end_offset'
LAST_PAGE_SENT = 'last_page_sent'
LAST_PAGE_NUM_ITEMS = 'last_page_num_items'
CONSECUTIVE_FAILURES = 'consecutive_failures'
FAILURE_REASON = 'failure_reason'
ITEMS_UPLOADED = 'items_uploaded'
PII_SECRET = 'pii_secret'

# Constants for items within course settings schema
DATA_PUMP_SETTINGS_SCHEMA_SECTION = 'data_pump'
PROJECT_ID = 'project_id'
DATASET_NAME = 'dataset_name'
JSON_KEY = 'json_key'
TABLE_LIFETIME = 'table_lifetime'
PII_ENCRYPTION_TOKEN = 'pii_encryption_token'

# Discovery service lookup retries constants
DISCOVERY_SERVICE_MAX_ATTEMPTS = 10
DISCOVERY_SERVICE_RETRY_SECONDS = 2
def _get_data_source_class_by_name(name):
    """Look up an exportable REST data source class by its class name.

    Returns None (after logging the available names) when no exportable
    class matches.
    """
    registered = data_sources.Registry.get_rest_data_source_classes()
    matches = [clazz for clazz in registered
               if clazz.__name__ == name and clazz.exportable()]
    if matches:
        return matches[0]
    available = ' '.join([clazz.__name__ for clazz in registered])
    logging.critical(
        'No entry found for data source class with name "%s". '
        'Available names are: %s', name, available)
    return None
class DataPumpJob(jobs.DurableJobBase):
@staticmethod
def get_description():
    # NOTE(review): the body is only a docstring, so calling this returns
    # None; presumably the framework reads __doc__ rather than the return
    # value -- confirm against jobs.DurableJobBase.
    """Job to push data from CourseBuilder to BigQuery.

    The job operates from the deferred queue, and takes advantage of the
    underlying TaskQueue retry and backoff support. One job is created
    for each DataSource (see models/data_source). This job moves data
    from the paginated data source up to Google BigQuery via the
    retryable POST method.

    Jobs here run on the TaskQueue named "default" along with all other
    CB deferred tasks because that queue has a reasonable set of config
    parameters. However, there is nothing about these jobs that
    requires interleaving with others if queue parameters need to be
    tuned. Functional tests will need to be changed to have
    execute_all_deferred_tasks() pass the name of the new queue.
    """
def __init__(self, app_context, data_source_class_name,
             no_expiration_date=False, send_uncensored_pii_data=False):
    """Set up a pump job for one data source in one course namespace.

    Args:
        app_context: standard CB application context object.
        data_source_class_name: name of an exportable REST data source
            class registered in data_sources.Registry.
        no_expiration_date: presumably suppresses setting an expiration
            date on the uploaded table (consumed elsewhere in this
            class; confirm).
        send_uncensored_pii_data: forwarded into the data source context
            (see _build_data_source_context) so the source emits PII
            unencoded.
    Raises:
        ValueError: if the named class is unknown or not exportable.
    """
    if not _get_data_source_class_by_name(data_source_class_name):
        raise ValueError(
            'No such data source "%s", or data source is not marked '
            'as exportable.' % data_source_class_name)
    super(DataPumpJob, self).__init__(app_context)
    self._data_source_class_name = data_source_class_name
    # One durable job record per (data source, namespace) pair.
    self._job_name = 'job-datapump-%s-%s' % (self._data_source_class_name,
                                             self._namespace)
    self._no_expiration_date = no_expiration_date
    self._send_uncensored_pii_data = send_uncensored_pii_data
def non_transactional_submit(self):
    """Callback used when UI gesture indicates this job should start."""
    sequence_num = super(DataPumpJob, self).non_transactional_submit()
    # Schedule the actual pump work on the deferred TaskQueue so the UI
    # request returns immediately.
    deferred.defer(self.main, sequence_num)
    return sequence_num
def _mark_job_canceled(self, job, message, duration):
    """Override default behavior of setting job.output to error string.

    Instead of clobbering job.output, keep (or rebuild blank) contexts
    and record the cancellation message in the FAILURE_REASON slot.
    """
    if job.output:
        job_context, data_source_context = self._load_state(
            job, job.sequence_num)
    else:
        # Job never saved state; start from blank contexts.
        job_context = self._build_job_context(None, None)
        data_source_context = self._build_data_source_context()
    job_context[FAILURE_REASON] = message
    # NOTE(review): saved without a transaction -- presumably acceptable
    # for a terminal state; confirm.
    self._save_state(jobs.STATUS_CODE_FAILED, job, job.sequence_num,
                     job_context, data_source_context,
                     use_transaction=False)
def _build_data_source_context(self):
    """Set up context class specific to data source type we pull from."""
    data_source_class = _get_data_source_class_by_name(
        self._data_source_class_name)
    context_class = data_source_class.get_context_class()
    # TODO(mgainer): if we start getting timeout failures, perhaps learn
    # proper chunk size from history, rather than using default.
    default_chunk_size = data_source_class.get_default_chunk_size()
    ret = context_class.build_blank_default({}, default_chunk_size)
    # Not every context type supports the uncensored-PII flag; set it
    # only where present.
    if hasattr(ret, 'send_uncensored_pii_data'):
        ret.send_uncensored_pii_data = self._send_uncensored_pii_data
    return ret
def _build_job_context(self, upload_url, pii_secret):
    """Create the initial mutable state dict tracked across job retries."""
    return {
        UPLOAD_URL: upload_url,
        LAST_START_OFFSET: 0,
        LAST_END_OFFSET: -1,
        LAST_PAGE_SENT: -1,
        LAST_PAGE_NUM_ITEMS: 0,
        CONSECUTIVE_FAILURES: [],
        FAILURE_REASON: '',
        ITEMS_UPLOADED: 0,
        PII_SECRET: pii_secret,
    }
def _load_state(self, job, sequence_num):
    """Deserialize (job_context, data_source_context) from job.output.

    Raises:
        ValueError: when the job record belongs to a newer run (its
            sequence number no longer matches), meaning this run is
            stale and should stop.
    """
    if job.sequence_num != sequence_num:
        raise ValueError(
            'Abandoning stale job with sequence %d; '
            'there is a new job with sequence %d running.' % (
                sequence_num, job.sequence_num))
    data_source_class = _get_data_source_class_by_name(
        self._data_source_class_name)
    content = transforms.loads(job.output)
    job_context = content['job_context']
    data_source_context_class = data_source_class.get_context_class()
    data_source_context = data_source_context_class.build_from_dict(
        content['data_source_context'])
    return job_context, data_source_context
def _save_state(self, state, job, sequence_num, job_context,
                data_source_context, use_transaction=True):
    """Persist job status plus both contexts into the durable job record.

    Args:
        state: one of the jobs.STATUS_CODE_* values.
        job: the durable job record being updated.
        sequence_num: expected run sequence number (stale runs raise).
        job_context: dict of this job's upload-progress state.
        data_source_context: context object for the data source.
        use_transaction: when True, update inside a cross-group
            transaction.
    """
    # Job context may have been made with blank values for these two items.
    # Recover them from the previous context if they are not set (and if
    # the previous context is present enough to have them)
    try:
        prev_job_context, _ = self._load_state(job, sequence_num)
        if not job_context[PII_SECRET]:
            job_context[PII_SECRET] = prev_job_context[PII_SECRET]
        if not job_context[UPLOAD_URL]:
            job_context[UPLOAD_URL] = prev_job_context[UPLOAD_URL]
    except (ValueError, AttributeError):
        pass

    # Convert data source context object to plain dict.
    data_source_class = _get_data_source_class_by_name(
        self._data_source_class_name)
    context_class = data_source_class.get_context_class()
    data_source_context_dict = context_class.save_to_dict(
        data_source_context)

    # Set job object state variables.
    now = datetime.datetime.now()
    job.output = transforms.dumps({
        'job_context': job_context,
        'data_source_context': data_source_context_dict,
    })
    job.status_code = state
    job.execution_time_sec += int((now - job.updated_on).total_seconds())
    job.updated_on = now
    logging.info('Data pump job %s saving contexts: %s %s',
                 self._job_name, str(job_context), str(data_source_context))

    # Using _update in DurableJobEntity
    # pylint: disable=protected-access
    if use_transaction:
        # Cross-group transaction: the update may touch more than one
        # entity group.
        xg_on = db.create_transaction_options(xg=True)
        db.run_in_transaction_options(
            xg_on, jobs.DurableJobEntity._update, self._job_name,
            sequence_num, job.status_code, job.output,
            job.execution_time_sec)
    else:
        jobs.DurableJobEntity._update(self._job_name, sequence_num,
                                      job.status_code, job.output,
                                      job.execution_time_sec)
@classmethod
def _parse_pii_encryption_token(cls, token):
    """Split a stored 'secret/timestamp' token.

    Returns:
        Tuple of (secret string, expiration datetime).
    """
    parts = token.split('/')
    unix_epoch = datetime.datetime(year=1970, month=1, day=1)
    valid_until = unix_epoch + datetime.timedelta(seconds=int(parts[1]))
    return parts[0], valid_until
@classmethod
def _is_pii_encryption_token_valid(cls, token):
    """True iff the token parses and its expiration is in the future."""
    try:
        _, valid_until = cls._parse_pii_encryption_token(token)
        # Compare inside the try, matching the parse call's error scope.
        return valid_until > datetime.datetime.now()
    except ValueError:
        return False
@classmethod
def _build_new_pii_encryption_token(cls, timedelta_string):
    """Mint a new 'secret/expiration-timestamp' PII token string."""
    # base64 expands 3 bytes into 4 characters, so 0.75 * LENGTH random
    # bytes yields a PII_SECRET_LENGTH-character secret.
    hmac_secret = base64.urlsafe_b64encode(
        os.urandom(int(PII_SECRET_LENGTH * 0.75)))
    lifetime = datetime.timedelta(
        seconds=common_utils.parse_timedelta_string(
            timedelta_string).total_seconds())
    unix_epoch = datetime.datetime(year=1970, month=1, day=1)
    now = datetime.datetime.now()
    valid_until_timestamp = int(
        (now - unix_epoch + lifetime).total_seconds())
    return '%s/%d' % (hmac_secret, valid_until_timestamp)
@classmethod
def _get_pii_token(cls, app_context):
    """Retrieve or generate and save a secret used to encrypt exported PII.

    All PII data in objects exported to BigQuery is either suppressed
    or transformed via a one-way hash using a secret value. The point
    of the transformation is so that exported data cannot trivially be
    correlated to any individual's data in CourseBuilder, but records
    in exported data encoded using the same key can. (E.g., a user_id
    is the key for students; this key should be usable to correlate a
    user's language preference with his test scores.)

    Once data has been exported from CourseBuilder to BigQuery, the
    internal permissions from CourseBuilder no longer apply. To minimize
    the ability of those with access to the data to perform long-term
    correlations that might identify individuals, the secret used to
    encode PII is automatically rotated on a period determined by the
    course settings. We re-use the expiration period for tables, or
    default to 30 days if no period is selected.

    The format for the stored setting is a string composed of:
    - A randomly-generated secret encoded as a base-64 string
    - A slash character ('/')
    - A Unix timestamp indicating the expiration date of the token.

    The expiration date approach is chosen so that within the expiration
    period, different data sources can be re-exported multiple times, but
    still correlated with one another in BigQuery. Upon expiration, a
    new token is generated and used. Data exported before and after the
    changeover cannot be directly correlated. (It may be possible to
    force a correlation if old versions of the data tables were downloaded
    by comparing non-key fields in the old/new versions, if the non-key
    fields are sufficiently discriminative)

    Args:
        app_context: Standard CB application context object.
    Returns:
        Secret string used for encoding PII data upon export.
    """
    course_settings = app_context.get_environ()
    pump_settings = course_settings.get(DATA_PUMP_SETTINGS_SCHEMA_SECTION,
                                        {})
    pii_encryption_token = pump_settings.get(PII_ENCRYPTION_TOKEN)
    if (not pii_encryption_token or
        not cls._is_pii_encryption_token_valid(pii_encryption_token)):
        # If table_lifetime is missing OR is set to the empty string,
        # prefer the default value.
        lifetime = (pump_settings.get(TABLE_LIFETIME) or
                    PII_SECRET_DEFAULT_LIFETIME)
        # Mint a replacement token and persist it back into the course
        # settings so later exports within the lifetime reuse it.
        pii_encryption_token = cls._build_new_pii_encryption_token(lifetime)
        pump_settings[PII_ENCRYPTION_TOKEN] = pii_encryption_token
        course = courses.Course(None, app_context=app_context)
        course.save_settings(course_settings)
    return pii_encryption_token
@classmethod
def _get_pii_secret(cls, app_context):
    """Return just the secret portion of the course's PII token."""
    token = cls._get_pii_token(app_context)
    return cls._parse_pii_encryption_token(token)[0]
def _get_bigquery_settings(self, app_context):
    """Pull settings necessary for using BigQuery from DB.

    This is nice and verbose and paranoid, so that if there is any
    misconfiguration, the end-user gets a nice message that's specific
    about the particular problem, rather than just a KeyError or
    ValueError.

    Args:
        app_context: The standard app context for the course in question.
    Returns:
        A namedtuple containing private_key, client_email, project_id
        and dataset_id members. The first three are required to connect
        to BigQuery, and the last is the dataset within BigQuery to
        which the data pump will restrict itself for insert/write/delete
        operations.
    Raises:
        ValueError: if any expected element is missing or malformed.
    """
    pump_settings = app_context.get_environ().get(
        DATA_PUMP_SETTINGS_SCHEMA_SECTION, {})
    # Dataset name falls back to the course slug (stripped to BigQuery's
    # legal character set), then to the literal 'course'.
    dataset_id = (
        pump_settings.get(DATASET_NAME) or
        re.sub('[^0-9a-z_:-]', '', app_context.get_slug().lower()) or
        'course')
    project_id = pump_settings.get(PROJECT_ID)
    if not project_id:
        raise ValueError('Cannot pump data without a course settings value '
                         'for the target Google BigQuery project ID')
    json_key = pump_settings.get(JSON_KEY)
    if not json_key:
        raise ValueError('Cannot pump data without a JSON client key '
                         'allowing access to the target Google BigQuery '
                         'project')
    try:
        json_key = transforms.loads(json_key)
    except ValueError:
        raise ValueError('Cannot decode JSON client key for the target '
                         'Google BigQuery project.')
    if 'private_key' not in json_key or 'client_email' not in json_key:
        raise ValueError('The JSON client key for the target Google '
                         'BigQuery project does not seem to be well '
                         'formed; either the "private_key" or '
                         '"client_email" field is missing.')
    # If table_lifetime setting is missing OR is set to the empty string,
    # prefer the default value.
    table_lifetime_seconds = common_utils.parse_timedelta_string(
        pump_settings.get(TABLE_LIFETIME) or PII_SECRET_DEFAULT_LIFETIME
    ).total_seconds()
    Settings = collections.namedtuple('Settings', [
        'private_key', 'client_email', PROJECT_ID, 'dataset_id',
        'table_lifetime_seconds'])
    return Settings(json_key['private_key'], json_key['client_email'],
                    project_id, dataset_id, table_lifetime_seconds)
def _get_bigquery_service(self, bigquery_settings):
    """Get BigQuery API client plus HTTP client with auth credentials.

    Returns:
        Tuple of (BigQuery service object, authorized httplib2.Http).
    """
    credentials = oauth2client.client.SignedJwtAssertionCredentials(
        bigquery_settings.client_email, bigquery_settings.private_key,
        BIGQUERY_RW_SCOPE)
    http = httplib2.Http()
    http = credentials.authorize(http)
    # Discovery.build has a timeout that's a little too aggressive. Since
    # this happens before we even have our job_context built, any errors
    # returned from here will be fatal. Since that's the case, add some
    # extra forgiveness here by retrying several times, with a little bit
    # of wait thrown in to allow the discovery service to recover, in case
    # it really is just having a bad few moments.
    attempts = 0
    while True:
        try:
            return apiclient.discovery.build(
                BIGQUERY_API_NAME, BIGQUERY_API_VERSION, http=http), http
        # pylint: disable=broad-except
        except Exception, ex:
            attempts += 1
            if attempts >= DISCOVERY_SERVICE_MAX_ATTEMPTS:
                # Give up: let the final failure propagate.
                raise
            logging.warning(
                'Ignoring HTTP connection timeout %d of %d',
                attempts, DISCOVERY_SERVICE_MAX_ATTEMPTS)
            time.sleep(DISCOVERY_SERVICE_RETRY_SECONDS)
def _maybe_create_course_dataset(self, service, bigquery_settings):
    """Ensure the course's dataset exists in BigQuery; create it if absent."""
    datasets = service.datasets()
    try:
        datasets.get(projectId=bigquery_settings.project_id,
                     datasetId=bigquery_settings.dataset_id).execute()
    except apiclient.errors.HttpError as ex:
        # Anything other than not-found is a genuine problem; re-raise.
        if ex.resp.status != 404:
            raise
        dataset_reference = {
            'projectId': bigquery_settings.project_id,
            'datasetId': bigquery_settings.dataset_id
        }
        datasets.insert(projectId=bigquery_settings.project_id,
                        body={'datasetReference': dataset_reference}
                       ).execute()
def _maybe_delete_previous_table(self, tables, bigquery_settings,
                                 data_source_class):
    """Delete any previous version of this source's table; ignore not-found.

    We deliberately clobber the old table rather than versioning names:
    users write queries against a fixed table name and would be irritated
    at changing them constantly, and the BigQuery API offers no rename -
    only creation and deletion.
    """
    # TODO(mgainer): Make clobbering old table and replacing optional.
    try:
        tables.delete(projectId=bigquery_settings.project_id,
                      datasetId=bigquery_settings.dataset_id,
                      tableId=data_source_class.get_name()).execute()
    except apiclient.errors.HttpError as ex:
        # 404 simply means there was no previous table; that is fine.
        if ex.resp.status != 404:
            raise
def _json_schema_member_to_bigquery_schema(self, name, structure):
item = {'name': name}
if 'description' in structure:
item['description'] = structure['description']
if 'properties' in structure: # It's a sub-registry.
item['type'] = 'RECORD'
item['mode'] = 'NULLABLE'
item['fields'] = self._json_schema_to_bigquery_schema(
structure['properties'])
elif 'items' in structure: # It's an array
if 'items' in structure['items']:
raise ValueError(
'BigQuery schema descriptions do not support nesting '
'arrays directly in other arrays. Instead, nest '
'structures in arrays; those structures may contain '
'sub-arrays. Problem arises trying to pump data for %s' %
self._data_source_class_name)
item = self._json_schema_member_to_bigquery_schema(
name, structure['items'])
item['mode'] = 'REPEATED'
else:
item['mode'] = ('NULLABLE' if structure.get('optional')
else 'REQUIRED')
if structure['type'] in ('string', 'text', 'html', 'url', 'file'):
item['type'] = 'STRING'
elif structure['type'] in 'integer':
item['type'] = 'INTEGER'
elif structure['type'] in 'number':
item['type'] = 'FLOAT'
elif structure['type'] in 'boolean':
item['type'] = 'BOOLEAN'
elif structure['type'] in ('date', 'datetime', 'timestamp'):
# BigQuery will accept ISO-formatted datetimes as well as
# integer seconds-since-epoch as timestamps.
item['type'] = 'TIMESTAMP'
else:
raise ValueError(
'Unrecognized schema scalar type "%s" '
'when trying to make schema for data-pumping %s' % (
structure['type'], self._data_source_class_name))
return item
def _json_schema_to_bigquery_schema(self, json_schema_dict):
fields = []
for name, structure in json_schema_dict.iteritems():
fields.append(self._json_schema_member_to_bigquery_schema(
name, structure))
return fields
def _create_data_table(self, tables, bigquery_settings, schema,
data_source_class):
"""Instantiate and provide schema for new BigQuery table."""
table_name = data_source_class.get_name()
request = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': bigquery_settings.project_id,
'datasetId': bigquery_settings.dataset_id,
'tableId': table_name,
},
'schema': {'fields': schema}
}
# If user has requested it, set the time at which table should be
# reclaimed (as milliseconds since Unix epoch).
if (bigquery_settings.table_lifetime_seconds and
not self._no_expiration_date):
now = datetime.datetime.utcnow()
expiration_delta = datetime.timedelta(
seconds=bigquery_settings.table_lifetime_seconds)
unix_epoch = datetime.datetime(year=1970, month=1, day=1)
expiration_ms = int(
(now + expiration_delta - unix_epoch).total_seconds()) * 1000
request['expirationTime'] = expiration_ms
# Allow exceptions from here to propagate; we don't expect any problems,
# so if we have any, the upload should abort.
tables.insert(
projectId=bigquery_settings.project_id,
datasetId=bigquery_settings.dataset_id,
body=request).execute()
def _create_upload_job(self, http, bigquery_settings, data_source_class):
    """Before uploading, we must create a job to handle the upload.

    Args:
        http: An HTTP client object configured to send our auth token
        bigquery_settings: Configs for talking to bigquery.
        data_source_class: Names the destination table for the load job.
    Returns:
        URL specific to this upload job.  Subsequent PUT requests to send
        pages of data must be sent to this URL.
    Raises:
        Exception: on unexpected responses from BigQuery API.
    """
    job_request_url = '%s%s/jobs?uploadType=resumable' % (
        BIGQUERY_API_UPLOAD_URL_PREFIX, bigquery_settings.project_id)
    job_request_body = transforms.dumps({
        'kind': 'bigquery#job',
        'configuration': {
            'load': {
                'createDisposition': 'CREATE_NEVER',  # Already exists.
                'destinationTable': {
                    'projectId': bigquery_settings.project_id,
                    'datasetId': bigquery_settings.dataset_id,
                    'tableId': data_source_class.get_name(),
                },
                'ignoreUnknownValues': False,
                'sourceFormat': 'NEWLINE_DELIMITED_JSON',
            }
        }
    })
    response, content = http.request(
        job_request_url, method='POST', body=job_request_body,
        headers={
            'Content-Type': 'application/json',
            'X-Upload-Content-Type': 'application/octet-stream',
        })
    if int(response.get('status', 0)) != 200:
        raise Exception('Got non-200 response when trying to create a '
                        'new upload job. Reponse was: "%s"; content '
                        'was "%s"' % (str(response), str(content)))
    location = response.get('location')
    if not location:
        raise Exception('Expected response to contain a "location" item '
                        'giving a URL to send subsequent content to, but '
                        'instead got "%s"' % str(response))
    return location
def _initiate_upload_job(self, bigquery_service, bigquery_settings, http,
                         app_context, data_source_context):
    """Coordinate table cleanup, setup, and initiation of upload job."""
    source_class = _get_data_source_class_by_name(
        self._data_source_class_name)
    log = catch_and_log.CatchAndLog()
    bq_schema = self._json_schema_to_bigquery_schema(
        source_class.get_schema(app_context, log, data_source_context))

    # Clobber any previous table for this source, then re-create it with
    # the freshly-derived schema before opening the upload job.
    tables = bigquery_service.tables()
    self._maybe_create_course_dataset(bigquery_service, bigquery_settings)
    self._maybe_delete_previous_table(tables, bigquery_settings,
                                      source_class)
    self._create_data_table(tables, bigquery_settings, bq_schema,
                            source_class)
    return self._create_upload_job(http, bigquery_settings, source_class)
def _note_retryable_failure(self, message, job_context):
    """Log a timestamped message into the job context object."""
    now = datetime.datetime.now().strftime(
        utils.HUMAN_READABLE_DATETIME_FORMAT)
    job_context[CONSECUTIVE_FAILURES].append('%s %s' % (now, message))
def _randomized_backoff_timeout(self, job_context):
    """Pick an exponential-with-jitter retry delay from the failure count."""
    failures = len(job_context[CONSECUTIVE_FAILURES])
    if failures == 0:
        return 0
    # Random delay in [2^n, 2^(n+1)), capped at the global maximum.
    jittered = random.randrange(2 ** failures, 2 ** (failures + 1))
    return min(MAX_RETRY_BACKOFF_SECONDS, jittered)
def _check_upload_state(self, http, job_context):
    """Check with the BigQuery upload server to get state of our upload.

    Due to various communication failure cases, we may not be aware of
    the actual state of the upload as known to the server.  Issue a blank
    PUT request to evoke a response that will indicate:
    - How far along we are in the upload
    - Whether the upload has already completed
    - Whether the upload job has taken too long and expired

    Args:
        http: An HTTP client object configured to send our auth token
        job_context: Hash containing configuration for this upload job.
    Returns:
        A 2-tuple of next page to load (or None if no page should be
        loaded), and the next jobs.STATUS_CODE_<X> to transition to.
    """
    probe_headers = {'Content-Range': 'bytes */*'}
    status_response, _ = http.request(
        job_context[UPLOAD_URL], method='PUT', headers=probe_headers)
    return self._handle_put_response(
        status_response, job_context, is_upload=False)
def _send_data_page_to_bigquery(self, data, is_last_chunk, next_page,
                                http, job, sequence_num, job_context,
                                data_source_context):
    """Upload one page of items to the resumable-upload URL.

    Args:
        data: List of JSON-convertible items making up this page.
        is_last_chunk: True when this is the final page of the upload.
        next_page: Zero-based number of the page about to be sent.
        http: HTTP client object configured to send our auth token.
        job: The job entity tracking this upload.
        sequence_num: Sequence number of this job run.
        job_context: Dict tracking upload progress (byte offsets, page
            numbers, item counts).
        data_source_context: Context for the data source being pumped.
    Returns:
        The next jobs.STATUS_CODE_<X> the job should transition to.
    Raises:
        Exception: if next_page is neither the last page sent nor its
            immediate successor (internal bookkeeping error).
    """
    # Degenerate case: an entirely empty upload; declare success at once.
    if next_page == 0 and is_last_chunk and not data:
        return jobs.STATUS_CODE_COMPLETED

    # BigQuery expects one JSON object per newline-delimed record,
    # not a JSON array containing objects, so convert them individually.
    # Less efficient, but less hacky than converting and then string
    # manipulation.
    lines = []
    total_len = 0
    for item in data:
        line = transforms.dumps(item)
        line += '\n'
        total_len += len(line)
        lines.append(line)

    # Round data size up to next multiple of 256K, per
    # https://cloud.google.com/bigquery/loading-data-post-request#chunking
    padding_amount = 0
    if not is_last_chunk:
        round_to = 256 * 1024
        if total_len % round_to:
            padding_amount = round_to - (total_len % round_to)
        # NOTE(review): padding is appended even when zero-length; harmless.
        lines.append(' ' * padding_amount)
    payload = ''.join(lines)

    # We are either re-attempting to send a page, or sending a new page.
    # Adjust the job_context's last-sent state to reflect this.
    job_context[LAST_PAGE_NUM_ITEMS] = len(data)
    if next_page == job_context[LAST_PAGE_SENT]:
        # Re-send of the same page: keep the start offset; the end offset
        # is recomputed in case the payload size differs.
        job_context[LAST_END_OFFSET] = (
            job_context[LAST_START_OFFSET] + len(payload) - 1)
    elif next_page == job_context[LAST_PAGE_SENT] + 1:
        # Fresh page: slide the byte window to just past the previous page.
        job_context[LAST_PAGE_SENT] = next_page
        job_context[LAST_START_OFFSET] = (
            job_context[LAST_END_OFFSET] + 1)
        job_context[LAST_END_OFFSET] = (
            job_context[LAST_START_OFFSET] + len(payload) - 1)
    else:
        raise Exception(
            'Internal error - unexpected condition in sending page. '
            'next_page=%d last_page=%d, num_items=%d' % (
                next_page, job_context[LAST_PAGE_SENT], len(data)))

    logging.info(
        'Sending to BigQuery. %d items; %d padding bytes; is-last: %s',
        len(data), padding_amount, str(is_last_chunk))
    # Total size is only known (and therefore only reported) on the last
    # chunk; '*' tells the server the total is still indeterminate.
    headers = {
        'Content-Range': 'bytes %d-%d/%s' % (
            job_context[LAST_START_OFFSET],
            job_context[LAST_END_OFFSET],
            (job_context[LAST_END_OFFSET] + 1) if is_last_chunk else '*')
    }
    response, _ = http.request(job_context[UPLOAD_URL], method='PUT',
                               body=payload, headers=headers)
    _, next_state = self._handle_put_response(response, job_context,
                                              is_upload=True)
    return next_state
def _handle_put_response(self, response, job_context, is_upload=True):
    """Update job_context state depending on response from BigQuery.

    Args:
        response: Response dict from httplib2 for a PUT to the upload URL.
        job_context: Dict tracking upload progress (byte offsets, page
            numbers, item counts, consecutive-failure log).
        is_upload: True when the PUT carried page data; False when it was
            only the status probe issued by _check_upload_state().
    Returns:
        2-tuple of (page number to send next, or None if no page should
        be sent now; the next jobs.STATUS_CODE_<X> to transition to).
    Raises:
        ValueError: if BigQuery reports a received-byte count inconsistent
            with what we believe we have sent, or an unexpected HTTP
            status code.
    """
    status = int(response['status'])
    logging.info('Response from bigquery: %d; %s', status, str(response))
    next_page = None
    next_status = jobs.STATUS_CODE_STARTED
    if status == 308:
        # Google's push-partial-data usurps the usual meaning of 308 to
        # instead mean "partial request incomplete"; here, it's telling
        # us that the request has partially completed, and it will give
        # us a Range: header to indicate how far it thinks we've gone.
        # We only care about the upper end of the range.
        if 'range' not in response:
            # No Range header: server has received nothing yet.
            last_offset_received = -1
        else:
            last_offset_received = int(response['range'].split('-')[1])
        if last_offset_received == job_context[LAST_END_OFFSET]:
            # The nominal case; the reported index of the last byte
            # received exactly matches what we think we sent. Tell our
            # caller we are ready to try the next page, and count up
            # the total number of items sent only now that we have seen
            # the receiving side's acknowledgement.
            next_page = job_context[LAST_PAGE_SENT] + 1
            job_context[ITEMS_UPLOADED] += job_context[LAST_PAGE_NUM_ITEMS]
            job_context[LAST_PAGE_NUM_ITEMS] = 0
            # Don't clear the list of failures if this is handling the
            # pre-check done before uploading. Experiments show that
            # persistent problems with our requests result in 503's on
            # upload, but 308's (reporting no progress made) on check.
            # We want to eventually fail out if we're constantly getting
            # errors, so ignore the "success" on checking status.
            if is_upload:
                job_context[CONSECUTIVE_FAILURES] = []
        elif (last_offset_received >= job_context[LAST_START_OFFSET] - 1 and
              last_offset_received < job_context[LAST_END_OFFSET]):
            # If the last offset received is not the same as the last offset
            # sent, that's possibly OK; verify that the last offset received
            # is sane. Here, "sane" means that we accept seeing the
            # last offset of the previous page sent (last_start_offset-1)
            # up to, but not including the last_end_offset (for the page
            # we just sent). Anything lower means that our algorithm
            # mistakenly skipped past a failure. Anything higher means
            # that we have somehow become confused and decided to step
            # backward (or BigQuery is lying to us).
            prev_page_size = (job_context[LAST_END_OFFSET] -
                              job_context[LAST_START_OFFSET] + 1)
            bytes_received = (last_offset_received -
                              job_context[LAST_START_OFFSET] + 1)
            self._note_retryable_failure(
                'Incomplete upload detected - %d of %d bytes received '
                'for page %d' %
                (bytes_received, prev_page_size,
                 job_context[LAST_PAGE_SENT]), job_context)
            # Re-send the same page rather than advancing.
            next_page = job_context[LAST_PAGE_SENT]
        else:
            raise ValueError(
                'Uploaded byte count of %d does not fall in the range '
                '%d to %d, the start/end range for previously-sent page '
                'number %d. Abandoning upload.' % (
                    last_offset_received, job_context[LAST_START_OFFSET],
                    job_context[LAST_END_OFFSET],
                    job_context[LAST_PAGE_SENT]))
    elif status in (200, 201):
        # BigQuery confirms that it has seen the upload complete. (Note
        # that this is *not* a promise that the upload has parsed
        # correctly; there doesn't seem to be a clean way to ask about
        # that other than to probe the table for number of rows uploaded
        # until we see the desired number or time out. Ick.)
        job_context[ITEMS_UPLOADED] += job_context[LAST_PAGE_NUM_ITEMS]
        job_context[LAST_PAGE_NUM_ITEMS] = 0
        next_status = jobs.STATUS_CODE_COMPLETED
    elif status == 404:
        # Unlikely, but possible. For whatever reason, BigQuery has
        # decided that our upload URL is no longer valid. (Docs say that
        # we are allowed up to a day to get an upload done, but do not
        # promise that this is the only reason a job may become invalid.)
        # We need to start again from scratch. To start over, we will
        # just skip uploading a data page this round, and set ourselves up
        # to be called back again from the deferred-tasks queue. When the
        # callback happens, STATUS_CODE_QUEUED will indicate we need to
        # re-init everything from scratch.
        next_status = jobs.STATUS_CODE_QUEUED
    elif status in (500, 502, 503, 504):
        # Server Error, Bad Gateway, Service Unavailable or Gateway Timeout.
        # In all of these cases, we do a randomized exponential delay before
        # retrying.
        self._note_retryable_failure('Retryable server error %d' % status,
                                     job_context)
    else:
        raise ValueError(
            'Got unexpected status code %d from BigQuery in response %s' %
            (status, str(response)))
    return next_page, next_status
def _fetch_page_data(self, app_context, data_source_context, next_page):
    """Get the next page of data from the data source.

    Args:
        app_context: Application context for the course being pumped.
        data_source_context: Data-source-specific context; may carry a
            chunk_size and paging state.
        next_page: Zero-based number of the page to fetch.
    Returns:
        2-tuple of (list of items on the page, True if this is the last
        page of the upload).
    Raises:
        ValueError: if any fetched item does not match the declared
            schema.
    """
    data_source_class = _get_data_source_class_by_name(
        self._data_source_class_name)
    catch_and_log_ = catch_and_log.CatchAndLog()
    is_last_page = False
    with catch_and_log_.propagate_exceptions('Loading page of data'):
        schema = data_source_class.get_schema(app_context, catch_and_log_,
                                              data_source_context)
        required_jobs = data_sources.utils.get_required_jobs(
            data_source_class, app_context, catch_and_log_)
        data, _ = data_source_class.fetch_values(
            app_context, data_source_context, schema, catch_and_log_,
            next_page, *required_jobs)
        # BigQuery has a somewhat unfortunate design: It does not attempt
        # to parse/validate the data we send until all data has been
        # uploaded and the upload has been declared a "success". Rather
        # than having to poll for an indefinite amount of time until the
        # upload is parsed, we validate that the sent items exactly match
        # the declared schema. Somewhat expensive, but better than having
        # completely unreported hidden failures.
        for index, item in enumerate(data):
            complaints = transforms.validate_object_matches_json_schema(
                item, schema)
            if complaints:
                raise ValueError(
                    'Data in item to pump does not match schema! ' +
                    'Item is item number %d ' % index +
                    'on data page %d. ' % next_page +
                    'Problems for this item are:\n' +
                    '\n'.join(complaints))
        # A short (or absent) page, or a source with no chunking, means
        # we have certainly reached the end of the data.
        if (data_source_class.get_default_chunk_size() == 0 or
            not hasattr(data_source_context, 'chunk_size') or
            len(data) < data_source_context.chunk_size):
            is_last_page = True
        else:
            # Here, we may have read to the end of the table and just
            # happened to end up on an even chunk boundary. Attempt to
            # read one more row so that we can discern whether we really
            # are at the end.
            # Don't use the normal data_source_context; we don't want it
            # to cache a cursor for the next page that will only retrieve
            # one row.
            throwaway_context = copy.deepcopy(data_source_context)
            throwaway_context.chunk_size = 1
            next_data, actual_page = data_source_class.fetch_values(
                app_context, throwaway_context, schema, catch_and_log_,
                next_page + 1, *required_jobs)
            if not next_data or actual_page == next_page:
                is_last_page = True
    return data, is_last_page
def _send_next_page(self, sequence_num, job):
    """Coordinate table setup, job setup, sending pages of data.

    Args:
        sequence_num: Sequence number of this job run; used when saving
            and re-loading per-run state.
        job: The job entity tracking this upload.
    """
    # Gather necessary resources
    app_context = sites.get_course_index().get_app_context_for_namespace(
        self._namespace)
    pii_secret = self._get_pii_secret(app_context)
    bigquery_settings = self._get_bigquery_settings(app_context)
    bigquery_service, http = self._get_bigquery_service(bigquery_settings)

    # If this is our first call after job start (or we have determined
    # that we need to start over from scratch), do initial setup.
    # Otherwise, re-load context objects from saved version in job.output
    if job.status_code == jobs.STATUS_CODE_QUEUED:
        data_source_context = self._build_data_source_context()
        upload_url = self._initiate_upload_job(
            bigquery_service, bigquery_settings, http, app_context,
            data_source_context)
        job_context = self._build_job_context(upload_url, pii_secret)
    else:
        job_context, data_source_context = self._load_state(
            job, sequence_num)
        # Refresh the (possibly rotated) PII secret on the re-loaded
        # context before pumping any more data.
        if hasattr(data_source_context, 'pii_secret'):
            data_source_context.pii_secret = pii_secret
        if self._send_uncensored_pii_data:
            data_source_context.send_uncensored_pii_data = True
    logging.info('Data pump job %s loaded contexts: %s %s',
                 self._job_name, str(job_context), str(data_source_context))

    # Check BigQuery's state. Based on that, choose the next page of data
    # to push. Depending on BigQuery's response, we may or may not be
    # able to send a page now.
    next_page, next_state = self._check_upload_state(http, job_context)
    if next_page is not None:
        data, is_last_chunk = self._fetch_page_data(
            app_context, data_source_context, next_page)
        next_state = self._send_data_page_to_bigquery(
            data, is_last_chunk, next_page,
            http, job, sequence_num, job_context, data_source_context)
    # Persist progress so a later callback (or failure post-mortem) can
    # pick up exactly where we left off.
    self._save_state(next_state, job, sequence_num, job_context,
                     data_source_context)

    # If we are not done, enqueue another to-do item on the deferred queue.
    if len(job_context[CONSECUTIVE_FAILURES]) >= MAX_CONSECUTIVE_FAILURES:
        raise Exception('Too many consecutive failures; abandoning job.')
    elif not job.has_finished:
        backoff_seconds = self._randomized_backoff_timeout(job_context)
        logging.info('%s re-queueing for subsequent work', self._job_name)
        deferred.defer(self.main, sequence_num, _countdown=backoff_seconds)
    else:
        logging.info('%s complete', self._job_name)
def main(self, sequence_num):
    """Callback entry point. Manage namespaces, failures; send data.

    Args:
        sequence_num: Sequence number assigned to this job run when it
            was submitted; used to validate that saved state belongs to
            the current run.
    Raises:
        deferred.PermanentTaskFailure: if the job entity is missing, or
            if sending data fails fatally (tells the deferred-tasks
            queue to stop retrying us).
    """
    logging.info('%s de-queued and starting work.', self._job_name)
    job = self.load()
    if not job:
        raise deferred.PermanentTaskFailure(
            'Job object for %s not found!' % self._job_name)
    if job.has_finished:
        return  # We have been canceled; bail out immediately.
    with common_utils.Namespace(self._namespace):
        try:
            self._send_next_page(sequence_num, job)
        except Exception, ex:
            common_utils.log_exception_origin()
            logging.critical('%s: job abandoned due to fatal error %s',
                             self._job_name, str(ex))
            # Log failure in job object as well.
            if job.output:
                job_context, data_source_context = self._load_state(
                    job, sequence_num)
            else:
                # No saved state to recover; build minimal contexts so
                # the failure reason can still be recorded.
                job_context = self._build_job_context(None, None)
                data_source_context = (self._build_data_source_context())
            job_context[FAILURE_REASON] = str(ex)
            self._save_state(jobs.STATUS_CODE_FAILED, job, sequence_num,
                             job_context, data_source_context)
            # PermanentTaskFailure tells deferred queue to give up on us.
            raise deferred.PermanentTaskFailure('Job %s failed: %s' % (
                self._job_name, str(ex)))
def get_display_dict(self, app_context):
    """Set up dict for Jinja rendering on data_pump.html.

    Args:
        app_context: Application context for the course whose pump-job
            status is being displayed.
    Returns:
        Dict of template values describing the job's state, the
        destination BigQuery table, and the status of any prerequisite
        generator jobs.
    """
    data_source_context = self._build_data_source_context()
    data_source_class = _get_data_source_class_by_name(
        self._data_source_class_name)
    ret = {
        'name': self._data_source_class_name,
        'title': data_source_class.get_title(),
        'status': 'Has Never Run',
        'active': False,
    }
    job = self.load()
    if job:
        ret['status'] = jobs.STATUS_CODE_DESCRIPTION[job.status_code]
        ret['active'] = not job.has_finished
        ret['sequence_number'] = job.sequence_num
        ret['updated_on'] = job.updated_on.strftime(
            utils.HUMAN_READABLE_TIME_FORMAT)
        if job.has_finished:
            duration = job.execution_time_sec
        else:
            # Job still running: report elapsed time so far.
            duration = int((datetime.datetime.now() -
                            job.updated_on) .total_seconds())
        ret['duration'] = datetime.timedelta(days=0, seconds=duration)
        ret['last_updated'] = job.updated_on.strftime(
            utils.HUMAN_READABLE_DATETIME_FORMAT)
        bigquery_settings = self._get_bigquery_settings(app_context)
        ret['bigquery_url'] = '%s%s:%s.%s' % (
            BIGQUERY_UI_URL_PREFIX, bigquery_settings.project_id,
            bigquery_settings.dataset_id, data_source_class.get_name())
        try:
            job_context, data_source_context = self._load_state(
                job, job.sequence_num)
            ret['job_context'] = job_context
            current_secret = DataPumpJob._get_pii_secret(app_context)
            # Warn the UI if the PII secret has rotated since this job
            # ran; obscured IDs will no longer join with newer uploads.
            if job_context[PII_SECRET] != current_secret:
                ret['pii_secret_is_out_of_date'] = True
            del job_context[PII_SECRET]
        except (ValueError, AttributeError):
            # When jobs framework catches a failure, it overwrites the
            # job.output with the failure message as a string. We will
            # get here if we fail to parse job.output as a JSON-packed
            # object.
            ret['message'] = job.output
    ret['source_url'] = '%s/rest/data/%s/items?chunk_size=10' % (
        app_context.get_slug(), data_source_class.get_name())
    catch_and_log_ = catch_and_log.CatchAndLog()
    ret['schema'] = data_source_class.get_schema(
        app_context, catch_and_log_, data_source_context)
    ret['generator_statuses'] = []
    ret['available'] = True
    ret['any_generator_running'] = False
    required_generators = data_source_class.required_generators()
    if not required_generators:
        ret['generator_statuses'].append(
            {'message': '(No dependencies)', 'link': None})
        ret['has_any_generators'] = False
    else:
        ret['has_any_generators'] = True
    # NOTE: 'job' is deliberately re-used below for each generator's job.
    for generator_class in required_generators:
        generator = generator_class(app_context)
        job = generator.load()
        message = analytics.display.get_generator_status_message(
            generator_class, job)
        link = analytics.display.get_pipeline_link(
            crypto.XsrfTokenManager, app_context, generator_class, job)
        ret['generator_statuses'].append({'message': message, 'link': link})
        # The pump is only runnable once every prerequisite generator
        # has completed successfully.
        if not job or job.status_code != jobs.STATUS_CODE_COMPLETED:
            ret['available'] = False
        if job and not job.has_finished:
            ret['any_generator_running'] = True
    return ret
class DataPumpJobsDataSource(data_sources.SynchronousQuery):
    """Present DataPump job status as an analytic generated at page-render time.

    This is a very mild hack.  Since the data pump job controls show up as a
    sub-tab under Dashboard -> Analytics, the easiest way to generate tab
    content is to act as though we are an analytic.  And we are, in a sense -
    this analytic just happens to generate a table of data-pump job statuses,
    rather than analytics about student performance.  This also conveniently
    re-uses all the mechanics for authorization, dispatch, page-painting, etc.
    """

    @staticmethod
    def required_generators():
        """No map/reduce prerequisites; status is gathered at render time."""
        return []

    @staticmethod
    def fill_values(app_context, template_values):
        """Populate template values for rendering the Data Pump tab.

        Args:
            app_context: Application context of the course being shown.
            template_values: Dict to fill in for the Jinja template.
        """
        template_values['xsrf_token'] = (
            crypto.XsrfTokenManager.create_xsrf_token(XSRF_ACTION_NAME))
        template_values['exit_url'] = urllib.urlencode({
            'exit_url': 'dashboard?%s' % urllib.urlencode({
                'action': 'analytics',
                'tab': 'data_pump'})})
        source_classes = [
            ds for ds in data_sources.Registry.get_rest_data_source_classes()
            if ds.exportable()]
        source_classes.sort(key=lambda c: c.get_title())
        # pylint: disable=protected-access
        template_values['pumps'] = []
        for source_class in source_classes:
            job = DataPumpJob(app_context, source_class.__name__)
            template_values['pumps'].append(job.get_display_dict(app_context))
        pump_settings = app_context.get_environ().get(
            DATA_PUMP_SETTINGS_SCHEMA_SECTION, {})
        # Idiom fix: use the "in" operator rather than the deprecated
        # dict.has_key() (removed in Python 3; discouraged since 2.2).
        template_values['need_settings'] = (
            PROJECT_ID not in pump_settings or
            JSON_KEY not in pump_settings)
        # If table_lifetime setting is missing OR is set to the empty string,
        # prefer the default value.
        template_values['default_lifetime'] = (
            pump_settings.get(TABLE_LIFETIME) or PII_SECRET_DEFAULT_LIFETIME)
        template_values[DATASET_NAME] = (
            pump_settings.get(DATASET_NAME) or
            re.sub('[^0-9a-z_:-]', '', app_context.get_slug().lower()) or
            'course')
# Handle for this module's registration object; populated by
# register_module() below and read by DashboardExtension.unregister().
custom_module = None
class DashboardExtension(object):
    """Respond to UI run/cancel commands for individual data pump jobs."""

    @classmethod
    def register(cls):
        """Install this extension's permission, analytics tab, POST action."""
        # Register new permission for pushing student data to external location.
        dashboard.DashboardHandler.add_external_permission(
            ACCESS_PERMISSION, ACCESS_PERMISSION_DESCRIPTION)
        # Register a new Analytics sub-tab for showing data pump status and
        # start/stop buttons.
        data_pump_visualization = analytics.Visualization(
            'data_pumps', 'Data Pumps', 'data_pump.html',
            data_source_classes=[DataPumpJobsDataSource])
        tabs.Registry.register('analytics', 'data_pump', 'Data Pump',
                               [data_pump_visualization])

        def post_action(handler):
            # Thin adapter: bind the incoming handler to an instance of
            # this class and dispatch to post_data_pump().
            cls(handler).post_data_pump()

        dashboard.DashboardHandler.post_actions.append(DASHBOARD_ACTION)
        setattr(dashboard.DashboardHandler, 'post_%s' % DASHBOARD_ACTION,
                post_action)
        dashboard.DashboardHandler.map_action_to_permission(
            'post_%s' % DASHBOARD_ACTION, ACCESS_PERMISSION)

    @classmethod
    def unregister(cls):
        """Undo everything register() installed, in reverse."""
        dashboard.DashboardHandler.post_actions.remove(DASHBOARD_ACTION)
        setattr(dashboard.DashboardHandler, 'post_%s' % DASHBOARD_ACTION, None)
        dashboard.DashboardHandler.unmap_action_to_permission(
            'post_%s' % DASHBOARD_ACTION, ACCESS_PERMISSION)
        dashboard.DashboardHandler.remove_external_permission(ACCESS_PERMISSION)
        roles.Roles.unregister_permissions(custom_module)

    def post_data_pump(self):
        """Handle a dashboard POST: start/cancel a pump or its generators."""
        source_name = self.handler.request.get('data_source')
        data_source_class = _get_data_source_class_by_name(source_name)
        if data_source_class:
            action = self.handler.request.get('pump_action')
            data_pump_job = DataPumpJob(
                self.handler.app_context, source_name,
                self.handler.request.get('no_expiration_date') == 'True',
                self.handler.request.get('send_uncensored_pii_data') == 'True')
            if action == 'start_pump':
                data_pump_job.submit()
            elif action == 'cancel_pump':
                data_pump_job.cancel()
            elif action == 'run_generators':
                for generator_class in data_source_class.required_generators():
                    generator_class(self.handler.app_context).submit()
            elif action == 'cancel_generators':
                for generator_class in data_source_class.required_generators():
                    generator_class(self.handler.app_context).cancel()
        # Return to the Data Pump tab, scrolled to this source's section.
        self.handler.redirect(self.handler.get_action_url(
            'analytics', extra_args={'tab': 'data_pump'}, fragment=source_name))

    def __init__(self, handler):
        # The dashboard request handler on whose behalf we are acting.
        self.handler = handler
def register_module():
    """Adds this module to the registry.  Called once at startup."""

    def validate_project_id(value, errors):
        # Enforce Google Cloud project-ID naming rules before accepting.
        if not value:
            return
        if not re.match('^[a-z][-a-z0-9]{4,61}[a-z0-9]$', value):
            errors.append(
                'Project IDs must contain 6-63 lowercase letters, digits, '
                'or dashes. IDs must start with a letter and may not end '
                'with a dash.')

    project_id = schema_fields.SchemaField(
        DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + PROJECT_ID,
        'Project ID', 'string', validator=validate_project_id,
        description='The ID (not the name!) of the Project to which to '
        'send data. See the list of projects and their IDs at '
        'https://console.developers.google.com/project',
        i18n=False)
    dataset_name = schema_fields.SchemaField(
        DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + DATASET_NAME,
        'Dataset Name', 'string',
        description='Name of the BigQuery dataset to which to pump tables. '
        'If not set, this will default to the name of the course.',
        optional=True, i18n=False)

    def validate_json_key(json_key, errors):
        # Check that the pasted key parses as JSON and carries the two
        # fields the pump actually uses.
        if not json_key:
            return
        try:
            json_key = transforms.loads(json_key or '')
            if 'private_key' not in json_key or 'client_email' not in json_key:
                errors.append(
                    'The JSON client key for allowing access to push data '
                    'to BigQuery is missing either the "private_key" or '
                    '"client_email" field (or both). Please check that you '
                    'have copied the entire contents of the JSON key file '
                    'you downloaded using the Credentials screen in the '
                    'Google Developers Console.')
        except ValueError, ex:
            errors.append(
                'The JSON key field doesn\'t seem to contain valid JSON. '
                'Please check that you have copied all of the content of the '
                'JSON file you downloaded using the Credentials screen in the '
                'Google Developers Console. Also, be sure that you are '
                'pasting in the JSON version, not the .p12 (PKCS12) file.' +
                str(ex))

    json_key = schema_fields.SchemaField(
        DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + JSON_KEY,
        'JSON Key', 'text',
        i18n=False, validator=validate_json_key,
        description='Contents of a JSON key created in the Developers Console '
        'for the instance where BigQuery is to be run. See '
        # TODO(mgainer): Get CB location of instructions to get client key
        # for destination application.
        'the instructions at ')

    def validate_table_lifetime(value, errors):
        # Blank means "use the default"; anything else must parse to a
        # nonzero number of seconds.
        if not value:
            return
        seconds = common_utils.parse_timedelta_string(value).total_seconds()
        if not seconds:
            errors.append(
                'The string "%s" ' % value +
                'has some problems; please check the instructions below '
                'the field for instructions on accepted formats.')

    table_lifetime = schema_fields.SchemaField(
        DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + TABLE_LIFETIME,
        'Table Lifetime', 'string',
        optional=True, i18n=False,
        validator=validate_table_lifetime,
        description='Amount of time a table pushed to BigQuery will last. '
        'After this amount of time, the table will be automatically deleted. '
        '(This is useful if your data retention or privacy policy mandates '
        'a limited time for analysis after which personal data must be '
        'removed.) Leaving this field blank will use the default value '
        'of "' + PII_SECRET_DEFAULT_LIFETIME + '". Supported units are: '
        '"weeks", "days", "hours", "minutes", "seconds". Units may be '
        'specified as their first letter, singular, or plural. Spaces '
        'and commas may be used or omitted. E.g., both of the following '
        'are equivalent: "3w1d7h", "3 weeks, 1 day, 7 hours"')
    pii_encryption_token = schema_fields.SchemaField(
        DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + PII_ENCRYPTION_TOKEN,
        'PII Encryption Token', 'string',
        optional=True, i18n=False, editable=False,
        description='Automatically generated encryption secret used to '
        'obscure PII fields when these are pushed to BigQuery. This '
        'key lasts only as long as the Table Lifetime setting above, or '
        '30 days if the limit is not set. After this secret has expired, '
        'a new secret will be generated. PII items with the same un-obscured '
        'value which are obscured with different values for this secret will '
        'have different values. Most importantly, this means that joins on '
        'fields that should be the same (e.g., user ID) will not work.')
    # Each provider is a callable taking the course; the framework invokes
    # these to build the course-settings schema.
    course_settings_fields = (
        lambda c: project_id,
        lambda c: json_key,
        lambda c: dataset_name,
        lambda c: table_lifetime,
        lambda c: pii_encryption_token,
    )

    def on_module_enabled():
        # Hook our data source, settings schema, settings sub-tab, and
        # dashboard controls into the framework.
        data_sources.Registry.register(DataPumpJobsDataSource)
        courses.Course.OPTIONS_SCHEMA_PROVIDERS[
            DATA_PUMP_SETTINGS_SCHEMA_SECTION] += course_settings_fields
        tabs.Registry.register('settings', 'data_pump', 'Data Pump',
                               DATA_PUMP_SETTINGS_SCHEMA_SECTION)
        DashboardExtension.register()

    def on_module_disabled():
        # Remove our settings fields and dashboard hooks again.
        for field in course_settings_fields:
            courses.Course.OPTIONS_SCHEMA_PROVIDERS[
                DATA_PUMP_SETTINGS_SCHEMA_SECTION].remove(field)
        DashboardExtension.unregister()

    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        'Data Pump', 'Pushes DB and generated content to a BigQuery project',
        [], [],
        notify_module_enabled=on_module_enabled,
        notify_module_disabled=on_module_disabled)
    return custom_module
# Since this module contains a registry which may be populated from other
# modules, we here import 'main' so that we are ensured that by the time this
# module is loaded, the global code in 'main' has been run (either by this
# import, or prior). Note that we must do this import strictly after we
# declare register_module(): If this import actually runs the code in main,
# this module must have declared its own register_module() method so that the
# the registration code can see it.
# pylint: disable=unused-import
import main
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to provide skill mapping of Course Builder content."""
__author__ = 'John Orr (jorr@google.com)'
import json
import jinja2
import logging
import os
import time
from collections import defaultdict
import appengine_config
from common import caching
from common import crypto
from common import resource
from common import safe_dom
from common import schema_fields
from common import tags
from controllers import lessons
from controllers import sites
from controllers import utils
from mapreduce import context
from models import analytics
from models import courses
from models import custom_modules
from models import data_sources
from models import jobs
from models import models
from models import progress
from models import resources_display
from models import roles
from models import transforms
from modules.admin.admin import WelcomeHandler
from modules import courses as courses_module
from modules.dashboard import dashboard
from modules.dashboard import tabs
from modules.dashboard.unit_lesson_editor import LessonRESTHandler
from modules.i18n_dashboard import i18n_dashboard
from modules.skill_map import skill_map_metrics
from google.appengine.ext import db
from google.appengine.api import namespace_manager
skill_mapping_module = None
# Folder where Jinja template files are stored
TEMPLATES_DIR = os.path.join(
appengine_config.BUNDLE_ROOT, 'modules', 'skill_map', 'templates')
# URI for skill map css, js, and img assets.
RESOURCES_URI = '/modules/skill_map/resources'
# Key for storing list of skill id's in the properties table of a Lesson
LESSON_SKILL_LIST_KEY = 'modules.skill_map.skill_list'
def _assert(condition, message, errors):
"""Assert a condition and either log exceptions or raise AssertionError."""
if not condition:
if errors is not None:
errors.append(message)
else:
raise AssertionError(message)
class _SkillEntity(models.BaseEntity):
    """Entity to represent a single skill."""

    # A JSON blob which holds the data for a single skill. It fits the
    # following schema:
    # {
    #     "name": "string name of skill",
    #     "description": "string description of skill",
    #     "prerequisites": [
    #         {
    #             "id": the id for the prerequisite SkillEntity
    #         },
    #         { ... }, ...
    #     ],
    #     "last_modified": epoch_time_sec
    # }
    # Serialization to/from the Skill DTO is handled by _SkillDao.
    data = db.TextProperty(indexed=False)
class Skill(object):
    """DTO to represent a single skill.

    Wraps the raw JSON dict stored in _SkillEntity.data plus the entity id
    (None until the skill is first saved).
    """

    def __init__(self, skill_id, data_dict):
        self._id = skill_id
        self.dict = data_dict

    @classmethod
    def build(cls, name, description, prerequisite_ids=None):
        """Construct an unsaved Skill; the id is assigned on first save."""
        data = {
            'name': name,
            'description': description,
            'prerequisites': prerequisite_ids or []
        }
        return Skill(None, data)

    @property
    def id(self):
        return self._id

    @property
    def name(self):
        return self.dict.get('name')

    @property
    def description(self):
        return self.dict.get('description')

    @property
    def last_modified(self):
        return self.dict.get('last_modified')

    @last_modified.setter
    def last_modified(self, value):
        self.dict['last_modified'] = value

    @property
    def prerequisite_ids(self):
        """Returns the id's of the prerequisites as a fresh set."""
        return set(
            entry.get("id") for entry in self.dict.get('prerequisites', []))

    def _set_prerequisite_ids(self, prerequisite_ids):
        """Sets the id's of the prerequisites."""
        self.dict['prerequisites'] = [{'id': pid} for pid in prerequisite_ids]
def _on_skills_changed(skills):
    """DAO post-save hook: queue i18n progress updates for saved skills."""
    if not i18n_dashboard.I18nProgressDeferredUpdater.is_translatable_course():
        return
    keys = [resource.Key(ResourceSkill.TYPE, skill.id) for skill in skills]
    i18n_dashboard.I18nProgressDeferredUpdater.update_resource_list(keys)
def _translate_skill(skills_generator):
    """DAO post-load hook: translate loaded skills for the current locale.

    Consumes the generator, collecting the DTOs and their resource keys,
    then translates them in one batch call.
    """
    if not i18n_dashboard.is_translation_required():
        return
    app_context = sites.get_course_for_current_request()
    course = courses.Course.get(app_context)
    skills = []
    key_list = []
    # Fix: dropped the unused local `first` that was assigned but never read.
    for skill in skills_generator:
        skills.append(skill)
        key_list.append(resource.Key(ResourceSkill.TYPE, skill.id))
    i18n_dashboard.translate_dto_list(course, skills, key_list)
class _SkillDao(models.LastModfiedJsonDao):
    """DAO mapping _SkillEntity rows to Skill DTOs, keyed by entity id."""

    DTO = Skill
    ENTITY = _SkillEntity
    ENTITY_KEY_TYPE = models.BaseJsonDao.EntityKeyTypeId
    # Using hooks that are in the same file looks awkward, but it's cleaner
    # than overriding all the load/store methods, and is also proof against
    # future changes that extend the DAO API.
    POST_LOAD_HOOKS = [_translate_skill]
    POST_SAVE_HOOKS = [_on_skills_changed]
class ResourceSkill(resource.AbstractResourceHandler):
    """Resource handler exposing Skill DTOs to the resource framework."""

    TYPE = 'skill'

    @classmethod
    def get_resource(cls, course, key):
        # The course argument is unused; skills are course-scoped via the DAO.
        return _SkillDao.load(key)

    @classmethod
    def get_resource_title(cls, rsrc):
        return rsrc.name

    @classmethod
    def get_schema(cls, course, key):
        """Build the editor/REST schema describing one skill."""
        prereq_registry = schema_fields.FieldRegistry('Prerequisite')
        prereq_registry.add_property(schema_fields.SchemaField(
            'id', '', 'integer', optional=True, i18n=False))

        location_registry = schema_fields.FieldRegistry('Location')
        location_registry.add_property(schema_fields.SchemaField(
            'key', '', 'string', optional=True, i18n=False))

        skill_registry = schema_fields.FieldRegistry(
            'Skill', description='skill')
        skill_registry.add_property(schema_fields.SchemaField(
            'version', '', 'string', optional=True, hidden=True))
        skill_registry.add_property(schema_fields.SchemaField(
            'name', 'Name', 'string', optional=True))
        skill_registry.add_property(schema_fields.SchemaField(
            'description', 'Description', 'text', optional=True))
        skill_registry.add_property(schema_fields.FieldArray(
            'prerequisites', 'Prerequisites', item_type=prereq_registry,
            optional=True))
        skill_registry.add_property(schema_fields.FieldArray(
            'locations', 'Locations', item_type=location_registry,
            optional=True))
        return skill_registry

    @classmethod
    def get_data_dict(cls, course, key):
        # Raw JSON dict backing the DTO.
        return cls.get_resource(course, key).dict

    @classmethod
    def get_view_url(cls, rsrc):
        # Skills have no standalone view page.
        return None

    @classmethod
    def get_edit_url(cls, key):
        # Editing happens inline in the skills table, not via a URL.
        return None
class TranslatableResourceSkill(
        i18n_dashboard.AbstractTranslatableResourceType):
    """Registers skills as a translatable resource type for i18n."""

    @classmethod
    def get_ordering(cls):
        # List skills after the core course content in the i18n console.
        return i18n_dashboard.TranslatableResourceRegistry.ORDERING_LATE

    @classmethod
    def get_title(cls):
        return 'Skills'

    @classmethod
    def get_resources_and_keys(cls, course):
        """Return (skill, resource key) pairs ordered by skill name."""
        pairs = [
            (skill, resource.Key(ResourceSkill.TYPE, skill.id, course))
            for skill in _SkillDao.get_all()]
        return sorted(pairs, key=lambda pair: pair[0].name)
class SkillGraph(caching.RequestScopedSingleton):
    """Facade to handle the CRUD lifecycle of the skill dependency graph."""

    def __init__(self):
        # dict mapping skill id to skill
        self._skills = _SkillDao.get_all_mapped()
        # dict mapping skill id to list of successor SkillDTO's
        self._successors = None
        self._rebuild()

    def _rebuild(self):
        # Every mutation refreshes the successor index and invalidates the
        # request-scoped SkillMap built on top of this graph.
        self.build_successors()
        SkillMap.clear_all()

    def build_successors(self):
        """Recompute the skill-id -> [dependent Skill, ...] index."""
        self._successors = {}
        for other in self._skills.values():
            for pid in other.prerequisite_ids:
                self._successors.setdefault(pid, []).append(other)

    @classmethod
    def load(cls):
        # RequestScopedSingleton: at most one instance per request.
        return cls.instance()

    @property
    def skills(self):
        """Get a list of all the skills in this course.

        Returns:
            list of Skill objects.
        """
        return self._skills.values()

    def get(self, skill_id):
        """Get skill by ID.  Raises KeyError for unknown ids."""
        return self._skills[skill_id]

    def add(self, skill, errors=None):
        """Add a skill to the skill map.

        Args:
            skill: Skill. Must not have been saved yet (skill.id is None).
            errors: optional list collecting validation messages; if None,
                validation failures raise AssertionError instead.

        Returns:
            The persisted Skill, re-wrapped with its newly assigned id.
        """
        _assert(skill.id is None, 'Skill has already been added', errors)
        for prerequisite_id in skill.prerequisite_ids:
            _assert(
                prerequisite_id in self._skills,
                'Skill has non-existent prerequisite', errors)
        self._validate_unique_skill_name(skill.id, skill.name, errors)
        # NOTE(review): unlike update(), this saves even when validation
        # appended messages to `errors`; SkillRestHandler.put dereferences
        # the returned skill unconditionally, so changing this needs care.
        skill_id = _SkillDao.save(skill)
        new_skill = Skill(skill_id, skill.dict)
        self._skills[skill_id] = new_skill
        self._rebuild()
        return new_skill

    def update(self, sid, attributes, errors):
        """Replace skill `sid`'s data with `attributes`; returns the id."""
        _assert(self.get(sid), 'Skill does not exist', errors)
        # pylint: disable=protected-access
        prerequisite_ids = [
            x['id'] for x in attributes.get('prerequisites', [])]
        for pid in prerequisite_ids:
            self._validate_prerequisite(sid, pid, errors)
        # No duplicate prerequisites
        _assert(
            len(set(prerequisite_ids)) == len(prerequisite_ids),
            'Prerequisites must be unique', errors)
        self._validate_unique_skill_name(sid, attributes.get('name'), errors)
        if errors:
            # Nothing was persisted; hand back the original id unchanged.
            return sid
        skill_id = _SkillDao.save(Skill(sid, attributes))
        self._skills[skill_id] = Skill(skill_id, attributes)
        self._rebuild()
        return skill_id

    def delete(self, skill_id, errors=None):
        """Remove a skill from the skill map."""
        _assert(
            skill_id in self._skills,
            'Skill is not present in the skill map', errors)
        # Detach the skill from every skill that listed it as a prerequisite.
        successors = self.successors(skill_id)
        for successor in successors:
            prerequisite_ids = successor.prerequisite_ids
            prerequisite_ids.remove(skill_id)
            # pylint: disable=protected-access
            successor._set_prerequisite_ids(prerequisite_ids)
        _SkillDao.delete(self._skills[skill_id])
        _SkillDao.save_all(successors)
        del self._skills[skill_id]
        self._rebuild()

    def prerequisites(self, skill_id):
        """Get the immediate prerequisites of the given skill.

        Args:
            skill_id. The id of the skill to find prerequisites of.

        Returns:
            list of Skill.
        """
        skill = self._skills[skill_id]
        return [
            self._skills[prerequisite_id]
            for prerequisite_id in skill.prerequisite_ids]

    def _validate_prerequisite(self, sid, pid, errors=None):
        # Both endpoints must exist and must differ.
        _assert(
            sid in self._skills, 'Skill does not exist', errors)
        _assert(
            pid in self._skills,
            'Prerequisite does not exist', errors)
        # No length-1 cycles (ie skill which is its own prerequisite) allowed
        _assert(
            sid != pid,
            'A skill cannot be its own prerequisite', errors)

    def _validate_unique_skill_name(self, skill_id, name, errors):
        # skill_id may be None when validating a not-yet-saved skill (add).
        for other_skill in self.skills:
            if other_skill.id == skill_id:
                continue
            _assert(
                name != other_skill.name, 'Name must be unique', errors)

    def add_prerequisite(self, skill_id, prerequisite_skill_id, errors=None):
        """Record that `prerequisite_skill_id` must precede `skill_id`."""
        self._validate_prerequisite(skill_id, prerequisite_skill_id, errors)
        skill = self._skills.get(skill_id)
        # prerequisite_ids returns a fresh set, so mutate it and set it back.
        prerequisite_skills = skill.prerequisite_ids
        _assert(
            prerequisite_skill_id not in prerequisite_skills,
            'This prerequisite has already been set', errors)
        prerequisite_skills.add(prerequisite_skill_id)
        # pylint: disable=protected-access
        skill._set_prerequisite_ids(prerequisite_skills)
        _SkillDao.save(skill)
        self._rebuild()

    def delete_prerequisite(self, skill_id, prerequisite_skill_id, errors=None):
        """Remove an existing prerequisite edge from the graph."""
        _assert(
            skill_id in self._skills, 'Skill does not exist', errors)
        _assert(
            prerequisite_skill_id in self._skills,
            'Prerequisite does not exist', errors)
        skill = self._skills[skill_id]
        prerequisite_skills = skill.prerequisite_ids
        _assert(
            prerequisite_skill_id in prerequisite_skills,
            'Cannot delete an unset prerequisite.', errors)
        prerequisite_skills.remove(prerequisite_skill_id)
        # pylint: disable=protected-access
        skill._set_prerequisite_ids(prerequisite_skills)
        _SkillDao.save(skill)
        self._rebuild()

    def successors(self, skill_id):
        """Get the immediate successors of the given skill.

        Args:
            skill_id. The id of the skill to find successors of.

        Returns:
            list of Skill.
        """
        return self._successors.get(skill_id, [])
class LocationInfo(object):
    """Info object for mapping skills to content locations."""

    def __init__(self, unit, lesson):
        # A location only makes sense for a lesson inside its own unit.
        assert lesson.unit_id == unit.unit_id
        self._unit = unit
        self._lesson = lesson

    @property
    def key(self):
        return resources_display.ResourceLesson.get_key(self._lesson)

    @property
    def label(self):
        """Display label like '2.3'; '2.' when the lesson has no index."""
        unit_index = self._unit.index
        lesson_index = self._lesson.index
        if lesson_index is None:
            return '%s.' % unit_index
        return '%s.%s' % (unit_index, lesson_index)

    @property
    def href(self):
        # Student-facing lesson URL.
        return 'unit?unit=%s&lesson=%s' % (
            self._unit.unit_id, self._lesson.lesson_id)

    @property
    def edit_href(self):
        # Dashboard lesson-editor URL.
        return 'dashboard?action=edit_lesson&key=%s' % self._lesson.lesson_id

    @property
    def lesson(self):
        return self._lesson

    @property
    def unit(self):
        return self._unit

    @property
    def sort_key(self):
        # Orders locations by course structure: unit first, then lesson.
        return self._unit.unit_id, self._lesson.lesson_id

    @classmethod
    def json_encoder(cls, obj):
        """json.dumps default= helper; returns None for foreign types."""
        if not isinstance(obj, cls):
            return None
        return {
            'key': str(obj.key),
            'label': obj.label,
            'href': obj.href,
            'edit_href': obj.edit_href,
            'lesson': obj.lesson.title,
            'unit': obj.unit.title,
            'sort_key': obj.sort_key
        }
class SkillInfo(object):
    """Skill info object for skills with lesson and unit ids.

    Wraps a Skill DTO and adds the lessons (LocationInfo) where it is
    taught, resolved prerequisite SkillInfo objects, and an optional index
    assigned by topological sorting.
    """

    def __init__(self, skill, locations=None, topo_sort_index=None):
        assert skill
        self._skill = skill
        self._locations = locations or []
        self._prerequisites = []
        self._topo_sort_index = topo_sort_index

    @property
    def id(self):
        return self._skill.id

    @property
    def name(self):
        return self._skill.name

    @property
    def description(self):
        return self._skill.description

    @property
    def prerequisites(self):
        return self._prerequisites

    @prerequisites.setter
    def prerequisites(self, skills):
        """Sets prerequisite skills."""
        self._prerequisites = skills

    @property
    def locations(self):
        return self._locations

    def set_topo_sort_index(self, topo_sort_index):
        self._topo_sort_index = topo_sort_index

    def sort_key(self):
        """Return (unit_id, lesson_id) of the earliest location, else Nones.

        Bug fix: the previous implementation used min(sorted(..., key=...))
        where min() had no key, so it compared LocationInfo objects by
        default object ordering and picked an arbitrary location. Select the
        minimum by each location's sort_key directly.
        """
        if self._locations:
            loc = min(self._locations, key=lambda x: x.sort_key)
            return loc.unit.unit_id, loc.lesson.lesson_id
        return None, None

    def topo_sort_key(self):
        # sort_key() is a 2-tuple; appending the topo index yields a stable
        # 3-tuple used for 'prerequisites' ordering.
        return self.sort_key() + (self._topo_sort_index, )

    @classmethod
    def json_encoder(cls, obj):
        """json.dumps default= helper; returns None for foreign types."""
        if isinstance(obj, cls):
            return {
                'id': obj.id,
                'name': obj.name,
                'description': obj.description,
                'prerequisite_ids': [s.id for s in obj.prerequisites],
                'locations': obj.locations,
                'sort_key': obj.sort_key(),
                'topo_sort_key': obj.topo_sort_key()
            }
        return None
class SkillMapError(Exception):
    """Base error type raised for skill-map operations."""
    pass
class SkillMap(caching.RequestScopedSingleton):
    """Provides API to access the course skill map."""

    def __init__(self, skill_graph, course):
        self._rebuild(skill_graph, course)

    def _rebuild(self, skill_graph, course):
        # Index the course: units by id, lessons by skill id, and SkillInfo
        # wrappers by skill id.
        self._skill_graph = skill_graph
        self._course = course
        self._units = dict([(u.unit_id, u) for u in self._course.get_units()])
        self._lessons_by_skill = {}
        for lesson in self._course.get_lessons_for_all_units():
            skill_list = lesson.properties.get(LESSON_SKILL_LIST_KEY, [])
            for skill_id in skill_list:
                self._lessons_by_skill.setdefault(skill_id, []).append(lesson)
        self._skill_infos = {}
        # First pass: wrap every skill with its lesson locations.
        for skill in self._skill_graph.skills:
            locations = []
            for lesson in self._lessons_by_skill.get(skill.id, []):
                unit = self._units[lesson.unit_id]
                locations.append(LocationInfo(unit, lesson))
            self._skill_infos[skill.id] = SkillInfo(skill, locations)
        # Second pass: resolve prerequisites once every SkillInfo exists.
        for skill in self._skill_graph.skills:
            prerequisites = []
            for pid in skill.prerequisite_ids:
                prerequisites.append(self._skill_infos[pid])
            self._skill_infos[skill.id].prerequisites = prerequisites

    def build_successors(self):
        """Returns a dictionary keyed by skills' ids.

        The values are sets of successors' ids. Every skill id appears as a
        key, even when it has no successors.
        """
        successors = {}
        for si in self._skill_infos.values():
            for p in si.prerequisites:
                successors.setdefault(p.id, set()).add(si.id)
            if si.id not in successors:
                successors[si.id] = set()
        return successors

    def _topo_sort(self):
        """Returns topologically sorted co-sets, or None if a cycle exists."""
        successors = self.build_successors()
        ret = []
        # NOTE: `reduce` is the Python 2 builtin (this file predates py3).
        # NOTE(review): this first reduce has no initial value, so it would
        # raise TypeError for a course with no skills at all -- TODO confirm
        # callers never reach this with an empty graph.
        co_set = set(
            successors.keys()) - reduce(
                set.union, successors.values())  # Skills with no prerequisites.
        while True:
            if not co_set:
                break
            ret.append(co_set)
            # Remove the emitted co-set from the remaining graph.
            for x in co_set:
                del successors[x]
            for src, dst in successors.items():
                successors[src] = dst - co_set
            co_set = set(successors.keys()) - reduce(
                set.union, successors.values(), set())
        if successors:  # There is unvisited nodes -> there is a cycle.
            return None
        else:
            return ret

    def _set_topological_sort_index(self):
        # NOTE(review): _topo_sort() returns None on a cyclic graph, which
        # would make this loop raise TypeError; presumably callers surface
        # cycles via SkillMapMetrics.diagnose() first -- confirm.
        chain = []
        for x in self._topo_sort():
            chain.extend(list(x))
        for skill in self._skill_graph.skills:
            self._skill_infos[skill.id].set_topo_sort_index(
                chain.index(skill.id))

    @classmethod
    def load(cls, course):
        """Request-scoped factory: one SkillMap per request and course."""
        skill_graph = SkillGraph.load()
        return cls.instance(skill_graph, course)

    def get_lessons_for_skill(self, skill):
        # Returns the Lesson objects teaching the skill ([] if none).
        return self._lessons_by_skill.get(skill.id, [])

    def get_skills_for_lesson(self, lesson_id):
        """Get the skills assigned to the given lesson.

        Args:
            lesson_id. The id of the lesson.

        Returns:
            A list of SkillInfo objects.
        """
        # TODO(jorr): Can we stop relying on the unit and just use lesson id?
        lesson = self._course.find_lesson_by_id(None, lesson_id)
        skill_list = lesson.properties.get(LESSON_SKILL_LIST_KEY, [])
        return [self._skill_infos[skill_id] for skill_id in skill_list]

    def successors(self, skill_info):
        """Get the successors to the given skill.

        Args:
            skill_info. A SkillInfo object.

        Returns:
            A set of SkillInfo objects.
        """
        return {
            self._skill_infos[s.id]
            for s in self._skill_graph.successors(skill_info.id)}

    def skills(self, sort_by='name'):
        """Return all SkillInfos sorted by name, lesson, or prerequisites."""
        if sort_by == 'name':
            return sorted(self._skill_infos.values(), key=lambda x: x.name)
        elif sort_by == 'lesson':
            return sorted(
                self._skill_infos.values(), key=lambda x: x.sort_key())
        elif sort_by == 'prerequisites':
            self._set_topological_sort_index()
            return sorted(
                self._skill_infos.values(), key=lambda x: x.topo_sort_key())
        else:
            raise ValueError('Invalid sort option.')

    def get_skill(self, skill_id):
        # Raises KeyError for unknown ids.
        return self._skill_infos[skill_id]

    def add_skill_to_lessons(self, skill, locations):
        """Add the skill to the given lessons.

        Args:
            skill: SkillInfo. The skill to be added
            locations: Iterable of location dicts from the REST payload,
                each with a resource 'key' identifying a lesson.
        """
        # Add back references to the skill from the request payload
        for loc in locations:
            unit, lesson = resource.Key.fromstring(loc['key']).get_resource(
                self._course)
            lesson.properties.setdefault(LESSON_SKILL_LIST_KEY, []).append(
                skill.id)
            assert self._course.update_lesson(lesson)
            # pylint: disable=protected-access
            skill._locations.append(LocationInfo(unit, lesson))
        self._course.save()
        # NOTE(review): this stores the raw payload dicts, while _rebuild
        # stores Lesson objects under the same keys; looks inconsistent with
        # delete_skill_from_lessons, which expects lessons -- confirm.
        self._lessons_by_skill.setdefault(skill.id, []).extend(locations)

    def delete_skill_from_lessons(self, skill):
        """Remove the skill's back references from all lessons using it."""
        #TODO(broussev): check, and if need be, refactor pre-save lesson hooks
        if not self._lessons_by_skill.get(skill.id):
            return
        for lesson in self._lessons_by_skill[skill.id]:
            lesson.properties[LESSON_SKILL_LIST_KEY].remove(skill.id)
            assert self._course.update_lesson(lesson)
        self._course.save()
        del self._lessons_by_skill[skill.id]
        # pylint: disable=protected-access
        skill._locations = []
class LocationListRestHandler(utils.BaseRESTHandler):
    """REST handler to list all locations."""

    URL = '/rest/modules/skill_map/location_list'

    def get(self):
        """Send a JSON list of every (unit, lesson) location in the course."""
        if not roles.Roles.is_course_admin(self.app_context):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return
        location_list = [
            LocationInfo(
                self.get_course().find_unit_by_id(lesson.unit_id), lesson)
            for lesson in self.get_course().get_lessons_for_all_units()]
        transforms.send_json_response(
            self, 200, '', {'location_list': location_list})
class SkillRestHandler(utils.BaseRESTHandler):
    """REST handler to manage skills."""

    XSRF_TOKEN = 'skill-handler'
    # Payload schema versions this handler accepts.
    SCHEMA_VERSIONS = ['1']
    URL = '/rest/modules/skill_map/skill'

    @classmethod
    def get_schema(cls):
        """Return the schema for the skill editor."""
        return ResourceSkill.get_schema(course=None, key=None)

    def get(self):
        """Get a skill by 'key', plus the full skill list and diagnosis."""
        if not roles.Roles.is_course_admin(self.app_context):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return
        key = self.request.get('key')
        skill_map = SkillMap.load(self.get_course())
        payload_dict = {
            'skill_list': skill_map.skills(),
            'diagnosis': skill_map_metrics.SkillMapMetrics(skill_map).diagnose()
        }
        if key:
            payload_dict['skill'] = skill_map.get_skill(int(key))
        transforms.send_json_response(
            self, 200, '', payload_dict=payload_dict,
            xsrf_token=crypto.XsrfTokenManager.create_xsrf_token(
                self.XSRF_TOKEN))

    def delete(self):
        """Deletes a skill."""
        key = int(self.request.get('key'))
        if not self.assert_xsrf_token_or_fail(
                self.request, self.XSRF_TOKEN, {'key': key}):
            return
        if not roles.Roles.is_course_admin(self.app_context):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        errors = []
        skill_graph = SkillGraph.load()
        skill_map = SkillMap.load(self.get_course())
        skill = skill_map.get_skill(key)
        # Note, first delete from lessons and then from the skill graph
        skill_map.delete_skill_from_lessons(skill)
        skill_graph.delete(key, errors)
        # Reload: the graph mutation invalidated the request-scoped map.
        skill_map = SkillMap.load(self.get_course())
        if errors:
            self.validation_error('\n'.join(errors), key=key)
            return
        payload_dict = {
            'skill_list': skill_map.skills(),
            'diagnosis': skill_map_metrics.SkillMapMetrics(skill_map).diagnose()
        }
        transforms.send_json_response(self, 200, 'Skill deleted.',
                                      payload_dict=payload_dict)

    def put(self):
        """Create a skill (no 'key') or update an existing one ('key')."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        if not self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN, {}):
            return
        if not roles.Roles.is_course_admin(self.app_context):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return
        payload = request.get('payload')
        json_dict = transforms.loads(payload)
        python_dict = transforms.json_to_dict(
            json_dict, self.get_schema().get_json_schema_dict(),
            permit_none_values=True)
        version = python_dict.get('version')
        if version not in self.SCHEMA_VERSIONS:
            self.validation_error('Version %s not supported.' % version)
            return
        errors = []
        course = self.get_course()
        skill_graph = SkillGraph.load()
        if key:
            key_after_save = skill_graph.update(key, python_dict, errors)
        else:
            skill = Skill.build(
                python_dict.get('name'), python_dict.get('description'),
                python_dict.get('prerequisites'))
            key_after_save = skill_graph.add(skill, errors=errors).id
        skill_map = SkillMap.load(course)
        skill = skill_map.get_skill(key_after_save)
        # Replace the skill's lesson mapping wholesale: clear the old
        # locations, then attach whatever the payload specified.
        locations = python_dict.get('locations', [])
        skill_map.delete_skill_from_lessons(skill)
        skill_map.add_skill_to_lessons(skill, locations)
        if errors:
            self.validation_error('\n'.join(errors), key=key)
            return
        payload_dict = {
            'key': key_after_save,
            'skill': skill,
            'skill_list': skill_map.skills(),
            'diagnosis': skill_map_metrics.SkillMapMetrics(skill_map).diagnose()
        }
        transforms.send_json_response(
            self, 200, 'Saved.', payload_dict=payload_dict)
class SkillMapHandler(dashboard.DashboardHandler):
    """Dashboard page rendering the skills table and the dependency graph."""

    ACTION = 'skill_map'
    URL = '/modules/skill_map'
    NAV_BAR_TAB = 'Skill Map'

    def get_skill_map(self):
        """Dispatch to the sub-page selected by the 'tab' query parameter."""
        self.course = courses.Course(self)
        if not self.course.app_context.is_editable_fs():
            self.render_page({
                'page_title': self.format_title('Skills Map'),
                'main_content': jinja2.utils.Markup(
                    '<h1>Read-only course.</h1>')
            })
            return
        tab = self.request.get('tab')
        if tab == 'skills_table':
            self.get_skills_table()
        elif tab == 'dependency_graph':
            self.get_dependency_graph()

    def get_skills_table(self):
        """Render the editable table of all skills."""
        skill_map = SkillMap.load(self.course)
        all_skills = skill_map.skills() or []
        autocomplete = [{'id': s.id, 'label': s.name} for s in all_skills]
        content = self.get_template(
            'skills_table.html', [TEMPLATES_DIR]).render(
                {'skills_autocomplete': json.dumps(autocomplete)})
        self.render_page({
            'page_title': self.format_title('Skills Table'),
            'main_content': jinja2.utils.Markup(content)})

    def get_dependency_graph(self):
        """Render the node/link prerequisite graph for visualization."""
        skill_map = SkillMap.load(self.course)
        nodes = []
        name_to_index = {}
        for position, skill in enumerate(skill_map.skills()):
            nodes.append({'id': skill.name})
            name_to_index[skill.name] = position
        # One link per (prerequisite -> skill) edge, indexed into `nodes`.
        links = [
            {'source': name_to_index[prereq.name],
             'target': name_to_index[skill.name]}
            for skill in skill_map.skills()
            for prereq in skill.prerequisites]
        content = self.get_template(
            'dependency_graph.html', [TEMPLATES_DIR]).render(
                {'nodes': json.dumps(nodes), 'links': json.dumps(links)})
        self.render_page({
            'page_title': self.format_title('Dependencies Graph'),
            'main_content': jinja2.utils.Markup(content)
        })
class SkillCompletionAggregate(models.BaseEntity):
    """Representation for the count of skill completions during time.

    Each entity of this class must be created using the skill_id as a
    key name.

    The aggregate field is a json string representing a dictionary. It
    maps dates in skill_map.skill_map.CountSkillCompletion.DATE_FORMAT
    with the cumulative number of students that completed that skill
    before the given date.
    """
    # Human-readable skill name, denormalized for reporting.
    name = db.StringProperty()
    # JSON dict: date string -> cumulative completion count.
    aggregate = db.TextProperty(indexed=False)
class CountSkillCompletion(jobs.MapReduceJob):
    """Aggregates the progress of students for each skill."""

    DATE_FORMAT = '%Y-%m-%d'
    # Separator used to pack '<skill_id>--<skill_name>' into a single
    # map/reduce key. Kept as a class constant so pack_name and unpack_name
    # cannot drift apart (previously unpack_name hard-coded the literal).
    PACK_SEPARATOR = '--'

    @classmethod
    def entity_class(cls):
        return models.Student

    @staticmethod
    def get_description():
        return 'counting students that completed or attempted a skill'

    def build_additional_mapper_params(self, app_context):
        """Creates a map from skill ids to packed 'id--name' strings."""
        course = courses.Course.get(app_context)
        skill_map = SkillMap.load(course)
        result = {}
        for skill in skill_map.skills():
            packed_name = self.pack_name(skill.id, skill.name)
            if not packed_name:
                logging.warning('Skill not processed: the id can\'t be packed.'
                                ' Id %s', skill.id)
                continue
            result[skill.id] = packed_name
        return {'skills': result}

    @staticmethod
    def pack_name(skill_id, skill_name):
        """Packs id and name into one key; None if the id contains '--'."""
        join_str = CountSkillCompletion.PACK_SEPARATOR
        if join_str not in str(skill_id):
            return '{}{}{}'.format(skill_id, join_str, skill_name)
        return None

    @staticmethod
    def unpack_name(packed_name):
        """Inverse of pack_name: returns [skill_id_str, skill_name]."""
        return packed_name.split(CountSkillCompletion.PACK_SEPARATOR, 1)

    @staticmethod
    def map(item):
        """Extracts the skill progress of the student.

        Yields:
            A tuple. The first element is the packed id of the skill (item)
            and the second is a json tuple (state, date_str). If the skill
            is not completed, then the date is None.
        """
        mapper_params = context.get().mapreduce_spec.mapper.params
        skills = mapper_params.get('skills', {})
        sprogress = SkillCompletionTracker().get_skills_progress(
            item, skills.keys())
        for skill_id, skill_progress in sprogress.iteritems():
            state, timestamp = skill_progress
            date_str = time.strftime(CountSkillCompletion.DATE_FORMAT,
                                     time.localtime(timestamp))
            packed_name = skills[skill_id]
            if state == SkillCompletionTracker.COMPLETED:
                yield packed_name, transforms.dumps((state, date_str))
            else:
                yield packed_name, transforms.dumps((state, None))

    @staticmethod
    def reduce(item_id, values):
        """Aggregates the number of students that completed or are in progress.

        Saves the dates of completion in a SkillCompletionAggregate entity.
        The name of the key of this entity is the skill id.

        Args:
            item_id: the packed_name of the skill
            values: a list of json tuples (state, date_str). If the skill
                is not completed, then the date is None.

        Yields:
            A 4-uple with the following schema:
                id, name, complete_count, in_progress_count
        """
        skill_id, name = CountSkillCompletion.unpack_name(item_id)
        in_progress_count = 0
        aggregate = defaultdict(int)  # idiomatic replacement for lambda: 0
        completed_count = 0
        for value in values:  # Aggregate the value per date
            state, date = tuple(transforms.loads(value))
            if date:
                aggregate[date] += 1
                completed_count += 1
            elif state == SkillCompletionTracker.IN_PROGRESS:
                in_progress_count += 1
        # Make partial sums so each date holds a cumulative count.
        partial_sum = 0
        for date in sorted(aggregate.keys()):
            partial_sum += aggregate[date]
            aggregate[date] = partial_sum
        # Store the entity
        SkillCompletionAggregate(key_name=str(skill_id), name=name,
                                 aggregate=transforms.dumps(aggregate)).put()
        yield (skill_id, name, completed_count, in_progress_count)
class SkillAggregateRestHandler(utils.BaseRESTHandler):
    """REST handler to manage the aggregate count of skill completions."""

    SCHEMA_VERSIONS = ['1']
    URL = '/rest/modules/skill_map/skill_aggregate_count'
    # Upper bound on the number of skill ids accepted per request.
    MAX_REQUEST_SIZE = 10

    def get(self):
        """Get the aggregate information for a set of skills.

        In the request, expects a field ids with a json list of skill ids. If
        more than SkillAggregateRestHandler.MAX_REQUEST_SIZE are sent
        an error response will be returned.

        In the field 'payload' of the response returns a json dictionary:
            {'column_headers': ['Date', 'id1', 'id2', ... ]
             'data': [
                ['date', 'count skill with id1', 'count skill with id2', ...]
            }
        The dates returned are in format CountSkillCompletion.DATE_FORMAT
        """
        if not roles.Roles.is_course_admin(self.app_context):
            transforms.send_json_response(self, 401, 'Access denied.', {})
            return
        ids = self.request.get_all('ids')
        data = []
        headers = ['Date']
        if ids:
            # NOTE(review): `>=` also rejects exactly MAX_REQUEST_SIZE ids
            # although the message says "more than" -- confirm intended limit.
            if len(ids) >= self.MAX_REQUEST_SIZE:
                # Given the number of possible skills in a course, this
                # method can take a while finish if we don't limit the
                # size of the request.
                self.validation_error('Request with more than'
                                      ' {} skills'.format(self.MAX_REQUEST_SIZE))
                return
            aggregates = []
            dates = set()
            # The complexity of the following cycles is
            # O(len(dates)*log(len(date))). Expect len(dates) < 1000 (3 years)
            # NOTE(review): ids with no stored aggregate are skipped, but
            # last_row below is sized len(ids); aggregates[index] could then
            # run past the end of `aggregates` -- confirm this cannot happen.
            for skill_id in ids:
                aggregate = SkillCompletionAggregate.get_by_key_name(
                    str(skill_id))
                if aggregate:
                    headers.append(skill_id)
                    aggregate = transforms.loads(aggregate.aggregate)
                    aggregates.append(aggregate)
                    dates.update(aggregate.keys())
            dates = sorted(list(dates))
            # Carry counts forward so each row holds cumulative values even
            # for dates on which a skill recorded no completions.
            last_row = [0] * len(ids)
            for date in dates:
                for index, count in enumerate(last_row):
                    last_row[index] = max(last_row[index],
                                          aggregates[index].get(date, 0))
                data.append([date] + last_row)  # no aliasing
        payload_dict = {'column_headers': headers, 'data': data}
        transforms.send_json_response(
            self, 200, '', payload_dict=payload_dict)
class SkillMapDataSource(data_sources.SynchronousQuery):
    """Synchronous data source feeding the skill-map analytics page."""

    @staticmethod
    def required_generators():
        return [CountSkillCompletion]

    @classmethod
    def get_name(cls):
        return 'skill_map_analytics'

    @staticmethod
    def fill_values(app_context, template_values, counts_generator):
        """Fills template_values with counts from CountSkillCompletion output.

        Works with the skill_map_analytics.html jinja template.

        Stores in the key 'counts' of template_values a table with the
        following format:
            skill name, count of completions, counts of 'in progress'
        Adds a row for each skill in the output of CountSkillCompletion job.
        """
        rows = jobs.MapReduceJob.get_results(counts_generator)
        # Drop the leading skill id from every row; the template keys on name.
        trimmed = [row[1:] for row in rows]
        template_values['counts'] = transforms.dumps(sorted(trimmed))
class SkillCompletionTracker(object):
    """Tracks student completion of skills.

    This class performs the same functions as the class
    models.progress.UnitLessonCompletionTracker. It saves the progress of
    each skill into a models.models.StudentPropertyEntity instance with key
    STUDENT_ID-SkillCompletionTracker.PROPERTY_KEY.

    The value attribute on the StudentPropertyEntity is a json string with
    the following schema:
    {
        skill_id: {skill_progress: timestamp, ... }
    }

    The progress of the skill can have three values, similar to the state of
    a lesson:
        NOT_ATTEMPTED: any of the lessons mapped with the skill are
            UnitLessonCompletionTracker.IN_PROGRESS_STATE
        IN_PROGRESS: at least one of the lessons mapped with the skill is not
            in UnitLessonCompletionTracker.NOT_ATTEMPTED_STATE.
        COMPLETED: all the lessons mapped with the skill are in
            UnitLessonCompletionTracker.COMPLETED_STATE.

    The timestamp is an integer that registers the last change in the state
    of that skill progress.
    """
    # TODO(milit): add manual progress of the skills
    PROPERTY_KEY = 'skill-completion'
    # States are stored as strings because they become JSON dict keys.
    COMPLETED = str(progress.UnitLessonCompletionTracker.COMPLETED_STATE)
    IN_PROGRESS = str(progress.UnitLessonCompletionTracker.IN_PROGRESS_STATE)
    NOT_ATTEMPTED = str(
        progress.UnitLessonCompletionTracker.NOT_STARTED_STATE)
    # Elements of the course which progress affects the progress of skills
    PROGRESS_DEPENDENCIES = set(['lesson'])
    def __init__(self, course=None):
        """Creates an instance of SkillCompletionTracker.

        Args:
            course: the course to load. If the course is None, the
                only actions that can be performed are
                get_or_create_progress, get_skill_progress and
                update_skill_progress.
        """
        # The course as an optional parameter allows access to the progress
        # without loading the SkillMap. (Like in a map reduce job)
        self.course = course
        self._skill_map = None
        if course:
            self._skill_map = SkillMap.load(course)
def _get_or_create_progress(self, student):
sprogress = models.StudentPropertyEntity.get(
student, self.PROPERTY_KEY)
if not sprogress:
sprogress = models.StudentPropertyEntity.create(
student=student, property_name=self.PROPERTY_KEY)
return sprogress
def get_skills_progress(self, student, skill_bunch):
    """Returns the most advanced recorded state of each skill for student.

    This function retrieves the recorded progress of the skill; it does
    not calculate it again from the linear progress of the course.

    Args:
        student: an instance of models.models.StudentEntity class.
        skill_bunch: an iterable of skill ids.

    Returns:
        A dictionary mapping every skill id in skill_bunch to a tuple
        (progress, timestamp). If no progress was ever recorded for a
        skill the timestamp is 0.
    """
    sprogress = self._get_or_create_progress(student)
    if not sprogress.value:
        return {skill_id: (self.NOT_ATTEMPTED, 0)
                for skill_id in skill_bunch}
    sprogress = transforms.loads(sprogress.value)
    result = {}
    for skill_id in skill_bunch:
        # After transforms the keys of the progress are str, not int.
        skill_progress = sprogress.get(str(skill_id))
        if not skill_progress:
            result[skill_id] = (self.NOT_ATTEMPTED, 0)
        elif self.COMPLETED in skill_progress:
            result[skill_id] = (self.COMPLETED,
                                skill_progress[self.COMPLETED])
        elif self.IN_PROGRESS in skill_progress:
            result[skill_id] = (self.IN_PROGRESS,
                                skill_progress[self.IN_PROGRESS])
        else:
            # Bug fix: a skill whose only recorded state is NOT_ATTEMPTED
            # (update_skill_progress stores a timestamp for that state
            # too) used to be silently dropped from the result, breaking
            # the documented "every id is mapped" contract.
            result[skill_id] = (self.NOT_ATTEMPTED,
                                skill_progress.get(self.NOT_ATTEMPTED, 0))
    return result
@staticmethod
def update_skill_progress(progress_value, skill_id, state):
    """Records state for the skill in the student progress dictionary.

    The first time a given state is seen for the skill the current time
    (seconds since epoch) is stored; later calls with the same state keep
    the original timestamp.

    Args:
        progress_value: a dictionary. Corresponds to the value of a
            models.models.StudentPropertyEntity instance that tracks the
            skill progress.
        skill_id: the id of the skill to modify.
        state: a valid progress state for the skill.
    """
    state_times = progress_value.setdefault(str(skill_id), {})
    if state not in state_times:
        state_times[state] = time.time()
def recalculate_progress(self, lprogress_tracker, lprogress, skill):
    """Calculates the progress of the skill from the linear progress.

    Args:
        lprogress_tracker: an instance of UnitLessonCompletionTracker.
        lprogress: an instance of StudentPropertyEntity that holds
            the linear progress of the student in the course.
        skill: an instance of SkillInfo or Skill.

    Returns:
        The calculated progress of the skill. If self does not have
        a valid skill_map instance (was initialized with no arguments)
        then this method returns None.
    """
    # Passing lprogress in (instead of fetching it here) avoids hitting
    # the datastore once per skill.
    if not self._skill_map:
        return
    skill_lessons = self._skill_map.get_lessons_for_skill(skill)
    statuses = [
        lprogress_tracker.get_lesson_status(
            lprogress, lesson.unit_id, lesson.lesson_id)
        for lesson in skill_lessons]
    completed_count = statuses.count(lprogress_tracker.COMPLETED_STATE)
    if completed_count == len(skill_lessons):
        return self.COMPLETED
    in_progress_count = statuses.count(lprogress_tracker.IN_PROGRESS_STATE)
    if completed_count or in_progress_count:
        # At least one lesson in progress or completed.
        return self.IN_PROGRESS
    return self.NOT_ATTEMPTED
def update_skills(self, student, lprogress, lesson_id):
    """Recalculates and saves the progress of all skills mapped to lesson.

    If self does not have a valid skill_map instance (was initialized
    with no arguments) then this method does not perform any action.

    Args:
        student: an instance of StudentEntity.
        lprogress: an instance of StudentPropertyEntity with the linear
            progress of student.
        lesson_id: the id of the lesson.
    """
    # TODO(milit): Add process for lesson None.
    if not self._skill_map:
        return
    tracker = progress.UnitLessonCompletionTracker(self.course)
    entity = self._get_or_create_progress(student)
    progress_value = (
        transforms.loads(entity.value) if entity.value else {})
    for skill in self._skill_map.get_skills_for_lesson(lesson_id):
        new_state = self.recalculate_progress(tracker, lprogress, skill)
        self.update_skill_progress(progress_value, skill.id, new_state)
    entity.value = transforms.dumps(progress_value)
    entity.put()
def post_update_progress(course, student, lprogress, event_entity, event_key):
    """Updates the skill progress after the update of the linear progress.

    Args:
        course: the current course.
        student: an instance of StudentEntity.
        lprogress: an instance of StudentPropertyEntity with the linear
            progress. This function is called before the put() to the
            database, so this instance must have the latest changes.
        event_entity: a string. The kind of event or progress that was
            triggered. Only events in
            SkillCompletionTracker.PROGRESS_DEPENDENCIES will be
            processed; others are ignored.
        event_key: the element that triggered the update of the linear
            progress. This key is the same used in the linear progress
            and must be compatible with the method
            progress.UnitLessonCompletionTracker.get_elements_from_key.
            If, for example, event_entity is 'lesson', the event key
            could be the key of the lesson or the key of any subentities
            of the lesson.
    """
    if event_entity not in SkillCompletionTracker.PROGRESS_DEPENDENCIES:
        return
    key_elements = progress.UnitLessonCompletionTracker.get_elements_from_key(
        event_key)
    lesson_id = key_elements.get('lesson')
    unit_id = key_elements.get('unit')
    # Skill progress is only tracked for 1.3 courses with a real lesson.
    if not lesson_id or not unit_id:
        return
    if course.version != courses.CourseModel13.VERSION:
        return
    lesson = course.find_lesson_by_id(unit_id, lesson_id)
    if not isinstance(lesson, courses.Lesson13):
        return
    SkillCompletionTracker(course).update_skills(
        student, lprogress, lesson_id)
def register_tabs():
    """Registers the dashboard tabs and the analytics visualization."""
    base_href = 'modules/skill_map?action=skill_map&tab='
    # Two tabs under the 'skill_map' dashboard action.
    for tab_id, label in (('skills_table', 'Skills Table'),
                          ('dependency_graph', 'Skills Graph')):
        tabs.Registry.register(
            'skill_map', tab_id, label, href=base_href + tab_id)
    skill_map_visualization = analytics.Visualization(
        'skill_map',
        'Skill Map Analytics',
        'templates/skill_map_analytics.html',
        data_source_classes=[SkillMapDataSource])
    tabs.Registry.register(
        'analytics', 'skill_map', 'Skill Map', [skill_map_visualization])
def lesson_rest_handler_schema_load_hook(lesson_field_registry):
    """Adds the 'skills' array field to the lesson REST editor schema."""
    skill_type = schema_fields.FieldRegistry('Skill')
    skill_type.add_property(schema_fields.SchemaField(
        'skill', 'Skill', 'number', optional=True, i18n=False))
    skills_array = schema_fields.FieldArray(
        'skills', 'Skills', optional=True, item_type=skill_type,
        extra_schema_dict_values={
            'className': 'skill-panel inputEx-Field inputEx-ListField'})
    lesson_field_registry.add_property(skills_array)
def lesson_rest_handler_pre_load_hook(lesson, lesson_dict):
    """Exposes the lesson's tagged skill ids to the REST editor payload."""
    skill_ids = lesson.properties.get(LESSON_SKILL_LIST_KEY, [])
    lesson_dict['skills'] = [{'skill': skill_id} for skill_id in skill_ids]
def lesson_rest_handler_pre_save_hook(lesson, lesson_dict):
    """Copies edited skill ids from the editor payload back onto lesson."""
    if 'skills' not in lesson_dict:
        return
    lesson.properties[LESSON_SKILL_LIST_KEY] = [
        entry['skill'] for entry in lesson_dict['skills']]
def course_outline_extra_info_decorator(course, unit_or_lesson):
    """Renders a lesson's skill names for the dashboard course outline.

    Returns a safe_dom list of skill names for Lesson13 instances, or
    None for any other outline element.
    """
    if not isinstance(unit_or_lesson, courses.Lesson13):
        return None
    skill_map = SkillMap.load(course)
    # TODO(jorr): Should this list be being created by the JS library?
    root = safe_dom.Element('ol', className='skill-display-root')
    for skill in skill_map.get_skills_for_lesson(unit_or_lesson.lesson_id):
        item = safe_dom.Element('li', className='skill')
        root.add_child(item.add_text(skill.name))
    return root
def welcome_handler_import_skills_callback(app_ctx, unused_errors):
    """Imports the sample skill map inside the course's namespace."""
    saved_namespace = namespace_manager.get_namespace()
    try:
        # Datastore calls inside import_skill_map must run in the
        # course's namespace; always restore the caller's afterwards.
        namespace_manager.set_namespace(app_ctx.get_namespace_name())
        import_skill_map(app_ctx)
    finally:
        namespace_manager.set_namespace(saved_namespace)
def lesson_title_provider(handler, app_context, unit, lesson, student):
    """Renders the lesson header with the skill widget for Lesson13 lessons.

    Returns None for old-style lessons or when the course has the skill
    widget explicitly disabled, so the caller falls back to the default
    lesson title.
    """
    if not isinstance(lesson, courses.Lesson13):
        return None
    env = courses.Course.get_environ(app_context)
    # The widget is shown unless the setting is explicitly False.
    if env['course'].get('display_skill_widget') is False:
        return None
    skill_map = SkillMap.load(handler.get_course())
    skill_list = skill_map.get_skills_for_lesson(lesson.lesson_id)

    def filter_visible_locations(skill):
        """Filter out references to lessons which are not visible."""
        locations = []
        for location in skill.locations:
            # Keep a location only when its unit is available, matches the
            # student's track, and its lesson is available.
            if not (
                location.unit.now_available
                and location.unit in handler.get_track_matching_student(student)
                and location.lesson.now_available
            ):
                continue
            locations.append(location)
        # Clone the SkillInfo so the shared/cached skill map is not mutated.
        # pylint: disable=protected-access
        clone = SkillInfo(skill._skill, locations=locations,
                          topo_sort_index=skill._topo_sort_index)
        clone._prerequisites = skill._prerequisites
        # pylint: enable=protected-access
        return clone

    def not_in_this_lesson(skill_list):
        """Filter out skills which are taught in the current lesson."""
        return [
            filter_visible_locations(skill) for skill in skill_list
            if lesson not in [loc.lesson for loc in skill.locations]]

    # Collect the neighborhood of this lesson's skills for the widget.
    depends_on_skills = set()
    leads_to_skills = set()
    dependency_map = {}
    for skill in skill_list:
        skill = filter_visible_locations(skill)
        prerequisites = skill.prerequisites
        successors = skill_map.successors(skill)
        depends_on_skills.update(prerequisites)
        leads_to_skills.update(successors)
        dependency_map[skill.id] = {
            'depends_on': [s.id for s in prerequisites],
            'leads_to': [s.id for s in successors]
        }
    template_values = {
        'lesson': lesson,
        'unit': unit,
        'can_see_drafts': courses_module.courses.can_see_drafts(app_context),
        'skill_list': skill_list,
        'depends_on_skills': not_in_this_lesson(depends_on_skills),
        'leads_to_skills': not_in_this_lesson(leads_to_skills),
        # Serialized for consumption by the widget's javascript.
        'dependency_map': transforms.dumps(dependency_map)
    }
    return jinja2.Markup(
        handler.get_template('lesson_header.html', [TEMPLATES_DIR]
        ).render(template_values))
def widget_display_flag_schema_provider(unused_course):
    """Course-settings field that toggles the per-lesson skill widget."""
    return schema_fields.SchemaField(
        'course:display_skill_widget',
        'Student Skill Widget',
        'boolean',
        optional=True,
        description='Display the skills taught in each lesson.')
def import_skill_map(app_ctx):
    """Builds the skill graph and lesson mappings from data/skills.json.

    Reads the bundled skills.json file, creates a skill for every node,
    wires up prerequisites, and tags the corresponding lessons with the
    new skill ids.

    Args:
        app_ctx: the application context of the course to populate.
    """
    fn = os.path.join(
        appengine_config.BUNDLE_ROOT, 'data', 'skills.json')
    with open(fn, 'r') as fin:
        nodes = json.loads(fin.read())

    # add skills
    id_to_key = {}  # json node id -> stored skill id
    key_to_id = {}  # stored skill id -> json node id
    key_to_nodes = {}  # stored skill id -> json node
    skill_graph = SkillGraph.load()
    for node in nodes:
        skill = skill_graph.add(Skill.build(node['name'], node['description']))
        id_to_key[node['id']] = skill.id
        key_to_id[skill.id] = node['id']
        key_to_nodes[skill.id] = node

    # add skills prerequisites; reload so the graph includes the skills
    # created above
    skill_graph = SkillGraph.load()
    for skill in skill_graph.skills:
        key = skill.id
        node = key_to_nodes[key]
        prerequisite_ids = node.get('prerequisites')
        if prerequisite_ids:
            pre_keys = [id_to_key[pid] for pid in prerequisite_ids]
            for pre_key in pre_keys:
                try:
                    skill_graph.add_prerequisite(key, pre_key)
                except AssertionError:
                    # Bad data should not abort the whole import; log and
                    # keep going.
                    logging.exception(
                        'Invalid skill prerequisite: %s, %s', key, pre_key)

    # build mapping from lesson index to lesson id
    course = courses.Course(None, app_context=app_ctx)
    units = {u.unit_id: u for u in course.get_units()}
    lesson_map = {}
    for lesson in course.get_lessons_for_all_units():
        unit = units[lesson.unit_id]
        # skills.json locations address lessons by (unit index, lesson
        # index) rather than by id.
        lesson_map[(unit.index, lesson.index)] = lesson

    # update lessons properties with skill ids
    skill_graph = SkillGraph.load()
    for skill in skill_graph.skills:
        node = key_to_nodes[skill.id]
        for loc in node['locations']:
            ul_tuple = (loc['unit'], loc['lesson'])
            lesson = lesson_map[ul_tuple]
            lesson.properties.setdefault(
                LESSON_SKILL_LIST_KEY, []).append(skill.id)
            assert course.update_lesson(lesson)
    course.save()
def notify_module_enabled():
    """Wires the skill map module into dashboard, REST and progress hooks."""

    def get_action(handler):
        # Default dashboard action: jump straight to the skills table tab.
        handler.redirect('/modules/skill_map?action=skill_map&tab=skills_table')

    # Dashboard course-outline decoration and navigation.
    dashboard.DashboardHandler.COURSE_OUTLINE_EXTRA_INFO_ANNOTATORS.append(
        course_outline_extra_info_decorator)
    dashboard.DashboardHandler.COURSE_OUTLINE_EXTRA_INFO_TITLES.append('Skills')
    dashboard.DashboardHandler.add_nav_mapping(
        SkillMapHandler.ACTION, SkillMapHandler.NAV_BAR_TAB)
    dashboard.DashboardHandler.get_actions.append('skill_map')
    setattr(dashboard.DashboardHandler, 'get_skill_map', get_action)
    # Dashboard styling and scripts.
    dashboard.DashboardHandler.EXTRA_CSS_HREF_LIST.append(
        '/modules/skill_map/resources/css/common.css')
    dashboard.DashboardHandler.EXTRA_CSS_HREF_LIST.append(
        '/modules/skill_map/resources/css/course_outline.css')
    dashboard.DashboardHandler.EXTRA_CSS_HREF_LIST.append(
        '/modules/skill_map/resources/css/skill_tagging.css')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/skill_map/resources/js/course_outline.js')
    dashboard.DashboardHandler.EXTRA_JS_HREF_LIST.append(
        '/modules/skill_map/resources/js/skill_tagging_lib.js')
    # Student-facing lesson header widget.
    lessons.UnitHandler.set_lesson_title_provider(lesson_title_provider)
    # Lesson REST editor: schema field and load/save conversion hooks.
    LessonRESTHandler.SCHEMA_LOAD_HOOKS.append(
        lesson_rest_handler_schema_load_hook)
    LessonRESTHandler.PRE_LOAD_HOOKS.append(
        lesson_rest_handler_pre_load_hook)
    LessonRESTHandler.PRE_SAVE_HOOKS.append(
        lesson_rest_handler_pre_save_hook)
    LessonRESTHandler.REQUIRED_MODULES.append('inputex-list')
    LessonRESTHandler.REQUIRED_MODULES.append('inputex-number')
    # TODO(jorr): Use HTTP GET rather than including them as templates
    LessonRESTHandler.ADDITIONAL_DIRS.append(os.path.join(TEMPLATES_DIR))
    LessonRESTHandler.EXTRA_JS_FILES.append('skill_tagging.js')
    # JSON serialization support for skill objects.
    transforms.CUSTOM_JSON_ENCODERS.append(LocationInfo.json_encoder)
    transforms.CUSTOM_JSON_ENCODERS.append(SkillInfo.json_encoder)
    # Sample-course import, course import, settings and progress hooks.
    WelcomeHandler.COPY_SAMPLE_COURSE_HOOKS.append(
        welcome_handler_import_skills_callback)
    courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT.add(_SkillEntity)
    courses.Course.OPTIONS_SCHEMA_PROVIDERS.setdefault(
        courses.Course.SCHEMA_SECTION_COURSE, []).append(
        widget_display_flag_schema_provider)
    progress.UnitLessonCompletionTracker.POST_UPDATE_PROGRESS_HOOK.append(
        post_update_progress)
    # Analytics, resources and i18n registration.
    data_sources.Registry.register(SkillMapDataSource)
    resource.Registry.register(ResourceSkill)
    i18n_dashboard.TranslatableResourceRegistry.register(
        TranslatableResourceSkill)
    register_tabs()
def register_module():
    """Registers this module in the registry."""

    def _zip_handler(zip_name):
        # Serves files packed inside a bundled third-party zip archive.
        return sites.make_zip_handler(os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib', zip_name))

    underscore_js_handler = _zip_handler('underscore-1.4.3.zip')
    d3_js_handler = _zip_handler('d3-3.4.3.zip')
    dep_graph_handler = _zip_handler('dependo-0.1.4.zip')

    global_routes = [
        (RESOURCES_URI + '/css/.*', tags.ResourcesHandler),
        (RESOURCES_URI + '/js/course_outline.js', tags.JQueryHandler),
        (RESOURCES_URI + '/js/lesson_header.js', tags.JQueryHandler),
        (RESOURCES_URI + '/js/skill_tagging_lib.js', tags.IifeHandler),
        (RESOURCES_URI + '/d3-3.4.3/(d3.min.js)', d3_js_handler),
        (RESOURCES_URI + '/underscore-1.4.3/(underscore.min.js)',
         underscore_js_handler),
        (RESOURCES_URI + '/dependo-0.1.4/(.*)', dep_graph_handler)
    ]

    namespaced_routes = [
        (LocationListRestHandler.URL, LocationListRestHandler),
        (SkillRestHandler.URL, SkillRestHandler),
        (SkillMapHandler.URL, SkillMapHandler),
        (SkillAggregateRestHandler.URL, SkillAggregateRestHandler)
    ]

    global skill_mapping_module  # pylint: disable=global-statement
    skill_mapping_module = custom_modules.Module(
        'Skill Mapping Module',
        'Provide skill mapping of course content',
        global_routes,
        namespaced_routes,
        notify_module_enabled=notify_module_enabled)
    return skill_mapping_module
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to calculate metrics for the SkillMap object."""
__author__ = 'Milagro Teruel (milit@google.com)'
import networkx
from collections import defaultdict
CHAINS_MIN_LENGTH = 10 # TODO(milit) Add this as a setting?
class SkillMapMetrics(object):
    """Works as an interface to the networkx library.

    Holds a DiGraph equivalent to the skill map, created at initialization.
    """

    def __init__(self, skill_map):
        """Creates an instance of networkx.DiGraph from a skill_map object."""
        self.nxgraph = networkx.DiGraph()
        # Maps each skill id to the iterable of its successors' ids.
        # NOTE: iteritems() is Python 2 only.
        self.successors = skill_map.build_successors()
        for node, dsts in self.successors.iteritems():
            for dst in dsts:
                self.nxgraph.add_edge(node, dst)
            if not dsts:  # Add the single node
                self.nxgraph.add_node(node)

    def simple_cycles(self):
        """Finds the simple cycles (with no repeated edges) in the graph.

        A cycle is called simple if no node is repeated.

        Returns:
            A list with cycles. Each cycle is represented as a list of
            skills ids in the order they appear in the graph.
        """
        return list(networkx.simple_cycles(self.nxgraph))

    def singletons(self):
        """A singleton is a weakly connected component that has only one node.

        Returns:
            A list with the singleton nodes.
        """
        # NOTE(review): indexing component[0] assumes the networkx version
        # in use returns components as lists; newer networkx yields sets --
        # confirm against the bundled library version.
        components = networkx.weakly_connected_components(self.nxgraph)
        return [component[0] for component in components
                if len(component) == 1]

    def _get_longest_paths(self, src, destinations, min_length, topo_sort):
        """Returns the paths from src to destinations longer than min_length.

        See also: http://en.wikipedia.org/wiki/Longest_path_problem, section
        "Acyclic graphs and critical paths". This implementation is in
        reverse order with respect to the algorithm in the article.

        Args:
            src: a node of the graph. It is the start of the paths returned.
            destinations: an iterable of nodes in the graph. Only one path
                (if any) in the returned set will end in each of these nodes.
            min_length: a number. Minimum length of the paths (in edges)
                to be returned by this method.
            topo_sort: a sorted iterable of the nodes in the graph. The order
                corresponds to one of the topological orders of the graph.

        Returns:
            A list of paths starting at src and ending at one of the nodes in
            destinations. Each path is represented as a sorted list of
            nodes, and has a length of at least min_length.
        """
        def get_path_from_ancestors(ancestors, dst):
            """Traverses the ancestors dict to find the path ending at dst.

            Args:
                ancestors: a dictionary. Represents a path in the graph that
                    ends at destination. Maps nodes to their ancestor in this
                    path.
                dst: the node ending the path.

            Returns:
                A path ending at dst represented as an ordered list of nodes.
            """
            current_dst = ancestors[dst]
            path = [dst]
            while current_dst:
                path.insert(0, current_dst)  # insert at the beginning
                current_dst = ancestors[current_dst]
            return path

        # Construct the distances and ancestors from src to all nodes in
        # nxgraph.
        # Maps nodes to its ancestors in longest path.
        ancestors = {src: None}
        # Maps nodes to distance from src in longest path.
        distances = defaultdict(lambda: -1)  # -1 means not reachable.
        distances[src] = 0
        for next_dst in topo_sort:
            if distances[next_dst] == -1:  # Not visited -> not reachable.
                continue
            # Relax in topological order: the longest path to each successor
            # goes through the longest path to one of its predecessors.
            for successor in self.successors[next_dst]:
                if distances[successor] < distances[next_dst] + 1:
                    ancestors[successor] = next_dst
                    distances[successor] = distances[next_dst] + 1
        # Construct the paths only to the nodes in destinations
        result = []
        for dst in destinations:
            if distances[dst] >= min_length:
                result.append(get_path_from_ancestors(ancestors, dst))
        return result

    def long_chains(self, min_length=None):
        """Finds acyclic paths of length greater than or equal to min_length.

        The graph must be ACYCLIC. The complexity of the algorithm is:
            O(topo_sort) + O(|edges|*|nodes|*|nodes with no ancestors|)
            = O(|edges|*|nodes|*|nodes with no ancestors|)
        No simple path in the result is contained inside other simple path.

        Args:
            min_length: The minimum length of a chain to be returned by the
            function. If min_length is None, the minimum length for the path
            is modules.skill_map.skill_map_metrics.CHAINS_MIN_LENGTH. The
            length of a chain is the number of edges in the chain.

        Returns:
            A list of long chains. Each long chain is an ordered list of
            nodes that forms the path.

        Raises:
            networkx.NetworkXUnbounded if the graph has a cycle.
        """
        if not min_length:
            min_length = CHAINS_MIN_LENGTH
        # We can do this from the nxgraph, but this has better complexity.
        if not self.successors:
            return []
        # NOTE: reduce is a builtin only in Python 2.
        initial_nodes = set(
            self.successors.keys()) - reduce(
            set.union, self.successors.values())  # nodes with no ancestors
        # nodes with no successors
        end_nodes = [node for node in self.successors
                     if not self.successors[node]]
        result = []
        topo_sort = networkx.topological_sort(self.nxgraph)
        for src in initial_nodes:
            result.extend(self._get_longest_paths(
                src, end_nodes, min_length, topo_sort))
        return result

    def diagnose(self):
        """Calculates information about the health of the graph.

        Returns:
            A dictionary with the following structure:
                {
                    'cycles': [[ids of skills forming cycle], [...], ...],
                    'singletons': [skill_ids],
                    'long_chains': [[skill_ids...], [...], ...],
                }
            The minimum length that a chain must have to be included into the
            long_chains field is
            modules.skill_map.skill_map_metrics.CHAINS_MIN_LENGTH. If any
            cycle is found in the graph, there will be no calculation of long
            chains.
        """
        cycles = self.simple_cycles()
        long_chains = []
        # long_chains requires an acyclic graph; skip it when cycles exist.
        if not cycles:
            long_chains = self.long_chains()
        return {
            'cycles': cycles,
            'singletons': self.singletons(),
            'long_chains': long_chains
        }
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to provide a tag to embed activities into lesson bodies."""
import os
from xml.etree import cElementTree
from common import schema_fields
from common import tags
from models import courses
from models import custom_modules
# String. Course Builder root-relative path where resources for this module are.
_RESOURCES_PATH = os.path.join(
os.path.sep, 'modules', 'activity_tag', 'resources')
class Activity(tags.BaseTag):
    """A tag to embed activities into lesson bodies."""

    # Tag name as it appears in lesson HTML markup.
    binding_name = 'gcb-activity'

    @classmethod
    def name(cls):
        # Display name shown in the rich text editor.
        return 'Activity'

    @classmethod
    def vendor(cls):
        # Vendor namespace for the tag.
        return 'gcb'

    def render(self, node, unused_handler):
        """Renders the tag as a script element loading the activity js.

        Args:
            node: the tag's XML node; its 'activityid' attribute names the
                activity javascript file under assets/js/.
            unused_handler: the request handler (unused).

        Returns:
            A cElementTree element wrapping the activity script and its
            container div.
        """
        activity_id = node.attrib.get('activityid')
        script = cElementTree.XML("""
<div>
<script></script>
<div id="activityContents"></div>
</div>""")
        script[0].set('src', 'assets/js/%s' % activity_id)
        return script

    def get_icon_url(self):
        # Icon shown for this tag in the rich text editor toolbar.
        return os.path.join(_RESOURCES_PATH, 'activity.png')

    def get_schema(self, handler):
        """The schema of the tag editor.

        Builds a drop-down of selectable activities: every lesson that has
        an activity, plus (when editing from a lesson page) the current
        lesson's would-be activity file.
        """
        activity_list = []
        if handler:
            course = courses.Course(handler)
            if course.version == courses.COURSE_MODEL_VERSION_1_2:
                # 1.2 (file-based) courses keep activities on disk; the
                # editor cannot enumerate them.
                return self.unavailable_schema(
                    'Not available in file-based courses.')
            lesson_id = None
            if handler.request:
                lesson_id = handler.request.get('lesson_id')
            activity_list = []
            for unit in course.get_units():
                for lesson in course.get_lessons(unit.unit_id):
                    filename = 'activity-%s.js' % lesson.lesson_id
                    if lesson.has_activity:
                        if lesson.activity_title:
                            title = lesson.activity_title
                        else:
                            title = filename
                        name = '%s - %s (%s) ' % (
                            unit.title, lesson.title, title)
                        activity_list.append((filename, name))
                    elif str(lesson.lesson_id) == lesson_id:
                        # Offer the lesson being edited even though it has
                        # no activity yet.
                        name = 'Current Lesson (%s)' % filename
                        activity_list.append((filename, name))
        reg = schema_fields.FieldRegistry('Activity')
        reg.add_property(
            schema_fields.SchemaField(
                'activityid', 'Activity Id', 'string', optional=True,
                select_data=activity_list))
        return reg
# Module reference filled in by register_module().
custom_module = None


def register_module():
    """Registers this module for use."""

    def on_module_disable():
        # Undo everything on_module_enable did, in the same groupings.
        tags.Registry.remove_tag_binding(Activity.binding_name)
        tags.EditorBlacklists.unregister(
            Activity.binding_name,
            tags.EditorBlacklists.COURSE_SCOPE)
        tags.EditorBlacklists.unregister(
            Activity.binding_name, tags.EditorBlacklists.ASSESSMENT_SCOPE)
        tags.EditorBlacklists.unregister(
            Activity.binding_name, tags.EditorBlacklists.DESCRIPTIVE_SCOPE)

    def on_module_enable():
        # Bind the tag and keep it out of editors where it is unsupported.
        tags.Registry.add_tag_binding(Activity.binding_name, Activity)
        tags.EditorBlacklists.register(
            Activity.binding_name,
            tags.EditorBlacklists.COURSE_SCOPE)
        tags.EditorBlacklists.register(
            Activity.binding_name, tags.EditorBlacklists.ASSESSMENT_SCOPE)
        tags.EditorBlacklists.register(
            Activity.binding_name, tags.EditorBlacklists.DESCRIPTIVE_SCOPE)

    global custom_module  # pylint: disable=global-statement

    # Add a static handler for icons shown in the rich text editor.
    global_routes = [(
        os.path.join(_RESOURCES_PATH, '.*'), tags.ResourcesHandler)]

    custom_module = custom_modules.Module(
        'Embedded Activity',
        'Adds a custom tag to embed an activity in a lesson.',
        global_routes, [],
        notify_module_disabled=on_module_disable,
        notify_module_enabled=on_module_enable,
    )
    return custom_module
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course Builder web application entry point."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
import webapp2
# The following import is needed in order to add third-party libraries.
import appengine_config # pylint: disable=unused-import
from common import resource
from controllers import sites
from models import analytics
from models import custom_modules
from models import data_sources
from models import resources_display
from models import student_labels
# Import, register, & enable modules named in app.yaml's GCB_REGISTERED_MODULES.
appengine_config.import_and_enable_modules()

# Core "module" is always present and registered.
resource.Registry.register(resources_display.ResourceSAQuestion)
resource.Registry.register(resources_display.ResourceMCQuestion)
resource.Registry.register(resources_display.ResourceQuestionGroup)
custom_modules.Module(
    'Core REST services', 'A module to host core REST services',
    analytics.get_global_handlers(),
    analytics.get_namespaced_handlers() +
    data_sources.get_namespaced_handlers() +
    student_labels.get_namespaced_handlers()
).enable()

# Collect routes (URL-matching regexes -> handler classes) for modules.
global_routes, namespaced_routes = custom_modules.Registry.get_all_routes()

# Configure routes available at '/%namespace%/' context paths; every such
# request funnels through the namespace-aware dispatcher.
sites.ApplicationRequestHandler.bind(namespaced_routes)
app_routes = [(r'(.*)', sites.ApplicationRequestHandler)]

# Enable Appstats handlers if requested.
appstats_routes = []
if appengine_config.gcb_appstats_enabled():
    import google.appengine.ext.appstats.ui as appstats_ui
    # Remap all Appstats URLs under the /admin/stats base path.
    for path, handler in appstats_ui.URLMAP:
        assert '.*' == path[:2]
        appstats_routes.append(('/admin/stats/%s' % path[3:], handler))

# i18n configuration for jinja2.
webapp2_i18n_config = {'translations_path': os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules/i18n/resources/locale')}

# Init application. Global routes are matched before the namespaced
# catch-all route.
app = webapp2.WSGIApplication(
    global_routes + appstats_routes + app_routes,
    config={'webapp2_extras.i18n': webapp2_i18n_config},
    debug=not appengine_config.PRODUCTION_MODE)
| Python |
#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs all of the tests in parallel.
Execute this script from the Course Builder folder as:
python scripts/run_all_tests.py
"""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
import os
import subprocess
import sys
import threading
import time
import yaml
# all test classes with a total count of tests in each
# NOTE(review): each count is checked against the 'All %s tests PASSED!'
# line of the suite's output (see run_tests below), so this map must be
# updated whenever tests are added to or removed from a suite.
ALL_TEST_CLASSES = {
    'tests.functional.admin_settings.AdminSettingsTests': 1,
    'tests.functional.admin_settings.ExitUrlTest': 1,
    'tests.functional.admin_settings.HtmlHookTest': 17,
    'tests.functional.admin_settings.JinjaContextTest': 2,
    'tests.functional.admin_settings.WelcomePageTests': 6,
    'tests.functional.assets_rest.AssetsRestTest': 13,
    'tests.functional.common_crypto.EncryptionManagerTests': 5,
    'tests.functional.common_crypto.XsrfTokenManagerTests': 3,
    'tests.functional.common_crypto.PiiObfuscationHmac': 2,
    'tests.functional.common_crypto.GenCryptoKeyFromHmac': 2,
    'tests.functional.common_crypto.GetExternalUserIdTests': 4,
    'tests.functional.explorer_module.CourseExplorerTest': 3,
    'tests.functional.explorer_module.CourseExplorerDisabledTest': 3,
    'tests.functional.explorer_module.GlobalProfileTest': 1,
    'tests.functional.controllers_review.PeerReviewControllerTest': 7,
    'tests.functional.controllers_review.PeerReviewDashboardAdminTest': 1,
    'tests.functional.controllers_review.PeerReviewDashboardStudentTest': 2,
    'tests.functional.i18n.I18NCourseSettingsTests': 7,
    'tests.functional.i18n.I18NMultipleChoiceQuestionTests': 6,
    'tests.functional.model_analytics.AnalyticsTabsWithNoJobs': 8,
    'tests.functional.model_analytics.CronCleanupTest': 14,
    'tests.functional.model_analytics.MapReduceSimpleTest': 1,
    'tests.functional.model_analytics.ProgressAnalyticsTest': 8,
    'tests.functional.model_analytics.QuestionAnalyticsTest': 3,
    'tests.functional.model_courses.CourseCachingTest': 5,
    'tests.functional.model_data_sources.PaginatedTableTest': 17,
    'tests.functional.model_data_sources.PiiExportTest': 4,
    'tests.functional.model_entities.BaseEntityTestCase': 3,
    'tests.functional.model_entities.ExportEntityTestCase': 2,
    'tests.functional.model_entities.EntityTransformsTest': 4,
    'tests.functional.model_jobs.JobOperationsTest': 15,
    'tests.functional.model_models.BaseJsonDaoTestCase': 1,
    'tests.functional.model_models.ContentChunkTestCase': 15,
    'tests.functional.model_models.EventEntityTestCase': 1,
    'tests.functional.model_models.MemcacheManagerTestCase': 4,
    'tests.functional.model_models.PersonalProfileTestCase': 1,
    'tests.functional.model_models.QuestionDAOTestCase': 3,
    'tests.functional.model_models.StudentAnswersEntityTestCase': 1,
    'tests.functional.model_models.StudentProfileDAOTestCase': 6,
    'tests.functional.model_models.StudentPropertyEntityTestCase': 1,
    'tests.functional.model_models.StudentTestCase': 3,
    'tests.functional.model_student_work.KeyPropertyTest': 4,
    'tests.functional.model_student_work.ReviewTest': 3,
    'tests.functional.model_student_work.SubmissionTest': 3,
    'tests.functional.model_utils.QueryMapperTest': 4,
    'tests.functional.model_vfs.VfsLargeFileSupportTest': 6,
    'tests.functional.module_config_test.ManipulateAppYamlFileTest': 8,
    'tests.functional.module_config_test.ModuleIncorporationTest': 8,
    'tests.functional.module_config_test.ModuleManifestTest': 7,
    'tests.functional.modules_admin.AdminDashboardTabTests': 4,
    'tests.functional.modules_analytics.StudentAggregateTest': 6,
    'tests.functional.modules_balancer.ExternalTaskTest': 3,
    'tests.functional.modules_balancer.ManagerTest': 10,
    'tests.functional.modules_balancer.ProjectRestHandlerTest': 5,
    'tests.functional.modules_balancer.TaskRestHandlerTest': 20,
    'tests.functional.modules_balancer.WorkerPoolTest': 2,
    'tests.functional.modules_certificate.CertificateHandlerTestCase': 5,
    'tests.functional.modules_certificate.CertificateCriteriaTestCase': 6,
    'tests.functional.modules_code_tags.CodeTagTests': 4,
    'tests.functional.modules_core_tags.GoogleDriveRESTHandlerTest': 8,
    'tests.functional.modules_core_tags.GoogleDriveTagRendererTest': 6,
    'tests.functional.modules_core_tags.RuntimeTest': 13,
    'tests.functional.modules_core_tags.TagsInclude': 8,
    'tests.functional.modules_core_tags.TagsMarkdown': 1,
    'tests.functional.modules_courses.AccessDraftsTestCase': 2,
    'tests.functional.modules_dashboard.CourseOutlineTestCase': 1,
    'tests.functional.modules_dashboard.DashboardAccessTestCase': 3,
    'tests.functional.modules_dashboard.QuestionDashboardTestCase': 9,
    'tests.functional.modules_dashboard.RoleEditorTestCase': 3,
    'tests.functional.modules_data_pump.SchemaConversionTests': 1,
    'tests.functional.modules_data_pump.StudentSchemaValidationTests': 2,
    'tests.functional.modules_data_pump.PiiTests': 7,
    'tests.functional.modules_data_pump.BigQueryInteractionTests': 36,
    'tests.functional.modules_data_pump.UserInteractionTests': 4,
    'tests.functional.modules_data_source_providers.CourseElementsTest': 11,
    'tests.functional.modules_data_source_providers.StudentScoresTest': 6,
    'tests.functional.modules_data_source_providers.StudentsTest': 5,
    'tests.functional.modules_extra_tabs.ExtraTabsTests': 7,
    'tests.functional.modules_i18n_dashboard.CourseContentTranslationTests': 15,
    'tests.functional.modules_i18n_dashboard.IsTranslatableRestHandlerTests': 3,
    'tests.functional.modules_i18n_dashboard.I18nDashboardHandlerTests': 4,
    # Implicit string concatenation keeps long dotted names within the
    # line-length limit; each pair below is a single dictionary key.
    'tests.functional.modules_i18n_dashboard'
    '.I18nProgressDeferredUpdaterTests': 5,
    'tests.functional.modules_i18n_dashboard.LazyTranslatorTests': 5,
    'tests.functional.modules_i18n_dashboard.ResourceBundleKeyTests': 2,
    'tests.functional.modules_i18n_dashboard.ResourceRowTests': 6,
    'tests.functional.modules_i18n_dashboard'
    '.TranslationConsoleRestHandlerTests': 8,
    'tests.functional.modules_i18n_dashboard'
    '.TranslationConsoleValidationTests': 5,
    'tests.functional.modules_i18n_dashboard.TranslationImportExportTests': 53,
    'tests.functional.modules_i18n_dashboard.TranslatorRoleTests': 2,
    'tests.functional.modules_i18n_dashboard.SampleCourseLocalizationTest': 16,
    'tests.functional.modules_i18n_dashboard_jobs.BaseJobTest': 9,
    'tests.functional.modules_i18n_dashboard_jobs.DeleteTranslationsTest': 3,
    'tests.functional.modules_i18n_dashboard_jobs.DownloadTranslationsTest': 5,
    'tests.functional.modules_i18n_dashboard_jobs.RoundTripTest': 1,
    'tests.functional.modules_i18n_dashboard_jobs'
    '.TranslateToReversedCaseTest': 1,
    'tests.functional.modules_i18n_dashboard_jobs.UploadTranslationsTest': 5,
    'tests.functional.modules_invitation.InvitationHandlerTests': 16,
    'tests.functional.modules_invitation.ProfileViewInvitationTests': 5,
    'tests.functional.modules_invitation.SantitationTests': 1,
    'tests.functional.modules_manual_progress.ManualProgressTest': 24,
    'tests.functional.modules_math.MathTagTests': 3,
    'tests.functional.modules_notifications.CronTest': 9,
    'tests.functional.modules_notifications.DatetimeConversionTest': 1,
    'tests.functional.modules_notifications.ManagerTest': 31,
    'tests.functional.modules_notifications.NotificationTest': 8,
    'tests.functional.modules_notifications.PayloadTest': 6,
    'tests.functional.modules_notifications.SerializedPropertyTest': 2,
    'tests.functional.modules_notifications.StatsTest': 2,
    'tests.functional.modules_oeditor.ObjectEditorTest': 4,
    'tests.functional.modules_questionnaire.QuestionnaireDataSourceTests': 2,
    'tests.functional.modules_questionnaire.QuestionnaireTagTests': 3,
    'tests.functional.modules_questionnaire.QuestionnaireRESTHandlerTests': 5,
    'tests.functional.modules_rating.ExtraContentProvideTests': 4,
    'tests.functional.modules_rating.RatingHandlerTests': 15,
    'tests.functional.modules_search.SearchTest': 12,
    'tests.functional.modules_skill_map.CountSkillCompletionsTests': 3,
    'tests.functional.modules_skill_map.LocationListRestHandlerTests': 2,
    'tests.functional.modules_skill_map.SkillAggregateRestHandlerTests': 6,
    'tests.functional.modules_skill_map.SkillCompletionTrackerTests': 6,
    'tests.functional.modules_skill_map.SkillGraphTests': 11,
    'tests.functional.modules_skill_map.SkillI18nTests': 5,
    'tests.functional.modules_skill_map.SkillMapAnalyticsTabTests': 2,
    'tests.functional.modules_skill_map.SkillMapHandlerTests': 4,
    'tests.functional.modules_skill_map.SkillMapMetricTests': 10,
    'tests.functional.modules_skill_map.SkillMapTests': 4,
    'tests.functional.modules_skill_map.SkillRestHandlerTests': 12,
    'tests.functional.modules_skill_map.StudentSkillViewWidgetTests': 6,
    'tests.functional.modules_unsubscribe.GetUnsubscribeUrlTests': 1,
    'tests.functional.modules_unsubscribe.SubscribeAndUnsubscribeTests': 4,
    'tests.functional.modules_unsubscribe.UnsubscribeHandlerTests': 4,
    'tests.functional.modules_usage_reporting.ConsentBannerTests': 4,
    'tests.functional.modules_usage_reporting.ConsentBannerRestHandlerTests': 3,
    'tests.functional.modules_usage_reporting.ConfigTests': 3,
    'tests.functional.modules_usage_reporting.CourseCreationTests': 4,
    'tests.functional.modules_usage_reporting.DevServerTests': 2,
    'tests.functional.modules_usage_reporting.EnrollmentTests': 3,
    'tests.functional.modules_usage_reporting.MessagingTests': 8,
    'tests.functional.modules_usage_reporting.UsageReportingTests': 3,
    'tests.functional.progress_percent.ProgressPercent': 4,
    'tests.functional.review_module.ManagerTest': 55,
    'tests.functional.review_peer.ReviewStepTest': 3,
    'tests.functional.review_peer.ReviewSummaryTest': 5,
    'tests.functional.student_answers.StudentAnswersAnalyticsTest': 1,
    'tests.functional.student_labels.StudentLabelsTest': 32,
    'tests.functional.student_last_location.NonRootCourse': 9,
    'tests.functional.student_last_location.RootCourse': 3,
    'tests.functional.student_tracks.StudentTracksTest': 10,
    'tests.functional.review_stats.PeerReviewAnalyticsTest': 1,
    'tests.functional.roles.RolesTest': 24,
    'tests.functional.upload_module.TextFileUploadHandlerTestCase': 8,
    'tests.functional.test_classes.ActivityTest': 2,
    'tests.functional.test_classes.AdminAspectTest': 9,
    'tests.functional.test_classes.AssessmentTest': 2,
    'tests.functional.test_classes.CourseAuthorAspectTest': 4,
    'tests.functional.test_classes.CourseAuthorCourseCreationTest': 1,
    'tests.functional.test_classes.CourseUrlRewritingTest': 44,
    'tests.functional.test_classes.DatastoreBackedCustomCourseTest': 6,
    'tests.functional.test_classes.DatastoreBackedSampleCourseTest': 44,
    'tests.functional.test_classes.EtlMainTestCase': 42,
    'tests.functional.test_classes.EtlRemoteEnvironmentTestCase': 0,
    'tests.functional.test_classes.InfrastructureTest': 21,
    'tests.functional.test_classes.I18NTest': 2,
    'tests.functional.test_classes.LessonComponentsTest': 2,
    'tests.functional.test_classes.MemcacheTest': 65,
    'tests.functional.test_classes.MultipleCoursesTest': 1,
    'tests.functional.test_classes.NamespaceTest': 2,
    'tests.functional.test_classes.StaticHandlerTest': 1,
    'tests.functional.test_classes.StudentAspectTest': 19,
    'tests.functional.test_classes.StudentUnifiedProfileTest': 19,
    'tests.functional.test_classes.TransformsEntitySchema': 1,
    'tests.functional.test_classes.TransformsJsonFileTestCase': 3,
    'tests.functional.test_classes.VirtualFileSystemTest': 44,
    'tests.functional.test_classes.ImportActivityTests': 7,
    'tests.functional.test_classes.ImportAssessmentTests': 3,
    'tests.functional.test_classes.ImportGiftQuestionsTests': 1,
    'tests.functional.unit_assessment.UnitPrePostAssessmentTest': 17,
    'tests.functional.unit_description.UnitDescriptionsTest': 1,
    'tests.functional.unit_header_footer.UnitHeaderFooterTest': 11,
    'tests.functional.unit_on_one_page.UnitOnOnePageTest': 3,
    'tests.functional.whitelist.WhitelistTest': 12,
    'tests.integration.test_classes': 19,
    'tests.unit.etl_mapreduce.HistogramTests': 5,
    'tests.unit.etl_mapreduce.FlattenJsonTests': 4,
    'tests.unit.common_catch_and_log.CatchAndLogTests': 6,
    'tests.unit.common_locales.LocalesTests': 2,
    'tests.unit.common_locales.ParseAcceptLanguageTests': 6,
    'tests.unit.common_resource.ResourceKeyTests': 3,
    'tests.unit.common_schema_fields.SchemaFieldTests': 4,
    'tests.unit.common_schema_fields.FieldArrayTests': 3,
    'tests.unit.common_schema_fields.FieldRegistryTests': 7,
    'tests.unit.common_safe_dom.NodeListTests': 4,
    'tests.unit.common_safe_dom.TextTests': 2,
    'tests.unit.common_safe_dom.ElementTests': 17,
    'tests.unit.common_safe_dom.ScriptElementTests': 3,
    'tests.unit.common_safe_dom.EntityTests': 11,
    'tests.unit.common_tags.CustomTagTests': 13,
    'tests.unit.common_utils.CommonUnitTests': 11,
    'tests.unit.common_utils.ZipAwareOpenTests': 2,
    'tests.unit.javascript_tests.AllJavaScriptTests': 9,
    'tests.unit.models_analytics.AnalyticsTests': 5,
    'tests.unit.models_courses.WorkflowValidationTests': 13,
    'tests.unit.models_transforms.JsonToDictTests': 13,
    'tests.unit.models_transforms.JsonParsingTests': 3,
    'tests.unit.models_transforms.StringValueConversionTests': 2,
    'tests.unit.modules_dashboard.TabTests': 6,
    'tests.unit.modules_search.ParserTests': 10,
    'tests.unit.test_classes.DeepDictionaryMergeTest': 5,
    'tests.unit.test_classes.EtlRetryTest': 3,
    'tests.unit.test_classes.InvokeExistingUnitTest': 5,
    'tests.unit.test_classes.ReviewModuleDomainTests': 1,
    'tests.unit.test_classes.SuiteTestCaseTest': 3,
    'tests.unit.gift_parser_tests.SampleQuestionsTest': 1,
    'tests.unit.gift_parser_tests.TestEssayAndNumericQuestion': 4,
    'tests.unit.gift_parser_tests.TestMatchQuestion': 3,
    'tests.unit.gift_parser_tests.TestMissingWordQuestion': 2,
    'tests.unit.gift_parser_tests.TestShortAnswerQuestion': 3,
    'tests.unit.gift_parser_tests.TestTrueFalseQuestion': 2,
    'tests.unit.gift_parser_tests.TestMultiChoiceMultipleSelectionQuestion': 3,
    'tests.unit.gift_parser_tests.TestHead': 2,
    'tests.unit.gift_parser_tests.TestMultiChoiceQuestion': 5,
    'tests.unit.gift_parser_tests.TestCreateManyGiftQuestion': 1
}
# Suites that are disproportionately slow to run; skipped when callers pass
# skip_expensive_tests=True to run_tests()/run_all_tests().
EXPENSIVE_TESTS = ['tests.integration.test_classes']

# Shared log buffer appended to by worker threads; guarded by LOG_LOCK.
LOG_LINES = []
LOG_LOCK = threading.Lock()
def log(message):
    """Appends a timestamped line to LOG_LINES and echoes it to stdout.

    Thread-safe: the shared buffer is mutated under LOG_LOCK, since worker
    threads log concurrently.

    Args:
        message: anything convertible to string.
    """
    with LOG_LOCK:
        line = '%s\t%s' % (
            datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), message)
        LOG_LINES.append(line)
        # Parenthesized single-argument print is valid in both Python 2
        # (as an expression) and Python 3 (as a function call).
        print(line)
def all_third_party_tests():
    """Loads extra suites declared in scripts/third_party_tests.yaml.

    The file is (re)written by scripts/modules.py when extension modules
    declaring tests are installed.

    Returns:
        dict mapping test class name -> expected test count; empty dict if
        no such file exists.
    """
    yaml_path = os.path.join(os.path.dirname(__file__),
                             'third_party_tests.yaml')
    if not os.path.exists(yaml_path):
        return {}
    with open(yaml_path) as fp:
        # safe_load: the plain yaml.load can construct arbitrary Python
        # objects from tagged YAML; this file only ever holds a plain map.
        data = yaml.safe_load(fp)
    return data['tests']
def run(exe, strict=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        verbose=False):
    """Runs a shell command and captures the stdout and stderr output.

    Args:
        exe: list of argv strings for the child process.
        strict: if True, raise Exception when the child exits non-zero.
        stdout, stderr: passed through to subprocess.Popen.
        verbose: if True, echo the child's combined output when it fails.

    Returns:
        Tuple of (returncode, combined stdout + stderr output).

    Raises:
        Exception: on non-zero exit when strict is True.
    """
    p = subprocess.Popen(exe, stdout=stdout, stderr=stderr)
    last_stdout, last_stderr = p.communicate()
    # Concatenate the two streams directly. The previous code iterated each
    # stream character-by-character only to join the characters back into
    # the same string. Built up this way (no '' literal mixed in), the code
    # also survives communicate() returning bytes.
    result = ''
    for piece in (last_stdout, last_stderr):
        if piece:
            result = result + piece if result else piece
    if p.returncode != 0 and verbose and 'KeyboardInterrupt' not in result:
        exe_string = ' '.join(exe)
        print('#########vvvvv########### Start of output from >>>%s<<< ' % (
            exe_string))
        print(result)
        print('#########^^^^^########### End of output from >>>%s<<<' % (
            exe_string))
    if p.returncode != 0 and strict:
        raise Exception('Error %s\n%s' % (p.returncode, result))
    return p.returncode, result
class TaskThread(threading.Thread):
    """Runs a task in a separate thread."""

    def __init__(self, func, name=None):
        super(TaskThread, self).__init__()
        self.func = func
        self.exception = None
        self.name = name

    @classmethod
    def execute_task_list(
        cls, tasks,
        chunk_size=None, runtimes_sec=None, fail_on_first_error=False):
        """Runs tasks, at most chunk_size at a time, until all complete.

        Args:
            tasks: list of TaskThread instances to execute.
            chunk_size: maximum number of tasks running concurrently;
                defaults to running all of them at once.
            runtimes_sec: optional output list; receives each task's
                runtime in seconds, in the same order as `tasks`.
            fail_on_first_error: if True, raise as soon as any task fails
                instead of collecting errors and raising at the end.

        Raises:
            Exception: if any task failed.
        """
        if chunk_size is None:
            chunk_size = len(tasks)
        assert chunk_size > 0
        assert chunk_size < 256
        if runtimes_sec is None:
            runtimes_sec = []
        errors = []
        todo = list(tasks)
        running = set()
        # Maps task -> (start_time, last_observed_time) in epoch seconds.
        task_to_runtime_sec = {}

        def on_error(error, task):
            # Record the failure; abort the whole run if so requested.
            errors.append(error)
            log(Exception(error))
            log('Failed task: %s.' % task.name)
            if fail_on_first_error:
                raise Exception(error)

        def update_progress():
            log(
                'Progress so far: '
                '%s failed, %s completed, %s running, %s pending.' % (
                    len(errors), len(tasks) - len(todo) - len(running),
                    len(running), len(todo)))

        last_update_on = 0
        while todo or running:
            # update progress, at most once every update_frequency_sec
            now = time.time()
            update_frequency_sec = 15
            if now - last_update_on > update_frequency_sec:
                last_update_on = now
                update_progress()

            # check status of running jobs
            if running:
                for task in list(running):
                    task.join(1)
                    # is_alive() replaces isAlive(), which is deprecated
                    # and removed in modern Python; both exist in 2.6+.
                    if task.is_alive():
                        start, end = task_to_runtime_sec[task]
                        now = time.time()
                        if now - end > 60:
                            # Warn about a long-running task roughly once
                            # a minute.
                            log('Waiting over %ss for: %s' % (
                                int(now - start), task.name))
                            task_to_runtime_sec[task] = (start, now)
                        continue
                    if task.exception:
                        on_error(task.exception, task)
                    start, _ = task_to_runtime_sec[task]
                    now = time.time()
                    task_to_runtime_sec[task] = (start, now)
                    running.remove(task)

            # submit new work up to the concurrency limit
            while len(running) < chunk_size and todo:
                task = todo.pop(0)
                running.add(task)
                now = time.time()
                task_to_runtime_sec[task] = (now, now)
                task.start()

        update_progress()
        if errors:
            raise Exception('There were %s errors' % len(errors))

        # format runtimes, in the original task order
        for task in tasks:
            start, end = task_to_runtime_sec[task]
            runtimes_sec.append(end - start)

    def run(self):
        """Thread entry point; captures any exception for the caller."""
        try:
            self.func()
        except Exception as e:  # pylint: disable=broad-except
            self.exception = e
class FunctionalTestTask(object):
    """Executes a set of tests given a test class name."""

    def __init__(self, test_class_name, verbose):
        self.test_class_name = test_class_name
        self.verbose = verbose

    def run(self):
        """Runs the suite via scripts/test.sh, capturing combined output.

        Raises:
            Exception: if the test subprocess exits non-zero.
        """
        if self.verbose:
            log('Running all tests in: %s.' % (self.test_class_name))
        test_sh = os.path.join(os.path.dirname(__file__), 'test.sh')
        # NOTE: run() here is the module-level shell helper, not this method.
        result, self.output = run(
            ['sh', test_sh, self.test_class_name],
            verbose=self.verbose)
        if result != 0:
            # Name the failing suite so the error is identifiable in logs;
            # previously this raised a message-less Exception().
            raise Exception('Test suite failed: %s' % self.test_class_name)
def setup_all_dependencies():
    """Setup all third party Python packages by running scripts/common.sh."""
    common_sh = os.path.join(os.path.dirname(__file__), 'common.sh')
    # strict=True already makes run() raise on non-zero exit, so this check
    # is a belt-and-braces guard; give it a useful message regardless.
    result, output = run(['sh', common_sh], strict=True)
    if result != 0:
        raise Exception('Dependency setup failed: sh %s' % common_sh)
    for line in output.split('\n'):
        if not line:
            continue
        # ignore garbage produced by the script; it proven impossible to fix
        # the script to avoid garbage from being produced
        if 'grep: write error' in line or 'grep: writing output' in line:
            continue
        log(line)
def chunk_list(l, n):
    """Yield successive n-sized chunks from l.

    The final chunk may be shorter than n. Uses range() rather than the
    Python-2-only xrange(); behavior is identical and it keeps this helper
    portable to Python 3.
    """
    for i in range(0, len(l), n):
        yield l[i:i + n]
def run_all_tests(skip_expensive_tests, verbose, setup_deps=True):
    """Runs all functional tests concurrently.

    Combines the statically-known suites with any suites contributed by
    installed third-party modules, then delegates to run_tests().
    """
    test_classes = dict(ALL_TEST_CLASSES)
    test_classes.update(all_third_party_tests())
    run_tests(test_classes, skip_expensive_tests, verbose, setup_deps)
def run_tests(test_classes, skip_expensive_tests, verbose, setup_deps=True):
    """Runs the given suites concurrently and verifies their test counts.

    Args:
        test_classes: dict of suite name -> expected number of tests.
        skip_expensive_tests: if True, suites listed in EXPENSIVE_TESTS are
            not run.
        verbose: passed through to each FunctionalTestTask.
        setup_deps: if True, install third-party dependencies first.

    Raises:
        Exception: if any suite fails, or a suite's output does not report
            the expected number of passing tests.
    """
    start = time.time()
    task_to_test = {}
    tasks = []
    # Prepare tasks
    for test_class_name in test_classes:
        if skip_expensive_tests and test_class_name in EXPENSIVE_TESTS:
            continue
        test = FunctionalTestTask(test_class_name, verbose)
        task = TaskThread(test.run, name='testing %s' % test_class_name)
        task_to_test[task] = test
        tasks.append(task)

    # order tests by their size largest to smallest, so long-running suites
    # start early and overall wall-clock time is minimized
    tasks = sorted(
        tasks,
        key=lambda task: test_classes.get(task_to_test[task].test_class_name),
        reverse=True)

    # setup dependencies
    if setup_deps:
        setup_all_dependencies()

    # execute all tasks, at most 16 concurrently
    log('Executing all %s test suites' % len(tasks))
    runtimes_sec = []
    TaskThread.execute_task_list(
        tasks, chunk_size=16, runtimes_sec=runtimes_sec)

    # map durations to names (runtimes_sec is parallel to tasks)
    name_durations = []
    for index, duration in enumerate(runtimes_sec):
        name_durations.append((
            round(duration, 2), task_to_test[tasks[index]].test_class_name))

    # report all longest first
    log('Reporting execution times for 10 longest tests')
    for duration, name in sorted(
        name_durations, key=lambda name_duration: name_duration[0],
        reverse=True)[:10]:
        log('Took %ss for %s' % (int(duration), name))

    # Check we ran all tests as expected.
    total_count = 0
    for task in tasks:
        test = task_to_test[task]
        # Check that no unexpected tests were picked up via automatic
        # discovery, and that the number of tests run in a particular
        # suite.py invocation matches the expected number of tests.
        test_count = test_classes.get(test.test_class_name, None)
        expected_text = 'INFO: All %s tests PASSED!' % test_count
        if test_count is None:
            log('%s\n\nERROR: ran unexpected test class %s' % (
                test.output, test.test_class_name))
        if expected_text not in test.output:
            log('%s\n\nERROR: Expected %s tests to be run for the test class '
                '%s, but found some other number.' % (
                    test.output, test_count, test.test_class_name))
            raise Exception()
        total_count += test_count

    log('Ran %s tests in %s test classes; took %ss' % (
        total_count, len(tasks), int(time.time() - start)))
def run_lint(source_dir=None):
    """Runs scripts/pylint.sh; returns True iff it exited cleanly."""
    # Wire outputs to our own stdout/stderr so messages appear immediately,
    # rather than batching up and waiting for the end (linting takes a while)
    path = 'scripts/pylint.sh'
    if source_dir:
        path = os.path.join(source_dir, path)
    status = subprocess.call(
        path, stdin=None, stdout=sys.stdout, stderr=sys.stderr)
    return status == 0
def main():
    """Lints the code base, then runs every test suite."""
    lint_passed = run_lint()
    if not lint_passed:
        raise RuntimeError('Lint checks failed; tests not run.')
    run_all_tests(False, True)


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage fetching and installation of extension modules to CourseBuilder.
To run, use the wrapper script in this directory since it configures paths for
you:
sh scripts/modules.sh [args]
For example, to bring in the LTI module you can run
sh scripts/modules.sh \
--targets=lti@https://github.com/google/coursebuilder-lti-module
"""
__author__ = 'Mike Gainer (mgainer@google.com)'
import argparse
import collections
import logging
import os
import subprocess
import sys
import time
from common import yaml_files
# Standard name of manifest file within a module.
_MANIFEST_NAME = 'module.yaml'
# Number of attempts to kill wayward subprocesses before giving up entirely.
_KILL_ATTEMPTS = 5
# Command line flags supported
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'--targets', default=[], type=lambda s: s.split(','),
help=(
'List of modules to use. Multiple modules may be listed separated by '
'commas. If a module has already been downloaded, or is on the '
'list of well-known modules (see scripts/module_config.py source), '
'then the module source need not be provided. If the module needs '
'to be downloaded, then the name of the module should be followed by '
'an "@" character, and then the URL at which the module is '
'available. E.g., '
'--modules=example@https://github.com/my-company/my_example_module'
))
# Logging.
_LOG = logging.getLogger('coursebuilder.models.module_config')
logging.basicConfig()
_LOG.setLevel(logging.INFO)
# Convenience types with just data, no behavior.
WellKnownModule = collections.namedtuple(
'WellKnownModule', ['name', 'method', 'location'])
# List of modules for which we already know the source location.
_WELL_KNOWN_MODULES = {
'lti': WellKnownModule(
'lti', 'git',
'https://github.com/google/coursebuilder-lti-module'),
'xblock': WellKnownModule(
'xblock', 'git',
'https://github.com/google/coursebuilder_xblock_module'),
}
def _die(message):
    """Logs `message` at critical severity, then aborts by raising."""
    _LOG.critical(message)
    raise Exception(message)
def _assert_path_exists(path, message):
if not os.path.exists(path):
_die(message)
def _run_process(args, patience_seconds=10):
    """Runs `args` as a subprocess, killing it if it takes too long.

    Polls the child for up to patience_seconds; past that deadline it starts
    issuing kill() calls, giving up entirely _KILL_ATTEMPTS seconds later.

    Args:
        args: list of argv strings for the child process.
        patience_seconds: seconds to wait before starting to kill the child.

    Raises:
        Exception: (via _die) on non-zero exit or failure to terminate.
    """
    proc = subprocess.Popen(args)
    cmdline = ' '.join(args)
    start = time.time()
    max_expected = start + patience_seconds
    absolute_max = start + patience_seconds + _KILL_ATTEMPTS
    # NOTE(review): this loop has no sleep, so it busy-polls the child
    # until it exits or the absolute deadline passes.
    while time.time() < absolute_max:
        if time.time() > max_expected:
            proc.kill()
        proc.poll()
        if proc.returncode is not None:
            if proc.returncode == 0:
                return
            else:
                _die('The command "%s" completed with exit code %d. Please '
                     'run that command manually, ascertain and remedy the '
                     'problem, and try again.' % (cmdline, proc.returncode))
                # Unreachable in practice: _die() raises.
                sys.exit(1)
    _die('The command "%s" failed to complete after %d seconds, and '
         '%d attempts to kill it. You should manually kill the process. '
         'Please run that command manually and ascertain and remedy the '
         'problem.' % (cmdline, int(time.time() - start), _KILL_ATTEMPTS))
def _download_if_needed(name, location, module_install_dir):
    """Fetches a module into module_install_dir unless already present.

    Presence is detected by the module's manifest file; after downloading,
    the manifest's existence is verified.
    """
    manifest_path = os.path.join(module_install_dir, _MANIFEST_NAME)
    if os.path.exists(manifest_path):
        return  # Already downloaded; nothing to do.

    _LOG.info('Downloading module %s', name)
    parent_dir = os.path.dirname(module_install_dir)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)

    well_known = _WELL_KNOWN_MODULES.get(name)
    if well_known is not None:
        method = well_known.method
        if not location:
            location = well_known.location
    else:
        if not location:
            _die('Module "%s" needs to be downloaded, but its location was '
                 'not provided on the command line.' % name)
        method = _infer_method_from_location(location)

    if method == 'git':
        _run_process(['git', 'clone', location, module_install_dir])
    elif method == 'cp-r':
        _run_process(['cp', '-r', location, module_install_dir])
    else:
        _die('We would like to download module "%s" ' % name +
             'from location "%s", ' % location +
             'but no implementation for downloading via %s ' % method +
             'has been implemented as yet.')

    _assert_path_exists(
        manifest_path,
        'Modules are expected to contain ' +
        'a manifest file named "%s" ' % _MANIFEST_NAME +
        'in their root directory when installed. ' +
        'Module %s at path %s does not. ' % (name, manifest_path))
def _infer_method_from_location(location):
# Not terribly sophisticated. When modules start showing up at places
# other than github, we can make this a little smarter.
if location.startswith('https://github.com/'):
return 'git'
# For testing, and/or pulling in not-really-third party stuff from
# elsewhere in a local work environment.
if (location.startswith('/tmp/') or
location.startswith('/var/folders/') or
location.startswith('../')):
return 'cp-r'
return 'unknown'
def _install_if_needed(app_yaml, name, module_install_dir, coursebuilder_home):
    """Runs a module's setup.sh unless the module is already installed.

    Args:
        app_yaml: parsed app.yaml wrapper, used for version checking.
        name: module name.
        module_install_dir: directory the module was downloaded to.
        coursebuilder_home: root of the Course Builder installation.

    Returns:
        The module's parsed ModuleManifest.
    """
    # Verify version compatibility before attempting installation.
    coursebuilder_version = app_yaml.get_env('GCB_PRODUCT_VERSION')
    module = yaml_files.ModuleManifest(
        os.path.join(module_install_dir, _MANIFEST_NAME))
    module.assert_version_compatibility(coursebuilder_version)

    # This is the best we can do as far as verifying that a module has been
    # installed. Modules have quite a bit of free rein as far as what-all
    # they may or may not do -- setting up $CB/modules/<modulename> is the
    # only hard requirement. Note that this may even be a softlink, so
    # we just test for existence, not is-a-directory.
    if os.path.exists(os.path.join(coursebuilder_home, 'modules', name)):
        return module

    _LOG.info('Installing module %s', name)

    # Verify setup script exists and give a nice error message if not (rather
    # than letting _run_process emit an obscure error).
    install_script_path = os.path.join(
        module_install_dir, 'scripts', 'setup.sh')
    _assert_path_exists(
        install_script_path,
        'Modules are expected to provide a script to perform installation of '
        'the module at <module-root>/scripts/setup.sh No such file was found '
        'in module %s' % name)

    # Have $PWD set to the install directory for the module when calling
    # setup.sh, just in case the setup script needs to discover its own
    # location in order to set up softlinks.
    cwd = os.getcwd()
    try:
        os.chdir(module_install_dir)
        _run_process(['bash', install_script_path, '-d', coursebuilder_home])
    finally:
        os.chdir(cwd)

    # (A redundant recomputation of install_script_path, with a stale
    # copy-pasted comment, used to live here; it was dead code and has been
    # removed.)
    init_file_path = os.path.join(
        coursebuilder_home, 'modules', name, '__init__.py')
    _assert_path_exists(
        init_file_path,
        'After installing module %s, there should have been an __init__.py '
        'file present at the path %s, but there was not.' % (
            name, init_file_path))
    return module
def _update_appengine_libraries(app_yaml, modules):
for module in modules:
for lib in module.appengine_libraries:
app_yaml.require_library(lib['name'], lib['version'])
def _construct_third_party_libraries(modules):
libs_str_parts = []
libs = {}
for module in modules:
for lib in module.third_party_libraries:
name = lib['name']
internal_path = lib.get('internal_path')
if lib['name'] in libs:
if internal_path != libs[name]:
raise ValueError(
'Module %s ' % module.module_name +
'specifies third party library "%s" ' % name +
'with internal path "%s" ' % internal_path +
'but this is incompatible with the '
'already-specified internal path "%s"' % libs[name])
else:
if internal_path:
libs_str_parts.append(' %s:%s' % (name, internal_path))
else:
libs_str_parts.append(' %s' % name)
return ''.join(libs_str_parts)
def _update_third_party_libraries(app_yaml, modules):
    """Records the modules' third-party libraries in app.yaml's env block."""
    app_yaml.set_env(
        'GCB_THIRD_PARTY_LIBRARIES', _construct_third_party_libraries(modules))
def _update_enabled_modules(app_yaml, modules):
new_val = ' '.join([module.main_module for module in modules])
app_yaml.set_env('GCB_THIRD_PARTY_MODULES', new_val)
def _update_tests(coursebuilder_home, modules):
tests = {}
for module in modules:
tests.update(module.tests)
yaml_path = os.path.join(coursebuilder_home, 'scripts',
'third_party_tests.yaml')
if tests:
_LOG.info('Updating scripts/third_party_tests.yaml')
with open(yaml_path, 'w') as fp:
fp.write('tests:\n')
for test in sorted(tests):
fp.write(' %s: %d\n' % (test, tests[test]))
else:
if os.path.exists(yaml_path):
os.unlink(yaml_path)
def main(args, coursebuilder_home, modules_home):
    """Downloads/installs each requested module, then updates app.yaml."""
    app_yaml = yaml_files.AppYamlFile(
        os.path.join(coursebuilder_home, 'app.yaml'))

    installed = []
    for target in args.targets:
        # A target is 'name' or 'name@location'.
        pieces = target.split('@')
        name = pieces[0]
        location = pieces[1] if len(pieces) > 1 else None
        install_dir = os.path.join(modules_home, name)
        _download_if_needed(name, location, install_dir)
        installed.append(_install_if_needed(
            app_yaml, name, install_dir, coursebuilder_home))

    _update_tests(coursebuilder_home, installed)

    _LOG.info('Updating app.yaml')
    _update_appengine_libraries(app_yaml, installed)
    _update_third_party_libraries(app_yaml, installed)
    _update_enabled_modules(app_yaml, installed)
    app_yaml.write()

    if app_yaml.application == 'mycourse':
        _LOG.warning('The application name in app.yaml is "mycourse". You '
                     'should change this from its default value before '
                     'uploading to AppEngine.')
if __name__ == '__main__':
    # Entry point; the environment variables are set by the
    # scripts/modules.sh wrapper mentioned in the module docstring.
    main(PARSER.parse_args(),
         os.environ['COURSEBUILDER_HOME'],
         os.environ['MODULES_HOME'])
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom configurations and functions for Google App Engine."""
__author__ = 'psimakov@google.com (Pavel Simakov)'
import datetime
import importlib
import logging
import os
import sys
# configure Appstats
# NOTE(review): the lowercase 'appstats_' prefix appears to be the naming
# scheme App Engine reads Appstats settings by from this module -- confirm
# before renaming.
appstats_MAX_STACK = 20

# Whether we are running in the production environment.
PRODUCTION_MODE = not os.environ.get(
    'SERVER_SOFTWARE', 'Development').startswith('Development')

# Set this flag to true to enable bulk downloads of Javascript/CSS files in lib
BUNDLE_LIB_FILES = True

# this is the official location of this app for computing of all relative paths
BUNDLE_ROOT = os.path.dirname(__file__)

# make all Windows and Linux paths have the same separator '/'
BUNDLE_ROOT = BUNDLE_ROOT.replace('\\', '/')
CODE_ROOT = BUNDLE_ROOT

# Default namespace name is '' and not None.
DEFAULT_NAMESPACE_NAME = ''
class _Library(object):
    """Value object describing a Python library shipped as a .zip file."""

    def __init__(self, zipfile, relative_path=None):
        self._zipfile = zipfile
        self._relative_path = relative_path

    @property
    def file_path(self):
        """Absolute location of the library's archive on disk."""
        return os.path.join(BUNDLE_ROOT, 'lib', self._zipfile)

    @property
    def full_path(self):
        """Import path; descends into the archive when a sub-path is set."""
        if self._relative_path:
            return os.path.join(self.file_path, self._relative_path)
        return self.file_path
# Third-party library zip files.
# Each entry lives under lib/<zipfile>; relative_path points inside archives
# whose importable package does not sit at the archive root.
THIRD_PARTY_LIBS = [
    _Library('babel-0.9.6.zip'),
    _Library('html5lib-0.95.zip'),
    _Library('httplib2-0.8.zip', relative_path='httplib2-0.8/python2'),
    _Library('gaepytz-2011h.zip'),
    _Library(
        'google-api-python-client-1.1.zip',
        relative_path='google-api-python-client-1.1'),
    _Library('mapreduce-r645.zip'),
    _Library('markdown-2.5.zip', relative_path='Markdown-2.5'),
    _Library('mrs-mapreduce-0.9.zip', relative_path='mrs-mapreduce-0.9'),
    _Library('python-gflags-2.0.zip', relative_path='python-gflags-2.0'),
    _Library('oauth-1.0.1.zip', relative_path='oauth'),
    _Library('pyparsing-1.5.7.zip'),
    _Library('networkx-1.9.1.zip', relative_path='networkx-1.9.1'),
    _Library('decorator-3.4.0.zip', relative_path='src'),
    _Library('reportlab-3.1.8.zip'),
]
def gcb_force_default_encoding(encoding):
    """Force the interpreter's default encoding to a specific value.

    Eclipse silently sets the default encoding to 'utf-8' while GAE forces
    'ascii'; pinning it explicitly keeps behavior consistent everywhere.
    """
    if sys.getdefaultencoding() == encoding:
        return
    # reload() restores sys.setdefaultencoding(), which site.py removes.
    reload(sys)
    sys.setdefaultencoding(encoding)
def _third_party_libs_from_env():
    """Build _Library entries from the GCB_THIRD_PARTY_LIBRARIES env var.

    Each whitespace-separated item is either "<zipfile>" or
    "<zipfile>:<relative_path>".
    """
    libs = []
    for spec in os.environ.get('GCB_THIRD_PARTY_LIBRARIES', '').split():
        parts = spec.split(':')
        if len(parts) > 1:
            libs.append(_Library(parts[0], relative_path=parts[1]))
        else:
            libs.append(_Library(parts[0]))
    return libs
def gcb_init_third_party():
    """Add all third party libraries to the system path.

    Raises:
        Exception: if a configured library archive is missing on disk.
    """
    all_libs = THIRD_PARTY_LIBS + _third_party_libs_from_env()
    for lib in all_libs:
        if not os.path.exists(lib.file_path):
            raise Exception('Library does not exist: %s' % lib.file_path)
        sys.path.insert(0, lib.full_path)
def gcb_appstats_enabled():
    """Whether Appstats recording was requested via the environment."""
    return os.environ.get('GCB_APPSTATS_ENABLED') == 'True'
def webapp_add_wsgi_middleware(app):
    """Wrap the WSGI app with AppStats recording when it is enabled."""
    if not gcb_appstats_enabled():
        return app
    logging.info('Enabling AppStats.')
    # Imported lazily so the appstats package is only touched when needed.
    from google.appengine.ext.appstats import recording
    return recording.appstats_wsgi_middleware(app)
def _import_and_enable_modules(env_var, reraise=False):
for module_name in os.environ.get(env_var, '').split():
option = 'enabled'
if module_name.count('='):
module_name, option = module_name.split('=', 1)
try:
operation = 'importing'
module = importlib.import_module(module_name)
operation = 'registering'
custom_module = module.register_module()
if option is 'enabled':
operation = 'enabling'
custom_module.enable()
except Exception, ex: # pylint: disable=broad-except
logging.exception('Problem %s module "%s"', operation, module_name)
if reraise:
raise ex
def import_and_enable_modules():
    """Import and enable every module registered via environment variables."""
    for env_var in ('GCB_REGISTERED_MODULES',
                    'GCB_REGISTERED_MODULES_CUSTOM',
                    'GCB_THIRD_PARTY_MODULES'):
        _import_and_enable_modules(env_var)
def time_delta_to_millis(delta):
    """Convert a datetime.timedelta into a total number of milliseconds."""
    millis_per_day = 24 * 60 * 60 * 1000
    return (delta.days * millis_per_day
            + delta.seconds * 1000
            + delta.microseconds / 1000)
def timeandlog(name, duration_only=False):
    """Times and logs execution of decorated method.

    Args:
        name: label under which timings are logged and sent to Appstats.
        duration_only: if True, emit a single entry carrying the duration;
            if False, emit separate '.enter' and '.leave' events.

    Returns:
        A decorator. Note that the timing wrapper is only installed when
        Appstats is enabled at decoration time; otherwise the function is
        returned unwrapped, so toggling the flag later has no effect on
        already-decorated functions.
    """
    def timed_1(func):
        def timed_2(*args, **kwargs):
            _name = name
            # When the first argument is a class (e.g. a classmethod
            # receiver), tag the label with that class's name.
            if args and isinstance(args[0], type):
                _name += '.' + str(args[0].__name__)
            before = datetime.datetime.utcnow()
            if not duration_only:
                log_appstats_event(_name + '.enter')
            result = func(*args, **kwargs)
            after = datetime.datetime.utcnow()
            millis = time_delta_to_millis(after - before)
            if duration_only:
                logging.info(_name + ': duration=%sms' % millis)
                log_appstats_event(_name, {'millis': millis})
            else:
                logging.info(_name + '.leave: duration=%sms' % millis)
                log_appstats_event(_name + '.leave', {'millis': millis})
            return result
        if gcb_appstats_enabled():
            return timed_2
        else:
            return func
    return timed_1
def log_appstats_event(label, data=None):
    """Record a custom Appstats event, if Appstats is recording.

    Failures are logged and swallowed: instrumentation must never break
    the request being instrumented.
    """
    if not gcb_appstats_enabled():
        return
    try:
        from google.appengine.ext.appstats.recording import recorder_proxy
        if recorder_proxy and (
                recorder_proxy.has_recorder_for_current_request()):
            recorder_proxy.record_custom_event(label=label, data=data)
    except Exception:  # pylint: disable=broad-except
        logging.exception('Failed to record Appstats event %s.', label)
gcb_init_third_party()
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module providing handlers for running jobs added to task queue in tests."""
__author__ = 'Mike Gainer (mgainer@google.com)'
# The following import is needed in order to configure modules.mapreduce
# before code in this module references it.
import base64
import re
import appengine_config # pylint: disable=unused-import
from common import utils as common_utils
from modules.mapreduce import mapreduce_module
from google.appengine.ext import deferred
class TaskQueueItemHandler(object):
    """Interface for test-time handlers that execute queued task items.

    Subclasses claim a task via matches() and execute it via run(); the
    dispatcher below drives them in order.
    """

    def matches(self, task):
        # Abstract. The message doubles as documentation of the task-dict
        # schema handed to matches()/run().
        raise NotImplementedError(
            'Classes derived from TaskQueueItemHandler are expected to '
            'implement "matches()". This should return True or False.'
            'If True, then the run() method in this class will be called '
            'to process the task. The "task" parameter is a task as '
            'found on the task queue. Tasks are just hashes with the '
            'following keys: '
            '"url": URL to which the content of the task is to be sent. '
            '"body": Opaque string containing a body to be handled by '
            ' the specified URL handler'
            '"name": Name of task on queue. Typically something like '
            ' task1, task2, ...'
            '"eta_delta": Expected completion time as a delta.'
            '"queue_name": Name of queue on which this task was found. '
            ' Typically "default".'
            '"headers": Headers to be sent when the URL is POSTed or GETed.'
            '"eta": Expected completion time.'
            '"eta_usec": Microseconds of expected completion time'
            '"method": GET or POST')

    def run(self, task):
        # Abstract: execute the task, by invoking the handler bound to the
        # task's URL or by simulating its effect.
        raise NotImplementedError(
            'Classes derived from TaskQueueItemHandler are expected to '
            'implement "run()". This method should operate to invoke '
            'the handler associated with the URL in the task, or if that '
            'handler is not available, perform the equivalent operations.')
class DeferredTaskQueueItemHandler(TaskQueueItemHandler):
    """Simulate operation of the '/_ah/queue/deferred' handler.

    The 'deferred' library is a wrapper on top of AppEngine task queues.
    In testing environments, this handler is not registered, and we need
    to simulate its operation.
    """

    def matches(self, task):
        # Only deferred-library payloads are posted to this URL.
        return '/_ah/queue/deferred' == task['url']

    def run(self, task):
        # Bodies arrive base64-encoded; deferred.run executes the decoded
        # payload.
        payload = base64.b64decode(task['body'])
        deferred.run(payload)
class MapReduceTaskQueueItemHandler(TaskQueueItemHandler):
    """Pass task queue items through to actually-registered Map/Reduce handlers.

    Map/reduce enqueues a number of tasks to complete a pipeline; these are
    handled as normal POSTs to the registered handler URLs. Running on a
    backleveled AppEngine runtime, we perform some minor conversions to
    bridge compatibility gaps between it and the Map Reduce code.
    """

    def __init__(self, testapp):
        self._testapp = testapp
        # One alternation per registered map/reduce route, with optional
        # trailing slash and query string.
        patterns = ['^' + path + r'/?(\?.*)?$'
                    for path, unused_handler in
                    mapreduce_module.custom_module.global_routes]
        self._mapreduce_path_regex = re.compile('|'.join(patterns))

    def matches(self, task):
        return self._mapreduce_path_regex.search(task['url']) is not None

    def run(self, task):
        header_pairs = task['headers']
        namespace = dict(header_pairs).get('X-AppEngine-Current-Namespace', '')
        body = base64.b64decode(task['body'])
        # Work around unicode/string non-conversion bug in old versions.
        headers = dict((key, str(val)) for key, val in header_pairs)
        headers['Content-Length'] = str(len(body))
        with common_utils.Namespace(namespace):
            response = self._testapp.post(
                url=str(task['url']), params=body, headers=headers)
        if response.status_code != 200:
            raise RuntimeError(
                'Failed calling map/reduce task for url %s; response was %s' % (
                    task['url'],
                    str(response)))
class TaskQueueHandlerDispatcher(object):
    """Routes queued tasks to the first handler that claims them."""

    def __init__(self, testapp, task_queue):
        self._task_queue = task_queue
        # We could establish the list of handlers for task item queues
        # from module setup calls. On the other hand, that would be
        # forcing test-related concerns into production code, so it's
        # simpler to just nail these in by hand here.
        self._handlers = [
            DeferredTaskQueueItemHandler(),
            MapReduceTaskQueueItemHandler(testapp),
        ]

    def dispatch_task(self, task):
        for handler in self._handlers:
            if not handler.matches(task):
                continue
            # Remove the task from the queue before running it, so a
            # handler that enqueues more work does not see stale items.
            self._task_queue.DeleteTask(task['queue_name'], task['name'])
            handler.run(task)
            return
        raise RuntimeError(
            'Did not find any task queue handler to work on url "%s"' %
            task['url'])
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for logger."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import datetime
import unittest
import appengine_config
from common import catch_and_log
class CatchAndLogTests(unittest.TestCase):
    """Tests for catch_and_log.CatchAndLog message capture and levels.

    NOTE(review): test_exception_suppressed and test_exception_propagates
    assert messages embedding a hard-coded source path and line numbers
    ("line 86", "line 100"), so those tests are sensitive to the absolute
    position of their raise statements within the original file.
    """

    def setUp(self):
        # Non-production mode includes traceback details in log entries;
        # test_traceback_info_suppressed_in_production flips this.
        appengine_config.PRODUCTION_MODE = False
        self._catch_and_log = catch_and_log.CatchAndLog()
        # Parallel record of the entries we expect the log to contain.
        self._expected = []

    def test_simple(self):
        """One critical() call yields one critical entry."""
        complaint = 'No cheese, Gromit!'
        self._catch_and_log.critical(complaint)
        self._expect(complaint, catch_and_log._CRITICAL)
        self._assert_logs_match()

    def test_multiple(self):
        """Entries accumulate in the order they were logged."""
        complaints = [
            'Failed to find cheese in pantry',
            'Failed to install cheese to mousetrap',
            'Failed to arm trap',
            'Failed to catch mouse'
        ]
        for complaint in complaints:
            self._catch_and_log.critical(complaint)
            self._expect(complaint, catch_and_log._CRITICAL)
        self._assert_logs_match()

    def test_multiple_levels(self):
        """critical/warn/warning/info all record at their proper level."""
        complaint = 'No cheese, Gromit!'
        self._catch_and_log.critical(complaint)
        self._expect(complaint, catch_and_log._CRITICAL)
        complaint = 'Moon out of range!'
        # warn() and warning() are both expected to record at _WARNING;
        # presumably aliases — confirm in catch_and_log.
        self._catch_and_log.warn(complaint)
        self._expect(complaint, catch_and_log._WARNING)
        complaint = 'Parking brake engaged!'
        self._catch_and_log.warning(complaint)
        self._expect(complaint, catch_and_log._WARNING)
        complaint = 'Five mice spectating'
        self._catch_and_log.info(complaint)
        self._expect(complaint, catch_and_log._INFO)
        complaint = 'Red light blinking'
        self._catch_and_log.info(complaint)
        self._expect(complaint, catch_and_log._INFO)
        complaint = 'Low blinker fluid'
        self._catch_and_log.warning(complaint)
        self._expect(complaint, catch_and_log._WARNING)
        complaint = 'Insufficient fuel for landing!'
        self._catch_and_log.critical(complaint)
        self._expect(complaint, catch_and_log._CRITICAL)
        self._assert_logs_match()

    def test_exception_suppressed(self):
        """consume_exceptions() swallows the error but logs it as critical."""
        topic = 'Entering Orbit'
        complaint = 'Perigee below surface of Moon!'
        with self._catch_and_log.consume_exceptions(topic):
            raise ValueError(complaint)
        # Expected message embeds the traceback location of the raise above.
        self._expect(
            '%s: ValueError: %s at ' % (topic, complaint) +
            'File "/tests/unit/common_catch_and_log.py", line 86, in '
            'test_exception_suppressed\n raise ValueError(complaint)\n',
            catch_and_log._CRITICAL)
        self._assert_logs_match()

    def test_exception_propagates(self):
        """propagate_exceptions() logs the error and re-raises it."""
        topic = 'Entering Orbit'
        complaint = 'Perigee below surface of Moon!'
        with self.assertRaises(ValueError):
            with self._catch_and_log.propagate_exceptions(topic):
                raise ValueError(complaint)
        self._expect(
            '%s: ValueError: %s at ' % (topic, complaint) +
            'File "/tests/unit/common_catch_and_log.py", line 100, in '
            'test_exception_propagates\n raise ValueError(complaint)\n',
            catch_and_log._CRITICAL)
        self._assert_logs_match()

    def test_traceback_info_suppressed_in_production(self):
        """In production mode, entries omit the traceback location."""
        appengine_config.PRODUCTION_MODE = True
        topic = 'Entering Orbit'
        complaint = 'Perigee below surface of Moon!'
        with self._catch_and_log.consume_exceptions(topic):
            raise ValueError(complaint)
        self._expect('%s: ValueError: %s' % (topic, complaint),
                     catch_and_log._CRITICAL)
        self._assert_logs_match()

    def _expect(self, message, level):
        # Timestamp taken now; _assert_logs_match tolerates small drift.
        self._expected.append({
            'message': message,
            'level': level,
            'timestamp': datetime.datetime.now().strftime(
                catch_and_log._LOG_DATE_FORMAT)})

    def _assert_logs_match(self):
        if len(self._expected) != len(self._catch_and_log.get()):
            self.fail('Expected %d entries, but have %d' %
                      (len(self._expected), len(self._catch_and_log.get())))
        for expected, actual in zip(self._expected, self._catch_and_log.get()):
            self.assertEquals(expected['level'], actual['level'])
            self.assertEquals(expected['message'], actual['message'])
            expected_time = datetime.datetime.strptime(
                expected['timestamp'], catch_and_log._LOG_DATE_FORMAT)
            actual_time = datetime.datetime.strptime(
                actual['timestamp'], catch_and_log._LOG_DATE_FORMAT)
            # places=1 allows up to ~0.05s skew between expected and actual.
            self.assertAlmostEqual(
                0, abs((expected_time - actual_time).total_seconds()), 1)
| Python |
"""Unit tests for the common.sanitize module."""
__author__ = 'John Orr (jorr@google.com)'
import unittest
from common import safe_dom
class MockNode(safe_dom.Node):
    """Trivial Node whose sanitized form is a fixed, preset string."""

    def __init__(self, value):
        super(MockNode, self).__init__()
        self._sanitized_value = value

    @property
    def sanitized(self):
        return self._sanitized_value
class NodeListTests(unittest.TestCase):
    """Unit tests for common.safe_dom.NodeList."""

    def test_list(self):
        """NodeList should escape all its members."""
        nodes = safe_dom.NodeList()
        nodes.append(MockNode('a'))
        nodes.append(MockNode('b'))
        self.assertEqual('ab', nodes.sanitized)

    def test_len(self):
        """NodeList should support len."""
        nodes = safe_dom.NodeList()
        nodes.append(MockNode('a')).append(MockNode('b'))
        self.assertEqual(2, len(nodes))

    def test_append_node_list(self):
        """NodeList should support appending both Nodes and NodeLists."""
        inner = safe_dom.NodeList().append(MockNode('a')).append(MockNode('b'))
        nodes = safe_dom.NodeList().append(inner).append(MockNode('c'))
        self.assertEqual('abc', nodes.__str__())

    def test_insert_node_list(self):
        """NodeList should support inserting Nodes."""
        nodes = safe_dom.NodeList()
        nodes.append(MockNode('a'))
        nodes.append(MockNode('c'))
        nodes.insert(1, MockNode('b'))
        self.assertEqual('abc', nodes.__str__())
class TextTests(unittest.TestCase):
    """Unit tests for common.safe_dom.Text.

    NOTE(review): the expected values in this copy had been HTML-entity
    decoded by an export step (they read '<script>' where a sanitizer must
    emit the escaped form); restored to '&lt;script&gt;' — confirm against
    the upstream sources.
    """

    def test_text_sanitizes(self):
        """Text should sanitize unsafe characters."""
        unsafe_string = '<script>'
        text = safe_dom.Text(unsafe_string)
        self.assertEqual('&lt;script&gt;', text.sanitized)

    def test_str_returns_sanitized(self):
        """The __str__ method should return sanitized text."""
        unsafe_string = '<script>'
        text = safe_dom.Text(unsafe_string)
        self.assertEqual('&lt;script&gt;', text.__str__())
class ElementTests(unittest.TestCase):
    """Unit tests for common.safe_dom.Element.

    NOTE(review): several expected strings in this copy had been
    HTML-entity decoded by an export step (one literal was even left
    syntactically invalid); the escaped forms below are reconstructed from
    what the sanitizer is expected to emit — confirm against upstream.
    """

    def test_build_simple_element(self):
        """Element should build an element without attributes or children."""
        element = safe_dom.Element('p')
        self.assertEqual('<p></p>', element.__str__())

    def test_reject_bad_tag_names(self):
        """Element should reject bad tag names."""
        bad_names = ['2a', 'a b', '@', '-q']
        for name in bad_names:
            try:
                safe_dom.Element(name)
            except AssertionError:
                continue
            self.fail('Expected an exception: "%s"' % name)

    def test_reject_bad_attribute_names(self):
        """Element should reject bad attribute names."""
        bad_names = ['2a', 'a b', '@', '-q']
        for name in bad_names:
            try:
                safe_dom.Element('p', **{name: 'good value'})
            except AssertionError:
                continue
            self.fail('Expected an exception: "%s"' % name)

    def test_include_attributes(self):
        """Element should include tag attributes."""
        element = safe_dom.Element('button', style='foo', onclick='action')
        self.assertEqual(
            '<button onclick="action" style="foo"></button>',
            element.__str__())

    def test_escape_quotes(self):
        """Element should escape single and double quote characters."""
        element = safe_dom.Element('a', href='a\'b"c`d')
        # Reconstructed: quotes must come back as character references.
        self.assertEqual(
            '<a href="a&#39;b&quot;c`d"></a>', element.__str__())

    def test_allow_parens(self):
        """Element should allow parentheses in attributes."""
        element = safe_dom.Element('a', action='myAction()')
        self.assertEqual('<a action="myAction()"></a>', element.__str__())

    def test_allow_urls(self):
        """Element should allow urls with a method specified in an attribute."""
        element = safe_dom.Element(
            'a', action='http://a.b.com/d/e/f?var1=val1&var2=val2#fra')
        # Reconstructed: '&' separating query parameters must be escaped.
        self.assertEqual(
            '<a action="http://a.b.com/d/e/f?var1=val1&amp;var2=val2#fra">'
            '</a>',
            element.__str__())

    def test_url_query_chars(self):
        """Element should pass '?' and '=' characters in an attribute."""
        element = safe_dom.Element('a', action='target?action=foo&value=bar')
        self.assertEqual(
            '<a action="target?action=foo&amp;value=bar"></a>',
            element.__str__())

    def test_convert_none_to_empty(self):
        """An attribute with value None should render as empty."""
        element = safe_dom.Element('a', action=None)
        self.assertEqual('<a action=""></a>', element.__str__())

    def test_coerce_className(self):
        """Element should replace the 'className' attrib with 'class'."""
        element = safe_dom.Element('p', className='foo')
        self.assertEqual('<p class="foo"></p>', element.__str__())

    def test_include_children(self):
        """Element should include child elements."""
        element = safe_dom.Element('a').add_child(
            safe_dom.Element('b').add_child(
                safe_dom.Element('c'))
        ).add_child(
            safe_dom.Element('d'))
        self.assertEqual('<a><b><c></c></b><d></d></a>', element.__str__())

    def test_include_node_list(self):
        """Element should include a list of children."""
        element = safe_dom.Element('a').add_children(
            safe_dom.NodeList().append(MockNode('b')).append(MockNode('c')))
        self.assertEqual('<a>bc</a>', element.__str__())

    def test_sanitize_children(self):
        """Element should sanitize child elements as they are included."""
        element = safe_dom.Element('td').add_child(
            safe_dom.Element('a', href='foo"bar').add_text('1<2'))
        # Reconstructed: attribute and text content come back escaped.
        self.assertEqual(
            '<td><a href="foo&quot;bar">1&lt;2</a></td>', element.__str__())

    def test_add_text(self):
        """Adding text should add text which will be sanitized."""
        self.assertEqual(
            '<a>1&lt;2</a>', safe_dom.Element('a').add_text('1<2').__str__())

    def test_add_attribute(self):
        """Attributes can be added after initialization."""
        self.assertEqual(
            '<a b="c" d="e" f="g" h="i"></a>',
            safe_dom.Element(
                'a', b='c', d='e').add_attribute(f='g', h='i').__str__())

    def test_void_elements_have_no_end_tags(self):
        """Void elements should have no end tag, e.g., <br/>."""
        void_elements = [
            'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input',
            'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track',
            'wbr']
        for elt in void_elements:
            self.assertEqual('<%s/>' % elt, safe_dom.Element(elt).__str__())

    def test_empty_non_void_elememnts_should_have_end_tags(self):
        """Non-void elements should have their end tags, even when empty."""
        sample_elements = ['p', 'textarea', 'div']
        for elt in sample_elements:
            self.assertEqual(
                '<%s></%s>' % (elt, elt), safe_dom.Element(elt).__str__())
class ScriptElementTests(unittest.TestCase):
    """Unit tests for common.safe_dom.ScriptElement."""

    def test_script_should_not_escape_body(self):
        """The body of the script tag should not be escaped."""
        script = safe_dom.ScriptElement()
        for snippet in ('alert("foo");', '1 < 2 && 2 > 1;'):
            script.add_text(snippet)
        self.assertEqual(
            '<script>alert("foo");1 < 2 && 2 > 1;</script>', script.__str__())

    def test_script_should_reject_close_script_tag_in_body(self):
        """Expect an error if the body of the script tag contains </script>."""
        script = safe_dom.ScriptElement()
        script.add_text('</script>')
        try:
            script.__str__()
        except ValueError:
            return
        self.fail('Expected an exception')

    def test_script_should_not_allow_child_nodes_to_be_added(self):
        """Script should not allow child nodes to be added."""
        script = safe_dom.ScriptElement()
        try:
            script.add_child(safe_dom.Element('br'))
            self.fail('Expected an exception')
        except ValueError:
            pass
        try:
            script.add_children(
                safe_dom.NodeList().append(safe_dom.Element('br')))
            self.fail('Expected an exception')
        except ValueError:
            pass
class EntityTests(unittest.TestCase):
    """Unit tests for common.safe_dom.Entity.

    NOTE(review): the entity literals in this copy of the file had been
    HTML-entity decoded by an export step (e.g. '&nbsp;' appeared as a bare
    non-breaking space), which made several cases meaningless. The escaped
    forms below are reconstructed from each test's stated intent — confirm
    against the upstream sources.
    """

    def expect_pass(self, test_text):
        # A valid entity must render back exactly the text it was given.
        entity = safe_dom.Entity(test_text)
        self.assertEqual(test_text, entity.__str__())

    def expect_fail(self, test_text):
        # Construction of an invalid entity must trip an assertion.
        try:
            safe_dom.Entity(test_text)
        except AssertionError:
            return
        self.fail('Expected an assert exception')

    def test_should_pass_named_entities(self):
        self.expect_pass('&nbsp;')

    def test_should_pass_decimal_entities(self):
        self.expect_pass('&#38;')

    def test_should_pass_hex_entities(self):
        self.expect_pass('&#x26AB;')

    def test_entities_must_start_with_ampersand(self):
        self.expect_fail('nbsp;')

    def test_entities_must_end_with_semicolon(self):
        self.expect_fail('&nbsp')

    def test_named_entities_must_be_all_alpha(self):
        self.expect_fail('&qu2ot;')

    def test_decimal_entities_must_be_all_decimal_digits(self):
        self.expect_fail('&#A6;')

    def test_hex_entities_must_be_all_hex_digits(self):
        self.expect_fail('&#x26AG;')

    def test_entitiesmust_be_non_empty(self):
        self.expect_fail('&;')
        self.expect_fail('&#;')
        self.expect_fail('&#x;')

    def test_should_reject_extraneous_characters(self):
        self.expect_fail(' &nbsp;')
        self.expect_fail('&nbsp; ')

    def test_should_reject_tampering(self):
        # Mutating the private field after validation must still be caught
        # when the entity is rendered.
        entity = safe_dom.Entity('&nbsp;')
        entity._entity = '<script/>'
        try:
            entity.__str__()
        except AssertionError:
            return
        self.fail('Expected an assert exception')
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for common/locale.py."""
__author__ = 'John Orr (jorr@google.com)'
import unittest
from common import locales
class ParseAcceptLanguageTests(unittest.TestCase):
    """Unit tests for parsing of Accept-Language HTTP header."""

    def test_parses_well_formatted_strings(self):
        cases = (
            ('en-US', [('en_US', 1.0)]),
            ('en-US,el-GR,fr', [('en_US', 1.0), ('el_GR', 1.0), ('fr', 1.0)]),
            ('en-US,el;q=0.8', [('en_US', 1.0), ('el', 0.8)]),
        )
        for header, expected in cases:
            self.assertEquals(expected, locales.parse_accept_language(header))

    def test_arranges_quality_scores_in_decreasing_order(self):
        self.assertEquals(
            [('el', 1.0), ('en_US', 0.8)],
            locales.parse_accept_language('en-US;q=0.8,el;q=1.0'))

    def test_appect_lang_header_length_capped_at_8k(self):
        with self.assertRaises(AssertionError):
            locales.parse_accept_language('x' * 8192)

    def test_coerces_case_to_standard_form(self):
        """Expect form xx_XX returned."""
        parsed = locales.parse_accept_language('en-us,EL-gr,FR')
        self.assertEqual(
            [('en_US', 1.0), ('el_GR', 1.0), ('fr', 1.0)], parsed)

    def test_item_split_ignores_whitespace(self):
        """Expect form xx_XX returned."""
        parsed = locales.parse_accept_language('en-US, el-gr , fr ')
        self.assertEqual(
            [('en_US', 1.0), ('el_GR', 1.0), ('fr', 1.0)], parsed)

    def test_rejects_invalid_syntax(self):
        # Malformed items are dropped; well-formed ones still parse.
        parsed = locales.parse_accept_language('el,-us,en-,12-34,fr')
        self.assertEqual([('el', 1.0), ('fr', 1.0)], parsed)
class LocalesTests(unittest.TestCase):
    """Unit tests for the locale helper functions."""

    def test_supported_locale_count(self):
        # NOTE: If this count increases then locales.LOCALES_DISPLAY_NAMES must
        # be updated with the localized display names of the new locale.
        supported = locales.get_system_supported_locales()
        self.assertEquals(59, len(supported))

    def test_localized_display_name(self):
        display_name = locales.get_locale_display_name('de')
        self.assertEquals('Deutsch (de)', display_name)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.