code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
# -*- coding: UTF-8 -*-
import logging
import hashlib
import httplib
import urllib
import urllib2
import uuid
import datetime
import base64
import re
import string
from google.appengine.ext import deferred
from google.appengine.api.labs import taskqueue
from waveapi import simplejson
from notifiy import model
from notifiy import templates
def get_account(participant, create=False):
    """Resolve the Account linked to a participant.

    Returns None when the participant is falsy, has no preferences
    record, or has no linked account while create is False.  When
    create is True and no account is linked yet, a new Account keyed
    by a random UUID is created and linked to the preferences.
    """
    if not participant:
        return
    prefs = model.ParticipantPreferences.get_by_pk(participant)
    if not prefs:
        return
    if prefs.account_id:
        return model.Account.get_by_pk(prefs.account_id, None)
    if not create:
        return
    # No account yet: mint one and record the link on the preferences.
    account = model.Account.get_by_pk(str(uuid.uuid4()), None, create=True)
    account.put()
    prefs.account_id = account.account_id
    prefs.put()
    return account
def send_message(pwp, modified_by, title, wave_id, wavelet_id, blip_id, message, extra=None):
    """Fan out a push notification to every phone linked to the participant's
    account via deferred tasks on the 'send-phone' queue.

    pwp -- ParticipantWavePreferences row of the recipient.
    modified_by -- author of the change (currently unused in this function).
    extra -- when truthy, the dedup hash is salted with the current time so
             the notification is never treated as a repeat.
    """
    account = get_account(pwp.participant)
    if not account: return
    logging.debug('Sending message to phones for account %s' % account.account_id)
    # Subscriptions are time-limited; skip expired (or never-activated) accounts.
    if not account.expiration_date or account.expiration_date < datetime.date.today():
        logging.warn('Account expired %s' % account.expiration_date)
        return
    query = model.Phone.all()
    query.filter('account_id =', account.account_id)
    for phone in query:
        logging.debug('Sending message to %s %s' % (phone.phone_type, phone.phone_uid))
        if phone.phone_type == 'iphone':
            # Hash the content to build a task name that would dedup identical
            # notifications per wave/phone.
            m = hashlib.md5()
            m.update(unicode(title).encode("UTF-8"))
            m.update(unicode(message).encode("UTF-8"))
            if extra:
                m.update(str(datetime.datetime.now()))
            text_hash = m.hexdigest()
            name = '%s-%s-%s' % (wave_id, phone.phone_token, text_hash)
            # Task names may only contain [a-zA-Z0-9-].
            name = re.compile('[^a-zA-Z0-9-]').sub('X', name)
            # NOTE(review): `name` is computed but `_name` is commented out in
            # the defer() call below, so task-name dedup is effectively disabled.
            try:
                deferred.defer(send_message_to_iphone,
                               participant=pwp.participant,
                               phone_token=phone.phone_token,
                               wave_id=wave_id,
                               wavelet_id=wavelet_id,
                               blip_id=blip_id,
                               title=title,
                               message=message,
                               #_name=name,
                               _queue="send-phone")
            except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e:
                logging.warn('Repeated phone notification %s', e)
def send_message_to_iphone(participant, phone_token, wave_id, wavelet_id, blip_id, title, message):
    """Deliver one push notification through the configured APN gateway.

    The total payload is capped at 255 bytes: the fixed JSON envelope is
    measured first with empty texts, then title (given ~30% of the slack)
    and message are truncated with '...' so the final JSON fits.
    """
    # 'apn-url' is an https:// URL; [8:] drops the scheme, then split into
    # (host, path).
    remote_url = model.ApplicationSettings.get('apn-url')[8:].split('/', 1)
    apn_type = model.ApplicationSettings.get('apn-type')
    user = model.ApplicationSettings.get('apn-key-%s' % apn_type)
    passwd = model.ApplicationSettings.get('apn-master-secret-%s' % apn_type)
    title = title.strip()
    message = message.strip()
    # Envelope with empty texts -> how many bytes remain for title+message.
    json = construct_message(participant, phone_token, wave_id, wavelet_id, blip_id, '', '')
    maxlen = 255 - len(json.encode("utf-8"))
    if len(title.encode("utf-8")) + len(message.encode("utf-8")) > maxlen:
        maxlen_title = int(maxlen * 0.3)
        if len(title.encode("utf-8")) > maxlen_title:
            if maxlen_title > 3:
                title = title[:maxlen_title - 3] + '...'
            else:
                title = ''
        maxlen = maxlen - len(title.encode("utf-8"))
        if len(message.encode("utf-8")) > maxlen:
            if maxlen > 3:
                message = message[:maxlen - 3] + '...'
            else:
                message = ''
    json = construct_message(participant, phone_token, wave_id, wavelet_id, blip_id, title, message)
    headers = { 'Content-Type': 'application/json',
                'Content-Length': len(json),
                'Authorization': 'Basic %s' % string.strip(base64.b64encode(user + ':' + passwd)) }
    # NOTE(review): this logs the APN credentials at debug level -- consider
    # removing before production use.
    logging.debug('%s:%s' % (user, passwd))
    logging.debug('Trying to send %s\n%s\n%s' % (remote_url, headers, json))
    conn = httplib.HTTPSConnection(remote_url[0])
    conn.request("POST", '/%s' % remote_url[1], json, headers)
    response = conn.getresponse()
    if response.status != 200:
        logging.error('Error calling remote notification server: %s %s', response.reason, response.read())
    else:
        logging.info('Done sending notification')
def construct_message(participant, phone_token, wave_id, wavelet_id, blip_id, title, message):
    """Build the compact JSON payload for the push gateway.

    'aps' carries the visible alert text; 'r' encodes the reply target so
    the phone can navigate back to the blip.  'device_tokens' is included
    only when a token is known.  (Refactored: the payload dict was
    previously built twice, once per branch.)
    """
    body = templates.PHONE_MESSAGE % (title, message)
    data = { 'aps': { 'alert': { 'body': body } },
             'r': '%s:%s:%s:%s' % (participant, wave_id, wavelet_id, blip_id) }
    if phone_token:
        # Tokens are stored with spaces/mixed case; the gateway wants the
        # bare upper-case form.
        data['device_tokens'] = [ phone_token.replace(' ', '').upper() ]
    return simplejson.dumps(data, separators=(',', ':'))
def send_message_to_iphone_2(participant, phone_uid, phone_token, wave_id, wavelet_id, blip_id, message, url):
    """Legacy delivery path: POST the notification fields to the configured
    intermediate 'remote-server' instead of talking to APN directly."""
    remote_url = model.ApplicationSettings.get('remote-server');
    data = { 'uid': phone_uid,
             'token': phone_token,
             'participant': participant,
             'wave_id': wave_id,
             'wavelet_id': wavelet_id,
             'blip_id': blip_id,
             'message': message,
             'url': url }
    logging.debug('Trying to send %s' % urllib.urlencode(data))
    try:
        urllib2.urlopen(remote_url, urllib.urlencode(data))
        logging.info('Success calling remote notification server')
    except Exception, e:
        # Best-effort: failures are logged, not retried.
        logging.error('Error calling remote notification server: %s' % e)
| Python |
# -*- coding: UTF-8 -*-
import logging
import traceback
from google.appengine.api import mail
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
from notifiy import constants
from notifiy import model
from notifiy import templates
from notifiy import util
class ReceiveEmail(InboundMailHandler):
def receive(self, message):
body = '\n'.join([b.decode() for (a, b) in message.bodies(content_type='text/plain')])
if '<' in message.to and '>' in message.to:
to = message.to[message.to.find('<') + 1:message.to.find('>')]
else:
to = message.to
to =to.split('@')
if '<' in message.sender and '>' in message.sender:
sender = message.sender[message.sender.find('<') + 1:message.sender.find('>')]
else:
sender = message.sender.split('@')
logging.debug('incoming email from %s to %s@%s', sender, *to);
try:
if to[0].startswith('remove-'):
self.remove(sender)
else:
self.process_incoming(message.subject, body, sender, to)
except Exception, e:
logging.exception('Error processing email %s', e)
mail.send_mail('Notifiy <%s>' % constants.ROBOT_EMAIL,
sender,
'RE: %s' % message.subject,
templates.ERROR_BODY % (message.subject, e, body))
def remove(self, sender):
logging.debug('unsubscribe %s' % sender)
query = model.ParticipantPreferences.all()
query.filter('email =', sender)
for pp in query:
pp.email = ''
pp.put()
mail.send_mail(constants.ROBOT_EMAIL,
sender,
templates.UNSUBSCRIBED_SUBJECT,
templates.UNSUBSCRIBED_BODY)
def process_incoming(self, subject, body, sender, to):
to = to[0].split('.')
participant = util.modified_b64decode(to[0])
wave_id = util.modified_b64decode(to[1])
wavelet_id = util.modified_b64decode(to[2])
blip_id = util.modified_b64decode(to[3])
q = model.ParticipantPreferences.all()
q.filter('participant =', participant)
q.filter('email =', sender)
if not q.get():
error = 'Invalid email %s not registered to %s' % (sender, participant)
logging.info(error)
mail.send_mail('Notifiy <%s>' % constants.ROBOT_EMAIL, sender,
subject, templates.ERROR_BODY % (subject, error, body))
return
logging.debug('incoming email from %s [participant=%s, wave_id=%s, ' +
'wavelet_id=%s, blip_id=%s]: %s', sender, participant,
wave_id, wavelet_id, blip_id, body)
util.reply_wavelet(wave_id, wavelet_id, blip_id, participant,
util.process_body(body)) | Python |
# -*- coding: UTF-8 -*-
# Robot owner's wave address.
ME = 'cesar.izurieta@googlewave.com'
# Robot identity and derived addresses/URLs.
ROBOT_NAME = 'notifiy'
ROBOT_ID = 'wave-email-notifications'
ROBOT_ADDRESS = '%s@appspot.com' % ROBOT_ID
ROBOT_BASE_URL = 'http://%s.appspot.com' % ROBOT_ID
ROBOT_EMAIL = '%s@ecuarock.net' % ROBOT_ID
ROBOT_HOME_PAGE = 'http://%s.googlecode.com' % ROBOT_ID
ROBOT_IMG = '%s/%s' % (ROBOT_BASE_URL, 'favicon.png')
ROBOT_LOGO = '%s/%s' % (ROBOT_BASE_URL, 'logo.png')
# Per-domain RPC endpoints used for OAuth-authenticated robot calls.
RPC_URL = {
    'googlewave.com': 'http://gmodules.com/api/rpc',
    'wavesandbox.com': 'http://sandbox.gmodules.com/api/rpc'
}
| Python |
# -*- coding: UTF-8 -*-
import logging
from waveapi import appengine_robot_runner
from waveapi import events
from waveapi.robot import Robot
from notifiy import constants
from notifiy import model
from notifiy import notifications
from notifiy import preferences
from notifiy import templates
from notifiy import general
###################################################
# General handlers
###################################################
def on_wavelet_self_added(event, wavelet):
    """Robot was added to a wave: initialize notification state for it."""
    if preferences.is_preferences_wave(wavelet): return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    general.wavelet_init(wavelet, event.modified_by)
def on_wavelet_self_removed(event, wavelet):
    """Robot was removed from a wave: tear down its notification state."""
    if preferences.is_preferences_wave(wavelet): return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    general.wavelet_deinit(wavelet)
def on_wavelet_participants_changed(event, wavelet):
    """Participants changed: notify each newly-added participant."""
    if preferences.is_preferences_wave(wavelet): return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    # Re-run wavelet setup only when the change touched the root blip.
    if wavelet.root_blip and event.blip_id == wavelet.root_blip.blip_id:
        general.wavelet_init(wavelet, event.modified_by)
    message = templates.ADDED_MESSAGE % event.modified_by
    for participant in event.participants_added:
        general.participant_wavelet_init(wavelet, participant,
                                         event.modified_by, message)
###################################################
# Content change handlers
###################################################
def on_blip_submitted(event, wavelet):
    """A blip was submitted: send notifications for the new content."""
    if preferences.is_preferences_wave(wavelet): return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    # Re-run wavelet setup only for changes to the root blip.
    if wavelet.root_blip and event.blip_id == wavelet.root_blip.blip_id:
        general.wavelet_init(wavelet, event.modified_by)
    notifications.notify_submitted(wavelet, event.blip, event.modified_by)
def on_wavelet_blip_removed(event, wavelet):
    """A blip was removed: send deletion notifications."""
    if preferences.is_preferences_wave(wavelet): return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    # Re-run wavelet setup only for changes to the root blip.
    if wavelet.root_blip and event.blip_id == wavelet.root_blip.blip_id:
        general.wavelet_init(wavelet, event.modified_by)
    notifications.notify_removed(wavelet, event.modified_by)
###################################################
# Preferences handlers
###################################################
def on_form_button_clicked(event, wavelet):
    """Form button clicked on a preferences wave: delegate to preferences."""
    # Inverse guard of the other handlers: only preferences waves have forms.
    if not preferences.is_preferences_wave(wavelet): return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    preferences.handle_event(event, wavelet)
###################################################
# Main functions
###################################################
def create_robot(run=True, domain=None):
    """Build the Notifiy robot, register its event handlers, and optionally
    configure OAuth for the given domain and start the runner."""
    robot = Robot(constants.ROBOT_NAME.title(), image_url=constants.ROBOT_IMG,
                  profile_url=constants.ROBOT_BASE_URL)
    # (event class, handler, context) table keeps registration in one place.
    handler_table = [
        (events.WaveletSelfAdded, on_wavelet_self_added, [ events.Context.ROOT ]),
        (events.WaveletSelfRemoved, on_wavelet_self_removed, [ events.Context.ROOT ]),
        (events.WaveletParticipantsChanged, on_wavelet_participants_changed, [ events.Context.ROOT ]),
        (events.BlipSubmitted, on_blip_submitted, [ events.Context.SELF ]),
        (events.WaveletBlipRemoved, on_wavelet_blip_removed, [ events.Context.SELF ]),
        (events.FormButtonClicked, on_form_button_clicked, [ events.Context.ALL ]),
    ]
    for event_class, handler, context in handler_table:
        robot.register_handler(event_class, handler, context=context)
    # Needed to reauthenticate robot
    # verification_token = model.ApplicationSettings.get("verification-token")
    # security_token = model.ApplicationSettings.get("security-token")
    # robot.set_verification_token_info(verification_token, security_token)
    if domain:
        setup_oauth(robot, domain)
    if run:
        appengine_robot_runner.run(robot)
    return robot
def setup_oauth(robot, domain):
    """Configure OAuth on the robot using the stored consumer credentials
    and the RPC endpoint matching the wave's domain."""
    consumer_key = model.ApplicationSettings.get("consumer-key")
    consumer_secret = model.ApplicationSettings.get("consumer-secret")
    # Unknown domains fall back to the googlewave.com endpoint.  # TODO
    url = constants.RPC_URL.get(domain, constants.RPC_URL['googlewave.com'])
    robot.setup_oauth(consumer_key, consumer_secret, url)
| Python |
# -*- coding: UTF-8 -*-
import logging
from waveapi.element import Gadget
from notifiy import constants
from notifiy import model
from notifiy import preferences
# Public URL of the preferences gadget XML served by this robot.
GADGET_URL = '%s/%s.xml' % (constants.ROBOT_BASE_URL, constants.ROBOT_ID)
def is_gadget_present(wavelet):
    """Return True when the root blip already contains our gadget."""
    return wavelet.root_blip.first(Gadget, url=GADGET_URL) is not None
def gadget_add(wavelet):
    """Insert the preferences gadget into the root blip if not present."""
    if is_gadget_present(wavelet):
        return
    try:
        wavelet.root_blip.at(1).insert(Gadget(GADGET_URL))
    except IndexError:
        # Root blip has no position 1 to insert at.
        logging.warn('Could not insert gadget!')
def gadget_remove(wavelet):
    """Remove our gadget from the root blip if present.

    BUG FIX: all() was called with the URL string where the element class
    is expected; filter by class plus url keyword, mirroring
    is_gadget_present().
    """
    if is_gadget_present(wavelet):
        wavelet.root_blip.all(Gadget, url=GADGET_URL).delete()
def handle_state_change(event, wavelet):
    """React to a state change of our gadget by resetting the owner's
    preferences wave."""
    # Only handle changes to our gadget inside the root blip.
    if wavelet.root_blip.blip_id != event.blip_id: return
    if wavelet.root_blip.all(Gadget)[event.index].url != GADGET_URL: return
    pp = model.ParticipantPreferences.get_by_pk(event.modified_by)
    # BUG FIX: fetch_preferences_wavelet takes (wavelet, wave_id); the stray
    # third argument raised TypeError.
    preferences_wavelet = preferences.fetch_preferences_wavelet(wavelet, pp.preferences_wave_id)
    eh = preferences.ExecHandler(event, preferences_wavelet)
    eh.reset()
| Python |
# -*- coding: UTF-8 -*-
import calendar
import datetime
import logging
import traceback
import urllib
import urllib2

from google.appengine.ext import db
from google.appengine.ext import webapp

from waveapi import simplejson

from notifiy import model
from notifiy import util
from notifiy import phone
from notifiy import preferences
# Debug-log template for incoming phone requests (see PhoneProcess.get).
LOG = '''\
--- TYPE: %s ---
participant: %s
activation: %s
phone uid: %s
phone token: %s
receipt data: %s'''
def freetrial(d):
    """Return the date one month after d.

    BUG FIX: the day is clamped to the last day of the target month, so
    e.g. Jan 31 yields Feb 28/29 instead of raising ValueError.
    """
    year = d.year
    month = d.month + 1
    if month > 12:
        month = month - 12
        year = year + 1
    day = min(d.day, calendar.monthrange(year, month)[1])
    return datetime.date(year, month, day)
def oneyear(d):
    """Return the date one year after d.

    BUG FIX: Feb 29 is clamped to Feb 28 in the target (non-leap) year
    instead of raising ValueError.
    """
    day = min(d.day, calendar.monthrange(d.year + 1, d.month)[1])
    return datetime.date(d.year + 1, d.month, day)
def sixmonths(d):
    """Return the date six months after d.

    BUG FIX: the day is clamped to the last day of the target month, so
    e.g. Aug 31 yields Feb 28/29 instead of raising ValueError.
    """
    year = d.year
    month = d.month + 6
    if month > 12:
        month = month - 12
        year = year + 1
    day = min(d.day, calendar.monthrange(year, month)[1])
    return datetime.date(year, month, day)
# Pseudo product id used when the client activates the free trial.
FREE_TRIAL = 'com.wavenotifications.notifiy.FreeTrial001'
# Maps App Store product ids to the function that computes the new
# expiration date from the purchase date.
PRODUCT_IDS = {
    FREE_TRIAL : freetrial,
    'com.wavenotifications.notifiy.OneYear001': oneyear,
    'com.wavenotifications.notifiy.SixMonths001': sixmonths
}
class PhoneProcess(webapp.RequestHandler):
    """HTTP endpoint for phone clients: account info, activation,
    deactivation, replies and blip fetches.

    The second path segment names the request type and is dispatched to
    the method of the same name; the response is JSON.
    """

    def post(self, *args):
        # POST is handled exactly like GET.
        self.get()

    def get(self, *args):
        """Dispatch to the handler named by the URL path and write a JSON
        response describing the account, or an error message."""
        # NOTE(review): webapp responses normally set
        # self.response.headers['Content-Type']; confirm that assigning
        # contentType here actually has an effect.
        self.response.contentType = 'application/json'
        # Path looks like /<prefix>/<req_type>/...; keep decoded segments.
        path = [urllib.unquote(a) for a in self.request.path.split('/')[2:]]
        req_type = path[0]
        self.participant = self.request.get('participant')
        if self.participant:
            self.participant = self.participant.lower()
        self.activation = self.request.get('activation')
        self.receipt_data = self.request.get('receipt_data')
        self.phone_uid = self.request.get('phone_uid')
        self.phone_type = self.request.get('phone_type')
        self.phone_token = self.request.get('phone_token')
        if self.phone_token:
            # '+' in the query string decodes as a literal plus; the stored
            # token uses spaces.
            self.phone_token = self.phone_token.replace('+', ' ')
        logging.debug(LOG, req_type, self.participant, self.activation,
                      self.phone_uid, self.phone_token, self.receipt_data)
        self.account = None
        try:
            # Never dispatch to private helpers.
            if req_type.startswith('_'): return
            error = getattr(self, req_type)()
            # Handlers return False when they already wrote the response.
            if error == False: return
        except Exception, e:
            logging.exception('Error while processing phone process %s', e)
            error = str(e)
        data = None
        if not error and self.account:
            # Success: describe the account, its phones and linked
            # participants.
            query = model.Phone.all()
            query.filter('account_id =', self.account.account_id)
            phones = [phone1.phone_uid for phone1 in query]
            query = model.ParticipantPreferences.all()
            query.filter('account_id =', self.account.account_id)
            participants = [pa.participant for pa in query]
            data = { 'phones': phones,
                     'participants': participants,
                     'subscription_type': self.account.subscription_type,
                     'expiration_date': str(self.account.expiration_date),
                     'response': "OK" }
        else:
            data = { 'response': "ERROR", 'message': error or 'Invalid Google Wave account' }
        logging.debug('RESPONSE: %s' % simplejson.dumps(data));
        self.response.out.write(simplejson.dumps(data))

    def info(self):
        '''Gets info about an account by participant of phone'''
        if self.participant:
            self.account = phone.get_account(self.participant)
            if not self.account: return 'No account found for participant'
        self._find_account_by_phone()
        if not self.account and not self.participant:
            return 'No account found for phone'

    def activate(self):
        '''Activates either a phone or a participant'''
        if self.participant and not self._validate():
            return 'Invalid Google Wave account or activation code'
        self.account = phone.get_account(self.participant)
        # Fall back to the phone's account, or create a fresh one.
        self._find_account_by_phone() or self._create_account()
        if not self.account:
            return 'No account found or account could not be created.'
        if self.receipt_data:
            error = self._update_account()
        else:
            error = self._register_phone()
        return error

    def deactivate(self):
        """Unlink a participant from its account, or delete a phone."""
        if self.participant:
            if not self._validate():
                return 'Invalid Google Wave account or activation code'
            self.account = phone.get_account(self.participant)
            if not self.account:
                return 'Participant doesn\'t have an account'
            query = model.ParticipantPreferences.all()
            query.filter('account_id =', self.account.account_id)
            # Refuse to orphan the account by removing its last participant.
            if len(list(query)) == 1:
                return 'Cannot deactivate this participant from account. There\'s only participant linked to the account.'
            pp = model.ParticipantPreferences.get_by_pk(self.participant)
            pp.account_id = None
            pp.put()
        elif self.phone_type and self.phone_uid and self.phone_token:
            self._find_account_by_phone()
            query = model.Phone.all()
            query.filter('phone_type =', self.phone_type)
            query.filter('phone_uid =', self.phone_uid)
            query.filter('phone_token =', self.phone_token)
            db.delete(query)

    def reply(self):
        """Post a reply blip authored from the phone."""
        participant = self.request.get('participant')
        wave_id = self.request.get('wave_id')
        wavelet_id = self.request.get('wavelet_id')
        blip_id = self.request.get('blip_id')
        message = self.request.get('message')
        logging.debug('incoming reply from phone [participant=%s, wave_id=%s,' +
                      'wavelet_id=%s, blip_id=%s]: %s', participant, wave_id,
                      wavelet_id, blip_id, message)
        util.reply_wavelet(wave_id, wavelet_id, blip_id, participant, message)

    def fetch(self):
        """Return the content of one blip as JSON (writes the response
        itself and returns False on success)."""
        participant = self.request.get('participant')
        wave_id = self.request.get('wave_id')
        wavelet_id = self.request.get('wavelet_id')
        blip_id = self.request.get('blip_id')
        wavelet = util.fetch_wavelet(wave_id, wavelet_id, participant)
        if blip_id in wavelet.blips:
            blip = wavelet.blips[blip_id]
            data = { 'blipId': blip.blip_id,
                     'waveId': blip.wave_id,
                     'waveletId': blip.wavelet_id,
                     'creator': blip.creator,
                     'content': blip.text }
            logging.debug('RESPONSE: %s' % simplejson.dumps(data));
            self.response.out.write(simplejson.dumps(data))
            return False
        else:
            return 'Blip not found'

    def _validate(self):
        '''Check for activation code for phone'''
        query = model.ParticipantPreferences.all()
        query.filter('participant =', self.participant)
        query.filter('activation =', self.activation)
        return bool(query.get())

    def _find_account_by_phone(self):
        '''Try to get account linked to phone if possible'''
        if not self.account and self.phone_type and self.phone_uid and self.phone_token:
            query = model.Phone.all()
            query.filter('phone_type =', self.phone_type)
            query.filter('phone_uid =', self.phone_uid)
            query.filter('phone_token =', self.phone_token)
            account_phone = query.get()
            if not account_phone: return False
            self.account = model.Account.get_by_pk(account_phone.account_id,
                                                   None)
            if self.participant:
                # Link the requesting participant to the phone's account.
                pp = model.ParticipantPreferences.get_by_pk(self.participant)
                pp.account_id = self.account.account_id
                pp.put()
        return bool(self.account)

    def _create_account(self):
        '''Try to create an account'''
        if self.account or not self.participant: return
        self.account = phone.get_account(self.participant, create=True)

    def _update_account(self):
        '''Update account or create one if it does not exist yet'''
        if not self.receipt_data: return
        if self.receipt_data == FREE_TRIAL:
            # Free trial: no App Store receipt to verify.
            if self.account.subscription_type:
                return 'Cannot activate the free trial, a subscription already exists.'
            purchase_date = datetime.datetime.now()
            transaction_id = FREE_TRIAL
            subscription_type = FREE_TRIAL
        else:
            # Verify the receipt against the Apple endpoint configured for
            # this deployment (sandbox or production).
            type = model.ApplicationSettings.get('apn-type')
            receipt_url = model.ApplicationSettings.get('apn-receipt-url-%s' % type)
            data = simplejson.dumps({ 'receipt-data': self.receipt_data })
            json = simplejson.loads(urllib2.urlopen(receipt_url, data).read())
            if json['status'] != 0:
                return 'Invalid receipt'
            subscription_type = json['receipt']['product_id']
            transaction_id = json['receipt']['transaction_id']
            # A transaction may only activate one account.
            query = model.Account.all()
            query.filter('transaction_id =', transaction_id)
            if query.get():
                return 'Cannot use receipt, account already activated with this receipt.'
            if self.account.transaction_id and self.account.transaction_id != transaction_id:
                # Renewal: extend from the current expiration date.
                purchase_date = self.account.expiration_date
            else:
                purchase_date = json['receipt']['purchase_date'].split(" ")[0]
                purchase_date = datetime.datetime.strptime(purchase_date, "%Y-%m-%d")
        if subscription_type in PRODUCT_IDS:
            # Snapshot the old state, then apply the new subscription.
            self._save_history()
            self.account.expiration_date = PRODUCT_IDS[subscription_type](purchase_date)
            self.account.subscription_type = subscription_type
            self.account.transaction_id = transaction_id
            self.account.receipt_data = self.receipt_data
            self.account.put()
        else:
            return "Invalid Product ID %s" % subscription_type

    def _save_history(self):
        """Store a dated snapshot row of the account's current state."""
        account = model.Account.get_by_pk(self.account.account_id,
                                          datetime.datetime.now(), create=True)
        account.subscription_type = self.account.subscription_type
        account.expiration_date = self.account.expiration_date
        account.receipt_data = self.account.receipt_data
        account.transaction_id = self.account.transaction_id
        account.put()

    def _register_phone(self):
        '''Create or update Phone'''
        if self.phone_uid and self.phone_type and self.phone_token:
            ap = model.Phone.get_by_pk(self.phone_type, self.phone_uid, create=True)
            ap.phone_token = self.phone_token
            ap.account_id = self.account.account_id
            ap.put()
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import random
from google.appengine.ext import db
# TODO from google.appengine.api import memcache
from migrationmodel import MigratingModel, get_by_pk
# Per-wave notification modes stored in ParticipantWavePreferences.notify_type.
NOTIFY_NONE = 0   # never notify
NOTIFY_ONCE = 1   # notify once, then wait until the wave is visited again
NOTIFY_ALL = 2    # notify on every change
NOTIFY_TYPE_COUNT = 3
class Phone(MigratingModel):
    """A registered phone endpoint, linked to a billing Account."""
    migration_version = 1
    phone_type = db.StringProperty(required=True)  # e.g. 'iphone'
    phone_uid = db.StringProperty(required=True)
    phone_token = db.StringProperty()  # push token
    account_id = db.StringProperty()
    pk = ['phone_type', 'phone_uid']
class Account(MigratingModel):
    """A phone-subscription account."""
    migration_version = 1
    account_id = db.StringProperty(required=True)
    # None on the live row; history snapshots carry a timestamp here
    # (see PhoneProcess._save_history).
    to_date = db.DateTimeProperty(default=None)
    subscription_type = db.StringProperty()
    expiration_date = db.DateProperty()
    transaction_id = db.StringProperty()
    receipt_data = db.TextProperty()
    pk = ['account_id', 'to_date']
class ParticipantPreferences(MigratingModel):
    """Per-participant global settings: email, activation code, account link."""
    migration_version = 3
    participant = db.StringProperty(required=True)
    notify = db.BooleanProperty(default=True)
    notify_initial = db.BooleanProperty(default=True)
    email = db.StringProperty()
    activation = db.StringProperty()  # phone activation code
    preferences_wave_id = db.StringProperty()
    account_id = db.StringProperty()
    pk = ['participant']
    preferencesWaveId = db.StringProperty(default=None) # Deprecated use preferences_wave_id

    def __init__(self, *args, **kwds):
        # Pre-seed a random activation code before the base class applies
        # keyword/datastore values.
        # NOTE(review): db.Model.__init__ also applies property defaults, so
        # this value may be overwritten; migrate_2 regenerates it when it
        # ends up None -- confirm against the db.Model implementation.
        self.activation = random_activation()
        super(ParticipantPreferences, self).__init__(*args, **kwds)

    def put(self, *args, **kwds):
        # TODO memcache.set(self.get_key(), self, namespace='pp')
        super(ParticipantPreferences, self).put(*args, **kwds)

    def migrate_1(self):
        if self.notify_initial == None:
            self.notify_initial = True

    def migrate_2(self):
        # Backfill missing activation codes.
        if self.activation == None:
            self.activation = random_activation()

    def migrate_3(self):
        # Fold the deprecated camelCase property into the current one.
        if self.preferencesWaveId:
            self.preferences_wave_id = self.preferencesWaveId;
class ParticipantWavePreferences(MigratingModel):
    """Per-participant, per-wave notification state."""
    migration_version = 2
    participant = db.StringProperty(required=True)
    wave_id = db.StringProperty(required=False) # TODO migrate all entities
    notify_type = db.IntegerProperty(default=NOTIFY_NONE)
    visited = db.BooleanProperty(default=False)
    last_visited = db.DateTimeProperty()
    pk = ['participant', 'wave_id']
    waveId = db.StringProperty(default=None) # Deprecated use wave_id
    notify = db.BooleanProperty(default=None) # Deprecated use notify_type

    #def put(self, *args, **kwds):
    # TODO
    #    memcache.set(self.get_key(), self, namespace='pwp')
    #    super(ParticipantWavePreferences, self).put(*args, **kwds)

    def migrate_1(self):
        # Fold the deprecated boolean 'notify' into notify_type.
        if self.notify != None:
            if self.notify:
                self.notify_type = NOTIFY_ALL
            self.notify = None

    def migrate_2(self):
        # Fold the deprecated camelCase property into the current one.
        if self.waveId:
            self.wave_id = self.waveId;

    @classmethod
    def get_by_pk(cls, *args, **kw):
        """Lookup by (participant, wave_id); falls back to querying the
        deprecated waveId property for unmigrated entities."""
        o = get_by_pk(cls, *args, **kw)
        if not o:
            q = ParticipantWavePreferences.all()
            q.filter('participant =', args[0])
            q.filter('waveId =', args[1])
            o = q.get()
        return o
class ApplicationSettings(MigratingModel):
    """Simple key/value store for deployment configuration."""
    migration_version = 0
    keyname = db.StringProperty(required=True)
    value = db.StringProperty()
    pk = ['keyname']

    @classmethod
    def get(cls, keyname):
        """Return the stored value for keyname.

        NOTE(review): shadows db.Model.get; raises AttributeError when the
        setting does not exist (get_by_pk returns None).
        """
        return cls.get_by_pk(keyname).value
def random_activation():
    """Return a random 9-digit numeric activation code as a string."""
    return ''.join(str(random.randint(0, 9)) for _ in range(9))
| Python |
# -*- coding: UTF-8 -*-
import logging
from waveapi import element
from notifiy import constants
from notifiy import model
from notifiy import templates
# Data-document keys used to tag preferences waves with their owner and
# form version.
PARTICIPANT_DATA_DOC = '%s/participant' % constants.ROBOT_ADDRESS
VERSION_DATA_DOC = '%s/preferencesVersion' % constants.ROBOT_ADDRESS
# Bump to force existing preferences waves to be re-rendered.
PREFERENCES_VERSION = '14'
SETTIE_ROBOT = 'settie@a.gwave.com'
def is_preferences_wave(wavelet):
    """True when the wavelet carries our version data document."""
    return VERSION_DATA_DOC in wavelet.data_documents
def find_participant(wavelet, participant=None):
    """Return the owner recorded in the wave's participant data document,
    falling back to the supplied participant when absent."""
    docs = wavelet.data_documents
    if PARTICIPANT_DATA_DOC not in docs:
        return participant
    return docs[PARTICIPANT_DATA_DOC]
def fetch_preferences_wavelet(wavelet, preferences_wave_id, wavelet_id=None):
    """Fetch a participant's preferences wavelet via the robot.

    wavelet_id is optional and forwarded to fetch_wavelet; it is accepted
    for backward compatibility with callers that pass a third argument
    (see gadget.handle_state_change, which previously raised TypeError).
    """
    prefs_wavelet = wavelet.robot.fetch_wavelet(preferences_wave_id, wavelet_id)
    return prefs_wavelet
def create_preferences_wave(robot, participant):
    """Create a new preferences wave for participant in their own domain
    and render the preferences form into it."""
    domain = participant.split('@')[1]
    participants = [ constants.ROBOT_ADDRESS, SETTIE_ROBOT, participant ]
    prefs_wavelet = robot.new_wave(domain, participants, submit=True)
    # force=True also records this wave id on the participant's preferences.
    update_preferences_wavelet(prefs_wavelet, participant, force=True)
    robot.submit(prefs_wavelet)
def update_preferences_wavelet(wavelet, participant=None, force=False):
    """(Re)render the preferences form into the wavelet's root blip.

    Skipped when the wave already carries the current PREFERENCES_VERSION,
    unless force is set; force additionally records the wave id on the
    participant's preferences.
    """
    if not force and wavelet.data_documents[VERSION_DATA_DOC] == PREFERENCES_VERSION: return
    participant = find_participant(wavelet, participant)
    pp = model.ParticipantPreferences.get_by_pk(participant)
    logging.debug('Updating preferences wave content for %s', participant)
    if force:
        pp.preferences_wave_id = wavelet.wave_id
        pp.put()
    # Assemble the blip content as a flat list of elements and strings.
    content = []
    content += [ element.Image(url=constants.ROBOT_LOGO, width=200, height=100, caption=constants.ROBOT_NAME.title()) ]
    content.append('\n')
    content += [ element.Check('notify', pp.notify), ' Notify me to this email:\n', element.Input('email', str(pp.email)), '\n' ]
    content += [ element.Check('notify_initial', pp.notify_initial), ' Send initial notifications', '\n' ]
    content.append('\n')
    content += [ 'Phone activation code: %s\n' % pp.activation ]
    if pp.account_id:
        # Show phone-account details only when a phone account is linked.
        content += [ 'Phone account id: %s\n' % pp.account_id ]
        query = model.Phone.all()
        query.filter('account_id =', pp.account_id)
        content += [ 'Phones associated with this account: %s\n' % len(list(query)) ]
        query = model.ParticipantPreferences.all()
        query.filter('account_id =', pp.account_id)
        content += [ 'Google wave accounts associated: ' ]
        content += [ ','.join([ '%s' % pp2.participant for pp2 in query ]) ]
        content.append('\n')
    content.append('\n')
    content += [ element.Button('save_pp', 'Save'), ' ', element.Button('refresh_pp', 'Refresh'), '\n' ]
    content.append('\n')
    content += [ 'Execute global commands: (try "help")', element.Input('command', ''), element.Button('exec_pp', 'Exec') ]
    # Rebuild the blip from scratch and stamp the wave with owner + version.
    wavelet.root_blip.all().delete()
    wavelet.data_documents[PARTICIPANT_DATA_DOC] = participant
    wavelet.data_documents[VERSION_DATA_DOC] = PREFERENCES_VERSION
    wavelet.title = 'Notifiy global preferences'
    for c in content:
        wavelet.root_blip.append(c)
def delete_preferences_wavelet(wavelet, participant=None):
    """Clear a participant's preferences wave: retitle it, drop the data
    documents that mark it as a preferences wave, and empty the root blip."""
    if not wavelet: return
    if not participant:
        participant = find_participant(wavelet)
    pp = model.ParticipantPreferences.get_by_pk(participant)
    if not pp: return
    prefs_wavelet = fetch_preferences_wavelet(wavelet, pp.preferences_wave_id)
    prefs_wavelet.title = "Please delete this wave"
    del prefs_wavelet.data_documents[PARTICIPANT_DATA_DOC]
    del prefs_wavelet.data_documents[VERSION_DATA_DOC]
    prefs_wavelet.root_blip.all().delete()
    wavelet.robot.submit(prefs_wavelet)
def handle_event(event, wavelet):
    """Handle form-button clicks on a preferences wave.

    Only the wave's owner (the participant recorded in the data document)
    may operate the form.  save_pp persists the checkbox/input values,
    refresh_pp re-renders the form, exec_pp dispatches a typed command to
    ExecHandler.
    """
    participant = find_participant(wavelet, event.modified_by)
    logging.debug('Preferences if %s == %s' % (participant, event.modified_by))
    if participant != event.modified_by: return
    if event.button_name == 'save_pp':
        pp = model.ParticipantPreferences.get_by_pk(participant)
        for t, f, p in [ (element.Check, bool, 'notify'),
                         (element.Input, str, 'email'),
                         (element.Check, bool, 'notify_initial') ]:
            # NOTE(review): first(...).value() is assumed to resolve the
            # element reference; its .value holds the submitted state.
            # bool() on a string value is True for any non-empty string --
            # confirm the Check element yields a real boolean here.
            form_element = wavelet.root_blip.first(t, name=p).value()
            pp.__setattr__(p, f(form_element.value))
        # BUG FIX: the updated preferences were never persisted.
        pp.put()
        wavelet.reply(templates.PREFERENCES_SAVED)
    elif event.button_name == 'refresh_pp':
        if ExecHandler(event, wavelet).refresh():
            wavelet.reply(templates.COMMAND_SUCCESSFUL % 'refresh')
        else:
            wavelet.reply(templates.ERROR_TRY_AGAIN)
    elif event.button_name == 'exec_pp':
        eh = ExecHandler(event, wavelet)
        form_element = wavelet.root_blip.first(element.Input, name='command').value()
        command = form_element.value.split(' ')
        if hasattr(eh, command[0]):
            result = getattr(eh, command[0])(*command[1:])
            if result == True:
                wavelet.reply(templates.COMMAND_SUCCESSFUL % form_element.value)
            elif result == False:
                wavelet.reply(templates.ERROR_TRY_AGAIN)
            elif result:
                wavelet.reply(result)
        else:
            wavelet.reply(templates.COMMAND_UNKNOWN % command)
class ExecHandler(object):
    """Commands runnable from a preferences wave.

    handle_event dispatches to the method named by the typed command.
    Methods return True/False for success/failure, or a string to reply.
    """

    def __init__(self, event, wavelet):
        self.event = event
        self.wavelet = wavelet

    def help(self):
        """Return the command help text."""
        logging.debug('ExecHandler help')
        return templates.COMMANDS_HELP

    def refresh(self):
        """Force a re-render of this preferences wave."""
        logging.debug('ExecHandler refresh')
        update_preferences_wavelet(self.wavelet, self.event.modified_by, force=True)
        return True

    def clean(self):
        """Delete every blip except the root blip."""
        logging.debug('ExecHandler clean')
        delete = []
        for blip_id in self.wavelet.blips:
            if blip_id != self.wavelet.root_blip.blip_id:
                delete.append(blip_id)
        for blip_id in delete:
            self.wavelet.delete(blip_id)

    def reset(self):
        """Reset per-wave preferences (not implemented)."""
        logging.debug('ExecHandler reset')
        return "Not implemented yet"

    def regen(self, participant=None):
        """Regenerate the phone activation code, then refresh the form."""
        pp = model.ParticipantPreferences.get_by_pk(participant or self.event.modified_by)
        pp.activation = model.random_activation()
        pp.put()
        return self.refresh()

    def recreate(self, participant=None):
        """Tear down and recreate the preferences wave."""
        logging.debug('ExecHandler recreate')
        delete_preferences_wavelet(self.wavelet, participant or self.event.modified_by)
        create_preferences_wave(self.wavelet.robot, participant or self.event.modified_by)
        return True
| Python |
# -*- coding: UTF-8 -*-
###################################################
# General mail template
###################################################
# Wrapper for every outgoing notification email: message body, wave link,
# preferences link, unsubscribe address.
MESSAGE_TEMPLATE = u'''\
%s
---
Reply to this message to add a blip to the wave
Visit this wave: %s
Change global notification preferences: %s
To unsubscribe please visit your preferences or send an email to: %s
'''
# Typo fixes below: "recive" -> "receive", "supressed" -> "suppressed",
# "preferenes" -> "preferences" (user-facing text only; names unchanged).
NOTIFY_ONCE_TEMPLATE = u'''\
%s
[NOTE: you will not receive further messages until you visit this wave]
'''
###################################################
# Individual email messages
###################################################
INITIAL_MESSAGE = u'To receive email notifications visit this wave and activate them.'
ROBOT_ADDED = u'The notifiy robot has been added to this wave. '
ADDED_MESSAGE = u'%s added you as a participant to this wave.'
CONTENT_DELETED = u'Some content was deleted from the wave'
# NOTE(review): constant name keeps its historical spelling; callers
# reference CONTENT_SUPRESSED.
CONTENT_SUPRESSED = u'%s... [some content was suppressed]'
PHONE_MESSAGE = '[wave] %s: %s'
###################################################
# Unsubscribed messages
###################################################
UNSUBSCRIBED_SUBJECT = u'Unsubscribed'
UNSUBSCRIBED_BODY = u'Your email has been unsubscribed from the Notifiy robot. \
To receive notifications again please visit Google Wave and update your preferences. \
Your email may still show there, just click the refresh button.'
###################################################
# Preferences wave messages
###################################################
COMMANDS_HELP = u'''
help: Show this help
refresh: Recreate the preferences wave
clean: Clean all messages in this wave.
regen: Regenerate the activation code.
reset: Reset your specific wave preferences (for all waves) and refresh this form.
'''
COMMAND_SUCCESSFUL = u'Command %s ran successfully'
COMMAND_UNKNOWN = u'Command %s not found'
PREFERENCES_SAVED = u'Preferences saved'
ERROR_TRY_AGAIN = u'There was an error, please try again in a few moments'
###################################################
# Error messages
###################################################
ERROR_BODY = u'''Your message "%s" could not be processed because of the following error:
%s
=========================
ORIGINAL MESSAGE FOLLOWS:
=========================
%s
'''
| Python |
# FROM: http://stackoverflow.com/questions/1567148/method-to-migrate-app-engine-models
"""Models which know how to migrate themselves"""
import logging
from google.appengine.ext import db
class MigrationError(Exception):
  """Raised when an entity cannot be migrated to its target schema version."""
class MigratingModel(db.Model):
  """A model which knows how to migrate itself.

  Subclasses must define a class-level migration_version integer attribute,
  plus one migrate_N() method per schema version N; each migrate_N() upgrades
  an entity from version N-1 to version N.
  """
  # Schema version this entity was last migrated to (0 = never migrated).
  current_migration_version = db.IntegerProperty(required=True, default=0)

  def __init__(self, *args, **kw):
    # Datastore loads go through from_entity() below, which passes
    # _from_entity=True; only brand-new entities are stamped with the
    # latest version here.
    if not kw.get('_from_entity'):
      # Assume newly-created entities needn't migrate.
      try:
        kw.setdefault('current_migration_version',
                      self.__class__.migration_version)
      except AttributeError:
        # The subclass forgot to declare migration_version.
        msg = ('migration_version required for %s'
               % self.__class__.__name__)
        logging.critical(msg)
        raise MigrationError, msg
    super(MigratingModel, self).__init__(*args, **kw)

  @classmethod
  def from_entity(cls, *args, **kw):
    # From_entity() calls __init__() with _from_entity=True
    obj = super(MigratingModel, cls).from_entity(*args, **kw)
    # Upgrade (and persist, if needed) the entity as soon as it is loaded.
    return obj.migrate()

  @classmethod
  def get_key(cls, *args):
    # Canonical key_name: the primary-key values joined with ':'.
    return ':'.join(map(str, args))

  @classmethod
  def get_by_pk(cls, *args, **kw):
    # Delegates to the module-level helper (shared lookup/create logic).
    return get_by_pk(cls, *args, **kw)

  def get_key_name(self):
    # Key name rebuilt from this instance's own pk attribute values.
    return ':'.join(map(str, map(self.__getattribute__, self.pk)))

  def migrate(self):
    """Run any pending migrate_N() steps; save if changed; return self."""
    target_version = self.__class__.migration_version
    if self.current_migration_version < target_version:
      migrations = range(self.current_migration_version+1, target_version+1)
      # NOTE: the loop variable *is* the persisted version field, so after
      # each step the entity is marked at that version.
      for self.current_migration_version in migrations:
        method_name = 'migrate_%d' % self.current_migration_version
        logging.debug('%s migrating to %d: %s'
                      % (self.__class__.__name__,
                         self.current_migration_version, method_name))
        getattr(self, method_name)()
      db.put(self)
    return self
def get_by_pk(class_, *args, **kw):
o = None
key_name = None
try:
key_name = class_.get_key(*args)
o = class_.get_by_key_name(key_name)
except Exception, e:
logging.warn("%s: %s -> %s" % (e, args, key_name))
if not o:
q = class_.all()
for pk, val in zip(class_.pk, args):
q.filter('%s =' % pk, val)
o = q.get()
if not o and 'create' in kw and kw['create']:
o = class_(key_name=key_name, ** dict(zip(class_.pk, args)))
o.put()
if o:
o.migrate()
return o
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the element module."""
import base64
import unittest
import element
import util
class TestElement(unittest.TestCase):
  """Tests for the element.Element class and its subclasses."""

  def testProperties(self):
    """Arbitrary keyword properties become attributes of the element."""
    el = element.Element(element.Gadget.class_type,
                         key='value')
    self.assertEquals('value', el.key)

  def testFormElement(self):
    """Input elements default to an empty value."""
    el = element.Input('input')
    self.assertEquals(element.Input.class_type, el.type)
    self.assertEquals(el.value, '')
    self.assertEquals(el.name, 'input')

  def testImage(self):
    """Image elements keep their url and dimensions."""
    image = element.Image('http://test.com/image.png', width=100, height=100)
    self.assertEquals(element.Image.class_type, image.type)
    self.assertEquals(image.url, 'http://test.com/image.png')
    self.assertEquals(image.width, 100)
    self.assertEquals(image.height, 100)

  def testAttachment(self):
    """Attachment elements keep their caption and raw data."""
    attachment = element.Attachment(caption='My Favorite', data='SomefakeData')
    self.assertEquals(element.Attachment.class_type, attachment.type)
    self.assertEquals(attachment.caption, 'My Favorite')
    self.assertEquals(attachment.data, 'SomefakeData')

  def testGadget(self):
    """Gadget elements keep their url."""
    gadget = element.Gadget('http://test.com/gadget.xml')
    self.assertEquals(element.Gadget.class_type, gadget.type)
    self.assertEquals(gadget.url, 'http://test.com/gadget.xml')

  def testInstaller(self):
    """Installer elements keep their manifest url."""
    installer = element.Installer('http://test.com/installer.xml')
    self.assertEquals(element.Installer.class_type, installer.type)
    self.assertEquals(installer.manifest, 'http://test.com/installer.xml')

  def testSerialize(self):
    """Serializing an image yields exactly its three properties."""
    image = element.Image('http://test.com/image.png', width=100, height=100)
    s = util.serialize(image)
    # we should really only have three things to serialize
    props = s['properties']
    self.assertEquals(len(props), 3)
    self.assertEquals(props['url'], 'http://test.com/image.png')
    self.assertEquals(props['width'], 100)
    self.assertEquals(props['height'], 100)

  def testSerializeAttachment(self):
    """Attachment data is base64-encoded on serialization."""
    attachment = element.Attachment(caption='My Favorite', data='SomefakeData')
    s = util.serialize(attachment)
    # we should really have two things to serialize
    props = s['properties']
    self.assertEquals(len(props), 2)
    self.assertEquals(props['caption'], 'My Favorite')
    self.assertEquals(props['data'], base64.encodestring('SomefakeData'))
    # The element itself keeps the raw (unencoded) data.
    self.assertEquals(attachment.data, 'SomefakeData')

  def testSerializeLine(self):
    """Line serialization uses the short wire names for type/alignment."""
    line = element.Line(element.Line.TYPE_H1, alignment=element.Line.ALIGN_LEFT)
    s = util.serialize(line)
    props = s['properties']
    self.assertEquals(len(props), 2)
    self.assertEquals(props['alignment'], 'l')
    self.assertEquals(props['lineType'], 'h1')

  def testSerializeGadget(self):
    """Gadget serialization includes the url and all custom props."""
    gadget = element.Gadget('http://test.com', {'prop1': 'a', 'prop_cap': None})
    s = util.serialize(gadget)
    props = s['properties']
    self.assertEquals(len(props), 3)
    self.assertEquals(props['url'], 'http://test.com')
    self.assertEquals(props['prop1'], 'a')
    self.assertEquals(props['prop_cap'], None)

  def testGadgetElementFromJson(self):
    """Element.from_json reconstructs a Gadget from wire JSON."""
    url = 'http://www.foo.com/gadget.xml'
    json = {
        'type': element.Gadget.class_type,
        'properties': {
            'url': url,
        }
    }
    gadget = element.Element.from_json(json)
    self.assertEquals(element.Gadget.class_type, gadget.type)
    self.assertEquals(url, gadget.url)

  def testImageElementFromJson(self):
    """Element.from_json reconstructs an Image with all its properties."""
    url = 'http://www.foo.com/image.png'
    width = '32'
    height = '32'
    attachment_id = '2'
    caption = 'Test Image'
    json = {
        'type': element.Image.class_type,
        'properties': {
            'url': url,
            'width': width,
            'height': height,
            'attachmentId': attachment_id,
            'caption': caption,
        }
    }
    image = element.Element.from_json(json)
    self.assertEquals(element.Image.class_type, image.type)
    self.assertEquals(url, image.url)
    self.assertEquals(width, image.width)
    self.assertEquals(height, image.height)
    self.assertEquals(attachment_id, image.attachmentId)
    self.assertEquals(caption, image.caption)

  def testAttachmentElementFromJson(self):
    """Element.from_json reconstructs an Attachment with all properties."""
    caption = 'fake caption'
    data = 'fake data'
    mime_type = 'fake mime'
    attachment_id = 'fake id'
    attachment_url = 'fake URL'
    json = {
        'type': element.Attachment.class_type,
        'properties': {
            'caption': caption,
            'data': data,
            'mimeType': mime_type,
            'attachmentId': attachment_id,
            'attachmentUrl': attachment_url,
        }
    }
    attachment = element.Element.from_json(json)
    self.assertEquals(element.Attachment.class_type, attachment.type)
    self.assertEquals(caption, attachment.caption)
    self.assertEquals(data, attachment.data)
    self.assertEquals(mime_type, attachment.mimeType)
    self.assertEquals(attachment_id, attachment.attachmentId)
    self.assertEquals(attachment_url, attachment.attachmentUrl)

  def testFormElementFromJson(self):
    """Element.from_json reconstructs a form element (Label)."""
    name = 'button'
    value = 'value'
    default_value = 'foo'
    json = {
        'type': element.Label.class_type,
        'properties': {
            'name': name,
            'value': value,
            'defaultValue': default_value,
        }
    }
    el = element.Element.from_json(json)
    self.assertEquals(element.Label.class_type, el.type)
    self.assertEquals(name, el.name)
    self.assertEquals(value, el.value)

  def testCanInstantiate(self):
    """Every element type declared in element.ALL can be constructed."""
    bag = [element.Check(name='check', value='value'),
           element.Button(name='button', value='caption'),
           element.Input(name='input', value='caption'),
           element.Label(label_for='button', caption='caption'),
           element.RadioButton(name='name', group='group'),
           element.RadioButtonGroup(name='name', value='value'),
           element.Password(name='name', value='geheim'),
           element.TextArea(name='name', value='\n\n\n'),
           element.Installer(manifest='test.com/installer.xml'),
           element.Line(line_type='type',
                        indent='3',
                        alignment='r',
                        direction='d'),
           element.Gadget(url='test.com/gadget.xml',
                         props={'key1': 'val1', 'key2': 'val2'}),
           element.Image(url='test.com/image.png', width=100, height=200),
           element.Attachment(caption='fake caption', data='fake data')]
    types_constructed = set([type(x) for x in bag])
    types_required = set(element.ALL.values())
    # Renamed for clarity: the previous names described the opposite of
    # what each set held.
    unexpected_types = types_constructed.difference(types_required)
    self.assertEquals(unexpected_types, set())
    missing_types = types_required.difference(types_constructed)
    self.assertEquals(missing_types, set())
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the util module."""
__author__ = 'davidbyttow@google.com (David Byttow)'
import unittest
import ops
import util
class TestUtils(unittest.TestCase):
  """Tests utility functions."""

  def testIsIterable(self):
    """Containers are iterable; scalars, strings and types are not."""
    self.assertTrue(util.is_iterable([]))
    self.assertTrue(util.is_iterable({}))
    self.assertTrue(util.is_iterable(set()))
    self.assertTrue(util.is_iterable(()))
    self.assertFalse(util.is_iterable(42))
    self.assertFalse(util.is_iterable('list?'))
    self.assertFalse(util.is_iterable(object))

  def testIsDict(self):
    """Only real dicts count as dicts."""
    self.assertFalse(util.is_dict([]))
    self.assertTrue(util.is_dict({}))
    self.assertFalse(util.is_dict(set()))
    self.assertFalse(util.is_dict(()))
    self.assertFalse(util.is_dict(42))
    self.assertFalse(util.is_dict('dict?'))
    self.assertFalse(util.is_dict(object))

  def testIsUserDefinedNewStyleClass(self):
    """Only instances of user-defined new-style classes qualify."""
    class OldClass:
      pass

    class NewClass(object):
      pass
    self.assertFalse(util.is_user_defined_new_style_class(OldClass()))
    self.assertTrue(util.is_user_defined_new_style_class(NewClass()))
    self.assertFalse(util.is_user_defined_new_style_class({}))
    self.assertFalse(util.is_user_defined_new_style_class(()))
    self.assertFalse(util.is_user_defined_new_style_class(42))
    self.assertFalse(util.is_user_defined_new_style_class('instance?'))

  def testLowerCamelCase(self):
    """snake_case converts to lowerCamelCase; underscores collapse."""
    self.assertEquals('foo', util.lower_camel_case('foo'))
    self.assertEquals('fooBar', util.lower_camel_case('foo_bar'))
    self.assertEquals('fooBar', util.lower_camel_case('fooBar'))
    self.assertEquals('blipId', util.lower_camel_case('blip_id'))
    self.assertEquals('fooBar', util.lower_camel_case('foo__bar'))
    self.assertEquals('fooBarBaz', util.lower_camel_case('foo_bar_baz'))
    self.assertEquals('f', util.lower_camel_case('f'))
    self.assertEquals('f', util.lower_camel_case('f_'))
    self.assertEquals('', util.lower_camel_case(''))
    self.assertEquals('', util.lower_camel_case('_'))
    self.assertEquals('aBCDEF', util.lower_camel_case('_a_b_c_d_e_f_'))

  def assertListsEqual(self, a, b):
    """Helper: assert two sequences have equal length and elements."""
    self.assertEquals(len(a), len(b))
    # Idiomatic pairwise comparison instead of indexing by range(len(a)).
    for x, y in zip(a, b):
      self.assertEquals(x, y)

  def assertDictsEqual(self, a, b):
    """Helper: assert two dicts have the same keys and values."""
    # len(d) is equivalent to (and cheaper than) len(d.keys()).
    self.assertEquals(len(a), len(b))
    for k, v in a.iteritems():
      self.assertEquals(v, b[k])

  def testSerializeList(self):
    """Lists serialize element-wise."""
    data = [1, 2, 3]
    output = util.serialize(data)
    self.assertListsEqual(data, output)

  def testSerializeDict(self):
    """Dict keys are camel-cased on serialization."""
    data = {'key': 'value', 'under_score': 'value2'}
    expected = {'key': 'value', 'underScore': 'value2'}
    output = util.serialize(data)
    self.assertDictsEqual(expected, output)

  def testNonNoneDict(self):
    """non_none_dict drops entries whose value is None."""
    a = {'a': 1, 'b': 1}
    self.assertDictsEqual(a, util.non_none_dict(a))
    b = a.copy()
    b['c'] = None
    self.assertDictsEqual(a, util.non_none_dict(b))

  def testForceUnicode(self):
    """force_unicode handles str, numbers, UTF-8 bytes and unicode."""
    self.assertEquals(u"aaa", util.force_unicode("aaa"))
    self.assertEquals(u"12", util.force_unicode(12))
    self.assertEquals(u"\u0430\u0431\u0432",
                      util.force_unicode("\xd0\xb0\xd0\xb1\xd0\xb2"))
    self.assertEquals(u'\u30e6\u30cb\u30b3\u30fc\u30c9',
                      util.force_unicode(u'\u30e6\u30cb\u30b3\u30fc\u30c9'))

  def testSerializeAttributes(self):
    """Only public data attributes are serialized; methods are skipped."""
    class Data(object):
      def __init__(self):
        self.public = 1
        self._protected = 2
        self.__private = 3

      def Func(self):
        pass

    data = Data()
    output = util.serialize(data)
    # Functions and non-public fields should not be serialized.
    self.assertEquals(1, len(output.keys()))
    self.assertEquals(data.public, output['public'])

  def testStringEnum(self):
    """StringEnum exposes each given name as an attribute of itself."""
    util.StringEnum()
    single = util.StringEnum('foo')
    self.assertEquals('foo', single.foo)
    multi = util.StringEnum('foo', 'bar')
    self.assertEquals('foo', multi.foo)
    self.assertEquals('bar', multi.bar)

  def testParseMarkup(self):
    """parse_markup strips tags and maps <br>/<p> to newlines."""
    self.assertEquals('foo', util.parse_markup('foo'))
    self.assertEquals('foo bar', util.parse_markup('foo <b>bar</b>'))
    self.assertEquals('foo\nbar', util.parse_markup('foo<br>bar'))
    self.assertEquals('foo\nbar', util.parse_markup('foo<p indent="3">bar'))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import element
import errors
import util
class Annotation(object):
  """A single name/value pair attached to a range of blip content.

  An annotation spans [start, end) over the document text. Clients interpret
  the reserved style/* keys below when rendering; any other key simply
  stores data on the range.
  """

  # Reserved annotation keys understood by the client for text styling.
  BACKGROUND_COLOR = "style/backgroundColor"
  COLOR = "style/color"
  FONT_FAMILY = "style/fontFamily"
  FONT_SIZE = "style/fontSize"
  FONT_STYLE = "style/fontStyle"
  FONT_WEIGHT = "style/fontWeight"
  TEXT_DECORATION = "style/textDecoration"
  VERTICAL_ALIGN = "style/verticalAlign"

  def __init__(self, name, value, start, end):
    self._name = name
    self._value = value
    self._start = start
    self._end = end

  @property
  def name(self):
    """The annotation's key."""
    return self._name

  @property
  def value(self):
    """The annotation's payload."""
    return self._value

  @property
  def start(self):
    """Index of the first covered character."""
    return self._start

  @property
  def end(self):
    """Index one past the last covered character."""
    return self._end

  def _shift(self, where, inc):
    """Move each endpoint located at or beyond 'where' by 'inc'."""
    if self._start >= where:
      self._start += inc
    if self._end >= where:
      self._end += inc

  def serialize(self):
    """Return a JSON-ready dict holding the name, value and range."""
    range_json = {'start': self._start, 'end': self._end}
    return {'name': self._name,
            'value': self._value,
            'range': range_json}
class Annotations(object):
  """A dictionary-like object containing the annotations, keyed by name."""

  def __init__(self, operation_queue, blip):
    self._operation_queue = operation_queue
    self._blip = blip
    # Maps annotation name -> list of Annotation instances.
    self._store = {}

  def __contains__(self, what):
    # Accept either an Annotation instance or a plain name.
    if isinstance(what, Annotation):
      what = what.name
    return what in self._store

  def _add_internal(self, name, value, start, end):
    """Internal add annotation; does not send out operations."""
    if name in self._store:
      # TODO: use bisect to make this more efficient.
      new_list = []
      for existing in self._store[name]:
        if start > existing.end or end < existing.start:
          # No overlap: keep the existing annotation untouched.
          new_list.append(existing)
        else:
          if existing.value == value:
            # Same value: merge the overlapping ranges into one.
            start = min(existing.start, start)
            end = max(existing.end, end)
          else:
            # Different value: chop the overlapped bits off the existing
            # annotation, keeping any head and tail that stick out.
            if existing.start < start:
              new_list.append(Annotation(
                  existing.name, existing.value, existing.start, start))
            if existing.end > end:
              # FIX: the surviving tail runs from 'end' to 'existing.end';
              # the range was previously built inverted.
              new_list.append(Annotation(
                  existing.name, existing.value, end, existing.end))
      new_list.append(Annotation(name, value, start, end))
      self._store[name] = new_list
    else:
      self._store[name] = [Annotation(name, value, start, end)]

  def _delete_internal(self, name, start=0, end=-1):
    """Remove the passed annotation from the internal representation."""
    if not name in self._store:
      return
    # Negative end counts back from the blip length.
    if end < 0:
      end = len(self._blip) + end
    new_list = []
    for a in self._store[name]:
      if start > a.end or end < a.start:
        # No overlap: keep as is.
        new_list.append(a)
      elif start < a.start and end > a.end:
        # Fully covered: drop it.
        continue
      else:
        # Partially covered: keep the head and/or tail that survive.
        if a.start < start:
          new_list.append(Annotation(name, a.value, a.start, start))
        if a.end > end:
          new_list.append(Annotation(name, a.value, end, a.end))
    if new_list:
      self._store[name] = new_list
    else:
      del self._store[name]

  def _shift(self, where, inc):
    """Shift annotations by 'inc' if they (partly) overlap with 'where'."""
    for annotations in self._store.values():
      for annotation in annotations:
        annotation._shift(where, inc)
    # Merge fragmented annotations that should be contiguous, for example:
    # Annotation('foo', 'bar', 1, 2) and Annotation('foo', 'bar', 2, 3).
    # FIX: the previous implementation deleted by an index computed from a
    # slice starting at i+1 ('del annotations[j]'), removing the wrong
    # element while iterating; this single forward pass merges each
    # annotation into its predecessor when they touch with equal values.
    for name, annotations in self._store.items():
      new_list = []
      for annotation in annotations:
        if (new_list and new_list[-1].end == annotation.start
            and new_list[-1].value == annotation.value):
          last = new_list[-1]
          new_list[-1] = Annotation(name, last.value,
                                    last.start, annotation.end)
        else:
          new_list.append(annotation)
      self._store[name] = new_list

  def __len__(self):
    return len(self._store)

  def __getitem__(self, key):
    return self._store[key]

  def __iter__(self):
    # Yield every annotation across all names.
    for l in self._store.values():
      for ann in l:
        yield ann

  def names(self):
    """Return the names of the annotations in the store."""
    return self._store.keys()

  def serialize(self):
    """Return a list of the serialized annotations."""
    res = []
    for v in self._store.values():
      res += [a.serialize() for a in v]
    return res
class Blips(object):
  """A dictionary-like object containing the blips, keyed on blip ID."""

  def __init__(self, blips):
    self._blips = blips

  def __getitem__(self, blip_id):
    return self._blips[blip_id]

  def __iter__(self):
    # Iterates over the blip ids, like a dict.
    return self._blips.__iter__()

  def __len__(self):
    return len(self._blips)

  def _add(self, ablip):
    """Register a blip under its own id."""
    self._blips[ablip.blip_id] = ablip

  def _remove_with_id(self, blip_id):
    """Drop a blip and unlink it from its parent's child list."""
    doomed = self._blips[blip_id]
    if not doomed:
      return
    parent_blip = doomed.parent_blip
    if parent_blip:
      parent_blip._child_blip_ids.remove(blip_id)
    del self._blips[blip_id]

  def get(self, blip_id, default_value=None):
    """Retrieves a blip.

    Returns:
      A Blip object. If none found for the ID, it returns None,
      or if default_value is specified, it returns that.
    """
    return self._blips.get(blip_id, default_value)

  def serialize(self):
    """Serializes the blips.

    Returns:
      A dict of serialized blips.
    """
    return dict((blip_id, item.serialize())
                for blip_id, item in self._blips.items())
class BlipRefs(object):
  """Represents a set of references to contents in a blip.

  For example, a BlipRefs instance can represent the results
  of a search, an explicitly set range, a regular expression,
  or refer to the entire blip. BlipRefs are used to express
  operations on a blip in a consistent way that can easily
  be transfered to the server.

  The typical way of creating a BlipRefs object is to use
  selector methods on the Blip object. Developers will not
  usually instantiate a BlipRefs object directly.
  """

  # The kinds of modification a BlipRefs can apply.
  DELETE = 'DELETE'
  REPLACE = 'REPLACE'
  INSERT = 'INSERT'
  INSERT_AFTER = 'INSERT_AFTER'
  ANNOTATE = 'ANNOTATE'
  CLEAR_ANNOTATION = 'CLEAR_ANNOTATION'
  UPDATE_ELEMENT = 'UPDATE_ELEMENT'

  def __init__(self, blip, maxres=1):
    """Bind this reference to 'blip', yielding at most 'maxres' hits."""
    self._blip = blip
    self._maxres = maxres

  @classmethod
  def all(cls, blip, findwhat, maxres=-1, **restrictions):
    """Construct an instance representing the search for text or elements."""
    obj = cls(blip, maxres)
    obj._findwhat = findwhat
    obj._restrictions = restrictions
    obj._hits = lambda: obj._find(findwhat, maxres, **restrictions)
    if findwhat is None:
      # No findWhat, take the entire blip
      obj._params = {}
    else:
      query = {'maxRes': maxres}
      if isinstance(findwhat, basestring):
        query['textMatch'] = findwhat
      else:
        query['elementMatch'] = findwhat.class_type
        query['restrictions'] = restrictions
      obj._params = {'modifyQuery': query}
    return obj

  @classmethod
  def range(cls, blip, begin, end):
    """Constructs an instance representing an explicitly set range."""
    obj = cls(blip)
    obj._begin = begin
    obj._end = end
    obj._hits = lambda: [(begin, end)]
    obj._params = {'range': {'start': begin, 'end': end}}
    return obj

  def _elem_matches(self, elem, clz, **restrictions):
    """Return whether elem is a clz whose attributes match restrictions."""
    if not isinstance(elem, clz):
      return False
    for key, val in restrictions.items():
      if getattr(elem, key) != val:
        return False
    return True

  def _find(self, what, maxres=-1, **restrictions):
    """Iterates where 'what' occurs in the associated blip.

    What can be either a string or a class reference.

    Examples:
      self._find('hello') will return the first occurence of the word hello
      self._find(element.Gadget, url='http://example.com/gadget.xml')
          will return the first gadget that has as url example.com.

    Args:
      what: what to search for. Can be a class or a string. The class
          should be an element from element.py
      maxres: number of results to return at most, or <= 0 for all.
      restrictions: if what specifies a class, further restrictions
          of the found instances.

    Yields:
      Tuples indicating the range of the matches. For a one
      character/element match at position x, (x, x+1) is yielded.
    """
    blip = self._blip
    if what is None:
      yield 0, len(blip)
      # FIX: was 'raise StopIteration' - equivalent in Python 2 but broken
      # under PEP 479 (Python 3.7+); a bare return ends a generator cleanly.
      return
    if isinstance(what, basestring):
      idx = blip._content.find(what)
      count = 0
      while idx != -1:
        yield idx, idx + len(what)
        count += 1
        if count == maxres:
          return
        idx = blip._content.find(what, idx + len(what))
    else:
      count = 0
      for idx, el in blip._elements.items():
        if self._elem_matches(el, what, **restrictions):
          yield idx, idx + 1
          count += 1
          if count == maxres:
            return

  def _execute(self, modify_how, what, bundled_annotations=None):
    """Executes this BlipRefs object.

    Args:
      modify_how: What to do. Any of the operation declared at the top.
      what: Depending on the operation. For delete, has to be None.
        For the others it is a singleton, a list or a function returning
        what to do; for ANNOTATE tuples of (key, value), for the others
        either string or elements.
        If what is a function, it takes three parameters, the content of
        the blip, the beginning of the matching range and the end.
      bundled_annotations: Annotations to apply immediately.

    Raises:
      IndexError when trying to access content outside of the blip.
      ValueError when called with the wrong values.

    Returns:
      self for chainability.
    """
    blip = self._blip

    if modify_how != BlipRefs.DELETE:
      if type(what) != list:
        what = [what]
      next_index = 0
      matched = []

    # updated_elements is used to store the element type of the
    # element to update
    updated_elements = []

    # For now, if we find one markup, we'll use it everywhere.
    next = None
    hit_found = False

    for start, end in self._hits():
      hit_found = True
      # Normalize negative indices (counted from the end of the blip).
      if start < 0:
        start += len(blip)
        if end == 0:
          end += len(blip)
      if end < 0:
        end += len(blip)

      if len(blip) == 0:
        if start != 0 or end != 0:
          raise IndexError('Start and end have to be 0 for empty document')
      elif start < 0 or end < 1 or start >= len(blip) or end > len(blip):
        raise IndexError('Position outside the document')

      if modify_how == BlipRefs.DELETE:
        # Remove the elements, annotations and text covered by the range,
        # shifting everything after it to the left.
        for i in range(start, end):
          if i in blip._elements:
            del blip._elements[i]
        blip._delete_annotations(start, end)
        blip._shift(end, start - end)
        blip._content = blip._content[:start] + blip._content[end:]
      else:
        if callable(what):
          next = what(blip._content, start, end)
          matched.append(next)
        else:
          # Cycle through the supplied values, one per hit.
          next = what[next_index]
          next_index = (next_index + 1) % len(what)
        if isinstance(next, str):
          next = util.force_unicode(next)

        if modify_how == BlipRefs.ANNOTATE:
          key, value = next
          blip.annotations._add_internal(key, value, start, end)
        elif modify_how == BlipRefs.CLEAR_ANNOTATION:
          blip.annotations._delete_internal(next, start, end)
        elif modify_how == BlipRefs.UPDATE_ELEMENT:
          el = blip._elements.get(start)
          # FIX: previously tested 'if not element:' - the imported module,
          # which is always truthy - so a missing element was never reported.
          if not el:
            raise ValueError('No element found at index %s' % start)
          # the passing around of types this way feels a bit dirty:
          updated_elements.append(element.Element.from_json(
              {'type': el.type, 'properties': next}))
          for k, b in next.items():
            setattr(el, k, b)
        else:
          if modify_how == BlipRefs.INSERT:
            end = start
          elif modify_how == BlipRefs.INSERT_AFTER:
            start = end
          elif modify_how == BlipRefs.REPLACE:
            pass
          else:
            raise ValueError('Unexpected modify_how: ' + modify_how)

          # Elements occupy a single placeholder character in the content.
          if isinstance(next, element.Element):
            text = ' '
          else:
            text = next
          # in the case of a replace, and the replacement text is shorter,
          # delete the delta.
          if start != end and len(text) < end - start:
            blip._delete_annotations(start + len(text), end)
          blip._shift(end, len(text) + start - end)
          blip._content = blip._content[:start] + text + blip._content[end:]

          if bundled_annotations:
            end_annotation = start + len(text)
            blip._delete_annotations(start, end_annotation)
            for key, value in bundled_annotations:
              blip.annotations._add_internal(key, value, start, end_annotation)

          if isinstance(next, element.Element):
            blip._elements[start] = next

    # No match found, return immediately without generating op.
    if not hit_found:
      return

    # Mirror the local mutation as an operation for the server.
    operation = blip._operation_queue.document_modify(blip.wave_id,
                                                      blip.wavelet_id,
                                                      blip.blip_id)
    for param, value in self._params.items():
      operation.set_param(param, value)

    modify_action = {'modifyHow': modify_how}
    if modify_how == BlipRefs.DELETE:
      pass
    elif modify_how == BlipRefs.UPDATE_ELEMENT:
      modify_action['elements'] = updated_elements
    elif (modify_how == BlipRefs.REPLACE or
          modify_how == BlipRefs.INSERT or
          modify_how == BlipRefs.INSERT_AFTER):
      if callable(what):
        what = matched
      if what:
        if not isinstance(next, element.Element):
          modify_action['values'] = [util.force_unicode(value)
                                     for value in what]
        else:
          modify_action['elements'] = what
    elif modify_how == BlipRefs.ANNOTATE:
      modify_action['values'] = [x[1] for x in what]
      modify_action['annotationKey'] = what[0][0]
    elif modify_how == BlipRefs.CLEAR_ANNOTATION:
      modify_action['annotationKey'] = what[0]

    if bundled_annotations:
      modify_action['bundledAnnotations'] = [
          {'key': key, 'value': value} for key, value in bundled_annotations]
    operation.set_param('modifyAction', modify_action)
    return self

  def insert(self, what, bundled_annotations=None):
    """Inserts what at the matched positions."""
    return self._execute(
        BlipRefs.INSERT, what, bundled_annotations=bundled_annotations)

  def insert_after(self, what, bundled_annotations=None):
    """Inserts what just after the matched positions."""
    return self._execute(
        BlipRefs.INSERT_AFTER, what, bundled_annotations=bundled_annotations)

  def replace(self, what, bundled_annotations=None):
    """Replaces the matched positions with what."""
    return self._execute(
        BlipRefs.REPLACE, what, bundled_annotations=bundled_annotations)

  def delete(self):
    """Deletes the content at the matched positions."""
    return self._execute(BlipRefs.DELETE, None)

  def annotate(self, name, value=None):
    """Annotates the content at the matched positions.

    You can either specify both name and value to set the
    same annotation, or supply as the first parameter something
    that yields name/value pairs. The name and value should both be strings.
    """
    if value is None:
      what = name
    else:
      what = (name, value)
    return self._execute(BlipRefs.ANNOTATE, what)

  def clear_annotation(self, name):
    """Clears the annotation at the matched positions."""
    return self._execute(BlipRefs.CLEAR_ANNOTATION, name)

  def update_element(self, new_values):
    """Update an existing element with a set of new values."""
    return self._execute(BlipRefs.UPDATE_ELEMENT, new_values)

  def __nonzero__(self):
    """Return whether we have a value."""
    for start, end in self._hits():
      return True
    return False

  def value(self):
    """Convenience method to convert a BlipRefs to value of its first match."""
    for start, end in self._hits():
      if end - start == 1 and start in self._blip._elements:
        return self._blip._elements[start]
      else:
        return self._blip.text[start:end]
    raise ValueError('BlipRefs has no values')

  def __getattr__(self, attribute):
    """Mirror the getattr of value().

    This allows for clever things like
      first(IMAGE).url
    or
      blip.annotate_with(key, value).upper()
    """
    return getattr(self.value(), attribute)

  def __radd__(self, other):
    """Make it possible to add this to a string."""
    return other + self.value()

  def __cmp__(self, other):
    """Support comparision with target."""
    return cmp(self.value(), other)

  def __iter__(self):
    for start_end in self._hits():
      yield start_end
class Blip(object):
"""Models a single blip instance.
Blips are essentially the documents that make up a conversation. Blips can
live in a hierarchy of blips. A root blip has no parent blip id, but all
blips have the ids of the wave and wavelet that they are associated with.
Blips also contain annotations, content and elements, which are accessed via
the Document object.
"""
  def __init__(self, json, other_blips, operation_queue):
    """Inits this blip with JSON data.

    Args:
      json: JSON data dictionary from Wave server.
      other_blips: A dictionary like object that can be used to resolve
          ids of blips to blips.
      operation_queue: an OperationQueue object to store generated operations
          in.
    """
    # Missing fields fall back to benign defaults so partial payloads parse.
    self._blip_id = json.get('blipId')
    self._operation_queue = operation_queue
    self._child_blip_ids = set(json.get('childBlipIds', []))
    self._content = json.get('content', '')
    self._contributors = set(json.get('contributors', []))
    self._creator = json.get('creator')
    self._last_modified_time = json.get('lastModifiedTime', 0)
    self._version = json.get('version', 0)
    self._parent_blip_id = json.get('parentBlipId')
    self._wave_id = json.get('waveId')
    self._wavelet_id = json.get('waveletId')
    # Wrap a plain dict so lookups behave uniformly through Blips.
    if isinstance(other_blips, Blips):
      self._other_blips = other_blips
    else:
      self._other_blips = Blips(other_blips)
    # Rebuild the annotation store from the serialized name/value/range
    # triples without generating outgoing operations.
    self._annotations = Annotations(operation_queue, self)
    for annjson in json.get('annotations', []):
      r = annjson['range']
      self._annotations._add_internal(annjson['name'],
                                      annjson['value'],
                                      r['start'],
                                      r['end'])
    # Elements are keyed by their integer offset into the content; JSON
    # object keys arrive as strings, hence the int() conversion.
    self._elements = {}
    json_elements = json.get('elements', {})
    for elem in json_elements:
      self._elements[int(elem)] = element.Element.from_json(json_elements[elem])
    # Keep the raw JSON around for debugging / forward compatibility.
    self.raw_data = json
@property
def blip_id(self):
"""The id of this blip."""
return self._blip_id
@property
def wave_id(self):
"""The id of the wave that this blip belongs to."""
return self._wave_id
@property
def wavelet_id(self):
"""The id of the wavelet that this blip belongs to."""
return self._wavelet_id
@property
def child_blip_ids(self):
"""The set of the ids of this blip's children."""
return self._child_blip_ids
@property
def child_blips(self):
"""The set of blips that are children of this blip."""
return set([self._other_blips[blid_id] for blid_id in self._child_blip_ids
if blid_id in self._other_blips])
@property
def contributors(self):
"""The set of participant ids that contributed to this blip."""
return self._contributors
@property
def creator(self):
"""The id of the participant that created this blip."""
return self._creator
@property
def last_modified_time(self):
"""The time in seconds since epoch when this blip was last modified."""
return self._last_modified_time
  @property
  def version(self):
    """The version of this blip."""
    return self._version
  @property
  def parent_blip_id(self):
    """The parent blip_id or None if this is the root blip."""
    return self._parent_blip_id
  @property
  def parent_blip(self):
    """The parent blip or None if it is the root.

    Also returns None when the parent blip is not part of the context.
    """
    # if parent_blip_id is None, get will also return None
    return self._other_blips.get(self._parent_blip_id)
@property
def inline_blip_offset(self):
"""The offset in the parent if this blip is inline or -1 if not.
If the parent is not in the context, this function will always
return -1 since it can't determine the inline blip status.
"""
parent = self.parent_blip
if not parent:
return -1
for offset, el in parent._elements.items():
if el.type == element.Element.INLINE_BLIP_TYPE and el.id == self.blip_id:
return offset
return -1
  def is_root(self):
    """Returns whether this is the root blip of a wavelet.

    A blip is the root exactly when it has no parent blip id.
    """
    return self._parent_blip_id is None
  @property
  def annotations(self):
    """The Annotations collection for this document."""
    return self._annotations
  @property
  def elements(self):
    """Returns the elements of this document (without their positions).

    The elements of a blip are things like form elements and gadgets
    that cannot be expressed as plain text. In the text of the blip, you'll
    typically find a space as a place holder for the element.
    If you want to retrieve the element at a particular index in the blip, use
    blip[index].value().
    """
    return self._elements.values()
  def __len__(self):
    """Returns the length of the text content of this blip."""
    return len(self._content)
def __getitem__(self, item):
"""returns a BlipRefs for the given slice."""
if isinstance(item, slice):
if item.step:
raise errors.Error('Step not supported for blip slices')
return self.range(item.start, item.stop)
else:
return self.at(item)
  def __setitem__(self, item, value):
    """Short cut for self.range/at(item).replace(value)."""
    self.__getitem__(item).replace(value)
  def __delitem__(self, item):
    """Short cut for self.range/at(item).delete()."""
    self.__getitem__(item).delete()
def _shift(self, where, inc):
"""Move element and annotations after 'where' up by 'inc'."""
new_elements = {}
for idx, el in self._elements.items():
if idx >= where:
idx += inc
new_elements[idx] = el
self._elements = new_elements
self._annotations._shift(where, inc)
  def _delete_annotations(self, start, end):
    """Deletes all annotations between 'start' and 'end'."""
    # Each named annotation is cleared over the range separately.
    for annotation_name in self._annotations.names():
      self._annotations._delete_internal(annotation_name, start, end)
  def all(self, findwhat=None, maxres=-1, **restrictions):
    """Returns a BlipRefs object representing all results for the search.

    Args:
      findwhat: text or element (class) to search for; None matches all.
      maxres: maximum number of results; -1 means unlimited.
      restrictions: if searching for an element, additional element
          properties to filter on, like the url of a Gadget.
    """
    return BlipRefs.all(self, findwhat, maxres, **restrictions)
  def first(self, findwhat=None, **restrictions):
    """Returns a BlipRefs object representing the first result for the search.

    Equivalent to all() with maxres=1.
    If searching for an element, the restrictions can be used to specify
    additional element properties to filter on, like the url of a Gadget.
    """
    return BlipRefs.all(self, findwhat, 1, **restrictions)
  def at(self, index):
    """Returns a BlipRefs object representing a 1-character range."""
    return BlipRefs.range(self, index, index + 1)
  def range(self, start, end):
    """Returns a BlipRefs object representing the [start, end) range."""
    # NOTE: shadows the builtin 'range' inside this class; kept for API
    # compatibility with existing callers.
    return BlipRefs.range(self, start, end)
def serialize(self):
"""Return a dictionary representation of this blip ready for json."""
return {'blipId': self._blip_id,
'childBlipIds': list(self._child_blip_ids),
'content': self._content,
'creator': self._creator,
'contributors': list(self._contributors),
'lastModifiedTime': self._last_modified_time,
'version': self._version,
'parentBlipId': self._parent_blip_id,
'waveId': self._wave_id,
'waveletId': self._wavelet_id,
'annotations': self._annotations.serialize(),
'elements': dict([(index, e.serialize())
for index, e in self._elements.items()])
}
def proxy_for(self, proxy_for_id):
"""Return a view on this blip that will proxy for the specified id.
A shallow copy of the current blip is returned with the proxy_for_id
set. Any modifications made to this copy will be done using the
proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will
be used.
"""
operation_queue = self._operation_queue.proxy_for(proxy_for_id)
res = Blip(json={},
other_blips={},
operation_queue=operation_queue)
res._blip_id = self._blip_id
res._child_blip_ids = self._child_blip_ids
res._content = self._content
res._contributors = self._contributors
res._creator = self._creator
res._last_modified_time = self._last_modified_time
res._version = self._version
res._parent_blip_id = self._parent_blip_id
res._wave_id = self._wave_id
res._wavelet_id = self._wavelet_id
res._other_blips = self._other_blips
res._annotations = self._annotations
res._elements = self._elements
res.raw_data = self.raw_data
return res
  @property
  def text(self):
    """Returns the raw text content of this document."""
    return self._content
def find(self, what, **restrictions):
"""Iterate to matching bits of contents.
Yield either elements or pieces of text.
"""
br = BlipRefs.all(self, what, **restrictions)
for start, end in br._hits():
if end - start == 1 and start in self._elements:
yield self._elements[start]
else:
yield self._content[start:end]
raise StopIteration
  def append(self, what, bundled_annotations=None):
    """Appends 'what' (text or element) to the end of the blip.

    Convenience method covering a common pattern; equivalent to
    all(findwhat=None).insert_after(what).
    """
    return BlipRefs.all(self, findwhat=None).insert_after(
        what, bundled_annotations=bundled_annotations)
  def reply(self):
    """Creates and returns a reply to this blip.

    The new child blip is queued as a blip_create_child operation and
    registered in the context's blip collection.
    """
    blip_data = self._operation_queue.blip_create_child(self.wave_id,
                                                        self.wavelet_id,
                                                        self.blip_id)
    new_blip = Blip(blip_data, self._other_blips, self._operation_queue)
    self._other_blips._add(new_blip)
    return new_blip
  def append_markup(self, markup):
    """Interprets the markup text as xhtml and appends the result to the doc.

    Args:
      markup: The markup'ed text to append.
    """
    markup = util.force_unicode(markup)
    self._operation_queue.document_append_markup(self.wave_id,
                                                 self.wavelet_id,
                                                 self.blip_id,
                                                 markup)
    # Mirror the change locally by appending the text rendering of the
    # markup, keeping this blip's content in sync with the queued op.
    self._content += util.parse_markup(markup)
  def insert_inline_blip(self, position):
    """Inserts an inline blip into this blip at a specific position.

    Args:
      position: Position to insert the blip at. This has to be greater than 0.

    Returns:
      The newly created Blip instance (not raw JSON data); it is also
      registered in the context's blip collection.

    Raises:
      IndexError: if position is not greater than 0.
    """
    if position <= 0:
      raise IndexError(('Illegal inline blip position: %d. Position has to ' +
                        'be greater than 0.') % position)
    blip_data = self._operation_queue.document_inline_blip_insert(
        self.wave_id,
        self.wavelet_id,
        self.blip_id,
        position)
    new_blip = Blip(blip_data, self._other_blips, self._operation_queue)
    self._other_blips._add(new_blip)
    return new_blip
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the robot module."""
import unittest
import events
import ops
import robot
import simplejson
# Canned server-side json payloads used as fixtures by the tests below.
BLIP_JSON = ('{"wdykLROk*13":'
             '{"lastModifiedTime":1242079608457,'
             '"contributors":["someguy@test.com"],'
             '"waveletId":"test.com!conv+root",'
             '"waveId":"test.com!wdykLROk*11",'
             '"parentBlipId":null,'
             '"version":3,'
             '"creator":"someguy@test.com",'
             '"content":"\\nContent!",'
             '"blipId":"wdykLROk*13","'
             'annotations":[{"range":{"start":0,"end":1},'
             '"name":"user/e/davidbyttow@google.com","value":"David"}],'
             '"elements":{},'
             '"childBlipIds":[]}'
             '}')

WAVELET_JSON = ('{"lastModifiedTime":1242079611003,'
                '"title":"A title",'
                '"waveletId":"test.com!conv+root",'
                '"rootBlipId":"wdykLROk*13",'
                '"dataDocuments":null,'
                '"creationTime":1242079608457,'
                '"waveId":"test.com!wdykLROk*11",'
                '"participants":["someguy@test.com","monty@appspot.com"],'
                '"creator":"someguy@test.com",'
                '"version":5}')

EVENTS_JSON = ('[{"timestamp":1242079611003,'
               '"modifiedBy":"someguy@test.com",'
               '"properties":{"participantsRemoved":[],'
               '"participantsAdded":["monty@appspot.com"]},'
               '"type":"WAVELET_PARTICIPANTS_CHANGED"}]')

# A complete incoming robot bundle: blips, wavelet and events.
TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % (
    BLIP_JSON, WAVELET_JSON, EVENTS_JSON)

# Reply to a wavelet-create operation in the current server format.
NEW_WAVE_JSON = [{"data":
                  {"waveletId": "wavesandbox.com!conv+root",
                   "blipId": "b+LrODcLZkDlu", "waveId":
                   "wavesandbox.com!w+LrODcLZkDlt"},
                  "id": "op2"}]

# The same reply in the older (extra-list-wrapped) server format.
# Fix: dropped the stray trailing semicolon of the original.
NEW_WAVE_JSON_OLD = [{'data':
                      [{'data':
                        {'waveletId': 'googlewave.com!conv+root',
                         'blipId': 'b+VqQXQbZkCP1',
                         'waveId': 'googlewave.com!w+VqQXQbZkCP0'},
                        'id': 'wavelet.create1265055048410'}],
                      'id': 'op10'}]
class TestRobot(unittest.TestCase):
  """Tests for the basic parsing of json in robots."""

  def setUp(self):
    self.robot = robot.Robot('Testy')

  def testCreateWave(self):
    """new_wave() parses both the current and the old reply format."""
    self.robot.submit = lambda x: NEW_WAVE_JSON
    new_wave = self.robot.new_wave('wavesandbox.com', submit=True)
    self.assertEqual('wavesandbox.com!w+LrODcLZkDlt', new_wave.wave_id)
    # Older servers wrap the reply in an extra list; see NEW_WAVE_JSON_OLD.
    self.robot.submit = lambda x: NEW_WAVE_JSON_OLD
    new_wave = self.robot.new_wave('googlewave.com', submit=True)
    self.assertEqual('googlewave.com!w+VqQXQbZkCP0', new_wave.wave_id)

  def testEventParsing(self):
    """A matching handler receives the parsed event and wavelet."""
    def check(event, wavelet):
      # Test some basic properties; the rest should be covered by
      # ops.CreateContext.
      root = wavelet.root_blip
      self.assertEqual(1, len(wavelet.blips))
      self.assertEqual('wdykLROk*13', root.blip_id)
      self.assertEqual('test.com!wdykLROk*11', root.wave_id)
      self.assertEqual('test.com!conv+root', root.wavelet_id)
      self.assertEqual('WAVELET_PARTICIPANTS_CHANGED', event.type)
      self.assertEqual({'participantsRemoved': [],
                        'participantsAdded': ['monty@appspot.com']},
                       event.properties)
      self.robot.test_called = True
    self.robot.test_called = False
    self.robot.register_handler(events.WaveletParticipantsChanged,
                                check)
    json = self.robot.process_events(TEST_JSON)
    self.assertTrue(self.robot.test_called)
    operations = simplejson.loads(json)
    # there should be one operation indicating the current version:
    self.assertEqual(1, len(operations))

  def testWrongEventsIgnored(self):
    """A handler for an event type not in the bundle is never invoked."""
    def check(event, wavelet):
      # Fix: the original set a dead local variable here; flip the real
      # flag so an erroneous invocation actually fails the test.
      self.robot.test_called = True
    self.robot.test_called = False
    # TEST_JSON only carries a WAVELET_PARTICIPANTS_CHANGED event, so a
    # BlipSubmitted handler must not fire.
    self.robot.register_handler(events.BlipSubmitted,
                                check)
    self.robot.process_events(TEST_JSON)
    self.assertFalse(self.robot.test_called)

  def testOperationParsing(self):
    """Handler-issued operations are serialized into the response."""
    def check(event, wavelet):
      wavelet.reply()
      wavelet.title = 'new title'
      wavelet.root_blip.append_markup('<b>Hello</b>')
    self.robot.register_handler(events.WaveletParticipantsChanged,
                                check)
    json = self.robot.process_events(TEST_JSON)
    operations = simplejson.loads(json)
    expected = set([ops.ROBOT_NOTIFY_CAPABILITIES_HASH,
                    ops.WAVELET_APPEND_BLIP,
                    ops.WAVELET_SET_TITLE,
                    ops.DOCUMENT_APPEND_MARKUP])
    methods = [operation['method'] for operation in operations]
    # Each expected method must appear exactly once and nothing else.
    for method in methods:
      self.assertTrue(method in expected)
      expected.remove(method)
    self.assertEquals(0, len(expected))

  def testSerializeWavelets(self):
    """A wavelet round-trips through serialize()/blind_wavelet()."""
    wavelet = self.robot.blind_wavelet(TEST_JSON)
    serialized = wavelet.serialize()
    unserialized = self.robot.blind_wavelet(serialized)
    self.assertEquals(wavelet.creator, unserialized.creator)
    self.assertEquals(wavelet.creation_time, unserialized.creation_time)
    self.assertEquals(wavelet.last_modified_time,
                      unserialized.last_modified_time)
    self.assertEquals(wavelet.root_blip.blip_id, unserialized.root_blip.blip_id)
    self.assertEquals(wavelet.title, unserialized.title)
    self.assertEquals(wavelet.wave_id, unserialized.wave_id)
    self.assertEquals(wavelet.wavelet_id, unserialized.wavelet_id)
    self.assertEquals(wavelet.domain, unserialized.domain)

  def testProxiedBlindWavelet(self):
    """Operations from a proxying blind wavelet carry proxyingFor."""
    def handler(event, wavelet):
      blind_wavelet = self.robot.blind_wavelet(TEST_JSON, 'proxyid')
      blind_wavelet.reply()
      blind_wavelet.submit_with(wavelet)
    self.robot.register_handler(events.WaveletParticipantsChanged, handler)
    json = self.robot.process_events(TEST_JSON)
    operations = simplejson.loads(json)
    self.assertEqual(2, len(operations))
    self.assertEquals(ops.ROBOT_NOTIFY_CAPABILITIES_HASH,
                      operations[0]['method'])
    self.assertEquals(ops.WAVELET_APPEND_BLIP, operations[1]['method'])
    self.assertEquals('proxyid', operations[1]['params']['proxyingFor'])

  def testCapabilitiesHashIncludesContextAndFilter(self):
    """The capabilities hash reflects both handler context and filter."""
    robot1 = robot.Robot('Robot1')
    robot1.register_handler(events.WaveletSelfAdded, lambda: '')
    robot2 = robot.Robot('Robot2')
    robot2.register_handler(events.WaveletSelfAdded, lambda: '',
                            context=events.Context.ALL)
    self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash())
    robot3 = robot.Robot('Robot3')
    # Bug fix: this registration previously targeted robot2, which left
    # robot3 without any handler and the filter effectively untested.
    robot3.register_handler(events.WaveletSelfAdded, lambda: '',
                            context=events.Context.ALL, filter="foo")
    self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash())
    self.assertNotEqual(robot1.capabilities_hash(), robot3.capabilities_hash())
    # robot2 and robot3 now differ only in the filter, proving the filter
    # is part of the hash.
    self.assertNotEqual(robot2.capabilities_hash(), robot3.capabilities_hash())
class TestGetCapabilitiesXml(unittest.TestCase):
  """Tests for the capabilities.xml document generated by a robot."""

  def setUp(self):
    self.robot = robot.Robot('Testy')
    # Pin the hash so the expected XML below is stable.
    self.robot.capabilities_hash = lambda: '1'

  def assertStringsEqual(self, s1, s2):
    """Asserts equality with a diff-friendly failure message."""
    self.assertEqual(s1, s2, 'Strings differ:\n%s--\n%s' % (s1, s2))

  def testDefault(self):
    """A robot without handlers yields an empty capabilities section."""
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    xml = self.robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)

  def testUrls(self):
    """Setting image/profile urls does not change the capabilities xml."""
    profile_robot = robot.Robot(
        'Testy',
        image_url='http://example.com/image.png',
        profile_url='http://example.com/profile.xml')
    profile_robot.capabilities_hash = lambda: '1'
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    xml = profile_robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)

  def testConsumerKey(self):
    """A configured consumer key shows up as a consumer_key element."""
    # setup_oauth doesn't work during testing, so heavy handed setting of
    # properties it is:
    self.robot._consumer_key = 'consumer'
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:consumer_key>consumer</w:consumer_key>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    xml = self.robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)

  def testCapsAndEvents(self):
    """Registered handlers appear as capability entries with context."""
    self.robot.register_handler(events.BlipSubmitted, None,
                                context=[events.Context.SELF,
                                         events.Context.ROOT])
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n'
        '  <w:capability name="%s" context="SELF,ROOT"/>\n'
        '</w:capabilities>\n'
        '</w:robot>\n') % (ops.PROTOCOL_VERSION, events.BlipSubmitted.type)
    xml = self.robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)
# Allow running the tests directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Run robot from the commandline for testing.
This robot_runner let's you define event handlers using flags and takes the
json input from the std in and writes out the json output to stdout.
for example
cat events | commandline_robot_runner.py \
--eventdef-blip_submitted="wavelet.title='title'"
"""
__author__ = 'douwe@google.com (Douwe Osinga)'
import sys
import urllib
from google3.pyglib import app
from google3.pyglib import flags
from google3.walkabout.externalagents import api
from google3.walkabout.externalagents.api import blip
from google3.walkabout.externalagents.api import element
from google3.walkabout.externalagents.api import errors
from google3.walkabout.externalagents.api import events
from google3.walkabout.externalagents.api import ops
from google3.walkabout.externalagents.api import robot
from google3.walkabout.externalagents.api import util
FLAGS = flags.FLAGS

# Dynamically define one --eventdef_<event_type> string flag per known
# event type, so a handler body can be supplied on the command line.
for event in events.ALL:
  flags.DEFINE_string('eventdef_' + event.type.lower(),
                      '',
                      'Event definition for the %s event' % event.type)
def handle_event(src, bot, e, w):
  """Handle an event by executing the source code src.

  Args:
    src: python source (taken from an --eventdef flag) to execute.
    bot: the robot instance handling the event.
    e: the triggering event.
    w: the wavelet the event applies to.

  SECURITY NOTE: src is executed via exec with access to the api modules;
  this is by design for a developer tool, so only run it with trusted,
  operator-supplied flag values.
  """
  globs = {'e': e, 'w': w, 'api': api, 'bot': bot,
           'blip': blip, 'element': element, 'errors': errors,
           'events': events, 'ops': ops, 'robot': robot,
           'util': util}
  exec src in globs
def run_bot(input_file, output_file):
  """Run a robot defined on the command line.

  Reads a json event bundle from input_file, dispatches it to handlers
  built from the --eventdef_* flags and writes the json response to
  output_file.
  """
  cmdbot = robot.Robot('Commandline bot')
  for event in events.ALL:
    src = getattr(FLAGS, 'eventdef_' + event.type.lower())
    # Flag values may be url-encoded to ease shell quoting.
    src = urllib.unquote_plus(src)
    if src:
      # Bind src/bot as lambda defaults so each handler captures its own
      # values rather than the loop variables.
      cmdbot.register_handler(event,
                              lambda event, wavelet, src=src, bot=cmdbot:
                              handle_event(src, bot, event, wavelet))
  json_body = unicode(input_file.read(), 'utf8')
  json_response = cmdbot.process_events(json_body)
  output_file.write(json_response)
def main(argv):
  """Entry point: wire stdin/stdout into run_bot."""
  run_bot(sys.stdin, sys.stdout)

if __name__ == '__main__':
  app.run()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the robot module."""
import unittest
import events
import ops
import robot
import simplejson
# Canned server-side json payloads used as fixtures by the tests below.
BLIP_JSON = ('{"wdykLROk*13":'
             '{"lastModifiedTime":1242079608457,'
             '"contributors":["someguy@test.com"],'
             '"waveletId":"test.com!conv+root",'
             '"waveId":"test.com!wdykLROk*11",'
             '"parentBlipId":null,'
             '"version":3,'
             '"creator":"someguy@test.com",'
             '"content":"\\nContent!",'
             '"blipId":"wdykLROk*13","'
             'annotations":[{"range":{"start":0,"end":1},'
             '"name":"user/e/davidbyttow@google.com","value":"David"}],'
             '"elements":{},'
             '"childBlipIds":[]}'
             '}')

WAVELET_JSON = ('{"lastModifiedTime":1242079611003,'
                '"title":"A title",'
                '"waveletId":"test.com!conv+root",'
                '"rootBlipId":"wdykLROk*13",'
                '"dataDocuments":null,'
                '"creationTime":1242079608457,'
                '"waveId":"test.com!wdykLROk*11",'
                '"participants":["someguy@test.com","monty@appspot.com"],'
                '"creator":"someguy@test.com",'
                '"version":5}')

EVENTS_JSON = ('[{"timestamp":1242079611003,'
               '"modifiedBy":"someguy@test.com",'
               '"properties":{"participantsRemoved":[],'
               '"participantsAdded":["monty@appspot.com"]},'
               '"type":"WAVELET_PARTICIPANTS_CHANGED"}]')

# A complete incoming robot bundle: blips, wavelet and events.
TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % (
    BLIP_JSON, WAVELET_JSON, EVENTS_JSON)

# Reply to a wavelet-create operation in the current server format.
NEW_WAVE_JSON = [{"data":
                  {"waveletId": "wavesandbox.com!conv+root",
                   "blipId": "b+LrODcLZkDlu", "waveId":
                   "wavesandbox.com!w+LrODcLZkDlt"},
                  "id": "op2"}]

# The same reply in the older (extra-list-wrapped) server format.
# Fix: dropped the stray trailing semicolon of the original.
NEW_WAVE_JSON_OLD = [{'data':
                      [{'data':
                        {'waveletId': 'googlewave.com!conv+root',
                         'blipId': 'b+VqQXQbZkCP1',
                         'waveId': 'googlewave.com!w+VqQXQbZkCP0'},
                        'id': 'wavelet.create1265055048410'}],
                      'id': 'op10'}]
class TestRobot(unittest.TestCase):
  """Tests for the basic parsing of json in robots."""

  def setUp(self):
    self.robot = robot.Robot('Testy')

  def testCreateWave(self):
    """new_wave() parses both the current and the old reply format."""
    self.robot.submit = lambda x: NEW_WAVE_JSON
    new_wave = self.robot.new_wave('wavesandbox.com', submit=True)
    self.assertEqual('wavesandbox.com!w+LrODcLZkDlt', new_wave.wave_id)
    # Older servers wrap the reply in an extra list; see NEW_WAVE_JSON_OLD.
    self.robot.submit = lambda x: NEW_WAVE_JSON_OLD
    new_wave = self.robot.new_wave('googlewave.com', submit=True)
    self.assertEqual('googlewave.com!w+VqQXQbZkCP0', new_wave.wave_id)

  def testEventParsing(self):
    """A matching handler receives the parsed event and wavelet."""
    def check(event, wavelet):
      # Test some basic properties; the rest should be covered by
      # ops.CreateContext.
      root = wavelet.root_blip
      self.assertEqual(1, len(wavelet.blips))
      self.assertEqual('wdykLROk*13', root.blip_id)
      self.assertEqual('test.com!wdykLROk*11', root.wave_id)
      self.assertEqual('test.com!conv+root', root.wavelet_id)
      self.assertEqual('WAVELET_PARTICIPANTS_CHANGED', event.type)
      self.assertEqual({'participantsRemoved': [],
                        'participantsAdded': ['monty@appspot.com']},
                       event.properties)
      self.robot.test_called = True
    self.robot.test_called = False
    self.robot.register_handler(events.WaveletParticipantsChanged,
                                check)
    json = self.robot.process_events(TEST_JSON)
    self.assertTrue(self.robot.test_called)
    operations = simplejson.loads(json)
    # there should be one operation indicating the current version:
    self.assertEqual(1, len(operations))

  def testWrongEventsIgnored(self):
    """A handler for an event type not in the bundle is never invoked."""
    def check(event, wavelet):
      # Fix: the original set a dead local variable here; flip the real
      # flag so an erroneous invocation actually fails the test.
      self.robot.test_called = True
    self.robot.test_called = False
    # TEST_JSON only carries a WAVELET_PARTICIPANTS_CHANGED event, so a
    # BlipSubmitted handler must not fire.
    self.robot.register_handler(events.BlipSubmitted,
                                check)
    self.robot.process_events(TEST_JSON)
    self.assertFalse(self.robot.test_called)

  def testOperationParsing(self):
    """Handler-issued operations are serialized into the response."""
    def check(event, wavelet):
      wavelet.reply()
      wavelet.title = 'new title'
      wavelet.root_blip.append_markup('<b>Hello</b>')
    self.robot.register_handler(events.WaveletParticipantsChanged,
                                check)
    json = self.robot.process_events(TEST_JSON)
    operations = simplejson.loads(json)
    expected = set([ops.ROBOT_NOTIFY_CAPABILITIES_HASH,
                    ops.WAVELET_APPEND_BLIP,
                    ops.WAVELET_SET_TITLE,
                    ops.DOCUMENT_APPEND_MARKUP])
    methods = [operation['method'] for operation in operations]
    # Each expected method must appear exactly once and nothing else.
    for method in methods:
      self.assertTrue(method in expected)
      expected.remove(method)
    self.assertEquals(0, len(expected))

  def testSerializeWavelets(self):
    """A wavelet round-trips through serialize()/blind_wavelet()."""
    wavelet = self.robot.blind_wavelet(TEST_JSON)
    serialized = wavelet.serialize()
    unserialized = self.robot.blind_wavelet(serialized)
    self.assertEquals(wavelet.creator, unserialized.creator)
    self.assertEquals(wavelet.creation_time, unserialized.creation_time)
    self.assertEquals(wavelet.last_modified_time,
                      unserialized.last_modified_time)
    self.assertEquals(wavelet.root_blip.blip_id, unserialized.root_blip.blip_id)
    self.assertEquals(wavelet.title, unserialized.title)
    self.assertEquals(wavelet.wave_id, unserialized.wave_id)
    self.assertEquals(wavelet.wavelet_id, unserialized.wavelet_id)
    self.assertEquals(wavelet.domain, unserialized.domain)

  def testProxiedBlindWavelet(self):
    """Operations from a proxying blind wavelet carry proxyingFor."""
    def handler(event, wavelet):
      blind_wavelet = self.robot.blind_wavelet(TEST_JSON, 'proxyid')
      blind_wavelet.reply()
      blind_wavelet.submit_with(wavelet)
    self.robot.register_handler(events.WaveletParticipantsChanged, handler)
    json = self.robot.process_events(TEST_JSON)
    operations = simplejson.loads(json)
    self.assertEqual(2, len(operations))
    self.assertEquals(ops.ROBOT_NOTIFY_CAPABILITIES_HASH,
                      operations[0]['method'])
    self.assertEquals(ops.WAVELET_APPEND_BLIP, operations[1]['method'])
    self.assertEquals('proxyid', operations[1]['params']['proxyingFor'])

  def testCapabilitiesHashIncludesContextAndFilter(self):
    """The capabilities hash reflects both handler context and filter."""
    robot1 = robot.Robot('Robot1')
    robot1.register_handler(events.WaveletSelfAdded, lambda: '')
    robot2 = robot.Robot('Robot2')
    robot2.register_handler(events.WaveletSelfAdded, lambda: '',
                            context=events.Context.ALL)
    self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash())
    robot3 = robot.Robot('Robot3')
    # Bug fix: this registration previously targeted robot2, which left
    # robot3 without any handler and the filter effectively untested.
    robot3.register_handler(events.WaveletSelfAdded, lambda: '',
                            context=events.Context.ALL, filter="foo")
    self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash())
    self.assertNotEqual(robot1.capabilities_hash(), robot3.capabilities_hash())
    # robot2 and robot3 now differ only in the filter, proving the filter
    # is part of the hash.
    self.assertNotEqual(robot2.capabilities_hash(), robot3.capabilities_hash())
class TestGetCapabilitiesXml(unittest.TestCase):
  """Tests for the capabilities.xml document generated by a robot."""

  def setUp(self):
    self.robot = robot.Robot('Testy')
    # Pin the hash so the expected XML below is stable.
    self.robot.capabilities_hash = lambda: '1'

  def assertStringsEqual(self, s1, s2):
    """Asserts equality with a diff-friendly failure message."""
    self.assertEqual(s1, s2, 'Strings differ:\n%s--\n%s' % (s1, s2))

  def testDefault(self):
    """A robot without handlers yields an empty capabilities section."""
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    xml = self.robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)

  def testUrls(self):
    """Setting image/profile urls does not change the capabilities xml."""
    profile_robot = robot.Robot(
        'Testy',
        image_url='http://example.com/image.png',
        profile_url='http://example.com/profile.xml')
    profile_robot.capabilities_hash = lambda: '1'
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    xml = profile_robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)

  def testConsumerKey(self):
    """A configured consumer key shows up as a consumer_key element."""
    # setup_oauth doesn't work during testing, so heavy handed setting of
    # properties it is:
    self.robot._consumer_key = 'consumer'
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:consumer_key>consumer</w:consumer_key>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    xml = self.robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)

  def testCapsAndEvents(self):
    """Registered handlers appear as capability entries with context."""
    self.robot.register_handler(events.BlipSubmitted, None,
                                context=[events.Context.SELF,
                                         events.Context.ROOT])
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n'
        '  <w:capability name="%s" context="SELF,ROOT"/>\n'
        '</w:capabilities>\n'
        '</w:robot>\n') % (ops.PROTOCOL_VERSION, events.BlipSubmitted.type)
    xml = self.robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)
# Allow running the tests directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the wavelet module."""
import unittest
import blip
import element
import ops
import wavelet
import simplejson
ROBOT_NAME = 'robot@appspot.com'

# Fixture: wavelet properties as they would arrive from the server.
TEST_WAVELET_DATA = {
    'creator': ROBOT_NAME,
    'creationTime': 100,
    'lastModifiedTime': 101,
    'participants': [ROBOT_NAME],
    'participantsRoles': {ROBOT_NAME: wavelet.Participants.ROLE_FULL},
    'rootBlipId': 'blip-1',
    'title': 'Title',
    'waveId': 'test.com!w+g3h3im',
    'waveletId': 'test.com!root+conv',
    'tags': ['tag1', 'tag2'],
}

# Fixture: the root blip belonging to TEST_WAVELET_DATA.
TEST_BLIP_DATA = {
    'blipId': TEST_WAVELET_DATA['rootBlipId'],
    'childBlipIds': [],
    'content': '\ntesting',
    'contributors': [TEST_WAVELET_DATA['creator'], 'robot@google.com'],
    'creator': TEST_WAVELET_DATA['creator'],
    'lastModifiedTime': TEST_WAVELET_DATA['lastModifiedTime'],
    'parentBlipId': None,
    'waveId': TEST_WAVELET_DATA['waveId'],
    'elements': {},
    'waveletId': TEST_WAVELET_DATA['waveletId'],
}
class TestWavelet(unittest.TestCase):
  """Tests the wavelet class."""

  def setUp(self):
    # A fresh operation queue plus a wavelet holding a single root blip.
    self.operation_queue = ops.OperationQueue()
    self.all_blips = {}
    self.blip = blip.Blip(TEST_BLIP_DATA,
                          self.all_blips,
                          self.operation_queue)
    self.all_blips[self.blip.blip_id] = self.blip
    self.wavelet = wavelet.Wavelet(TEST_WAVELET_DATA,
                                   self.all_blips,
                                   None,
                                   self.operation_queue)
    self.wavelet.robot_address = ROBOT_NAME

  def testWaveletProperties(self):
    """The wavelet exposes the TEST_WAVELET_DATA fields as properties."""
    w = self.wavelet
    self.assertEquals(TEST_WAVELET_DATA['creator'], w.creator)
    self.assertEquals(TEST_WAVELET_DATA['creationTime'], w.creation_time)
    self.assertEquals(TEST_WAVELET_DATA['lastModifiedTime'],
                      w.last_modified_time)
    self.assertEquals(len(TEST_WAVELET_DATA['participants']),
                      len(w.participants))
    self.assertTrue(TEST_WAVELET_DATA['participants'][0] in w.participants)
    self.assertEquals(TEST_WAVELET_DATA['rootBlipId'], w.root_blip.blip_id)
    self.assertEquals(TEST_WAVELET_DATA['title'], w.title)
    self.assertEquals(TEST_WAVELET_DATA['waveId'], w.wave_id)
    self.assertEquals(TEST_WAVELET_DATA['waveletId'], w.wavelet_id)
    self.assertEquals('test.com', w.domain)

  def testWaveletMethods(self):
    """reply/delete, data documents and proxy_for behave as expected."""
    w = self.wavelet
    reply = w.reply()
    self.assertEquals(2, len(w.blips))
    w.delete(reply)
    self.assertEquals(1, len(w.blips))
    self.assertEquals(0, len(w.data_documents))
    self.wavelet.data_documents['key'] = 'value'
    self.assert_('key' in w.data_documents)
    self.assertEquals(1, len(w.data_documents))
    for key in w.data_documents:
      self.assertEquals(key, 'key')
    self.assertEquals(1, len(w.data_documents.keys()))
    # Assigning None removes a data document.
    self.wavelet.data_documents['key'] = None
    self.assertEquals(0, len(w.data_documents))
    num_participants = len(w.participants)
    w.proxy_for('proxy').reply()
    self.assertEquals(2, len(w.blips))
    # check that the new proxy for participant was added
    self.assertEquals(num_participants + 1, len(w.participants))
    # Once the robot address already carries the proxy suffix, proxying
    # again must not add another participant.
    w._robot_address = ROBOT_NAME.replace('@', '+proxy@')
    w.proxy_for('proxy').reply()
    self.assertEquals(num_participants + 1, len(w.participants))
    self.assertEquals(3, len(w.blips))

  def testSetTitle(self):
    """Setting the title queues an op and rewrites the root blip text."""
    self.blip._content = '\nOld title\n\nContent'
    self.wavelet.title = 'New title \xd0\xb0\xd0\xb1\xd0\xb2'
    self.assertEquals(1, len(self.operation_queue))
    self.assertEquals('wavelet.setTitle',
                      self.operation_queue.serialize()[1]['method'])
    # The utf-8 byte sequence above decodes to the unicode title below.
    self.assertEquals(u'\nNew title \u0430\u0431\u0432\n\nContent',
                      self.blip._content)

  def testSetTitleAdjustRootBlipWithOneLineProperly(self):
    """A one-line root blip gains a trailing newline after the title."""
    self.blip._content = '\nOld title'
    self.wavelet.title = 'New title'
    self.assertEquals(1, len(self.operation_queue))
    self.assertEquals('wavelet.setTitle',
                      self.operation_queue.serialize()[1]['method'])
    self.assertEquals('\nNew title\n', self.blip._content)

  def testSetTitleAdjustEmptyRootBlipProperly(self):
    """An empty root blip ends up containing just the title."""
    self.blip._content = '\n'
    self.wavelet.title = 'New title'
    self.assertEquals(1, len(self.operation_queue))
    self.assertEquals('wavelet.setTitle',
                      self.operation_queue.serialize()[1]['method'])
    self.assertEquals('\nNew title\n', self.blip._content)

  def testTags(self):
    """Tags behave like an ordered set: no duplicates, removable."""
    w = self.wavelet
    self.assertEquals(2, len(w.tags))
    w.tags.append('tag3')
    self.assertEquals(3, len(w.tags))
    # appending an existing tag is a no-op
    w.tags.append('tag3')
    self.assertEquals(3, len(w.tags))
    w.tags.remove('tag1')
    self.assertEquals(2, len(w.tags))
    self.assertEquals('tag2', w.tags[0])

  def testParticipantRoles(self):
    """Participant roles can be read and updated."""
    w = self.wavelet
    self.assertEquals(wavelet.Participants.ROLE_FULL,
                      w.participants.get_role(ROBOT_NAME))
    w.participants.set_role(ROBOT_NAME, wavelet.Participants.ROLE_READ_ONLY)
    self.assertEquals(wavelet.Participants.ROLE_READ_ONLY,
                      w.participants.get_role(ROBOT_NAME))

  def testSerialize(self):
    """A wavelet loaded with varied elements serializes to json."""
    self.blip.append(element.Gadget('http://test.com', {'a': 3}))
    self.wavelet.title = 'A wavelet title'
    self.blip.append(element.Image(url='http://www.google.com/logos/clickortreat1.gif',
                                   width=320, height=118))
    self.blip.append(element.Attachment(caption='fake', data='fake data'))
    self.blip.append(element.Line(line_type='li', indent='2'))
    self.blip.append('bulleted!')
    self.blip.append(element.Installer(
        'http://wave-skynet.appspot.com/public/extensions/areyouin/manifest.xml'))
    self.wavelet.proxy_for('proxy').reply().append('hi from douwe')
    inlineBlip = self.blip.insert_inline_blip(5)
    inlineBlip.append('hello again!')
    serialized = self.wavelet.serialize()
    serialized = simplejson.dumps(serialized)
    self.assertTrue(serialized.find('test.com') > 0)
# Allow running the tests directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains various API-specific exception classes.
This module contains various specific exception classes that are raised by
the library back to the client.
"""
class Error(Exception):
  """Base exception class from which all library errors derive."""
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines classes that are needed to model a wavelet."""
import blip
import errors
import util
class DataDocs(object):
  """Dict-like wrapper around the data documents of a wavelet.

  Local mutations are mirrored to the server by queueing datadoc
  operations on the supplied operation queue.
  """

  def __init__(self, init_docs, wave_id, wavelet_id, operation_queue):
    self._docs = init_docs
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __iter__(self):
    return iter(self._docs)

  def __contains__(self, key):
    return key in self._docs

  def __delitem__(self, key):
    # Deleting a missing key is a silent no-op.
    if key not in self._docs:
      return
    # Setting a data document to None removes it on the server.
    self._operation_queue.wavelet_datadoc_set(
        self._wave_id, self._wavelet_id, key, None)
    del self._docs[key]

  def __getitem__(self, key):
    return self._docs[key]

  def __setitem__(self, key, value):
    self._operation_queue.wavelet_datadoc_set(
        self._wave_id, self._wavelet_id, key, value)
    if value is None:
      # A None value deletes the document locally as well.
      self._docs.pop(key, None)
    else:
      self._docs[key] = value

  def __len__(self):
    return len(self._docs)

  def keys(self):
    return self._docs.keys()

  def serialize(self):
    """Returns a dictionary of the data documents."""
    return self._docs
class Participants(object):
  """Set-like wrapper around the participants of a wavelet.

  Additions and role changes are queued on the operation queue so they
  are reflected on the server.
  """

  #: Designates full access (read/write) role.
  ROLE_FULL = "FULL"
  #: Designates read-only role.
  ROLE_READ_ONLY = "READ_ONLY"

  def __init__(self, participants, roles, wave_id, wavelet_id, operation_queue):
    self._participants = set(participants)
    # Copy so later role changes do not mutate the caller's dict.
    self._roles = roles.copy()
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __contains__(self, participant):
    return participant in self._participants

  def __len__(self):
    return len(self._participants)

  def __iter__(self):
    return iter(self._participants)

  def add(self, participant_id):
    """Adds a participant by their ID (address)."""
    self._operation_queue.wavelet_add_participant(
        self._wave_id, self._wavelet_id, participant_id)
    self._participants.add(participant_id)

  def get_role(self, participant_id):
    """Return the role for the given participant_id."""
    # Participants without an explicit role default to full access.
    return self._roles.get(participant_id, Participants.ROLE_FULL)

  def set_role(self, participant_id, role):
    """Sets the role for the given participant_id."""
    if role not in (Participants.ROLE_FULL, Participants.ROLE_READ_ONLY):
      raise ValueError(role + ' is not a valid role')
    self._operation_queue.wavelet_modify_participant_role(
        self._wave_id, self._wavelet_id, participant_id, role)
    self._roles[participant_id] = role

  def serialize(self):
    """Returns a list of the participants."""
    return list(self._participants)
class Tags(object):
  """List-like wrapper around the tags of a wavelet.

  append/remove queue the matching server-side tag modifications.
  """

  def __init__(self, tags, wave_id, wavelet_id, operation_queue):
    self._tags = list(tags)
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __getitem__(self, index):
    return self._tags[index]

  def __len__(self):
    return len(self._tags)

  def __iter__(self):
    return iter(self._tags)

  def append(self, tag):
    """Appends a tag if it doesn't already exist."""
    tag = util.force_unicode(tag)
    if tag not in self._tags:
      self._operation_queue.wavelet_modify_tag(
          self._wave_id, self._wavelet_id, tag)
      self._tags.append(tag)

  def remove(self, tag):
    """Removes a tag if it exists."""
    tag = util.force_unicode(tag)
    if tag in self._tags:
      self._operation_queue.wavelet_modify_tag(
          self._wave_id, self._wavelet_id, tag, modify_how='remove')
      self._tags.remove(tag)

  def serialize(self):
    """Returns a list of tags."""
    return list(self._tags)
class Wavelet(object):
  """Models a single wavelet.

  A single wavelet is composed of metadata, participants, and its blips.
  To guarantee that all blips are available, specify Context.ALL for events.
  """

  def __init__(self, json, blips, robot, operation_queue):
    """Inits this wavelet with JSON data.

    Args:
      json: JSON data dictionary from Wave server.
      blips: a dictionary object that can be used to resolve blips.
      robot: the robot owning this wavelet.
      operation_queue: an OperationQueue object to be used to
          send any generated operations to.
    """
    self._robot = robot
    self._operation_queue = operation_queue
    self._wave_id = json.get('waveId')
    self._wavelet_id = json.get('waveletId')
    self._creator = json.get('creator')
    self._creation_time = json.get('creationTime', 0)
    self._data_documents = DataDocs(json.get('dataDocuments', {}),
                                    self._wave_id,
                                    self._wavelet_id,
                                    operation_queue)
    self._last_modified_time = json.get('lastModifiedTime')
    self._participants = Participants(json.get('participants', []),
                                      json.get('participantRoles', {}),
                                      self._wave_id,
                                      self._wavelet_id,
                                      operation_queue)
    self._title = json.get('title', '')
    self._tags = Tags(json.get('tags', []),
                      self._wave_id,
                      self._wavelet_id,
                      operation_queue)
    self._raw_data = json
    self._blips = blip.Blips(blips)
    self._root_blip_id = json.get('rootBlipId')
    # The root blip is only resolvable when it was part of the context.
    if self._root_blip_id and self._root_blip_id in self._blips:
      self._root_blip = self._blips[self._root_blip_id]
    else:
      self._root_blip = None
    self._robot_address = None

  @property
  def wavelet_id(self):
    """Returns this wavelet's id."""
    return self._wavelet_id

  @property
  def wave_id(self):
    """Returns this wavelet's parent wave id."""
    return self._wave_id

  @property
  def creator(self):
    """Returns the participant id of the creator of this wavelet."""
    return self._creator

  @property
  def creation_time(self):
    """Returns the time that this wavelet was first created in milliseconds."""
    return self._creation_time

  @property
  def data_documents(self):
    """Returns the data documents for this wavelet based on key name."""
    return self._data_documents

  @property
  def domain(self):
    """Return the domain that wavelet belongs to.

    The domain is the part of the wave id before the '!' separator, or
    None if the id contains no separator.
    """
    p = self._wave_id.find('!')
    if p == -1:
      return None
    else:
      return self._wave_id[:p]

  @property
  def last_modified_time(self):
    """Returns the time that this wavelet was last modified in ms."""
    return self._last_modified_time

  @property
  def participants(self):
    """Returns a set of participants on this wavelet."""
    return self._participants

  @property
  def tags(self):
    """Returns a list of tags for this wavelet."""
    return self._tags

  @property
  def robot(self):
    """The robot that owns this wavelet."""
    return self._robot

  def _get_title(self):
    return self._title

  def _set_title(self, title):
    title = util.force_unicode(title)
    if title.find('\n') != -1:
      raise errors.Error('Wavelet title should not contain a newline ' +
                         'character. Specified: ' + title)
    self._operation_queue.wavelet_set_title(self.wave_id, self.wavelet_id,
                                            title)
    self._title = title
    # Adjust the content of the root blip, if it is available in the context:
    # the title occupies the first line of the root blip's content.
    if self._root_blip:
      content = '\n'
      splits = self._root_blip._content.split('\n', 2)
      if len(splits) == 3:
        content += splits[2]
      self._root_blip._content = '\n' + title + content

  #: Returns or sets the wavelet's title.
  title = property(_get_title, _set_title,
                   doc='Get or set the title of the wavelet.')

  def _get_robot_address(self):
    return self._robot_address

  def _set_robot_address(self, address):
    # The address is write-once; trying to change it is a programming error.
    if self._robot_address:
      raise errors.Error('robot address already set')
    self._robot_address = address

  robot_address = property(_get_robot_address, _set_robot_address,
                           doc='Get or set the address of the current robot.')

  @property
  def root_blip(self):
    """Returns this wavelet's root blip."""
    return self._root_blip

  @property
  def blips(self):
    """Returns the blips for this wavelet."""
    return self._blips

  def get_operation_queue(self):
    """Returns the OperationQueue for this wavelet."""
    return self._operation_queue

  def serialize(self):
    """Return a dict of the wavelet properties."""
    # NOTE(review): tags are not part of the serialized form; confirm
    # against the wire protocol before adding them here.
    return {'waveId': self._wave_id,
            'waveletId': self._wavelet_id,
            'creator': self._creator,
            'creationTime': self._creation_time,
            'dataDocuments': self._data_documents.serialize(),
            'lastModifiedTime': self._last_modified_time,
            'participants': self._participants.serialize(),
            'title': self._title,
            'blips': self._blips.serialize(),
            'rootBlipId': self._root_blip_id
           }

  def proxy_for(self, proxy_for_id):
    """Return a view on this wavelet that will proxy for the specified id.

    A shallow copy of the current wavelet is returned with the proxy_for_id
    set. Any modifications made to this copy will be done using the
    proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will
    be used.

    If the wavelet was retrieved using the Active Robot API, that is
    by fetch_wavelet, then the address of the robot must be added to the
    wavelet by setting wavelet.robot_address before calling proxy_for().
    """
    self.add_proxying_participant(proxy_for_id)
    operation_queue = self.get_operation_queue().proxy_for(proxy_for_id)
    res = Wavelet(json={},
                  blips={},
                  robot=self.robot,
                  operation_queue=operation_queue)
    res._wave_id = self._wave_id
    res._wavelet_id = self._wavelet_id
    res._creator = self._creator
    res._creation_time = self._creation_time
    res._data_documents = self._data_documents
    res._last_modified_time = self._last_modified_time
    res._participants = self._participants
    res._title = self._title
    # Bug fix: tags, the root blip id and the robot address were previously
    # not copied, so a proxying view lost its tags, serialized with a None
    # rootBlipId, and could not be proxied again.
    res._tags = self._tags
    res._root_blip_id = self._root_blip_id
    res._robot_address = self._robot_address
    res._raw_data = self._raw_data
    res._blips = self._blips
    res._root_blip = self._root_blip
    return res

  def add_proxying_participant(self, id):
    """Adds a proxying participant to the wave.

    Proxying participants are of the form robot+proxy@domain.com. This
    convenience method constructs this id and then calls participants.add.

    Raises:
      errors.Error: if robot_address has not been set yet.
    """
    if not self.robot_address:
      raise errors.Error(
          'Need a robot address to add a proxying for participant')
    robotid, domain = self.robot_address.split('@', 1)
    # Strip an existing #version fragment; it is re-appended below.
    if '#' in robotid:
      robotid, version = robotid.split('#')
    else:
      version = None
    # Replace any existing +proxy suffix with the new proxy id.
    if '+' in robotid:
      newid = robotid.split('+', 1)[0] + '+' + id
    else:
      newid = robotid + '+' + id
    if version:
      newid += '#' + version
    newid += '@' + domain
    self.participants.add(newid)

  def submit_with(self, other_wavelet):
    """Submit this wavelet when the passed other wavelet is submitted.

    Wavelets constructed outside of the event callback need to either be
    explicitly submitted using robot.submit(wavelet) or be associated with
    a different wavelet that will be submitted or is part of the event
    callback.
    """
    other_wavelet._operation_queue.copy_operations(self._operation_queue)
    self._operation_queue = other_wavelet._operation_queue

  def reply(self, initial_content=None):
    """Replies to the conversation in this wavelet.

    Args:
      initial_content: If set, start with this (string) content.

    Returns:
      A transient version of the blip that contains the reply.
    """
    if not initial_content:
      initial_content = u'\n'
    initial_content = util.force_unicode(initial_content)
    blip_data = self._operation_queue.wavelet_append_blip(
        self.wave_id, self.wavelet_id, initial_content)
    instance = blip.Blip(blip_data, self._blips, self._operation_queue)
    self._blips._add(instance)
    return instance

  def delete(self, todelete):
    """Remove a blip from this wavelet.

    Args:
      todelete: either a blip or a blip id to be removed.
    """
    if isinstance(todelete, blip.Blip):
      blip_id = todelete.blip_id
    else:
      blip_id = todelete
    self._operation_queue.blip_delete(self.wave_id, self.wavelet_id, blip_id)
    self._blips._remove_with_id(blip_id)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Tests for google3.walkabout.externalagents.api.commandline_robot_runner."""
__author__ = 'douwe@google.com (Douwe Osinga)'
import StringIO
from google3.pyglib import app
from google3.pyglib import flags
from google3.testing.pybase import googletest
from google3.walkabout.externalagents.api import commandline_robot_runner
from google3.walkabout.externalagents.api import events
FLAGS = flags.FLAGS
# JSON fixture: a single blip keyed by its blip id, as sent by the server.
BLIP_JSON = ('{"wdykLROk*13":'
             '{"lastModifiedTime":1242079608457,'
             '"contributors":["someguy@test.com"],'
             '"waveletId":"test.com!conv+root",'
             '"waveId":"test.com!wdykLROk*11",'
             '"parentBlipId":null,'
             '"version":3,'
             '"creator":"someguy@test.com",'
             '"content":"\\nContent!",'
             '"blipId":"wdykLROk*13",'
             '"annotations":[{"range":{"start":0,"end":1},'
             '"name":"user/e/otherguy@test.com","value":"Other"}],'
             '"elements":{},'
             '"childBlipIds":[]}'
             '}')
# JSON fixture: wavelet metadata whose root blip is the blip above.
WAVELET_JSON = ('{"lastModifiedTime":1242079611003,'
                '"title":"A title",'
                '"waveletId":"test.com!conv+root",'
                '"rootBlipId":"wdykLROk*13",'
                '"dataDocuments":null,'
                '"creationTime":1242079608457,'
                '"waveId":"test.com!wdykLROk*11",'
                '"participants":["someguy@test.com","monty@appspot.com"],'
                '"creator":"someguy@test.com",'
                '"version":5}')
# JSON fixture: a single WAVELET_PARTICIPANTS_CHANGED event.
EVENTS_JSON = ('[{"timestamp":1242079611003,'
               '"modifiedBy":"someguy@test.com",'
               '"properties":{"participantsRemoved":[],'
               '"participantsAdded":["monty@appspot.com"]},'
               '"type":"WAVELET_PARTICIPANTS_CHANGED"}]')
# Complete event bundle combining the three fixtures above.
TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % (
    BLIP_JSON, WAVELET_JSON, EVENTS_JSON)
class CommandlineRobotRunnerTest(googletest.TestCase):
  """End-to-end check of the commandline robot runner."""

  def testSimpleFlow(self):
    """Feeds a canned event bundle through run_bot and checks the output."""
    FLAGS.eventdef_wavelet_participants_changed = 'x'
    flag_name = 'eventdef_' + events.WaveletParticipantsChanged.type.lower()
    setattr(FLAGS, flag_name, 'w.title="New title!"')
    stdin = StringIO.StringIO(TEST_JSON)
    stdout = StringIO.StringIO()
    commandline_robot_runner.run_bot(stdin, stdout)
    self.assertTrue('wavelet.setTitle' in stdout.getvalue())
def main(unused_argv):
  """Entry point for app.run(); runs the googletest suite."""
  googletest.main()
if __name__ == '__main__':
  app.run()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to run all unit tests in this package."""
import blip_test
import element_test
import module_test_runner
import ops_test
import robot_test
import util_test
import wavelet_test
def RunUnitTests():
  """Runs all registered unit tests."""
  test_runner = module_test_runner.ModuleTestRunner()
  # Bug fix: element_test is imported at the top of this file but was
  # missing from this list, so its tests were silently never run.
  test_runner.modules = [
      blip_test,
      element_test,
      ops_test,
      robot_test,
      util_test,
      wavelet_test,
  ]
  test_runner.RunAllTests()
if __name__ == "__main__":
RunUnitTests()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the util module."""
__author__ = 'davidbyttow@google.com (David Byttow)'
import unittest
import ops
import util
class TestUtils(unittest.TestCase):
  """Tests utility functions."""
  def testIsIterable(self):
    self.assertTrue(util.is_iterable([]))
    self.assertTrue(util.is_iterable({}))
    self.assertTrue(util.is_iterable(set()))
    self.assertTrue(util.is_iterable(()))
    self.assertFalse(util.is_iterable(42))
    # Strings iterate in Python, but is_iterable treats them as scalars.
    self.assertFalse(util.is_iterable('list?'))
    self.assertFalse(util.is_iterable(object))
  def testIsDict(self):
    self.assertFalse(util.is_dict([]))
    self.assertTrue(util.is_dict({}))
    self.assertFalse(util.is_dict(set()))
    self.assertFalse(util.is_dict(()))
    self.assertFalse(util.is_dict(42))
    self.assertFalse(util.is_dict('dict?'))
    self.assertFalse(util.is_dict(object))
  def testIsUserDefinedNewStyleClass(self):
    # Old-style (classic) class vs. a new-style subclass of object.
    class OldClass:
      pass
    class NewClass(object):
      pass
    self.assertFalse(util.is_user_defined_new_style_class(OldClass()))
    self.assertTrue(util.is_user_defined_new_style_class(NewClass()))
    self.assertFalse(util.is_user_defined_new_style_class({}))
    self.assertFalse(util.is_user_defined_new_style_class(()))
    self.assertFalse(util.is_user_defined_new_style_class(42))
    self.assertFalse(util.is_user_defined_new_style_class('instance?'))
  def testLowerCamelCase(self):
    self.assertEquals('foo', util.lower_camel_case('foo'))
    self.assertEquals('fooBar', util.lower_camel_case('foo_bar'))
    # Already-camelCased input passes through unchanged.
    self.assertEquals('fooBar', util.lower_camel_case('fooBar'))
    self.assertEquals('blipId', util.lower_camel_case('blip_id'))
    # Consecutive and trailing underscores collapse.
    self.assertEquals('fooBar', util.lower_camel_case('foo__bar'))
    self.assertEquals('fooBarBaz', util.lower_camel_case('foo_bar_baz'))
    self.assertEquals('f', util.lower_camel_case('f'))
    self.assertEquals('f', util.lower_camel_case('f_'))
    self.assertEquals('', util.lower_camel_case(''))
    self.assertEquals('', util.lower_camel_case('_'))
    self.assertEquals('aBCDEF', util.lower_camel_case('_a_b_c_d_e_f_'))
  # Helper: element-wise list comparison used by the serialize tests.
  def assertListsEqual(self, a, b):
    self.assertEquals(len(a), len(b))
    for i in range(len(a)):
      self.assertEquals(a[i], b[i])
  # Helper: key/value dict comparison used by the serialize tests.
  def assertDictsEqual(self, a, b):
    self.assertEquals(len(a.keys()), len(b.keys()))
    for k, v in a.iteritems():
      self.assertEquals(v, b[k])
  def testSerializeList(self):
    data = [1, 2, 3]
    output = util.serialize(data)
    self.assertListsEqual(data, output)
  def testSerializeDict(self):
    # Dict keys are lower-camel-cased during serialization.
    data = {'key': 'value', 'under_score': 'value2'}
    expected = {'key': 'value', 'underScore': 'value2'}
    output = util.serialize(data)
    self.assertDictsEqual(expected, output)
  def testNonNoneDict(self):
    # Entries whose value is None are dropped.
    a = {'a': 1, 'b': 1}
    self.assertDictsEqual(a, util.non_none_dict(a))
    b = a.copy()
    b['c'] = None
    self.assertDictsEqual(a, util.non_none_dict(b))
  def testForceUnicode(self):
    self.assertEquals(u"aaa", util.force_unicode("aaa"))
    # Non-string input is converted to its unicode representation.
    self.assertEquals(u"12", util.force_unicode(12))
    # The byte string is UTF-8-encoded Cyrillic and must decode accordingly.
    self.assertEquals(u"\u0430\u0431\u0432",
                      util.force_unicode("\xd0\xb0\xd0\xb1\xd0\xb2"))
    # Unicode input passes through unchanged.
    self.assertEquals(u'\u30e6\u30cb\u30b3\u30fc\u30c9',
                      util.force_unicode(u'\u30e6\u30cb\u30b3\u30fc\u30c9'))
  def testSerializeAttributes(self):
    class Data(object):
      def __init__(self):
        self.public = 1
        self._protected = 2
        self.__private = 3
      def Func(self):
        pass
    data = Data()
    output = util.serialize(data)
    # Functions and non-public fields should not be serialized.
    self.assertEquals(1, len(output.keys()))
    self.assertEquals(data.public, output['public'])
  def testStringEnum(self):
    # An empty enum is valid.
    util.StringEnum()
    single = util.StringEnum('foo')
    self.assertEquals('foo', single.foo)
    multi = util.StringEnum('foo', 'bar')
    self.assertEquals('foo', multi.foo)
    self.assertEquals('bar', multi.bar)
  def testParseMarkup(self):
    # Markup tags are stripped; <br> and <p> become newlines.
    self.assertEquals('foo', util.parse_markup('foo'))
    self.assertEquals('foo bar', util.parse_markup('foo <b>bar</b>'))
    self.assertEquals('foo\nbar', util.parse_markup('foo<br>bar'))
    self.assertEquals('foo\nbar', util.parse_markup('foo<p indent="3">bar'))
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Run robot from the commandline for testing.
This robot_runner let's you define event handlers using flags and takes the
json input from the std in and writes out the json output to stdout.
for example
cat events | commandline_robot_runner.py \
--eventdef-blip_submitted="wavelet.title='title'"
"""
__author__ = 'douwe@google.com (Douwe Osinga)'
import sys
import urllib
from google3.pyglib import app
from google3.pyglib import flags
from google3.walkabout.externalagents import api
from google3.walkabout.externalagents.api import blip
from google3.walkabout.externalagents.api import element
from google3.walkabout.externalagents.api import errors
from google3.walkabout.externalagents.api import events
from google3.walkabout.externalagents.api import ops
from google3.walkabout.externalagents.api import robot
from google3.walkabout.externalagents.api import util
FLAGS = flags.FLAGS
# Define one --eventdef_<event_type> string flag per known event type; the
# flag's value is Python source executed when that event fires (handle_event).
for event in events.ALL:
  flags.DEFINE_string('eventdef_' + event.type.lower(),
                      '',
                      'Event definition for the %s event' % event.type)
def handle_event(src, bot, e, w):
  """Handle an event by executing the source code src.

  Args:
    src: Python source text, typically taken from an --eventdef_* flag.
    bot: the robot instance, exposed to the snippet as 'bot'.
    e: the event being handled, exposed as 'e'.
    w: the wavelet the event applies to, exposed as 'w'.
  """
  globs = {'e': e, 'w': w, 'api': api, 'bot': bot,
           'blip': blip, 'element': element, 'errors': errors,
           'events': events, 'ops': ops, 'robot': robot,
           'util': util}
  # NOTE(review): exec of arbitrary flag-supplied source is intentional in
  # this local debugging tool; never feed it untrusted input.
  exec src in globs
def run_bot(input_file, output_file):
  """Run a robot defined on the command line.

  Reads a JSON event bundle from input_file, dispatches it to handlers
  built from the --eventdef_* flags, and writes the JSON response to
  output_file.
  """
  cmdbot = robot.Robot('Commandline bot')
  for event in events.ALL:
    src = getattr(FLAGS, 'eventdef_' + event.type.lower())
    # Flag values may be URL-encoded to survive shell quoting.
    src = urllib.unquote_plus(src)
    if src:
      # The src=src/bot=cmdbot defaults bind the loop's *current* values;
      # without them every handler would see the last iteration's src.
      cmdbot.register_handler(event,
                              lambda event, wavelet, src=src, bot=cmdbot:
                              handle_event(src, bot, event, wavelet))
  json_body = unicode(input_file.read(), 'utf8')
  json_response = cmdbot.process_events(json_body)
  output_file.write(json_response)
def main(argv):
  """Entry point for app.run(): wire the bot to stdin/stdout."""
  run_bot(sys.stdin, sys.stdout)
if __name__ == '__main__':
  app.run()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for operations that can be applied to the server.
Contains classes and utilities for creating operations that are to be
applied on the server.
"""
import errors
import random
import util
import sys
# Protocol version spoken by this client library.
PROTOCOL_VERSION = '0.21'
# Operation Types
# Wavelet-level operations:
WAVELET_APPEND_BLIP = 'wavelet.appendBlip'
WAVELET_SET_TITLE = 'wavelet.setTitle'
WAVELET_ADD_PARTICIPANT = 'wavelet.participant.add'
WAVELET_DATADOC_SET = 'wavelet.datadoc.set'
WAVELET_MODIFY_TAG = 'wavelet.modifyTag'
WAVELET_MODIFY_PARTICIPANT_ROLE = 'wavelet.modifyParticipantRole'
# Blip-level operations:
BLIP_CREATE_CHILD = 'blip.createChild'
BLIP_DELETE = 'blip.delete'
# Document (blip content) operations:
DOCUMENT_APPEND_MARKUP = 'document.appendMarkup'
DOCUMENT_INLINE_BLIP_INSERT = 'document.inlineBlip.insert'
DOCUMENT_MODIFY = 'document.modify'
# Robot-level operations:
ROBOT_CREATE_WAVELET = 'robot.createWavelet'
ROBOT_FETCH_WAVE = 'robot.fetchWave'
ROBOT_NOTIFY_CAPABILITIES_HASH = 'robot.notifyCapabilitiesHash'
class Operation(object):
  """Represents a generic operation applied on the server.

  This operation class contains data that is filled in depending on the
  operation type.

  It can be used directly, but doing so will not result in local, transient
  reflection of state on the blips. In other words, creating a 'delete blip'
  operation will not remove the blip from the local context for the duration
  of this session. It is better to use the OpBased model classes directly
  instead.
  """

  def __init__(self, method, opid, params):
    """Initializes this operation with contextual data.

    Args:
      method: Method to call or type of operation.
      opid: The id of the operation. Any callbacks will refer to these.
      params: An operation type dependent dictionary
    """
    self.method = method
    self.id = opid
    self.params = params

  def __str__(self):
    return '%s[%s]%s' % (self.method, self.id, str(self.params))

  def set_param(self, param, value):
    # Returns self to allow chaining.
    self.params[param] = value
    return self

  def serialize(self, method_prefix=''):
    """Serialize the operation.

    Args:
      method_prefix: prefixed for each method name to allow for specifying
          a namespace.

    Returns:
      a dict representation of the operation.
    """
    prefix = method_prefix
    if prefix and not prefix.endswith('.'):
      prefix += '.'
    return {'method': prefix + self.method,
            'id': self.id,
            'params': util.serialize(self.params)}

  def set_optional(self, param, value):
    """Sets an optional parameter.

    If value is None or "", this is a no op. Otherwise it calls set_param.
    """
    if value is None or value == '':
      return self
    return self.set_param(param, value)
class OperationQueue(object):
"""Wraps the queuing of operations using easily callable functions.
The operation queue wraps single operations as functions and queues the
resulting operations in-order. Typically there shouldn't be a need to
call this directly unless operations are needed on entities outside
of the scope of the robot. For example, to modify a blip that
does not exist in the current context, you might specify the wave, wavelet
and blip id to generate an operation.
Any calls to this will not be reflected in the robot in any way.
For example, calling wavelet_append_blip will not result in a new blip
being added to the robot, only an operation to be applied on the
server.
"""
# Some class global counters:
_next_operation_id = 1
  def __init__(self, proxy_for_id=None):
    """Creates an empty queue, optionally proxying for proxy_for_id."""
    # Name-mangled list of queued Operation objects, in submission order.
    self.__pending = []
    self._capability_hash = 0
    self._proxy_for_id = proxy_for_id
  def _new_blipdata(self, wave_id, wavelet_id, initial_content='',
                    parent_blip_id=None):
    """Creates JSON of the blip used for this session.

    The blip id is a random client-side placeholder ('TBD_...'); the
    server assigns the real id when the operation is applied.
    """
    temp_blip_id = 'TBD_%s_%s' % (wavelet_id,
                                  hex(random.randint(0, sys.maxint)))
    return {'waveId': wave_id,
            'waveletId': wavelet_id,
            'blipId': temp_blip_id,
            'content': initial_content,
            'parentBlipId': parent_blip_id}
  def _new_waveletdata(self, domain, participants):
    """Creates an ephemeral WaveletData instance used for this session.

    Args:
      domain: the domain to create the data for.
      participants: participants initially on the wavelet.

    Returns:
      Blipdata (for the rootblip), WaveletData.
    """
    # Placeholder wave id; the server assigns the real one on creation.
    wave_id = domain + '!TBD_%s' % hex(random.randint(0, sys.maxint))
    wavelet_id = domain + '!conv+root'
    root_blip_data = self._new_blipdata(wave_id, wavelet_id)
    participants = set(participants)
    wavelet_data = {'waveId': wave_id,
                    'waveletId': wavelet_id,
                    'rootBlipId': root_blip_data['blipId'],
                    'participants': participants}
    return root_blip_data, wavelet_data
  def __len__(self):
    # Number of queued (not yet submitted) operations.
    return len(self.__pending)
  def __iter__(self):
    # Iterates the queued operations in submission order.
    return self.__pending.__iter__()
  def clear(self):
    """Drops all queued operations without submitting them."""
    # Rebinds a fresh list, so any proxy_for views that shared the old
    # list keep their (now detached) copy.
    self.__pending = []
  def proxy_for(self, proxy):
    """Return a view of this operation queue with the proxying for set to proxy.

    This method returns a new instance of an operation queue that shares the
    operation list, but has a different proxying_for_id set so the robot using
    this new queue will send out operations with the proxying_for field set.
    """
    res = OperationQueue()
    # Deliberately share the pending list (not a copy): operations queued
    # through the view are submitted together with this queue's operations.
    res.__pending = self.__pending
    res._capability_hash = self._capability_hash
    res._proxy_for_id = proxy
    return res
  def set_capability_hash(self, capability_hash):
    """Sets the hash sent with the notify operation on serialize()."""
    self._capability_hash = capability_hash
  def serialize(self):
    """Serializes the queue, prepending the mandatory notify operation."""
    first = Operation(ROBOT_NOTIFY_CAPABILITIES_HASH,
                      '0',
                      {'capabilitiesHash': self._capability_hash,
                       'protocolVersion': PROTOCOL_VERSION})
    operations = [first] + self.__pending
    res = util.serialize(operations)
    return res
def copy_operations(self, other_queue):
"""Copy the pending operations from other_queue into this one."""
for op in other_queue:
self.__pending.append(op)
  def new_operation(self, method, wave_id, wavelet_id, props=None, **kwprops):
    """Creates and adds a new operation to the operation list.

    Note: mutates props (when given) by merging kwprops and the wave ids
    into it.
    """
    if props is None:
      props = {}
    props.update(kwprops)
    props['waveId'] = wave_id
    props['waveletId'] = wavelet_id
    if self._proxy_for_id:
      props['proxyingFor'] = self._proxy_for_id
    # The class-level counter makes operation ids unique across all queues
    # in this process.
    operation = Operation(method,
                          'op%s' % OperationQueue._next_operation_id,
                          props)
    self.__pending.append(operation)
    OperationQueue._next_operation_id += 1
    return operation
  def wavelet_append_blip(self, wave_id, wavelet_id, initial_content=''):
    """Appends a blip to a wavelet.

    Args:
      wave_id: The wave id owning the containing wavelet.
      wavelet_id: The wavelet id that this blip should be appended to.
      initial_content: optionally the content to start with

    Returns:
      JSON representing the information of the new blip.
    """
    # The returned blip data carries a temporary 'TBD_' id until the server
    # acknowledges the operation.
    blip_data = self._new_blipdata(wave_id, wavelet_id, initial_content)
    self.new_operation(WAVELET_APPEND_BLIP, wave_id,
                       wavelet_id, blipData=blip_data)
    return blip_data
  def wavelet_add_participant(self, wave_id, wavelet_id, participant_id):
    """Adds a participant to a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      participant_id: Id of the participant to add.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_ADD_PARTICIPANT, wave_id, wavelet_id,
                              participantId=participant_id)
  def wavelet_datadoc_set(self, wave_id, wavelet_id, name, data):
    """Sets a key/value pair on the data document of a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      name: The key name for this data.
      data: The value of the data to set.

    Returns:
      The operation created.
    """
    # NOTE(review): callers appear to use a None value to delete the key
    # (see DataDocs in the wavelet module) — confirm server semantics.
    return self.new_operation(WAVELET_DATADOC_SET, wave_id, wavelet_id,
                              datadocName=name, datadocValue=data)
  def robot_create_wavelet(self, domain, participants=None, message=''):
    """Creates a new wavelet.

    Args:
      domain: the domain to create the wave in
      participants: initial participants on this wavelet or None if none
      message: an optional payload that is returned with the corresponding
          event.

    Returns:
      data for the root_blip, wavelet
    """
    # Defensive default: avoids a shared mutable default argument.
    if participants is None:
      participants = []
    blip_data, wavelet_data = self._new_waveletdata(domain, participants)
    op = self.new_operation(ROBOT_CREATE_WAVELET,
                            wave_id=wavelet_data['waveId'],
                            wavelet_id=wavelet_data['waveletId'],
                            waveletData=wavelet_data)
    op.set_optional('message', message)
    return blip_data, wavelet_data
  def robot_fetch_wave(self, wave_id, wavelet_id):
    """Requests a snapshot of the specified wave.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(ROBOT_FETCH_WAVE, wave_id, wavelet_id)
  def wavelet_set_title(self, wave_id, wavelet_id, title):
    """Sets the title of a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      title: The title to set.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_SET_TITLE, wave_id, wavelet_id,
                              waveletTitle=title)
  def wavelet_modify_participant_role(
      self, wave_id, wavelet_id, participant_id, role):
    """Modify the role of a participant on a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      participant_id: Id of the participant to modify.
      role: the new role.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_MODIFY_PARTICIPANT_ROLE, wave_id,
                              wavelet_id, participantId=participant_id,
                              participantRole=role)
  def wavelet_modify_tag(self, wave_id, wavelet_id, tag, modify_how=None):
    """Modifies a tag in a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      tag: The tag (a string).
      modify_how: (optional) how to apply the tag. The default is to add
          the tag. Specify 'remove' to remove. Specify None or 'add' to
          add.

    Returns:
      The operation created.
    """
    # NOTE(review): the 'modify_how' key appears to rely on the serializer's
    # lower_camel_case pass to become 'modifyHow' on the wire — confirm.
    return self.new_operation(WAVELET_MODIFY_TAG, wave_id, wavelet_id,
                              name=tag).set_optional("modify_how", modify_how)
  def blip_create_child(self, wave_id, wavelet_id, blip_id):
    """Creates a child blip of another blip.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      JSON of blip for which further operations can be applied.
    """
    # The child carries a temporary id (see _new_blipdata).
    blip_data = self._new_blipdata(wave_id, wavelet_id, parent_blip_id=blip_id)
    self.new_operation(BLIP_CREATE_CHILD, wave_id, wavelet_id,
                       blipId=blip_id,
                       blipData=blip_data)
    return blip_data
def blip_delete(self, wave_id, wavelet_id, blip_id):
"""Deletes the specified blip.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
Returns:
The operation created.
"""
return self.new_operation(BLIP_DELETE, wave_id, wavelet_id, blipId=blip_id)
def document_append_markup(self, wave_id, wavelet_id, blip_id, content):
"""Appends content with markup to a document.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
content: The markup content to append.
Returns:
The operation created.
"""
return self.new_operation(DOCUMENT_APPEND_MARKUP, wave_id, wavelet_id,
blipId=blip_id, content=content)
def document_modify(self, wave_id, wavelet_id, blip_id):
"""Creates and queues a document modify operation
The returned operation still needs to be filled with details before
it makes sense.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
Returns:
The operation created.
"""
return self.new_operation(DOCUMENT_MODIFY,
wave_id,
wavelet_id,
blipId=blip_id)
def document_inline_blip_insert(self, wave_id, wavelet_id, blip_id, position):
"""Inserts an inline blip at a specific location.
Args:
wave_id: The wave id owning that this operation is applied to.
wavelet_id: The wavelet id that this operation is applied to.
blip_id: The blip id that this operation is applied to.
position: The position in the document to insert the blip.
Returns:
JSON data for the blip that was created for further operations.
"""
inline_blip_data = self._new_blipdata(wave_id, wavelet_id)
inline_blip_data['parentBlipId'] = blip_id
self.new_operation(DOCUMENT_INLINE_BLIP_INSERT, wave_id, wavelet_id,
blipId=blip_id,
index=position,
blipData=inline_blip_data)
return inline_blip_data
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to run wave robots on app engine."""
import logging
import sys
import events
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class CapabilitiesHandler(webapp.RequestHandler):
  """Serves a robot callback's output as the capabilities document."""

  def __init__(self, method, contenttype):
    """Stores the callback producing the body and its content type."""
    self._method = method
    self._contenttype = contenttype

  def get(self):
    """Handles HTTP GET by emitting the callback's output."""
    response = self.response
    response.headers['Content-Type'] = self._contenttype
    response.out.write(self._method())
class ProfileHandler(webapp.RequestHandler):
  """Serves robot profile data, optionally proxied for a given name."""

  def __init__(self, method, contenttype):
    """Stores the profile callback and its content type."""
    self._method = method
    self._contenttype = contenttype

  def get(self):
    """Handles HTTP GET, passing along the 'name' parameter if present."""
    self.response.headers['Content-Type'] = self._contenttype
    name = self.request.get('name')
    if name:
      body = self._method(name)
    else:
      body = self._method()
    self.response.out.write(body)
class RobotEventHandler(webapp.RequestHandler):
  """Handler for the dispatching of events to various handlers to a robot.

  This handler only responds to post events with a JSON post body. Its primary
  task is to separate out the context data from the events in the post body
  and dispatch all events in order. Once all events have been dispatched
  it serializes the context data and its associated operations as a response.
  """

  def __init__(self, robot):
    """Initializes self with a specific robot."""
    self._robot = robot

  def get(self):
    """Handles the get event for debugging.

    This is useful for debugging but since event bundles tend to be
    rather big it often won't fit for more complex requests.
    """
    ops = self.request.get('events')
    if ops:
      # Bug fix: forward the query-string value itself. The previous code
      # assigned the imported 'events' module here, so the GET debug path
      # never fed real JSON into post().
      self.request.body = ops
      self.post()

  def post(self):
    """Handles HTTP POST requests carrying a JSON event bundle."""
    json_body = self.request.body
    if not json_body:
      # TODO(davidbyttow): Log error?
      return
    # Redirect stdout to stderr while executing handlers. This way, any stray
    # "print" statements in bot code go to the error logs instead of breaking
    # the JSON response sent to the HTTP channel. The try/finally guarantees
    # stdout is restored even if a handler raises.
    saved_stdout, sys.stdout = sys.stdout, sys.stderr
    try:
      json_body = unicode(json_body, 'utf8')
      logging.info('Incoming: %s', json_body)
      json_response = self._robot.process_events(json_body)
      logging.info('Outgoing: %s', json_response)
    finally:
      sys.stdout = saved_stdout
    # Build the response.
    self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
    self.response.out.write(json_response.encode('utf-8'))
def operation_error_handler(event, wavelet):
  """Default operation error handler, logging what went wrong."""
  if not isinstance(event, events.OperationError):
    return
  logging.error('Previously operation failed: id=%s, message: %s',
                event.operation_id, event.error_message)
def appengine_post(url, data, headers):
  """POSTs data to url via App Engine urlfetch.

  Returns:
    (status_code, response_body) tuple.
  """
  fetched = urlfetch.fetch(url=url,
                           payload=data,
                           method='POST',
                           headers=headers,
                           deadline=10)
  return fetched.status_code, fetched.content
class RobotVerifyTokenHandler(webapp.RequestHandler):
  """Handler for the token_verify request."""

  def __init__(self, robot):
    """Initializes self with a specific robot."""
    self._robot = robot

  def get(self):
    """Handles the get event for debugging. Ops usually too long."""
    token, st = self._robot.get_verification_token_info()
    # Bug fix: use lazy %s formatting. The previous 'token=' + token raised
    # TypeError whenever no token was set, masking the intended 404 below.
    logging.info('token=%s', token)
    if token is None:
      self.error(404)
      self.response.out.write('No token set')
      return
    if st is not None:
      # The 'st' parameter proves the request came from the wave server.
      if self.request.get('st') != st:
        self.response.out.write('Invalid st value passed')
        return
    self.response.out.write(token)
def create_robot_webapp(robot, debug=False, extra_handlers=None):
  """Returns an instance of webapp.WSGIApplication with robot handlers."""
  # App Engine constructs handlers with no arguments, so each route uses a
  # lambda closing over the robot instance.
  routes = [
      ('.*/_wave/capabilities.xml',
       lambda: CapabilitiesHandler(robot.capabilities_xml,
                                   'application/xml')),
      ('.*/_wave/robot/profile',
       lambda: ProfileHandler(robot.profile_json, 'application/json')),
      ('.*/_wave/robot/jsonrpc',
       lambda: RobotEventHandler(robot)),
      ('.*/_wave/verify_token',
       lambda: RobotVerifyTokenHandler(robot)),
  ]
  routes.extend(extra_handlers or [])
  return webapp.WSGIApplication(routes, debug=debug)
def run(robot, debug=False, log_errors=True, extra_handlers=None):
  """Sets up the webapp handlers for this robot and starts listening.

  A robot is typically setup in the following steps:
    1. Instantiate and define robot.
    2. Register various handlers that it is interested in.
    3. Call Run, which will setup the handlers for the app.

  For example:
    robot = Robot('Terminator',
                  image_url='http://www.sky.net/models/t800.png',
                  profile_url='http://www.sky.net/models/t800.html')
    robot.register_handler(WAVELET_PARTICIPANTS_CHANGED, KillParticipant)
    run(robot)

  Args:
    robot: the robot to run. This robot is modified to use app engines
      urlfetch for posting http.
    debug: Optional variable that defaults to False and is passed through
      to the webapp application to determine if it should show debug info.
    log_errors: Optional flag that defaults to True and determines whether
      a default handler to catch errors should be setup that uses the
      app engine logging to log errors.
    extra_handlers: Optional list of tuples that are passed to the webapp
      to install more handlers. For example, passing
      [('/about', AboutHandler),] would install an extra about handler
      for the robot.
  """
  if log_errors:
    robot.register_handler(events.OperationError, operation_error_handler)
  # Route the robot's outgoing HTTP through App Engine's urlfetch.
  robot.http_post = appengine_post
  run_wsgi_app(create_robot_webapp(robot, debug, extra_handlers))
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the ops module."""
import unittest
import ops
class TestOperation(unittest.TestCase):
  """Test case for Operation class."""

  def testFields(self):
    # An operation exposes its method, id and params unchanged.
    params = {'waveId': 'wavelet-id',
              'title': 'a title'}
    operation = ops.Operation(ops.WAVELET_SET_TITLE, 'opid02', params)
    self.assertEqual(ops.WAVELET_SET_TITLE, operation.method)
    self.assertEqual('opid02', operation.id)
    self.assertEqual(2, len(operation.params))

  def testConstructModifyTag(self):
    queue = ops.OperationQueue()
    # Without modify_how the optional param is omitted.
    without_how = queue.wavelet_modify_tag('waveid', 'waveletid', 'tag')
    self.assertEqual(3, len(without_how.params))
    with_how = queue.wavelet_modify_tag(
        'waveid', 'waveletid', 'tag', modify_how='remove')
    self.assertEqual(4, len(with_how.params))

  def testConstructRobotFetchWave(self):
    # A queue constructed with a proxy id stamps proxyingFor on operations.
    queue = ops.OperationQueue('proxyid')
    fetch_op = queue.robot_fetch_wave('wave1', 'wavelet1')
    self.assertEqual(3, len(fetch_op.params))
    self.assertEqual('proxyid', fetch_op.params['proxyingFor'])
    self.assertEqual('wave1', fetch_op.params['waveId'])
    self.assertEqual('wavelet1', fetch_op.params['waveletId'])
class TestOperationQueue(unittest.TestCase):
  """Test case for OperationQueue class."""

  def testSerialize(self):
    queue = ops.OperationQueue()
    queue.set_capability_hash('hash')
    queue.wavelet_modify_tag('waveid', 'waveletid', 'tag')
    serialized = queue.serialize()
    # First record is the implicit notifyCapabilitiesHash preamble.
    self.assertEqual(2, len(serialized))
    preamble = serialized[0]
    self.assertEqual('robot.notifyCapabilitiesHash', preamble['method'])
    self.assertEqual('hash', preamble['params']['capabilitiesHash'])
    self.assertEqual(ops.PROTOCOL_VERSION,
                     preamble['params']['protocolVersion'])
    self.assertEqual('wavelet.modifyTag', serialized[1]['method'])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility library containing various helpers used by the API."""
import re
CUSTOM_SERIALIZE_METHOD_NAME = 'serialize'
MARKUP_RE = re.compile(r'<([^>]*?)>')
def force_unicode(object):
  """Return the Unicode string version of object, with UTF-8 encoding."""
  if not isinstance(object, unicode):
    # str() first so arbitrary objects are stringified before decoding.
    object = unicode(str(object), 'utf-8')
  return object
def parse_markup(markup):
  """Parses a bit of markup into robot compatible text.

  <p> and <br> tags become newlines; all other tags are stripped.
  For now this is a rough approximation.

  Args:
    markup: String possibly containing HTML-style tags.

  Returns:
    The text with tags removed and paragraph/line breaks preserved.
  """
  def replace_tag(match):
    # Bug fix: the original tested the bound method 'match.groups' (always
    # truthy) instead of calling it, so the empty-group guard was dead code.
    if not match.groups():
      return ''
    tag = match.groups()[0].split(' ', 1)[0]
    if tag == 'p' or tag == 'br':
      return '\n'
    return ''
  # Same pattern as the module-level MARKUP_RE; re caches compiled
  # patterns, so compiling at the call site is cheap.
  return re.sub(r'<([^>]*?)>', replace_tag, markup)
def is_iterable(inst):
  """Returns whether or not this is a list, tuple, set or dict.

  Note that this checks only for an __iter__ attribute, so (under Python 2)
  plain strings do not qualify.
  """
  return getattr(inst, '__iter__', None) is not None
def is_dict(inst):
  """Returns whether or not the specified instance is a dict.

  Duck-typed: anything exposing an 'iteritems' attribute counts.
  """
  return getattr(inst, 'iteritems', None) is not None
def is_user_defined_new_style_class(obj):
  """Returns whether or not the specified instance is a user-defined type."""
  module_name = type(obj).__module__
  return module_name != '__builtin__'
def lower_camel_case(s):
  """Converts a string to lower camel case.

  Examples:
    foo => foo
    foo_bar => fooBar
    foo__bar => fooBar
    foo_bar_baz => fooBarBaz

  Args:
    s: The string to convert to lower camel case.

  Returns:
    The lower camel cased string.
  """
  result = ''
  for piece in s.split('_'):
    if result:
      # Later pieces are capitalized; empty pieces (from '__') vanish.
      result += piece.capitalize()
    else:
      result = piece
  return result
def non_none_dict(d):
  """Return a copy of the dictionary without None values."""
  return dict((k, v) for k, v in d.items() if v is not None)
def _serialize_attributes(obj):
  """Serializes attributes of an instance.

  Iterates all attributes of an object and invokes serialize if they are
  public and not callable.

  Args:
    obj: The instance to serialize.

  Returns:
    The serialized object.
  """
  serialized = {}
  for attr_name in dir(obj):
    if attr_name.startswith('_'):
      continue  # skip private/protected attributes
    value = getattr(obj, attr_name)
    if value is None or callable(value):
      continue  # skip unset values and methods
    serialized[lower_camel_case(attr_name)] = serialize(value)
  return serialized
def _serialize_dict(d):
  """Invokes serialize on all of its key/value pairs.

  Args:
    d: The dict instance to serialize.

  Returns:
    The serialized dict, with keys converted to lower camel case.
  """
  return dict((lower_camel_case(key), serialize(value))
              for key, value in d.items())
def serialize(obj):
  """Serializes any instance.

  If this is a user-defined instance type, it will first check for a custom
  serialize() method and use that if it exists. Otherwise, it will invoke
  serialize on all of its public attributes. Lists and dicts are serialized
  trivially.

  Args:
    obj: The instance to serialize.

  Returns:
    The serialized object.
  """
  if is_user_defined_new_style_class(obj):
    if obj:
      custom = getattr(obj, CUSTOM_SERIALIZE_METHOD_NAME, None)
      if callable(custom):
        # The object provides its own serialization; defer to it.
        return custom()
    return _serialize_attributes(obj)
  if is_dict(obj):
    return _serialize_dict(obj)
  if is_iterable(obj):
    return [serialize(item) for item in obj]
  # Primitive value: pass through unchanged.
  return obj
class StringEnum(object):
  """Enum like class that is configured with a list of values.

  This class effectively implements an enum for Elements, except for that
  the actual values of the enums will be the string values.
  """

  def __init__(self, *values):
    # Each value becomes an attribute whose value is its own name.
    self.__dict__.update((value, value) for value in values)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Declares the api package."""
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the generic robot classes.
This module provides the Robot class and RobotListener interface,
as well as some helper functions for web requests and responses.
"""
import base64
import logging
import sys
try:
__import__('google3') # setup internal test environment
except ImportError:
pass
import simplejson
import blip
import events
import ops
import util
import wavelet
import errors
# We only import oauth when we need it
oauth = None
DEFAULT_PROFILE_URL = (
'http://code.google.com/apis/wave/extensions/robots/python-tutorial.html')
class Robot(object):
  """Robot metadata class.

  This class holds on to basic robot information like the name and profile.
  It also maintains the list of event handlers and cron jobs and
  dispatches events to the appropriate handlers.
  """

  def __init__(self, name, image_url='', profile_url=DEFAULT_PROFILE_URL):
    """Initializes self with robot information.

    Args:
      name: The name of the robot
      image_url: (optional) url of an image that should be used as the avatar
        for this robot.
      profile_url: (optional) url of a webpage with more information about
        this robot.
    """
    # Maps event type -> list of (handler, event_class, context, filter).
    self._handlers = {}
    self._name = name
    self._verification_token = None
    self._st = None
    self._consumer_key = None
    self._consumer_secret = None
    self._server_rpc_base = None
    self._profile_handler = None
    self._image_url = image_url
    self._profile_url = profile_url
    # Rolling hash over registered capabilities; see register_handler.
    self._capability_hash = 0

  @property
  def name(self):
    """Returns the name of the robot."""
    return self._name

  @property
  def image_url(self):
    """Returns the URL of the avatar image."""
    return self._image_url

  @property
  def profile_url(self):
    """Returns the URL of an info page for the robot."""
    return self._profile_url

  def http_post(self, url, data, headers):
    """Execute an http post.

    Monkey patch this method to use something other than
    the default urllib.

    Args:
      url: to post to
      body: post body
      headers: extra headers to pass along

    Returns:
      response_code, returned_page
    """
    # Imported locally so environments that monkey-patch this method
    # (e.g. App Engine) never need urllib2.
    import urllib2
    req = urllib2.Request(url,
                          data=data,
                          headers=headers)
    try:
      f = urllib2.urlopen(req)
      return f.code, f.read()
    except urllib2.URLError, e:
      # NOTE(review): a plain URLError (e.g. connection refused) has no
      # .code/.read attributes -- only the HTTPError subclass does -- so
      # this branch can raise AttributeError on network-level failures.
      # Confirm callers tolerate that before changing.
      return e.code, e.read()

  def get_verification_token_info(self):
    """Returns the verification token and ST parameter."""
    return self._verification_token, self._st

  def capabilities_hash(self):
    """Return the capabilities hash as a hex string."""
    return hex(self._capability_hash)

  def register_handler(self, event_class, handler, context=None, filter=None):
    """Registers a handler on a specific event type.

    Multiple handlers may be registered on a single event type and are
    guaranteed to be called in order of registration.

    The handler takes two arguments, the event object and the corresponding
    wavelet.

    Args:
      event_class: An event to listen for from the classes defined in the
        events module.
      handler: A function handler which takes two arguments, the wavelet for
        the event and the event object.
      context: The context to provide for this handler.
      filter: Depending on the event, a filter can be specified that restricts
        for which values the event handler will be called from the server.
        Valuable to restrict the amount of traffic send to the robot.
    """
    payload = (handler, event_class, context, filter)
    self._handlers.setdefault(event_class.type, []).append(payload)
    if type(context) == list:
      context = ','.join(context)
    # Fold this registration into the rolling capability hash (kept within
    # 28 bits) so the server can detect when capabilities change.
    self._capability_hash = (self._capability_hash * 13 +
                             hash(event_class.type) +
                             hash(context) +
                             hash(filter)) & 0xfffffff

  def set_verification_token_info(self, token, st=None):
    """Set the verification token used in the ownership verification.

    /wave/robot/register starts this process up and will produce this token.

    Args:
      token: the token provided by /wave/robot/register.
      st: optional parameter to verify the request for the token came from
        the wave server.
    """
    self._verification_token = token
    self._st = st

  def setup_oauth(self, consumer_key, consumer_secret,
                  server_rpc_base='http://gmodules.com/api/rpc'):
    """Configure this robot to use the oauth'd json rpc.

    Args:
      consumer_key: consumer key received from the verification process.
      consumer_secret: secret received from the verification process.
      server_rpc_base: url of the rpc gateway to use. Specify None for default.
        For wave preview, http://gmodules.com/api/rpc should be used.
        For wave sandbox, http://sandbox.gmodules.com/api/rpc should be used.
    """
    # Import oauth inline and using __import__ for pyexe compatibility
    # when oauth is not installed.
    global oauth
    __import__('waveapi.oauth')
    oauth = sys.modules['waveapi.oauth']
    self._server_rpc_base = server_rpc_base
    self._consumer_key = consumer_key
    self._consumer_secret = consumer_secret
    self._oauth_signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
    self._oauth_consumer = oauth.OAuthConsumer(self._consumer_key,
                                               self._consumer_secret)

  def register_profile_handler(self, handler):
    """Sets the profile handler for this robot.

    The profile handler will be called when a profile is needed. The handler
    gets passed the name for which a profile is needed or None for the
    robot itself. A dictionary with keys for name, imageUrl and
    profileUrl should be returned.
    """
    self._profile_handler = handler

  def _hash(self, value):
    """return b64encoded sha1 hash of value."""
    try:
      hashlib = __import__('hashlib')  # 2.5
      hashed = hashlib.sha1(value)
    except ImportError:
      # Pre-2.5 fallback; the sha module is deprecated.
      import sha
      hashed = sha.sha(value)
    return base64.b64encode(hashed.digest())

  def make_rpc(self, operations):
    """Make an rpc call, submitting the specified operations."""
    if not oauth or not self._oauth_consumer.key:
      raise errors.Error('OAuth has not been configured')
    # Accept a single operation, a list, or an OperationQueue.
    if (not type(operations) == list and
        not isinstance(operations, ops.OperationQueue)):
      operations = [operations]
    rpcs = [op.serialize(method_prefix='wave') for op in operations]
    post_body = simplejson.dumps(rpcs)
    # oauth_body_hash binds the signature to the (otherwise unsigned) body.
    body_hash = self._hash(post_body)
    params = {
        'oauth_consumer_key': 'google.com:' + self._oauth_consumer.key,
        'oauth_timestamp': oauth.generate_timestamp(),
        'oauth_nonce': oauth.generate_nonce(),
        'oauth_version': oauth.OAuthRequest.version,
        'oauth_body_hash': body_hash,
    }
    oauth_request = oauth.OAuthRequest.from_request('POST',
                                                    self._server_rpc_base,
                                                    parameters=params)
    # Two-legged OAuth: no access token, hence the trailing None.
    oauth_request.sign_request(self._oauth_signature_method,
                               self._oauth_consumer,
                               None)
    code, content = self.http_post(
        url=oauth_request.to_url(),
        data=post_body,
        headers={'Content-Type': 'application/json'})
    logging.info('Active URL: %s' % oauth_request.to_url())
    logging.info('Active Outgoing: %s' % post_body)
    if code != 200:
      logging.info(oauth_request.to_url())
      logging.info(content)
      raise IOError('HttpError ' + str(code))
    return simplejson.loads(content)

  def _first_rpc_result(self, result):
    """result is returned from make_rpc. Get the first data record
    or throw an exception if it was an error."""
    if type(result) == list:
      result = result[0]
    error = result.get('error')
    if error:
      raise errors.Error('RPC Error' + str(error['code'])
                         + ': ' + error['message'])
    data = result.get('data')
    if data:
      return data
    raise errors.Error('RPC Error: No data record.')

  def capabilities_xml(self):
    """Return this robot's capabilities as an XML string."""
    lines = []
    for capability, payloads in self._handlers.items():
      for payload in payloads:
        handler, event_class, context, filter = payload
        line = '  <w:capability name="%s"' % capability
        if context:
          if type(context) == list:
            context = ','.join(context)
          line += ' context="%s"' % context
        if filter:
          line += ' filter="%s"' % filter
        line += '/>\n'
        lines.append(line)
    if self._consumer_key:
      oauth_tag = '<w:consumer_key>%s</w:consumer_key>\n' % self._consumer_key
    else:
      oauth_tag = ''
    return ('<?xml version="1.0"?>\n'
            '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
            '<w:version>%s</w:version>\n'
            '%s'
            '<w:protocolversion>%s</w:protocolversion>\n'
            '<w:capabilities>\n'
            '%s'
            '</w:capabilities>\n'
            '</w:robot>\n') % (self.capabilities_hash(),
                               oauth_tag,
                               ops.PROTOCOL_VERSION,
                               '\n'.join(lines))

  def profile_json(self, name=None):
    """Returns a JSON representation of the profile.

    This method is called both for the basic profile of the robot and to
    get a proxying for profile, in which case name is set. By default
    the information supplied at registration is returned.
    Use register_profile_handler to override this default behavior.
    """
    if self._profile_handler:
      data = self._profile_handler(name)
    else:
      data = {'name': self.name,
              'imageUrl': self.image_url,
              'profileUrl': self.profile_url}
    return simplejson.dumps(data)

  def _wavelet_from_json(self, json, pending_ops):
    """Construct a wavelet from the passed json.

    The json should either contain a wavelet and a blips record that
    define those respective object. The returned wavelet
    will be constructed using the passed pending_ops
    OperationQueue.

    Alternatively the json can be the result of a previous
    wavelet.serialize() call. In that case the blips will
    be contaned in the wavelet record.
    """
    if isinstance(json, basestring):
      json = simplejson.loads(json)
    blips = {}
    for blip_id, raw_blip_data in json['blips'].items():
      blips[blip_id] = blip.Blip(raw_blip_data, blips, pending_ops)
    # Support all three shapes: event payload, active-RPC payload, and a
    # previously serialized wavelet.
    if 'wavelet' in json:
      raw_wavelet_data = json['wavelet']
    elif 'waveletData' in json:
      raw_wavelet_data = json['waveletData']
    else:
      raw_wavelet_data = json
    # Keep only the blips that actually belong to this wavelet.
    wavelet_blips = {}
    wavelet_id = raw_wavelet_data['waveletId']
    wave_id = raw_wavelet_data['waveId']
    for blip_id, instance in blips.items():
      if instance.wavelet_id == wavelet_id and instance.wave_id == wave_id:
        wavelet_blips[blip_id] = instance
    result = wavelet.Wavelet(raw_wavelet_data, wavelet_blips, self, pending_ops)
    robot_address = json.get('robotAddress')
    if robot_address:
      result.robot_address = robot_address
    return result

  def process_events(self, json):
    """Process an incoming set of events encoded as json."""
    parsed = simplejson.loads(json)
    pending_ops = ops.OperationQueue()
    event_wavelet = self._wavelet_from_json(parsed, pending_ops)
    # Dispatch every event, in order, to each handler registered for its
    # type; handlers mutate event_wavelet, queuing ops on pending_ops.
    for event_data in parsed['events']:
      for payload in self._handlers.get(event_data['type'], []):
        handler, event_class, context, filter = payload
        event = event_class(event_data, event_wavelet)
        handler(event, event_wavelet)
    pending_ops.set_capability_hash(self.capabilities_hash())
    return simplejson.dumps(pending_ops.serialize())

  def new_wave(self, domain, participants=None, message='', proxy_for_id=None,
               submit=False):
    """Create a new wave with the initial participants on it.

    A new wave is returned with its own operation queue. It the
    responsibility of the caller to make sure this wave gets
    submitted to the server, either by calling robot.submit() or
    by calling .submit_with() on the returned wave.

    Args:
      domain: the domain to create the wavelet on. This should
        in general correspond to the domain of the incoming
        wavelet. (wavelet.domain). Exceptions are situations
        where the robot is calling new_wave outside of an
        event or when the server is handling multiple domains.
      participants: initial participants on the wave. The robot
        as the creator of the wave is always added.
      message: a string that will be passed back to the robot
        when the WAVELET_CREATOR event is fired. This is a
        lightweight way to pass around state.
      submit: if true, use the active gateway to make a round
        trip to the server. This will return immediately an
        actual waveid/waveletid and blipId for the root blip.
    """
    operation_queue = ops.OperationQueue(proxy_for_id)
    if not isinstance(message, basestring):
      message = simplejson.dumps(message)
    blip_data, wavelet_data = operation_queue.robot_create_wavelet(
        domain=domain,
        participants=participants,
        message=message)
    blips = {}
    root_blip = blip.Blip(blip_data, blips, operation_queue)
    blips[root_blip.blip_id] = root_blip
    created = wavelet.Wavelet(wavelet_data,
                              blips=blips,
                              robot=self,
                              operation_queue=operation_queue)
    if submit:
      # Round trip to the server to learn the real ids, then rebuild the
      # local objects around them.
      result = self._first_rpc_result(self.submit(created))
      if type(result) == list:
        result = result[0]
      # Currently, data is sometimes wrapped in an outer 'data'
      # Remove these 2 lines when that is no longer an issue.
      if 'data' in result and len(result) == 2:
        result = result['data']
      if 'blipId' in result:
        blip_data['blipId'] = result['blipId']
        wavelet_data['rootBlipId'] = result['blipId']
      for field in 'waveId', 'waveletId':
        if field in result:
          wavelet_data[field] = result[field]
          blip_data[field] = result[field]
      blips = {}
      root_blip = blip.Blip(blip_data, blips, operation_queue)
      blips[root_blip.blip_id] = root_blip
      created = wavelet.Wavelet(wavelet_data,
                                blips=blips,
                                robot=self,
                                operation_queue=operation_queue)
    return created

  def fetch_wavelet(self, wave_id, wavelet_id, proxy_for_id=None):
    """Use the REST interface to fetch a wave and return it.

    The returned wavelet contains a snapshot of the state of the
    wavelet at that point. It can be used to modify the wavelet,
    but the wavelet might change in between, so treat carefully.

    Also note that the wavelet returned has its own operation
    queue. It the responsibility of the caller to make sure this
    wavelet gets submited to the server, either by calling
    robot.submit() or by calling .submit_with() on the returned
    wavelet.
    """
    operation_queue = ops.OperationQueue(proxy_for_id)
    operation_queue.robot_fetch_wave(wave_id, wavelet_id)
    result = self._first_rpc_result(self.make_rpc(operation_queue))
    # A fresh queue is handed to the wavelet so the fetch op isn't resent.
    return self._wavelet_from_json(result, ops.OperationQueue(proxy_for_id))

  def blind_wavelet(self, json, proxy_for_id=None):
    """Construct a blind wave from a json string.

    Call this method if you have a snapshot of a wave that you
    want to operate on outside of an event. Since the wave might
    have changed since you last saw it, you should take care to
    submit operations that are as safe as possible.

    Args:
      json: a json object or string containing at least a key
        wavelet defining the wavelet and a key blips defining the
        blips in the view.

      proxy_for_id: the proxying information that will be set on the wavelet's
        operation queue.

    Returns:
      A new wavelet with its own operation queue. It the
      responsibility of the caller to make sure this wavelet gets
      submited to the server, either by calling robot.submit() or
      by calling .submit_with() on the returned wavelet.
    """
    return self._wavelet_from_json(json, ops.OperationQueue(proxy_for_id))

  def submit(self, wavelet_to_submit):
    """Submit the pending operations associated with wavelet_to_submit.

    Typically the wavelet will be the result of fetch_wavelet, blind_wavelet
    or new_wave.
    """
    pending = wavelet_to_submit.get_operation_queue()
    res = self.make_rpc(pending)
    # Clear the queue so the same operations are not submitted twice.
    pending.clear()
    logging.info('submit returned:%s', res)
    return res
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the blip module."""
import unittest
import blip
import element
import ops
import simplejson
# Raw wire-format blip payload used as the template fixture for every blip
# built in these tests; individual tests override fields via new_blip(**args).
TEST_BLIP_DATA = {
    'childBlipIds': [],
    'content': '\nhello world!\nanother line',
    'contributors': ['robot@test.com', 'user@test.com'],
    'creator': 'user@test.com',
    'lastModifiedTime': 1000,
    'parentBlipId': None,
    'annotations': [{'range': {'start': 2, 'end': 3},
                     'name': 'key', 'value': 'val'}],
    'waveId': 'test.com!w+g3h3im',
    'waveletId': 'test.com!root+conv',
    'elements':{'14':{'type':'GADGET','properties':{'url':'http://a/b.xml'}}},
    }
CHILD_BLIP_ID = 'b+42'  # id assigned to child blips in fixtures
ROOT_BLIP_ID = 'b+43'   # id assigned to root blips in fixtures
class TestBlip(unittest.TestCase):
  """Tests the primary data structures for the wave model."""

  def assertBlipStartswith(self, expected, totest):
    # Custom assertion: the blip's text begins with `expected`.
    actual = totest.text[:len(expected)]
    self.assertEquals(expected, actual)

  def new_blip(self, **args):
    """Create a blip for testing."""
    # Start from the shared fixture and apply per-test overrides.
    data = TEST_BLIP_DATA.copy()
    data.update(args)
    res = blip.Blip(data, self.all_blips, self.operation_queue)
    self.all_blips[res.blip_id] = res
    return res

  def setUp(self):
    # Fresh blip registry and operation queue for every test.
    self.all_blips = {}
    self.operation_queue = ops.OperationQueue()

  def testBlipProperties(self):
    # Exercises the read-only accessors of a parent/child blip pair.
    root = self.new_blip(blipId=ROOT_BLIP_ID,
                         childBlipIds=[CHILD_BLIP_ID])
    child = self.new_blip(blipId=CHILD_BLIP_ID,
                          parentBlipId=ROOT_BLIP_ID)
    self.assertEquals(ROOT_BLIP_ID, root.blip_id)
    self.assertEquals(set([CHILD_BLIP_ID]), root.child_blip_ids)
    self.assertEquals(set(TEST_BLIP_DATA['contributors']), root.contributors)
    self.assertEquals(TEST_BLIP_DATA['creator'], root.creator)
    self.assertEquals(TEST_BLIP_DATA['content'], root.text)
    self.assertEquals(TEST_BLIP_DATA['lastModifiedTime'],
                      root.last_modified_time)
    self.assertEquals(TEST_BLIP_DATA['parentBlipId'], root.parent_blip_id)
    self.assertEquals(TEST_BLIP_DATA['waveId'], root.wave_id)
    self.assertEquals(TEST_BLIP_DATA['waveletId'], root.wavelet_id)
    # Indexing a blip yields either the character or the element at the index.
    self.assertEquals(TEST_BLIP_DATA['content'][3], root[3])
    self.assertEquals(element.Gadget.class_type, root[14].type)
    self.assertEquals('http://a/b.xml', root[14].url)
    self.assertEquals('a', root.text[14])
    self.assertEquals(len(TEST_BLIP_DATA['content']), len(root))
    self.assertTrue(root.is_root())
    self.assertFalse(child.is_root())
    self.assertEquals(root, child.parent_blip)

  def testBlipSerialize(self):
    # A blip must round-trip through serialize()/Blip() unchanged.
    root = self.new_blip(blipId=ROOT_BLIP_ID,
                         childBlipIds=[CHILD_BLIP_ID])
    serialized = root.serialize()
    unserialized = blip.Blip(serialized, self.all_blips, self.operation_queue)
    self.assertEquals(root.blip_id, unserialized.blip_id)
    self.assertEquals(root.child_blip_ids, unserialized.child_blip_ids)
    self.assertEquals(root.contributors, unserialized.contributors)
    self.assertEquals(root.creator, unserialized.creator)
    self.assertEquals(root.text, unserialized.text)
    self.assertEquals(root.last_modified_time, unserialized.last_modified_time)
    self.assertEquals(root.parent_blip_id, unserialized.parent_blip_id)
    self.assertEquals(root.wave_id, unserialized.wave_id)
    self.assertEquals(root.wavelet_id, unserialized.wavelet_id)
    self.assertTrue(unserialized.is_root())

  def testDocumentOperations(self):
    # NOTE(review): the local name `blip` shadows the imported `blip` module
    # inside this and several following tests; harmless here but confusing.
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    newlines = [x for x in blip.find('\n')]
    self.assertEquals(2, len(newlines))
    blip.first('world').replace('jupiter')
    bits = blip.text.split('\n')
    self.assertEquals(3, len(bits))
    self.assertEquals('hello jupiter!', bits[1])
    blip.range(2, 5).delete()
    self.assertBlipStartswith('\nho jupiter', blip)
    blip.first('ho').insert_after('la')
    self.assertBlipStartswith('\nhola jupiter', blip)
    blip.at(3).insert(' ')
    self.assertBlipStartswith('\nho la jupiter', blip)

  def testElementHandling(self):
    # Elements must track their position as surrounding text is edited.
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    url = 'http://www.test.com/image.png'
    org_len = len(blip)
    blip.append(element.Image(url=url))
    elems = [elem for elem in blip.find(element.Image, url=url)]
    self.assertEquals(1, len(elems))
    elem = elems[0]
    self.assertTrue(isinstance(elem, element.Image))
    blip.at(1).insert('twelve chars')
    self.assertTrue(blip.text.startswith('\ntwelve charshello'))
    elem = blip[org_len + 12].value()
    self.assertTrue(isinstance(elem, element.Image))
    blip.first('twelve ').delete()
    self.assertTrue(blip.text.startswith('\nchars'))
    elem = blip[org_len + 12 - len('twelve ')].value()
    self.assertTrue(isinstance(elem, element.Image))
    blip.first('chars').replace(element.Image(url=url))
    elems = [elem for elem in blip.find(element.Image, url=url)]
    self.assertEquals(2, len(elems))
    self.assertTrue(blip.text.startswith('\n hello'))
    elem = blip[1].value()
    self.assertTrue(isinstance(elem, element.Image))

  def testAnnotationHandling(self):
    # Extending, clipping, splitting and clearing annotations on a range.
    key = 'style/fontWeight'
    def get_bold():
      # Helper: the first annotation under `key` whose value is 'bold'.
      for an in blip.annotations[key]:
        if an.value == 'bold':
          return an
      return None
    json = ('[{"range":{"start":3,"end":6},"name":"%s","value":"bold"}]'
            % key)
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json))
    self.assertEquals(1, len(blip.annotations))
    self.assertNotEqual(None, get_bold().value)
    self.assertTrue(key in blip.annotations)
    # extend the bold annotation by adding:
    blip.range(5, 8).annotate(key, 'bold')
    self.assertEquals(1, len(blip.annotations))
    self.assertEquals(8, get_bold().end)
    # clip by adding a same keyed:
    blip[4:12].annotate(key, 'italic')
    self.assertEquals(2, len(blip.annotations[key]))
    self.assertEquals(4, get_bold().end)
    # now split the italic one:
    blip.range(6, 7).clear_annotation(key)
    self.assertEquals(3, len(blip.annotations[key]))
    # test names and iteration
    self.assertEquals(1, len(blip.annotations.names()))
    self.assertEquals(3, len([x for x in blip.annotations]))
    blip[3: 5].annotate('foo', 'bar')
    self.assertEquals(2, len(blip.annotations.names()))
    self.assertEquals(4, len([x for x in blip.annotations]))
    blip[3: 5].clear_annotation('foo')
    # clear the whole thing
    blip.all().clear_annotation(key)
    # getting to the key should now throw an exception
    self.assertRaises(KeyError, blip.annotations.__getitem__, key)

  def testBlipOperations(self):
    # reply() and insert_inline_blip() must register new blips in all_blips.
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    self.assertEquals(1, len(self.all_blips))
    otherblip = blip.reply()
    otherblip.append('hello world')
    self.assertEquals('hello world', otherblip.text)
    self.assertEquals(blip.blip_id, otherblip.parent_blip_id)
    self.assertEquals(2, len(self.all_blips))
    inline = blip.insert_inline_blip(3)
    self.assertEquals(blip.blip_id, inline.parent_blip_id)
    self.assertEquals(3, len(self.all_blips))

  def testInsertInlineBlipCantInsertAtTheBeginning(self):
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    self.assertEquals(1, len(self.all_blips))
    # Position 0 is reserved; insertion there must fail and add nothing.
    self.assertRaises(IndexError, blip.insert_inline_blip, 0)
    self.assertEquals(1, len(self.all_blips))

  def testDocumentModify(self):
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    blip.all().replace('a text with text and then some text')
    blip[7].insert('text ')
    # all('text') must replace every occurrence, including the inserted one.
    blip.all('text').replace('thing')
    self.assertEquals('a thing thing with thing and then some thing',
                      blip.text)

  def testIteration(self):
    # Matches from all() must come back in ascending document order.
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    blip.all().replace('aaa 012 aaa 345 aaa 322')
    count = 0
    prev = -1
    for start, end in blip.all('aaa'):
      count += 1
      self.assertTrue(prev < start)
      prev = start
    self.assertEquals(3, count)

  def testBlipRefValue(self):
    # __delitem__/__setitem__/__getitem__ mirror equivalent text edits.
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    content = blip.text
    content = content[:4] + content[5:]
    del blip[4]
    self.assertEquals(content, blip.text)
    content = content[:2] + content[3:]
    del blip[2:3]
    self.assertEquals(content, blip.text)
    blip[2:3] = 'bike'
    content = content[:2] + 'bike' + content[3:]
    self.assertEquals(content, blip.text)
    url = 'http://www.test.com/image.png'
    blip.append(element.Image(url=url))
    self.assertEqual(url, blip.first(element.Image).url)
    url2 = 'http://www.test.com/another.png'
    blip[-1].update_element({'url': url2})
    self.assertEqual(url2, blip.first(element.Image).url)
    self.assertTrue(blip[3:5] == blip.text[3:5])
    blip.append('geheim')
    self.assertTrue(blip.first('geheim'))
    self.assertFalse(blip.first(element.Button))
    blip.append(element.Button(name='test1', value='Click'))
    button = blip.first(element.Button)
    button.update_element({'name': 'test2'})
    self.assertEqual('test2', button.name)

  def testReplace(self):
    # Replacing a non-existent match must leave the text untouched.
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    blip.all().replace('\nxxxx')
    blip.all('yyy').replace('zzz')
    self.assertEqual('\nxxxx', blip.text)

  def testDeleteRangeThatSpansAcrossAnnotationEndPoint(self):
    json = ('[{"range":{"start":1,"end":3},"name":"style","value":"bold"}]')
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json),
                         content='\nFoo bar.')
    blip.range(2, 4).delete()
    self.assertEqual('\nF bar.', blip.text)
    self.assertEqual(1, blip.annotations['style'][0].start)
    self.assertEqual(2, blip.annotations['style'][0].end)

  def testInsertBeforeAnnotationStartPoint(self):
    json = ('[{"range":{"start":4,"end":9},"name":"style","value":"bold"}]')
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json),
                         content='\nFoo bar.')
    blip.at(4).insert('d and')
    self.assertEqual('\nFood and bar.', blip.text)
    # The whole annotation shifts right by the inserted length.
    self.assertEqual(9, blip.annotations['style'][0].start)
    self.assertEqual(14, blip.annotations['style'][0].end)

  def testDeleteRangeInsideAnnotation(self):
    json = ('[{"range":{"start":1,"end":5},"name":"style","value":"bold"}]')
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json),
                         content='\nFoo bar.')
    blip.range(2, 4).delete()
    self.assertEqual('\nF bar.', blip.text)
    self.assertEqual(1, blip.annotations['style'][0].start)
    self.assertEqual(3, blip.annotations['style'][0].end)

  def testReplaceInsideAnnotation(self):
    json = ('[{"range":{"start":1,"end":5},"name":"style","value":"bold"}]')
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json),
                         content='\nFoo bar.')
    blip.range(2, 4).replace('ooo')
    self.assertEqual('\nFooo bar.', blip.text)
    self.assertEqual(1, blip.annotations['style'][0].start)
    self.assertEqual(6, blip.annotations['style'][0].end)
    blip.range(2, 5).replace('o')
    self.assertEqual('\nFo bar.', blip.text)
    self.assertEqual(1, blip.annotations['style'][0].start)
    self.assertEqual(4, blip.annotations['style'][0].end)

  def testReplaceSpanAnnotation(self):
    json = ('[{"range":{"start":1,"end":4},"name":"style","value":"bold"}]')
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json),
                         content='\nFoo bar.')
    blip.range(2, 9).replace('')
    self.assertEqual('\nF', blip.text)
    self.assertEqual(1, blip.annotations['style'][0].start)
    self.assertEqual(2, blip.annotations['style'][0].end)

  def testSearchWithNoMatchShouldNotGenerateOperation(self):
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    self.assertEqual(-1, blip.text.find(':('))
    self.assertEqual(0, len(self.operation_queue))
    blip.all(':(').replace(':)')
    # No hits -> no operation queued.
    self.assertEqual(0, len(self.operation_queue))

  def testBlipsRemoveWithId(self):
    # Removing a child must also unlink it from its parent's child list.
    blip_dict = {
        ROOT_BLIP_ID: self.new_blip(blipId=ROOT_BLIP_ID,
                                    childBlipIds=[CHILD_BLIP_ID]),
        CHILD_BLIP_ID: self.new_blip(blipId=CHILD_BLIP_ID,
                                     parentBlipId=ROOT_BLIP_ID)
    }
    blips = blip.Blips(blip_dict)
    blips._remove_with_id(CHILD_BLIP_ID)
    self.assertEqual(1, len(blips))
    self.assertEqual(0, len(blips[ROOT_BLIP_ID].child_blip_ids))

  def testAppendMarkup(self):
    blip = self.new_blip(blipId=ROOT_BLIP_ID, content='\nFoo bar.')
    markup = '<p><span>markup<span> content</p>'
    blip.append_markup(markup)
    self.assertEqual(1, len(self.operation_queue))
    # Markup is appended as plain text locally (tags stripped).
    self.assertEqual('\nFoo bar.\nmarkup content', blip.text)

  def testBundledAnnotations(self):
    blip = self.new_blip(blipId=ROOT_BLIP_ID, content='\nFoo bar.')
    blip.append('not bold')
    blip.append('bold', bundled_annotations=[('style/fontWeight', 'bold')])
    self.assertEqual(2, len(blip.annotations))
    self.assertEqual('bold', blip.annotations['style/fontWeight'][0].value)

  def testInlineBlipOffset(self):
    # A child referenced by an INLINE_BLIP element reports its parent offset.
    offset = 14
    self.new_blip(blipId=ROOT_BLIP_ID,
                  childBlipIds=[CHILD_BLIP_ID],
                  elements={str(offset):
                            {'type': element.Element.INLINE_BLIP_TYPE,
                             'properties': {'id': CHILD_BLIP_ID}}})
    child = self.new_blip(blipId=CHILD_BLIP_ID,
                          parentBlipId=ROOT_BLIP_ID)
    self.assertEqual(offset, child.inline_blip_offset)
# Run the blip unit tests when executed as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defines the ModuleTestRunnerClass."""
import unittest
class ModuleTestRunner(object):
  """Responsible for executing all test cases in a list of modules."""

  def __init__(self, module_list=None, module_test_settings=None):
    # module_list: modules whose TestCases should be executed.
    # module_test_settings: mapping of attribute name -> value that is set
    # on each module before its tests run.
    self.modules = module_list or []
    self.settings = module_test_settings or {}

  def RunAllTests(self):
    """Executes all tests present in the list of modules."""
    runner = unittest.TextTestRunner()
    for module in self.modules:
      # Inject each configured setting into the module under test.
      for setting, value in self.settings.iteritems():
        try:
          setattr(module, setting, value)
        except AttributeError:
          # NOTE(review): setattr on a module rarely raises AttributeError;
          # presumably defensive — confirm before removing.
          print '\nError running ' + str(setting)
      print '\nRunning all tests in module', module.__name__
      runner.run(unittest.defaultTestLoader.loadTestsFromModule(module))
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines event types that are sent from the wave server.
This module defines all of the event types currently supported by the wave
server. Each event type is sub classed from Event and has its own
properties depending on the type.
"""
class Context(object):
  """Specifies constants representing different context requests.

  NOTE(review): presumably these values are sent to the wave server to
  select how much blip context accompanies an event — confirm against the
  robot capabilities/registration code.
  """
  #: Requests the root blip.
  ROOT = 'ROOT'
  #: Requests the parent blip of the event blip.
  PARENT = 'PARENT'
  #: Requests the siblings blip of the event blip.
  SIBLINGS = 'SIBLINGS'
  #: Requests the child blips of the event blip.
  CHILDREN = 'CHILDREN'
  #: Requests the event blip itself.
  SELF = 'SELF'
  #: Requests all of the blips of the event wavelet.
  ALL = 'ALL'
class Event(object):
  """Base class describing a single event received from the wave server.

  Attributes:
    modified_by: Participant id that caused this event.
    timestamp: Server-side timestamp of the event (0 when absent).
    type: Type string of this event.
    properties: Dict of extra properties; derived event types usually lift
        these into explicit attributes.
    blip_id: Id of the blip the event refers to (root blip for wavelet
        level events).
    blip: The blip with that id, when present in the wavelet's context.
    proxying_for: proxyingFor id of the robot that caused the event, if any.
    raw_data: The unparsed JSON payload.
  """

  def __init__(self, json, wavelet):
    """Initialize the event from the server-supplied JSON.

    Args:
      json: JSON data from the Wave server.
      wavelet: the wavelet this event belongs to (used to resolve blip_id).
    """
    props = json.get('properties', {})
    self.raw_data = json
    self.properties = props
    self.modified_by = json.get('modifiedBy')
    self.timestamp = json.get('timestamp', 0)
    self.type = json.get('type')
    self.blip_id = props.get('blipId')
    # Resolves to None when the blip is not part of the event context.
    self.blip = wavelet.blips.get(self.blip_id)
    self.proxying_for = json.get('proxyingFor')
class WaveletBlipCreated(Event):
  """Event triggered when a new blip is created.

  Attributes:
    new_blip_id: The id of the newly created blip.
    new_blip: If in context, the actual new blip.
  """
  # Wire identifier sent by the wave server for this event.
  type = 'WAVELET_BLIP_CREATED'

  def __init__(self, json, wavelet):
    super(WaveletBlipCreated, self).__init__(json, wavelet)
    self.new_blip_id = self.properties['newBlipId']
    # None when the new blip is not part of the event context.
    self.new_blip = wavelet.blips.get(self.new_blip_id)
class WaveletBlipRemoved(Event):
  """Event triggered when a new blip is removed.

  Attributes:
    removed_blip_id: the id of the removed blip
    removed_blip: if in context, the removed blip
  """
  # Wire identifier sent by the wave server for this event.
  type = 'WAVELET_BLIP_REMOVED'

  def __init__(self, json, wavelet):
    super(WaveletBlipRemoved, self).__init__(json, wavelet)
    self.removed_blip_id = self.properties['removedBlipId']
    # None when the removed blip is not part of the event context.
    self.removed_blip = wavelet.blips.get(self.removed_blip_id)
class WaveletParticipantsChanged(Event):
  """Event triggered when the participants on a wave change.

  Attributes:
    participants_added: List of participants added.
    participants_removed: List of participants removed.
  """
  # Wire identifier sent by the wave server for this event.
  type = 'WAVELET_PARTICIPANTS_CHANGED'

  def __init__(self, json, wavelet):
    super(WaveletParticipantsChanged, self).__init__(json, wavelet)
    self.participants_added = self.properties['participantsAdded']
    self.participants_removed = self.properties['participantsRemoved']
class WaveletSelfAdded(Event):
  """Event triggered when the robot is added to the wavelet."""
  # No payload beyond the base Event fields.
  type = 'WAVELET_SELF_ADDED'
class WaveletSelfRemoved(Event):
  """Event triggered when the robot is removed from the wavelet."""
  # No payload beyond the base Event fields.
  type = 'WAVELET_SELF_REMOVED'
class WaveletTitleChanged(Event):
  """Event triggered when the title of the wavelet has changed.

  Attributes:
    title: The new title.
  """
  # Wire identifier sent by the wave server for this event.
  type = 'WAVELET_TITLE_CHANGED'

  def __init__(self, json, wavelet):
    super(WaveletTitleChanged, self).__init__(json, wavelet)
    self.title = self.properties['title']
class BlipContributorsChanged(Event):
  """Event triggered when the contributors to this blip change.

  Attributes:
    contributors_added: List of contributors that were added.
    contributors_removed: List of contributors that were removed.
  """
  # Wire identifier sent by the wave server for this event.
  type = 'BLIP_CONTRIBUTORS_CHANGED'

  def __init__(self, json, wavelet):
    super(BlipContributorsChanged, self).__init__(json, wavelet)
    # Bug fix: these attributes were misspelled ('contibutors_...'),
    # contradicting the documented names in the class docstring.
    self.contributors_added = self.properties['contributorsAdded']
    self.contributors_removed = self.properties['contributorsRemoved']
    # Keep the historical misspellings as aliases for backward
    # compatibility with callers that relied on them.
    self.contibutors_added = self.contributors_added
    self.contibutors_removed = self.contributors_removed
class BlipSubmitted(Event):
  """Event triggered when a blip is submitted."""
  # No payload beyond the base Event fields.
  type = 'BLIP_SUBMITTED'
class DocumentChanged(Event):
  """Event triggered when a document is changed.

  This event is fired after any changes in the document and should be used
  carefully to keep the amount of traffic to the robot reasonable. Use
  filters where appropriate.
  """
  # No payload beyond the base Event fields.
  type = 'DOCUMENT_CHANGED'
class FormButtonClicked(Event):
  """Event triggered when a form button is clicked.

  Attributes:
    button_name: The name of the button that was clicked.
  """
  # Wire identifier sent by the wave server for this event.
  type = 'FORM_BUTTON_CLICKED'

  def __init__(self, json, wavelet):
    super(FormButtonClicked, self).__init__(json, wavelet)
    self.button_name = self.properties['buttonName']
class GadgetStateChanged(Event):
  """Event triggered when the state of a gadget changes.

  Attributes:
    index: The index of the gadget that changed in the document.
    old_state: The old state of the gadget.
  """
  # Wire identifier sent by the wave server for this event.
  type = 'GADGET_STATE_CHANGED'

  def __init__(self, json, wavelet):
    super(GadgetStateChanged, self).__init__(json, wavelet)
    self.index = self.properties['index']
    self.old_state = self.properties['oldState']
class AnnotatedTextChanged(Event):
  """Event triggered when text with an annotation has changed.

  This is mainly useful in combination with a filter on the
  name of the annotation.

  Attributes:
    name: The name of the annotation.
    value: The value of the annotation that changed.
  """
  # Wire identifier sent by the wave server for this event.
  type = 'ANNOTATED_TEXT_CHANGED'

  def __init__(self, json, wavelet):
    super(AnnotatedTextChanged, self).__init__(json, wavelet)
    self.name = self.properties['name']
    # 'value' may be absent, hence .get() rather than indexing.
    self.value = self.properties.get('value')
class OperationError(Event):
  """Triggered when an event on the server occurred.

  Attributes:
    operation_id: The operation id of the failing operation.
    error_message: More information as to what went wrong.
  """
  # Wire identifier sent by the wave server for this event.
  type = 'OPERATION_ERROR'

  def __init__(self, json, wavelet):
    super(OperationError, self).__init__(json, wavelet)
    self.operation_id = self.properties['operationId']
    self.error_message = self.properties['message']
class WaveletCreated(Event):
  """Triggered when a new wavelet is created.

  This event is only triggered if the robot creates a new
  wavelet and can be used to initialize the newly created wave.
  wavelets created by other participants remain invisible
  to the robot until the robot is added to the wave in
  which case WaveletSelfAdded is triggered.

  Attributes:
    message: Whatever string was passed into the new_wave
        call as message (if any).
  """
  # Wire identifier sent by the wave server for this event.
  type = 'WAVELET_CREATED'

  def __init__(self, json, wavelet):
    super(WaveletCreated, self).__init__(json, wavelet)
    self.message = self.properties['message']
class WaveletFetched(Event):
  """Triggered when a new wavelet is fetched.

  This event is triggered after a robot requests to
  see another wavelet. The robot has to be on the other
  wavelet already.

  Attributes:
    message: Whatever string was passed into the new_wave
        call as message (if any).
  """
  # Wire identifier sent by the wave server for this event.
  type = 'WAVELET_FETCHED'

  def __init__(self, json, wavelet):
    super(WaveletFetched, self).__init__(json, wavelet)
    self.message = self.properties['message']
class WaveletTagsChanged(Event):
  """Event triggered when the tags on a wavelet change."""
  # Wire identifier sent by the wave server for this event.
  type = 'WAVELET_TAGS_CHANGED'

  def __init__(self, json, wavelet):
    # No extra properties are lifted beyond the base Event fields.
    super(WaveletTagsChanged, self).__init__(json, wavelet)
def is_event(cls):
  """Returns whether the passed class is an event.

  A class qualifies when it derives from Event and declares a 'type'
  attribute; non-class arguments are never events.
  """
  try:
    return issubclass(cls, Event) and hasattr(cls, 'type')
  except TypeError:
    # issubclass raises TypeError for non-class arguments.
    return False
# Registry of every event class defined in this module (anything that
# subclasses Event and carries a 'type' attribute, per is_event).
ALL = [item for item in globals().copy().values() if is_event(item)]
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import element
import errors
import util
class Annotation(object):
  """Models an annotation on a document.

  An annotation is a name/value pair applied over a [start, end) range of
  content. Annotations can carry arbitrary data or be interpreted by a
  client when displaying the text.
  """

  # Reserved annotation names understood by the client for text styling.
  BACKGROUND_COLOR = "style/backgroundColor"
  COLOR = "style/color"
  FONT_FAMILY = "style/fontFamily"
  FONT_SIZE = "style/fontSize"
  FONT_STYLE = "style/fontStyle"
  FONT_WEIGHT = "style/fontWeight"
  TEXT_DECORATION = "style/textDecoration"
  VERTICAL_ALIGN = "style/verticalAlign"

  def __init__(self, name, value, start, end):
    self._name = name
    self._value = value
    self._start = start
    self._end = end

  # Read-only views of the annotation's fields.
  name = property(lambda self: self._name)
  value = property(lambda self: self._value)
  start = property(lambda self: self._start)
  end = property(lambda self: self._end)

  def _shift(self, where, inc):
    """Shift annotation by 'inc' if it (partly) overlaps with 'where'."""
    # Each boundary at or beyond the edit point moves by the delta.
    if self._start >= where:
      self._start += inc
    if self._end >= where:
      self._end += inc

  def serialize(self):
    """Serializes the annotation.

    Returns:
      A dict containing the name, value, and range values.
    """
    range_json = {'start': self._start, 'end': self._end}
    return {'name': self._name, 'value': self._value, 'range': range_json}
class Annotations(object):
  """A dictionary-like object containing the annotations, keyed by name."""

  def __init__(self, operation_queue, blip):
    # operation_queue: queue used by the owning blip (kept for parity with
    # the other document containers; not used by the internal mutators).
    # blip: the owning blip; only its length is consulted here.
    self._operation_queue = operation_queue
    self._blip = blip
    self._store = {}

  def __contains__(self, what):
    # Accept either an Annotation instance or a bare name.
    if isinstance(what, Annotation):
      what = what.name
    return what in self._store

  def _add_internal(self, name, value, start, end):
    """Internal add annotation; does not send out operations.

    Overlapping annotations with the same value are merged; overlapping
    ones with a different value are chopped around the new range.
    """
    if name in self._store:
      # TODO: use bisect to make this more efficient.
      new_list = []
      for existing in self._store[name]:
        if start > existing.end or end < existing.start:
          # No overlap: keep as is.
          new_list.append(existing)
        else:
          if existing.value == value:
            # merge the annotations:
            start = min(existing.start, start)
            end = max(existing.end, end)
          else:
            # chop the bits off the existing annotation
            if existing.start < start:
              new_list.append(Annotation(
                  existing.name, existing.value, existing.start, start))
            if existing.end > end:
              # Bug fix: the surviving tail spans [end, existing.end); the
              # original passed the pair reversed, creating an inverted
              # range (compare the correct order in _delete_internal).
              new_list.append(Annotation(
                  existing.name, existing.value, end, existing.end))
      new_list.append(Annotation(name, value, start, end))
      self._store[name] = new_list
    else:
      self._store[name] = [Annotation(name, value, start, end)]

  def _delete_internal(self, name, start=0, end=-1):
    """Remove the passed annotation from the internal representation.

    A negative end is interpreted relative to the blip length.
    """
    if name not in self._store:
      return
    if end < 0:
      end = len(self._blip) + end
    new_list = []
    for a in self._store[name]:
      if start > a.end or end < a.start:
        # No overlap: keep as is.
        new_list.append(a)
      elif start < a.start and end > a.end:
        # Fully covered: drop it.
        continue
      else:
        # Partially covered: keep the surviving head and/or tail.
        if a.start < start:
          new_list.append(Annotation(name, a.value, a.start, start))
        if a.end > end:
          new_list.append(Annotation(name, a.value, end, a.end))
    if new_list:
      self._store[name] = new_list
    else:
      del self._store[name]

  def _shift(self, where, inc):
    """Shift annotation by 'inc' if it (partly) overlaps with 'where'."""
    for annotations in self._store.values():
      for annotation in annotations:
        annotation._shift(where, inc)
    # Merge fragmented annotations that became contiguous, for example:
    # Annotation('foo', 'bar', 1, 2) and Annotation('foo', 'bar', 2, 3).
    # Bug fix: the original merge pass deleted elements by a slice-relative
    # index while iterating the same list, which could leave a merged
    # annotation duplicated in the store; this single pass cannot.
    for name, annotations in self._store.items():
      merged = []
      for annotation in annotations:
        prev = merged[-1] if merged else None
        if (prev is not None and prev.end == annotation.start
            and prev.value == annotation.value):
          merged[-1] = Annotation(name, prev.value, prev.start,
                                  annotation.end)
        else:
          merged.append(annotation)
      self._store[name] = merged

  def __len__(self):
    # Number of distinct annotation names (not annotation instances).
    return len(self._store)

  def __getitem__(self, key):
    return self._store[key]

  def __iter__(self):
    # Yields every Annotation instance, across all names.
    for l in self._store.values():
      for ann in l:
        yield ann

  def names(self):
    """Return the names of the annotations in the store."""
    return self._store.keys()

  def serialize(self):
    """Return a list of the serialized annotations."""
    res = []
    for v in self._store.values():
      res += [a.serialize() for a in v]
    return res
class Blips(object):
  """A dictionary-like object containing the blips, keyed on blip ID."""

  def __init__(self, blips):
    self._blips = blips

  def __getitem__(self, blip_id):
    return self._blips[blip_id]

  def __iter__(self):
    return iter(self._blips)

  def __len__(self):
    return len(self._blips)

  def _add(self, ablip):
    # Register the blip under its own id.
    self._blips[ablip.blip_id] = ablip

  def _remove_with_id(self, blip_id):
    # Drop the blip and unlink it from its parent's child list.
    doomed = self._blips[blip_id]
    if doomed:
      # Remove the reference to this blip from its parent.
      parent = doomed.parent_blip
      if parent:
        parent._child_blip_ids.remove(blip_id)
    del self._blips[blip_id]

  def get(self, blip_id, default_value=None):
    """Retrieves a blip.

    Returns:
      A Blip object. If none found for the ID, it returns None,
      or if default_value is specified, it returns that.
    """
    return self._blips.get(blip_id, default_value)

  def serialize(self):
    """Serializes the blips.

    Returns:
      A dict of serialized blips.
    """
    return dict((blip_id, item.serialize())
                for blip_id, item in self._blips.items())
class BlipRefs(object):
"""Represents a set of references to contents in a blip.
For example, a BlipRefs instance can represent the results
of a search, an explicitly set range, a regular expression,
or refer to the entire blip. BlipRefs are used to express
operations on a blip in a consistent way that can easily
be transfered to the server.
The typical way of creating a BlipRefs object is to use
selector methods on the Blip object. Developers will not
usually instantiate a BlipRefs object directly.
"""
DELETE = 'DELETE'
REPLACE = 'REPLACE'
INSERT = 'INSERT'
INSERT_AFTER = 'INSERT_AFTER'
ANNOTATE = 'ANNOTATE'
CLEAR_ANNOTATION = 'CLEAR_ANNOTATION'
UPDATE_ELEMENT = 'UPDATE_ELEMENT'
def __init__(self, blip, maxres=1):
  # blip: the Blip this reference set points into.
  # maxres: maximum number of matches to produce (<= 0 means all,
  # per _find's contract).
  self._blip = blip
  self._maxres = maxres
@classmethod
def all(cls, blip, findwhat, maxres=-1, **restrictions):
  """Construct an instance representing the search for text or elements.

  Args:
    blip: the blip to search in.
    findwhat: a text string, an element class, or None for the whole blip.
    maxres: maximal number of matches, or <= 0 for all.
    restrictions: attribute equality restrictions applied when findwhat
        is an element class.

  Returns:
    A BlipRefs whose hits are computed lazily via _find.
  """
  obj = cls(blip, maxres)
  obj._findwhat = findwhat
  obj._restrictions = restrictions
  # Hits are recomputed on every use so they track later edits.
  obj._hits = lambda: obj._find(findwhat, maxres, **restrictions)
  if findwhat is None:
    # No findWhat, take the entire blip
    obj._params = {}
  else:
    # Mirror the query so the server can re-run the same search.
    query = {'maxRes': maxres}
    if isinstance(findwhat, basestring):
      query['textMatch'] = findwhat
    else:
      query['elementMatch'] = findwhat.class_type
      query['restrictions'] = restrictions
    obj._params = {'modifyQuery': query}
  return obj
@classmethod
def range(cls, blip, begin, end):
  """Constructs an instance representing an explicitly set range.

  Args:
    blip: the blip the range refers into.
    begin: start index of the range.
    end: end index (exclusive) of the range.
  """
  obj = cls(blip)
  obj._begin = begin
  obj._end = end
  # A fixed range has exactly one hit.
  obj._hits = lambda: [(begin, end)]
  obj._params = {'range': {'start': begin, 'end': end}}
  return obj
def _elem_matches(self, elem, clz, **restrictions):
if not isinstance(elem, clz):
return False
for key, val in restrictions.items():
if getattr(elem, key) != val:
return False
return True
def _find(self, what, maxres=-1, **restrictions):
"""Iterates where 'what' occurs in the associated blip.
What can be either a string or a class reference.
Examples:
self._find('hello') will return the first occurence of the word hello
self._find(element.Gadget, url='http://example.com/gadget.xml')
will return the first gadget that has as url example.com.
Args:
what: what to search for. Can be a class or a string. The class
should be an element from element.py
maxres: number of results to return at most, or <= 0 for all.
restrictions: if what specifies a class, further restrictions
of the found instances.
Yields:
Tuples indicating the range of the matches. For a one
character/element match at position x, (x, x+1) is yielded.
"""
blip = self._blip
if what is None:
yield 0, len(blip)
raise StopIteration
if isinstance(what, basestring):
idx = blip._content.find(what)
count = 0
while idx != -1:
yield idx, idx + len(what)
count += 1
if count == maxres:
raise StopIteration
idx = blip._content.find(what, idx + len(what))
else:
count = 0
for idx, el in blip._elements.items():
if self._elem_matches(el, what, **restrictions):
yield idx, idx + 1
count += 1
if count == maxres:
raise StopIteration
def _execute(self, modify_how, what, bundled_annotations=None):
  """Executes this BlipRefs object.

  Applies the modification locally (content, elements, annotations) and
  queues a single document_modify operation mirroring it.

  Args:
    modify_how: What to do. Any of the operation declared at the top.
    what: Depending on the operation. For delete, has to be None.
        For the others it is a singleton, a list or a function returning
        what to do; for ANNOTATE tuples of (key, value), for the others
        either string or elements.
        If what is a function, it takes three parameters, the content of
        the blip, the beginning of the matching range and the end.
    bundled_annotations: Annotations to apply immediately.

  Raises:
    IndexError when trying to access content outside of the blip.
    ValueError when called with the wrong values.

  Returns:
    self for chainability.
  """
  blip = self._blip
  if modify_how != BlipRefs.DELETE:
    # Normalize `what` to a list so replacements can cycle through it.
    if type(what) != list:
      what = [what]
    next_index = 0
  matched = []
  # updated_elements stores the element type of the element to update.
  updated_elements = []
  # For now, if we find one markup, we'll use it everywhere.
  next = None
  hit_found = False
  for start, end in self._hits():
    hit_found = True
    # Negative indices wrap around the end of the document.
    if start < 0:
      start += len(blip)
      if end == 0:
        end += len(blip)
    if end < 0:
      end += len(blip)
    if len(blip) == 0:
      if start != 0 or end != 0:
        raise IndexError('Start and end have to be 0 for empty document')
    elif start < 0 or end < 1 or start >= len(blip) or end > len(blip):
      raise IndexError('Position outside the document')
    if modify_how == BlipRefs.DELETE:
      # Drop covered elements/annotations, then close the text gap.
      for i in range(start, end):
        if i in blip._elements:
          del blip._elements[i]
      blip._delete_annotations(start, end)
      blip._shift(end, start - end)
      blip._content = blip._content[:start] + blip._content[end:]
    else:
      if callable(what):
        next = what(blip._content, start, end)
        matched.append(next)
      else:
        # Cycle through the supplied values, one per hit.
        next = what[next_index]
        next_index = (next_index + 1) % len(what)
      if isinstance(next, str):
        next = util.force_unicode(next)
      if modify_how == BlipRefs.ANNOTATE:
        key, value = next
        blip.annotations._add_internal(key, value, start, end)
      elif modify_how == BlipRefs.CLEAR_ANNOTATION:
        blip.annotations._delete_internal(next, start, end)
      elif modify_how == BlipRefs.UPDATE_ELEMENT:
        el = blip._elements.get(start)
        # Bug fix: the original tested the imported module `element`
        # (always truthy) instead of the local `el`, so a missing element
        # never raised the intended error.
        if not el:
          raise ValueError('No element found at index %s' % start)
        # the passing around of types this way feels a bit dirty:
        updated_elements.append(element.Element.from_json({'type': el.type,
            'properties': next}))
        for k, b in next.items():
          setattr(el, k, b)
      else:
        if modify_how == BlipRefs.INSERT:
          end = start
        elif modify_how == BlipRefs.INSERT_AFTER:
          start = end
        elif modify_how == BlipRefs.REPLACE:
          pass
        else:
          raise ValueError('Unexpected modify_how: ' + modify_how)
        if isinstance(next, element.Element):
          # Elements occupy a single placeholder character.
          text = ' '
        else:
          text = next
        # in the case of a replace, and the replacement text is shorter,
        # delete the delta.
        if start != end and len(text) < end - start:
          blip._delete_annotations(start + len(text), end)
        blip._shift(end, len(text) + start - end)
        blip._content = blip._content[:start] + text + blip._content[end:]
        if bundled_annotations:
          end_annotation = start + len(text)
          blip._delete_annotations(start, end_annotation)
          for key, value in bundled_annotations:
            blip.annotations._add_internal(key, value, start, end_annotation)
        if isinstance(next, element.Element):
          blip._elements[start] = next
  # No match found, return immediately without generating op.
  if not hit_found:
    return
  operation = blip._operation_queue.document_modify(blip.wave_id,
                                                    blip.wavelet_id,
                                                    blip.blip_id)
  for param, value in self._params.items():
    operation.set_param(param, value)
  modify_action = {'modifyHow': modify_how}
  if modify_how == BlipRefs.DELETE:
    pass
  elif modify_how == BlipRefs.UPDATE_ELEMENT:
    modify_action['elements'] = updated_elements
  elif (modify_how == BlipRefs.REPLACE or
        modify_how == BlipRefs.INSERT or
        modify_how == BlipRefs.INSERT_AFTER):
    if callable(what):
      what = matched
    if what:
      if not isinstance(next, element.Element):
        modify_action['values'] = [util.force_unicode(value)
                                   for value in what]
      else:
        modify_action['elements'] = what
  elif modify_how == BlipRefs.ANNOTATE:
    modify_action['values'] = [x[1] for x in what]
    modify_action['annotationKey'] = what[0][0]
  elif modify_how == BlipRefs.CLEAR_ANNOTATION:
    modify_action['annotationKey'] = what[0]
  if bundled_annotations:
    modify_action['bundledAnnotations'] = [
        {'key': key, 'value': value} for key, value in bundled_annotations]
  operation.set_param('modifyAction', modify_action)
  return self
def insert(self, what, bundled_annotations=None):
  """Insert *what* at every matched position; returns self for chaining."""
  return self._execute(BlipRefs.INSERT,
                       what,
                       bundled_annotations=bundled_annotations)
def insert_after(self, what, bundled_annotations=None):
  """Insert *what* just after every matched position; returns self."""
  return self._execute(BlipRefs.INSERT_AFTER,
                       what,
                       bundled_annotations=bundled_annotations)
def replace(self, what, bundled_annotations=None):
  """Replace every matched position with *what*; returns self."""
  return self._execute(BlipRefs.REPLACE,
                       what,
                       bundled_annotations=bundled_annotations)
def delete(self):
  """Delete the content at every matched position; returns self."""
  return self._execute(BlipRefs.DELETE, None)
def annotate(self, name, value=None):
  """Annotate the content at the matched positions.

  Either pass both name and value to set a single annotation, or pass
  as the first parameter something that yields name/value pairs. Both
  name and value should be strings.
  """
  target = name if value is None else (name, value)
  return self._execute(BlipRefs.ANNOTATE, target)
def clear_annotation(self, name):
  """Clear the named annotation at every matched position; returns self."""
  return self._execute(BlipRefs.CLEAR_ANNOTATION, name)
def update_element(self, new_values):
  """Update the matched element(s) with a dictionary of new values."""
  return self._execute(BlipRefs.UPDATE_ELEMENT, new_values)
def __nonzero__(self):
  """True iff at least one hit exists (Python 2 truth protocol)."""
  for _ in self._hits():
    return True
  return False
def value(self):
  """Return the value of the first match.

  Yields the element at the matched position when the match covers exactly
  one element slot, otherwise the matched text.

  Raises:
    ValueError: if there are no matches at all.
  """
  for start, end in self._hits():
    elements = self._blip._elements
    if end - start == 1 and start in elements:
      return elements[start]
    return self._blip.text[start:end]
  raise ValueError('BlipRefs has no values')
def __getattr__(self, attribute):
  """Delegate attribute access to the first matched value.

  This allows for clever things like
    first(IMAGE).url
  or
    blip.annotate_with(key, value).upper()
  """
  return getattr(self.value(), attribute)
def __radd__(self, other):
  """Support string + BlipRefs by concatenating with the first value."""
  first_value = self.value()
  return other + first_value
def __cmp__(self, other):
  """Compare against the first matched value (Python 2 protocol)."""
  return cmp(self.value(), other)
def __iter__(self):
  """Yield the (start, end) pairs of every hit."""
  for pair in self._hits():
    yield pair
class Blip(object):
  """Models a single blip instance.

  Blips are essentially the documents that make up a conversation. Blips can
  live in a hierarchy of blips. A root blip has no parent blip id, but all
  blips have the ids of the wave and wavelet that they are associated with.

  Blips also contain annotations, content and elements, which are accessed via
  the Document object.
  """

  def __init__(self, json, other_blips, operation_queue):
    """Inits this blip with JSON data.

    Args:
      json: JSON data dictionary from Wave server.
      other_blips: A dictionary like object that can be used to resolve
          ids of blips to blips.
      operation_queue: an OperationQueue object to store generated operations
          in.
    """
    self._blip_id = json.get('blipId')
    self._operation_queue = operation_queue
    self._child_blip_ids = set(json.get('childBlipIds', []))
    self._content = json.get('content', '')
    self._contributors = set(json.get('contributors', []))
    self._creator = json.get('creator')
    self._last_modified_time = json.get('lastModifiedTime', 0)
    self._version = json.get('version', 0)
    self._parent_blip_id = json.get('parentBlipId')
    self._wave_id = json.get('waveId')
    self._wavelet_id = json.get('waveletId')
    if isinstance(other_blips, Blips):
      self._other_blips = other_blips
    else:
      self._other_blips = Blips(other_blips)
    self._annotations = Annotations(operation_queue, self)
    for annjson in json.get('annotations', []):
      r = annjson['range']
      self._annotations._add_internal(annjson['name'],
                                      annjson['value'],
                                      r['start'],
                                      r['end'])
    # Elements are keyed by their integer position in the content; the
    # incoming JSON dictionary uses string keys.
    self._elements = {}
    json_elements = json.get('elements', {})
    for elem in json_elements:
      self._elements[int(elem)] = element.Element.from_json(json_elements[elem])
    self.raw_data = json

  @property
  def blip_id(self):
    """The id of this blip."""
    return self._blip_id

  @property
  def wave_id(self):
    """The id of the wave that this blip belongs to."""
    return self._wave_id

  @property
  def wavelet_id(self):
    """The id of the wavelet that this blip belongs to."""
    return self._wavelet_id

  @property
  def child_blip_ids(self):
    """The set of the ids of this blip's children."""
    return self._child_blip_ids

  @property
  def child_blips(self):
    """The set of blips that are children of this blip."""
    # Children missing from the context are silently skipped.
    return set([self._other_blips[blip_id] for blip_id in self._child_blip_ids
                if blip_id in self._other_blips])

  @property
  def contributors(self):
    """The set of participant ids that contributed to this blip."""
    return self._contributors

  @property
  def creator(self):
    """The id of the participant that created this blip."""
    return self._creator

  @property
  def last_modified_time(self):
    """The time in seconds since epoch when this blip was last modified."""
    return self._last_modified_time

  @property
  def version(self):
    """The version of this blip."""
    return self._version

  @property
  def parent_blip_id(self):
    """The parent blip_id or None if this is the root blip."""
    return self._parent_blip_id

  @property
  def parent_blip(self):
    """The parent blip or None if it is the root."""
    # if parent_blip_id is None, get will also return None
    return self._other_blips.get(self._parent_blip_id)

  @property
  def inline_blip_offset(self):
    """The offset in the parent if this blip is inline or -1 if not.

    If the parent is not in the context, this function will always
    return -1 since it can't determine the inline blip status.
    """
    parent = self.parent_blip
    if not parent:
      return -1
    for offset, el in parent._elements.items():
      if el.type == element.Element.INLINE_BLIP_TYPE and el.id == self.blip_id:
        return offset
    return -1

  def is_root(self):
    """Returns whether this is the root blip of a wavelet."""
    return self._parent_blip_id is None

  @property
  def annotations(self):
    """The annotations for this document."""
    return self._annotations

  @property
  def elements(self):
    """Returns a list of elements for this document.

    The elements of a blip are things like forms elements and gadgets
    that cannot be expressed as plain text. In the text of the blip, you'll
    typically find a space as a place holder for the element.

    If you want to retrieve the element at a particular index in the blip, use
    blip[index].value().
    """
    return self._elements.values()

  def __len__(self):
    return len(self._content)

  def __getitem__(self, item):
    """Returns a BlipRefs for the given index or slice."""
    if isinstance(item, slice):
      if item.step:
        raise errors.Error('Step not supported for blip slices')
      return self.range(item.start, item.stop)
    else:
      return self.at(item)

  def __setitem__(self, item, value):
    """Short cut for self.range/at().replace(value)."""
    self.__getitem__(item).replace(value)

  def __delitem__(self, item):
    """Short cut for self.range/at().delete()."""
    self.__getitem__(item).delete()

  def _shift(self, where, inc):
    """Move elements and annotations at or after 'where' up by 'inc'."""
    new_elements = {}
    for idx, el in self._elements.items():
      if idx >= where:
        idx += inc
      new_elements[idx] = el
    self._elements = new_elements
    self._annotations._shift(where, inc)

  def _delete_annotations(self, start, end):
    """Delete all annotations between 'start' and 'end'."""
    for annotation_name in self._annotations.names():
      self._annotations._delete_internal(annotation_name, start, end)

  def all(self, findwhat=None, maxres=-1, **restrictions):
    """Returns a BlipRefs object representing all results for the search.

    If searching for an element, the restrictions can be used to specify
    additional element properties to filter on, like the url of a Gadget.
    """
    return BlipRefs.all(self, findwhat, maxres, **restrictions)

  def first(self, findwhat=None, **restrictions):
    """Returns a BlipRefs object representing the first result for the search.

    If searching for an element, the restrictions can be used to specify
    additional element properties to filter on, like the url of a Gadget.
    """
    return BlipRefs.all(self, findwhat, 1, **restrictions)

  def at(self, index):
    """Returns a BlipRefs object representing a 1-character range."""
    return BlipRefs.range(self, index, index + 1)

  def range(self, start, end):
    """Returns a BlipRefs object representing the range."""
    return BlipRefs.range(self, start, end)

  def serialize(self):
    """Return a dictionary representation of this blip ready for json."""
    return {'blipId': self._blip_id,
            'childBlipIds': list(self._child_blip_ids),
            'content': self._content,
            'creator': self._creator,
            'contributors': list(self._contributors),
            'lastModifiedTime': self._last_modified_time,
            'version': self._version,
            'parentBlipId': self._parent_blip_id,
            'waveId': self._wave_id,
            'waveletId': self._wavelet_id,
            'annotations': self._annotations.serialize(),
            'elements': dict([(index, e.serialize())
                              for index, e in self._elements.items()])
           }

  def proxy_for(self, proxy_for_id):
    """Return a view on this blip that will proxy for the specified id.

    A shallow copy of the current blip is returned with the proxy_for_id
    set. Any modifications made to this copy will be done using the
    proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will
    be used.
    """
    operation_queue = self._operation_queue.proxy_for(proxy_for_id)
    res = Blip(json={},
               other_blips={},
               operation_queue=operation_queue)
    # NOTE: shallow copy by design -- mutable members (_elements,
    # _annotations, ...) are shared with self.
    res._blip_id = self._blip_id
    res._child_blip_ids = self._child_blip_ids
    res._content = self._content
    res._contributors = self._contributors
    res._creator = self._creator
    res._last_modified_time = self._last_modified_time
    res._version = self._version
    res._parent_blip_id = self._parent_blip_id
    res._wave_id = self._wave_id
    res._wavelet_id = self._wavelet_id
    res._other_blips = self._other_blips
    res._annotations = self._annotations
    res._elements = self._elements
    res.raw_data = self.raw_data
    return res

  @property
  def text(self):
    """Returns the raw text content of this document."""
    return self._content

  def find(self, what, **restrictions):
    """Iterate over matching bits of content.

    Yields either elements or pieces of text.
    """
    br = BlipRefs.all(self, what, **restrictions)
    for start, end in br._hits():
      if end - start == 1 and start in self._elements:
        yield self._elements[start]
      else:
        yield self._content[start:end]
    # No explicit 'raise StopIteration' here: returning from a generator
    # already ends iteration, and an explicit raise is an error under
    # PEP 479 (Python 3.7+).

  def append(self, what, bundled_annotations=None):
    """Convenience method covering a common pattern."""
    return BlipRefs.all(self, findwhat=None).insert_after(
        what, bundled_annotations=bundled_annotations)

  def reply(self):
    """Create and return a reply to this blip."""
    blip_data = self._operation_queue.blip_create_child(self.wave_id,
                                                        self.wavelet_id,
                                                        self.blip_id)
    new_blip = Blip(blip_data, self._other_blips, self._operation_queue)
    self._other_blips._add(new_blip)
    return new_blip

  def append_markup(self, markup):
    """Interpret the markup text as xhtml and append the result to the doc.

    Args:
      markup: The markup'ed text to append.
    """
    markup = util.force_unicode(markup)
    self._operation_queue.document_append_markup(self.wave_id,
                                                 self.wavelet_id,
                                                 self.blip_id,
                                                 markup)
    # Mirror the server-side change in the local content.
    self._content += util.parse_markup(markup)

  def insert_inline_blip(self, position):
    """Inserts an inline blip into this blip at a specific position.

    Args:
      position: Position to insert the blip at. This has to be greater than 0.

    Returns:
      The new Blip that was created.

    Raises:
      IndexError: if position is not greater than 0.
    """
    if position <= 0:
      raise IndexError(('Illegal inline blip position: %d. Position has to ' +
                        'be greater than 0.') % position)
    blip_data = self._operation_queue.document_inline_blip_insert(
        self.wave_id,
        self.wavelet_id,
        self.blip_id,
        position)
    new_blip = Blip(blip_data, self._other_blips, self._operation_queue)
    self._other_blips._add(new_blip)
    return new_blip
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elements are non-text bits living in blips like images, gadgets etc.
This module defines the Element class and the derived classes.
"""
import base64
import logging
import sys
import util
class Element(object):
  """Elements are non-text content within a document.

  These are generally abstracted from the Robot. Although a Robot can query
  the properties of an element it can only interact with the specific types
  that the element represents.

  Properties of elements are both accessible directly (image.url) and through
  the properties dictionary (image.properties['url']). In general Element
  should not be instantiated by robots; rely on the derived classes instead.
  """

  # INLINE_BLIP_TYPE is not a separate type since it shouldn't be
  # instantiated; it exists only for introspection.
  INLINE_BLIP_TYPE = "INLINE_BLIP"

  def __init__(self, element_type, **properties):
    """Initializes self with the specified type and any properties.

    Args:
      element_type: string typed member of ELEMENT_TYPE.
      properties: either keyword properties, or a single keyword argument
          'properties' holding a dictionary of properties. Both of these
          are supported:
            e = Element(atype, prop1=val1, prop2=val2)
            e = Element(atype, properties={'prop1': val1, 'prop2': val2})
    """
    if len(properties) == 1 and 'properties' in properties:
      properties = properties['properties']
    self._type = element_type
    # As long as _operation_queue is None the element is unattached. Once a
    # blip acquires the element, the blip sets the queue so that changes to
    # the element are properly sent to the server.
    self._operation_queue = None
    self._properties = properties.copy()

  @property
  def type(self):
    """The type of this element."""
    return self._type

  @classmethod
  def from_json(cls, json):
    """Instantiate the appropriate Element subclass from parsed JSON."""
    etype = json['type']
    props = json['properties'].copy()
    subclass = ALL.get(etype)
    if not subclass:
      # Unknown type; the server could be newer than this client.
      return Element(element_type=etype, properties=props)
    return subclass.from_props(props)

  def get(self, key, default=None):
    """Dictionary-style property lookup with a default."""
    return self._properties.get(key, default)

  def __getattr__(self, key):
    return self._properties[key]

  def serialize(self):
    """Custom serializer for Elements."""
    return util.serialize({'properties': util.non_none_dict(self._properties),
                           'type': self._type})
class Input(Element):
  """A single-line text input element."""
  class_type = 'INPUT'

  def __init__(self, name, value=''):
    super(Input, self).__init__(Input.class_type,
                                name=name,
                                value=value,
                                default_value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstruct an Input from a property dictionary."""
    return cls(name=props.get('name'), value=props.get('value'))
class Check(Element):
  """A checkbox element."""
  class_type = 'CHECK'

  def __init__(self, name, value=''):
    super(Check, self).__init__(Check.class_type,
                                name=name,
                                value=value,
                                default_value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstruct a Check from a property dictionary."""
    return cls(name=props.get('name'), value=props.get('value'))
class Button(Element):
  """A clickable button element."""
  class_type = 'BUTTON'

  def __init__(self, name, value):
    super(Button, self).__init__(Button.class_type,
                                 name=name,
                                 value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstruct a Button from a property dictionary."""
    return cls(name=props.get('name'), value=props.get('value'))
class Label(Element):
  """A label element attached to another form element."""
  class_type = 'LABEL'

  def __init__(self, label_for, caption):
    # The target element's name is stored under 'name', the caption
    # under 'value', mirroring the wire format.
    super(Label, self).__init__(Label.class_type,
                                name=label_for,
                                value=caption)

  @classmethod
  def from_props(cls, props):
    """Reconstruct a Label from a property dictionary."""
    return cls(label_for=props.get('name'), caption=props.get('value'))
class RadioButton(Element):
  """A single radio button belonging to a group."""
  class_type = 'RADIO_BUTTON'

  def __init__(self, name, group):
    # The group name is carried in the 'value' property on the wire.
    super(RadioButton, self).__init__(RadioButton.class_type,
                                      name=name,
                                      value=group)

  @classmethod
  def from_props(cls, props):
    """Reconstruct a RadioButton from a property dictionary."""
    return cls(name=props.get('name'), group=props.get('value'))
class RadioButtonGroup(Element):
  """A group of radio buttons."""
  class_type = 'RADIO_BUTTON_GROUP'

  def __init__(self, name, value):
    super(RadioButtonGroup, self).__init__(RadioButtonGroup.class_type,
                                           name=name,
                                           value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstruct a RadioButtonGroup from a property dictionary."""
    return cls(name=props.get('name'), value=props.get('value'))
class Password(Element):
  """A password (masked) input element."""
  class_type = 'PASSWORD'

  def __init__(self, name, value):
    super(Password, self).__init__(Password.class_type,
                                   name=name,
                                   value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstruct a Password from a property dictionary."""
    return cls(name=props.get('name'), value=props.get('value'))
class TextArea(Element):
  """A multi-line text area element."""
  class_type = 'TEXTAREA'

  def __init__(self, name, value):
    super(TextArea, self).__init__(TextArea.class_type,
                                   name=name,
                                   value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstruct a TextArea from a property dictionary."""
    return cls(name=props.get('name'), value=props.get('value'))
class Line(Element):
  """A line element.

  Note that Lines are represented in the text as newlines.
  """
  class_type = 'LINE'

  # Possible line types:
  #: Largest heading (H1).
  TYPE_H1 = 'h1'
  #: H2 heading.
  TYPE_H2 = 'h2'
  #: H3 heading.
  TYPE_H3 = 'h3'
  #: H4 heading.
  TYPE_H4 = 'h4'
  #: Smallest heading (H5).
  TYPE_H5 = 'h5'
  #: Bulleted list item.
  TYPE_LI = 'li'

  # Possible values for alignment:
  #: Left-aligned.
  ALIGN_LEFT = 'l'
  #: Right-aligned.
  ALIGN_RIGHT = 'r'
  #: Centered.
  ALIGN_CENTER = 'c'
  #: Justified.
  ALIGN_JUSTIFIED = 'j'

  def __init__(self,
               line_type=None,
               indent=None,
               alignment=None,
               direction=None):
    super(Line, self).__init__(Line.class_type,
                               lineType=line_type,
                               indent=indent,
                               alignment=alignment,
                               direction=direction)

  @classmethod
  def from_props(cls, props):
    """Reconstruct a Line from a property dictionary."""
    return cls(line_type=props.get('lineType'),
               indent=props.get('indent'),
               alignment=props.get('alignment'),
               direction=props.get('direction'))
class Gadget(Element):
  """A gadget element."""
  class_type = 'GADGET'

  def __init__(self, url, props=None):
    """Initializes the gadget.

    Args:
      url: the url of the gadget xml.
      props: optional dictionary of additional gadget properties.
    """
    # Copy before injecting 'url' so we don't mutate the caller's dict
    # (the original code added 'url' into the dictionary passed in).
    if props is None:
      props = {}
    else:
      props = dict(props)
    props['url'] = url
    super(Gadget, self).__init__(Gadget.class_type, properties=props)

  @classmethod
  def from_props(cls, props):
    """Reconstruct a Gadget from a property dictionary."""
    return cls(props.get('url'), props)

  def serialize(self):
    """Gadgets allow for None values."""
    return {'properties': self._properties, 'type': self._type}

  def keys(self):
    """Get the valid property keys for this gadget (everything but url)."""
    return [x for x in self._properties.keys() if x != 'url']
class Installer(Element):
  """An extension installer element."""
  class_type = 'INSTALLER'

  def __init__(self, manifest):
    super(Installer, self).__init__(Installer.class_type,
                                    manifest=manifest)

  @classmethod
  def from_props(cls, props):
    """Reconstruct an Installer from a property dictionary."""
    return cls(props.get('manifest'))
class Image(Element):
  """An image element."""
  class_type = 'IMAGE'

  def __init__(self, url='', width=None, height=None,
               attachmentId=None, caption=None):
    super(Image, self).__init__(Image.class_type, url=url, width=width,
        height=height, attachmentId=attachmentId, caption=caption)

  @classmethod
  def from_props(cls, props):
    """Reconstruct an Image from a property dictionary."""
    # JSON gives unicode keys; keyword argument names must be byte strings
    # in Python 2.
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    # Use ** expansion instead of the long-deprecated apply() builtin.
    return cls(**props)
class Attachment(Element):
  """An attachment element.

  To create a new attachment, caption and data are needed.
  mimeType, attachmentId and attachmentUrl are sent via events.
  """
  class_type = 'ATTACHMENT'

  def __init__(self, caption=None, data=None, mimeType=None, attachmentId=None,
               attachmentUrl=None):
    # Store the raw payload per instance. The original code assigned it to
    # the CLASS (Attachment.originalData), which made every attachment
    # return the data of the most recently constructed one. The instance
    # copy lets .data keep returning the raw bytes even after serialize()
    # base64-encodes the value kept in _properties.
    self.__dict__['_original_data'] = data
    super(Attachment, self).__init__(Attachment.class_type, caption=caption,
        data=data, mimeType=mimeType, attachmentId=attachmentId,
        attachmentUrl=attachmentUrl)

  def __getattr__(self, key):
    # 'data' always resolves to the raw (unencoded) payload.
    if key == 'data':
      return self.__dict__['_original_data']
    return super(Attachment, self).__getattr__(key)

  @classmethod
  def from_props(cls, props):
    """Reconstruct an Attachment from a property dictionary."""
    # JSON gives unicode keys; keyword argument names must be byte strings
    # in Python 2. Use ** expansion instead of the deprecated apply().
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    return cls(**props)

  def serialize(self):
    """Serializes the attachment object into JSON.

    The attachment data is base64 encoded.
    """
    if self.data:
      self._properties['data'] = base64.encodestring(self.data)
    return super(Attachment, self).serialize()
def is_element(cls):
  """Returns whether the passed class is a concrete element class.

  A class qualifies when it subclasses Element and defines a class_type
  (the Element base class itself has no class_type and is excluded).
  """
  try:
    # The original kept an unused intermediate `h = hasattr(...)`; fold the
    # two checks into one expression.
    return issubclass(cls, Element) and hasattr(cls, 'class_type')
  except TypeError:
    # issubclass raises TypeError for non-class arguments.
    return False
# Registry mapping each class_type string to its Element subclass; used by
# Element.from_json to dispatch deserialization to the right class.
ALL = dict([(item.class_type, item) for item in globals().copy().values()
            if is_element(item)])
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the element module."""
import base64
import unittest
import element
import util
class TestElement(unittest.TestCase):
  """Tests for the element.Element class and its subclasses."""

  def testProperties(self):
    el = element.Element(element.Gadget.class_type,
                         key='value')
    self.assertEquals('value', el.key)

  def testFormElement(self):
    el = element.Input('input')
    self.assertEquals(element.Input.class_type, el.type)
    self.assertEquals(el.value, '')
    self.assertEquals(el.name, 'input')

  def testImage(self):
    image = element.Image('http://test.com/image.png', width=100, height=100)
    self.assertEquals(element.Image.class_type, image.type)
    self.assertEquals(image.url, 'http://test.com/image.png')
    self.assertEquals(image.width, 100)
    self.assertEquals(image.height, 100)

  def testAttachment(self):
    attachment = element.Attachment(caption='My Favorite', data='SomefakeData')
    self.assertEquals(element.Attachment.class_type, attachment.type)
    self.assertEquals(attachment.caption, 'My Favorite')
    self.assertEquals(attachment.data, 'SomefakeData')

  def testGadget(self):
    gadget = element.Gadget('http://test.com/gadget.xml')
    self.assertEquals(element.Gadget.class_type, gadget.type)
    self.assertEquals(gadget.url, 'http://test.com/gadget.xml')

  def testInstaller(self):
    installer = element.Installer('http://test.com/installer.xml')
    self.assertEquals(element.Installer.class_type, installer.type)
    self.assertEquals(installer.manifest, 'http://test.com/installer.xml')

  def testSerialize(self):
    image = element.Image('http://test.com/image.png', width=100, height=100)
    s = util.serialize(image)
    # Only the three properties that were set should be serialized.
    # (The original test built and sorted s.keys() without using it.)
    props = s['properties']
    self.assertEquals(len(props), 3)
    self.assertEquals(props['url'], 'http://test.com/image.png')
    self.assertEquals(props['width'], 100)
    self.assertEquals(props['height'], 100)

  def testSerializeAttachment(self):
    attachment = element.Attachment(caption='My Favorite', data='SomefakeData')
    s = util.serialize(attachment)
    # Only the two properties that were set should be serialized.
    props = s['properties']
    self.assertEquals(len(props), 2)
    self.assertEquals(props['caption'], 'My Favorite')
    self.assertEquals(props['data'], base64.encodestring('SomefakeData'))
    # The data attribute keeps returning the raw (unencoded) payload.
    self.assertEquals(attachment.data, 'SomefakeData')

  def testSerializeLine(self):
    line = element.Line(element.Line.TYPE_H1, alignment=element.Line.ALIGN_LEFT)
    s = util.serialize(line)
    # Only the two properties that were set should be serialized.
    props = s['properties']
    self.assertEquals(len(props), 2)
    self.assertEquals(props['alignment'], 'l')
    self.assertEquals(props['lineType'], 'h1')

  def testSerializeGadget(self):
    gadget = element.Gadget('http://test.com', {'prop1': 'a', 'prop_cap': None})
    s = util.serialize(gadget)
    # Gadgets serialize every property, including None values and the url.
    props = s['properties']
    self.assertEquals(len(props), 3)
    self.assertEquals(props['url'], 'http://test.com')
    self.assertEquals(props['prop1'], 'a')
    self.assertEquals(props['prop_cap'], None)

  def testGadgetElementFromJson(self):
    url = 'http://www.foo.com/gadget.xml'
    json = {
        'type': element.Gadget.class_type,
        'properties': {
            'url': url,
        }
    }
    gadget = element.Element.from_json(json)
    self.assertEquals(element.Gadget.class_type, gadget.type)
    self.assertEquals(url, gadget.url)

  def testImageElementFromJson(self):
    url = 'http://www.foo.com/image.png'
    width = '32'
    height = '32'
    attachment_id = '2'
    caption = 'Test Image'
    json = {
        'type': element.Image.class_type,
        'properties': {
            'url': url,
            'width': width,
            'height': height,
            'attachmentId': attachment_id,
            'caption': caption,
        }
    }
    image = element.Element.from_json(json)
    self.assertEquals(element.Image.class_type, image.type)
    self.assertEquals(url, image.url)
    self.assertEquals(width, image.width)
    self.assertEquals(height, image.height)
    self.assertEquals(attachment_id, image.attachmentId)
    self.assertEquals(caption, image.caption)

  def testAttachmentElementFromJson(self):
    caption = 'fake caption'
    data = 'fake data'
    mime_type = 'fake mime'
    attachment_id = 'fake id'
    attachment_url = 'fake URL'
    json = {
        'type': element.Attachment.class_type,
        'properties': {
            'caption': caption,
            'data': data,
            'mimeType': mime_type,
            'attachmentId': attachment_id,
            'attachmentUrl': attachment_url,
        }
    }
    attachment = element.Element.from_json(json)
    self.assertEquals(element.Attachment.class_type, attachment.type)
    self.assertEquals(caption, attachment.caption)
    self.assertEquals(data, attachment.data)
    self.assertEquals(mime_type, attachment.mimeType)
    self.assertEquals(attachment_id, attachment.attachmentId)
    self.assertEquals(attachment_url, attachment.attachmentUrl)

  def testFormElementFromJson(self):
    name = 'button'
    value = 'value'
    default_value = 'foo'
    json = {
        'type': element.Label.class_type,
        'properties': {
            'name': name,
            'value': value,
            'defaultValue': default_value,
        }
    }
    el = element.Element.from_json(json)
    self.assertEquals(element.Label.class_type, el.type)
    self.assertEquals(name, el.name)
    self.assertEquals(value, el.value)

  def testCanInstantiate(self):
    # Construct one instance of every registered element type and verify the
    # set of constructed types exactly matches element.ALL.
    bag = [element.Check(name='check', value='value'),
           element.Button(name='button', value='caption'),
           element.Input(name='input', value='caption'),
           element.Label(label_for='button', caption='caption'),
           element.RadioButton(name='name', group='group'),
           element.RadioButtonGroup(name='name', value='value'),
           element.Password(name='name', value='geheim'),
           element.TextArea(name='name', value='\n\n\n'),
           element.Installer(manifest='test.com/installer.xml'),
           element.Line(line_type='type',
                        indent='3',
                        alignment='r',
                        direction='d'),
           element.Gadget(url='test.com/gadget.xml',
                          props={'key1': 'val1', 'key2': 'val2'}),
           element.Image(url='test.com/image.png', width=100, height=200),
           element.Attachment(caption='fake caption', data='fake data')]
    types_constructed = set([type(x) for x in bag])
    types_required = set(element.ALL.values())
    missing_required = types_constructed.difference(types_required)
    self.assertEquals(missing_required, set())
    missing_constructed = types_required.difference(types_constructed)
    self.assertEquals(missing_constructed, set())
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines classes that are needed to model a wavelet."""
import blip
import errors
import util
class DataDocs(object):
  """Pythonic, dict-like wrapper around a wavelet's data documents.

  Every mutation queues a wavelet_datadoc_set operation so the change is
  propagated to the server as well as applied locally.
  """

  def __init__(self, init_docs, wave_id, wavelet_id, operation_queue):
    self._docs = init_docs
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __iter__(self):
    return iter(self._docs)

  def __contains__(self, key):
    return key in self._docs

  def __delitem__(self, key):
    # Deleting a missing key is a no-op (no operation is queued).
    if key not in self._docs:
      return
    self._operation_queue.wavelet_datadoc_set(
        self._wave_id, self._wavelet_id, key, None)
    del self._docs[key]

  def __getitem__(self, key):
    return self._docs[key]

  def __setitem__(self, key, value):
    self._operation_queue.wavelet_datadoc_set(
        self._wave_id, self._wavelet_id, key, value)
    # Setting an existing key to None removes it locally.
    if value is None and key in self._docs:
      del self._docs[key]
    else:
      self._docs[key] = value

  def __len__(self):
    return len(self._docs)

  def keys(self):
    return self._docs.keys()

  def serialize(self):
    """Returns the underlying dictionary of data documents."""
    return self._docs
class Participants(object):
  """Models the set of participants on a wavelet.

  Mutations queue the corresponding wavelet operations in addition to
  updating the local state.
  """

  #: Designates full access (read/write) role.
  ROLE_FULL = "FULL"
  #: Designates read-only role.
  ROLE_READ_ONLY = "READ_ONLY"

  def __init__(self, participants, roles, wave_id, wavelet_id, operation_queue):
    self._participants = set(participants)
    self._roles = roles.copy()
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __contains__(self, participant):
    return participant in self._participants

  def __len__(self):
    return len(self._participants)

  def __iter__(self):
    return iter(self._participants)

  def add(self, participant_id):
    """Adds a participant by their ID (address)."""
    self._operation_queue.wavelet_add_participant(
        self._wave_id, self._wavelet_id, participant_id)
    self._participants.add(participant_id)

  def get_role(self, participant_id):
    """Return the role for the given participant_id (FULL by default)."""
    return self._roles.get(participant_id, Participants.ROLE_FULL)

  def set_role(self, participant_id, role):
    """Sets the role for the given participant_id.

    Raises:
      ValueError: if role is not one of the ROLE_* constants.
    """
    if role not in (Participants.ROLE_FULL, Participants.ROLE_READ_ONLY):
      raise ValueError(role + ' is not a valid role')
    self._operation_queue.wavelet_modify_participant_role(
        self._wave_id, self._wavelet_id, participant_id, role)
    self._roles[participant_id] = role

  def serialize(self):
    """Returns a list of the participants."""
    return list(self._participants)
class Tags(object):
  """Models the list of tags on a wavelet.

  append/remove queue wavelet_modify_tag operations in addition to
  updating the local list.
  """

  def __init__(self, tags, wave_id, wavelet_id, operation_queue):
    self._tags = list(tags)
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __getitem__(self, index):
    return self._tags[index]

  def __len__(self):
    return len(self._tags)

  def __iter__(self):
    return iter(self._tags)

  def append(self, tag):
    """Appends a tag unless it is already present."""
    tag = util.force_unicode(tag)
    if tag not in self._tags:
      self._operation_queue.wavelet_modify_tag(
          self._wave_id, self._wavelet_id, tag)
      self._tags.append(tag)

  def remove(self, tag):
    """Removes a tag if it is present."""
    tag = util.force_unicode(tag)
    if tag in self._tags:
      self._operation_queue.wavelet_modify_tag(
          self._wave_id, self._wavelet_id, tag, modify_how='remove')
      self._tags.remove(tag)

  def serialize(self):
    """Returns a list of tags."""
    return list(self._tags)
class Wavelet(object):
  """Models a single wavelet.

  A single wavelet is composed of metadata, participants, and its blips.
  To guarantee that all blips are available, specify Context.ALL for events.
  """
  def __init__(self, json, blips, robot, operation_queue):
    """Inits this wavelet with JSON data.

    Args:
      json: JSON data dictionary from Wave server.
      blips: a dictionary object that can be used to resolve blips.
      robot: the robot owning this wavelet.
      operation_queue: an OperationQueue object to be used to
          send any generated operations to.
    """
    self._robot = robot
    self._operation_queue = operation_queue
    self._wave_id = json.get('waveId')
    self._wavelet_id = json.get('waveletId')
    self._creator = json.get('creator')
    self._creation_time = json.get('creationTime', 0)
    self._data_documents = DataDocs(json.get('dataDocuments', {}),
                                    self._wave_id,
                                    self._wavelet_id,
                                    operation_queue)
    self._last_modified_time = json.get('lastModifiedTime')
    self._participants = Participants(json.get('participants', []),
                                      json.get('participantRoles', {}),
                                      self._wave_id,
                                      self._wavelet_id,
                                      operation_queue)
    self._title = json.get('title', '')
    self._tags = Tags(json.get('tags', []),
                      self._wave_id,
                      self._wavelet_id,
                      operation_queue)
    # Keep the raw server payload around for callers that need fields not
    # surfaced through properties.
    self._raw_data = json
    self._blips = blip.Blips(blips)
    self._root_blip_id = json.get('rootBlipId')
    # The root blip may be absent from the supplied blips (depends on the
    # event context); in that case root_blip is None.
    if self._root_blip_id and self._root_blip_id in self._blips:
      self._root_blip = self._blips[self._root_blip_id]
    else:
      self._root_blip = None
    self._robot_address = None
  @property
  def wavelet_id(self):
    """Returns this wavelet's id."""
    return self._wavelet_id
  @property
  def wave_id(self):
    """Returns this wavelet's parent wave id."""
    return self._wave_id
  @property
  def creator(self):
    """Returns the participant id of the creator of this wavelet."""
    return self._creator
  @property
  def creation_time(self):
    """Returns the time that this wavelet was first created in milliseconds."""
    return self._creation_time
  @property
  def data_documents(self):
    """Returns the data documents for this wavelet based on key name."""
    return self._data_documents
  @property
  def domain(self):
    """Returns the domain that the wavelet belongs to.

    The domain is the part of the wave id before the '!' separator;
    returns None if the id contains no '!'.
    """
    p = self._wave_id.find('!')
    if p == -1:
      return None
    else:
      return self._wave_id[:p]
  @property
  def last_modified_time(self):
    """Returns the time that this wavelet was last modified in ms."""
    return self._last_modified_time
  @property
  def participants(self):
    """Returns a set of participants on this wavelet."""
    return self._participants
  @property
  def tags(self):
    """Returns a list of tags for this wavelet."""
    return self._tags
  @property
  def robot(self):
    """The robot that owns this wavelet."""
    return self._robot
  def _get_title(self):
    return self._title
  def _set_title(self, title):
    """Sets the title, queuing the operation and patching the root blip."""
    title = util.force_unicode(title)
    if title.find('\n') != -1:
      raise errors.Error('Wavelet title should not contain a newline ' +
          'character. Specified: ' + title)
    self._operation_queue.wavelet_set_title(self.wave_id, self.wavelet_id,
        title)
    self._title = title
    # Adjust the content of the root blip, if it is available in the context.
    # By convention the title is the first line of the root blip, so replace
    # everything up to the second newline.
    if self._root_blip:
      content = '\n'
      splits = self._root_blip._content.split('\n', 2)
      if len(splits) == 3:
        content += splits[2]
      self._root_blip._content = '\n' + title + content
  #: Returns or sets the wavelet's title.
  title = property(_get_title, _set_title,
                   doc='Get or set the title of the wavelet.')
  def _get_robot_address(self):
    return self._robot_address
  def _set_robot_address(self, address):
    # Write-once: the address may only be set a single time.
    if self._robot_address:
      raise errors.Error('robot address already set')
    self._robot_address = address
  robot_address = property(_get_robot_address, _set_robot_address,
                           doc='Get or set the address of the current robot.')
  @property
  def root_blip(self):
    """Returns this wavelet's root blip (may be None if not in context)."""
    return self._root_blip
  @property
  def blips(self):
    """Returns the blips for this wavelet."""
    return self._blips
  def get_operation_queue(self):
    """Returns the OperationQueue for this wavelet."""
    return self._operation_queue
  def serialize(self):
    """Return a dict of the wavelet properties."""
    return {'waveId': self._wave_id,
            'waveletId': self._wavelet_id,
            'creator': self._creator,
            'creationTime': self._creation_time,
            'dataDocuments': self._data_documents.serialize(),
            'lastModifiedTime': self._last_modified_time,
            'participants': self._participants.serialize(),
            'title': self._title,
            'blips': self._blips.serialize(),
            'rootBlipId': self._root_blip_id
           }
  def proxy_for(self, proxy_for_id):
    """Return a view on this wavelet that will proxy for the specified id.

    A shallow copy of the current wavelet is returned with the proxy_for_id
    set. Any modifications made to this copy will be done using the
    proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will
    be used.

    If the wavelet was retrieved using the Active Robot API, that is
    by fetch_wavelet, then the address of the robot must be added to the
    wavelet by setting wavelet.robot_address before calling proxy_for().
    """
    self.add_proxying_participant(proxy_for_id)
    operation_queue = self.get_operation_queue().proxy_for(proxy_for_id)
    res = Wavelet(json={},
                  blips={},
                  robot=self.robot,
                  operation_queue=operation_queue)
    res._wave_id = self._wave_id
    res._wavelet_id = self._wavelet_id
    res._creator = self._creator
    res._creation_time = self._creation_time
    res._data_documents = self._data_documents
    res._last_modified_time = self._last_modified_time
    res._participants = self._participants
    res._title = self._title
    res._raw_data = self._raw_data
    res._blips = self._blips
    res._root_blip = self._root_blip
    # NOTE(review): _tags and _robot_address are NOT copied onto the proxy
    # view, so res.tags is empty and res.robot_address is None — confirm
    # this is intentional.
    return res
  def add_proxying_participant(self, id):
    """Adds a proxying participant to the wave.

    Proxying participants are of the form robot+proxy@domain.com. This
    convenience method constructs this id and then calls participants.add.

    NOTE(review): the parameter name `id` shadows the builtin; kept for
    interface compatibility with keyword callers.
    """
    if not self.robot_address:
      raise errors.Error(
          'Need a robot address to add a proxying for participant')
    robotid, domain = self.robot_address.split('@', 1)
    # A '#version' suffix on the robot id, if present, must be preserved
    # after the '+proxy' part.
    if '#' in robotid:
      robotid, version = robotid.split('#')
    else:
      version = None
    # Replace any existing proxy part rather than stacking a second one.
    if '+' in robotid:
      newid = robotid.split('+', 1)[0] + '+' + id
    else:
      newid = robotid + '+' + id
    if version:
      newid += '#' + version
    newid += '@' + domain
    self.participants.add(newid)
  def submit_with(self, other_wavelet):
    """Submit this wavelet when the passed other wavelet is submitted.

    Wavelets constructed outside of the event callback need to
    be either explicitly submitted using robot.submit(wavelet) or be
    associated with a different wavelet that will be submitted or
    is part of the event callback.
    """
    other_wavelet._operation_queue.copy_operations(self._operation_queue)
    # Share the queue from now on so later operations ride along too.
    self._operation_queue = other_wavelet._operation_queue
  def reply(self, initial_content=None):
    """Replies to the conversation in this wavelet.

    Args:
      initial_content: If set, start with this (string) content.

    Returns:
      A transient version of the blip that contains the reply.
    """
    if not initial_content:
      initial_content = u'\n'
    initial_content = util.force_unicode(initial_content)
    blip_data = self._operation_queue.wavelet_append_blip(
        self.wave_id, self.wavelet_id, initial_content)
    instance = blip.Blip(blip_data, self._blips, self._operation_queue)
    self._blips._add(instance)
    return instance
  def delete(self, todelete):
    """Remove a blip from this wavelet.

    Args:
      todelete: either a blip or a blip id to be removed.
    """
    if isinstance(todelete, blip.Blip):
      blip_id = todelete.blip_id
    else:
      blip_id = todelete
    self._operation_queue.blip_delete(self.wave_id, self.wavelet_id, blip_id)
    self._blips._remove_with_id(blip_id)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elements are non-text bits living in blips like images, gadgets etc.
This module defines the Element class and the derived classes.
"""
import base64
import logging
import sys
import util
class Element(object):
  """Base class for non-text content embedded in a document.

  Elements are generally abstracted from the Robot: a robot can query the
  properties of an element but only interacts through the specific derived
  types. Properties are reachable both as attributes (image.url) and through
  the properties dictionary (image.properties['url']). Robots normally
  instantiate the derived classes rather than Element itself.
  """

  # INLINE_BLIP_TYPE is deliberately not a separate class: it should never be
  # instantiated, only used for introspection.
  INLINE_BLIP_TYPE = "INLINE_BLIP"

  def __init__(self, element_type, **properties):
    """Initializes self with the specified type and any properties.

    Args:
      element_type: string typed member of ELEMENT_TYPE
      properties: either keyword properties, or a single keyword
          `properties` holding a dictionary of them. Both of these work:
              Element(atype, prop1=val1, prop2=val2)
              Element(atype, properties={'prop1': val1, 'prop2': val2})
    """
    if len(properties) == 1 and 'properties' in properties:
      properties = properties['properties']
    self._type = element_type
    # While _operation_queue is None the element is unattached. Once a blip
    # acquires the element, the blip sets the queue so that changes to the
    # element are properly sent to the server.
    self._operation_queue = None
    self._properties = properties.copy()

  @property
  def type(self):
    """The type of this element."""
    return self._type

  @classmethod
  def from_json(cls, json):
    """Instantiates the appropriate Element subclass from a json dict."""
    etype = json['type']
    props = json['properties'].copy()
    element_class = ALL.get(etype)
    if element_class is None:
      # Unknown type: the server could be newer than this client, so fall
      # back to a generic Element.
      return Element(element_type=etype, properties=props)
    return element_class.from_props(props)

  def get(self, key, default=None):
    """Standard dict-style get on the property dictionary."""
    return self._properties.get(key, default)

  def __getattr__(self, key):
    # NOTE: raises KeyError (not AttributeError) for unknown properties;
    # existing callers depend on this behavior.
    return self._properties[key]

  def serialize(self):
    """Custom serializer that drops None-valued properties."""
    return util.serialize({'properties': util.non_none_dict(self._properties),
                           'type': self._type})
class Input(Element):
  """A single-line text input element."""

  class_type = 'INPUT'

  def __init__(self, name, value=''):
    super(Input, self).__init__(Input.class_type,
                                name=name,
                                value=value,
                                default_value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstructs an Input from a property dictionary."""
    return Input(props.get('name'), props.get('value'))
class Check(Element):
  """A checkbox element."""

  class_type = 'CHECK'

  def __init__(self, name, value=''):
    super(Check, self).__init__(Check.class_type,
                                name=name,
                                value=value,
                                default_value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a Check from a property dictionary."""
    return Check(props.get('name'), props.get('value'))
class Button(Element):
  """A clickable button element."""

  class_type = 'BUTTON'

  def __init__(self, name, value):
    super(Button, self).__init__(Button.class_type, name=name, value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a Button from a property dictionary."""
    return Button(props.get('name'), props.get('value'))
class Label(Element):
  """A label element tied to another named element."""

  class_type = 'LABEL'

  def __init__(self, label_for, caption):
    # The target element's name is stored as `name`, the caption as `value`.
    super(Label, self).__init__(Label.class_type, name=label_for,
                                value=caption)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a Label from a property dictionary."""
    return Label(props.get('name'), props.get('value'))
class RadioButton(Element):
  """A radio button element belonging to a named group."""

  class_type = 'RADIO_BUTTON'

  def __init__(self, name, group):
    # The group id is stored in the `value` property.
    super(RadioButton, self).__init__(RadioButton.class_type, name=name,
                                      value=group)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a RadioButton from a property dictionary."""
    return RadioButton(props.get('name'), props.get('value'))
class RadioButtonGroup(Element):
  """A group of radio buttons."""

  class_type = 'RADIO_BUTTON_GROUP'

  def __init__(self, name, value):
    super(RadioButtonGroup, self).__init__(RadioButtonGroup.class_type,
                                           name=name,
                                           value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a RadioButtonGroup from a property dictionary."""
    return RadioButtonGroup(props.get('name'), props.get('value'))
class Password(Element):
  """A password input element."""

  class_type = 'PASSWORD'

  def __init__(self, name, value):
    super(Password, self).__init__(Password.class_type, name=name,
                                   value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a Password from a property dictionary."""
    return Password(props.get('name'), props.get('value'))
class TextArea(Element):
  """A multi-line text area element."""

  class_type = 'TEXTAREA'

  def __init__(self, name, value):
    super(TextArea, self).__init__(TextArea.class_type, name=name,
                                   value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a TextArea from a property dictionary."""
    return TextArea(props.get('name'), props.get('value'))
class Line(Element):
  """A line element.

  Lines are represented in the blip text as newline characters; this
  element carries the line's presentation attributes.
  """

  class_type = 'LINE'

  # Possible line types:
  #: Designates line as H1, largest heading.
  TYPE_H1 = 'h1'
  #: Designates line as H2 heading.
  TYPE_H2 = 'h2'
  #: Designates line as H3 heading.
  TYPE_H3 = 'h3'
  #: Designates line as H4 heading.
  TYPE_H4 = 'h4'
  #: Designates line as H5, smallest heading.
  TYPE_H5 = 'h5'
  #: Designates line as a bulleted list item.
  TYPE_LI = 'li'

  # Possible values for align:
  #: Sets line alignment to left.
  ALIGN_LEFT = 'l'
  #: Sets line alignment to right.
  ALIGN_RIGHT = 'r'
  #: Sets line alignment to centered.
  ALIGN_CENTER = 'c'
  #: Sets line alignment to justified.
  ALIGN_JUSTIFIED = 'j'

  def __init__(self,
               line_type=None,
               indent=None,
               alignment=None,
               direction=None):
    # Note the wire-format property name is camelCase (lineType).
    super(Line, self).__init__(Line.class_type,
                               lineType=line_type,
                               indent=indent,
                               alignment=alignment,
                               direction=direction)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a Line from a property dictionary."""
    return Line(line_type=props.get('lineType'),
                indent=props.get('indent'),
                alignment=props.get('alignment'),
                direction=props.get('direction'))
class Gadget(Element):
  """A gadget element, identified by its url plus arbitrary state keys."""

  class_type = 'GADGET'

  def __init__(self, url, props=None):
    if props is None:
      props = {}
    # NOTE: mutates the caller-supplied dict by inserting the url key.
    props['url'] = url
    super(Gadget, self).__init__(Gadget.class_type, properties=props)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a Gadget from a property dictionary."""
    return Gadget(props.get('url'), props)

  def serialize(self):
    """Serializer override: gadget state may legitimately contain None."""
    return {'properties': self._properties, 'type': self._type}

  def keys(self):
    """Returns the gadget's state keys (everything except the url)."""
    return [key for key in self._properties if key != 'url']
class Installer(Element):
  """An extension-installer element pointing at a manifest."""

  class_type = 'INSTALLER'

  def __init__(self, manifest):
    super(Installer, self).__init__(Installer.class_type, manifest=manifest)

  @classmethod
  def from_props(cls, props):
    """Reconstructs an Installer from a property dictionary."""
    return Installer(props.get('manifest'))
class Image(Element):
  """An image element, referenced by url or attachment id."""

  class_type = 'IMAGE'

  def __init__(self, url='', width=None, height=None,
               attachmentId=None, caption=None):
    super(Image, self).__init__(Image.class_type, url=url, width=width,
        height=height, attachmentId=attachmentId, caption=caption)

  @classmethod
  def from_props(cls, props):
    """Reconstructs an Image from a property dictionary.

    Keys are coerced to byte strings because Python 2 requires keyword
    argument names to be str, not unicode.
    """
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    # apply() is deprecated since Python 2.3; the ** call syntax is the
    # direct equivalent of apply(Image, [], props).
    return Image(**props)
class Attachment(Element):
  """An attachment element.

  To create a new attachment, caption and data are needed.
  mimeType, attachmentId and attachmentUrl are sent via events.
  """

  class_type = 'ATTACHMENT'

  def __init__(self, caption=None, data=None, mimeType=None, attachmentId=None,
               attachmentUrl=None):
    # BUG FIX: the raw data used to be stored as a CLASS attribute
    # (Attachment.originalData = data), so every Attachment shared one
    # value and constructing a second attachment clobbered the data of the
    # first. Store it per instance instead.
    self._original_data = data
    super(Attachment, self).__init__(Attachment.class_type, caption=caption,
        data=data, mimeType=mimeType, attachmentId=attachmentId,
        attachmentUrl=attachmentUrl)

  def __getattr__(self, key):
    # 'data' resolves to the raw (unencoded) bytes, even after serialize()
    # has base64-encoded the property dictionary entry.
    if key == 'data':
      return self._original_data
    return super(Attachment, self).__getattr__(key)

  @classmethod
  def from_props(cls, props):
    """Reconstructs an Attachment from a property dictionary.

    Keys are coerced to byte strings because Python 2 requires keyword
    argument names to be str, not unicode.
    """
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    return Attachment(**props)

  def serialize(self):
    """Serializes the attachment into JSON.

    The attachment data is base64 encoded in the property dictionary; the
    raw bytes stay accessible through self.data.
    """
    if self.data:
      self._properties['data'] = base64.encodestring(self.data)
    return super(Attachment, self).serialize()
def is_element(cls):
  """Returns whether the passed object is a concrete Element subclass.

  Only classes that define a ``class_type`` (the wire-format type name)
  participate in the ALL registry; the Element base class itself does not.
  """
  try:
    # Removed a dead local (`h = hasattr(...)`) that duplicated the return
    # expression.
    return issubclass(cls, Element) and hasattr(cls, 'class_type')
  except TypeError:
    # issubclass raises TypeError when cls is not a class at all.
    return False
# Registry mapping wire-format type names (class_type) to the Element
# subclass that handles them; Element.from_json uses it for dispatch.
ALL = dict([(item.class_type, item) for item in globals().copy().values()
            if is_element(item)])
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to run all unit tests in this package."""
import blip_test
import element_test
import module_test_runner
import ops_test
import robot_test
import util_test
import wavelet_test
def RunUnitTests():
  """Runs all registered unit tests."""
  test_runner = module_test_runner.ModuleTestRunner()
  test_runner.modules = [
      blip_test,
      # BUG FIX: element_test was imported at the top of the file but
      # missing from this list, so the element tests never ran.
      element_test,
      ops_test,
      robot_test,
      util_test,
      wavelet_test,
  ]
  test_runner.RunAllTests()
# Script entry point: run the whole suite when invoked directly.
if __name__ == "__main__":
  RunUnitTests()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to run wave robots on app engine."""
import logging
import sys
import events
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class CapabilitiesHandler(webapp.RequestHandler):
  """Serves a robot's capabilities document.

  Instances are created by a factory lambda (see create_robot_webapp), so
  unlike a stock webapp handler the constructor takes arguments.
  """

  def __init__(self, method, contenttype):
    """Remembers the callable producing the body and its content type."""
    self._method = method
    self._contenttype = contenttype

  def get(self):
    """Handles HTTP GET by emitting the generated document."""
    self.response.headers['Content-Type'] = self._contenttype
    self.response.out.write(self._method())
class ProfileHandler(webapp.RequestHandler):
  """Serves a robot's profile document.

  Instances are created by a factory lambda (see create_robot_webapp), so
  unlike a stock webapp handler the constructor takes arguments.
  """

  def __init__(self, method, contenttype):
    """Remembers the callable producing the profile and its content type."""
    self._method = method
    self._contenttype = contenttype

  def get(self):
    """Handles HTTP GET, proxying for a participant when name is given."""
    self.response.headers['Content-Type'] = self._contenttype
    name = self.request.get('name')
    if name:
      # Respond with the proxied profile for the named participant.
      self.response.out.write(self._method(name))
    else:
      self.response.out.write(self._method())
class RobotEventHandler(webapp.RequestHandler):
  """Handler for the dispatching of events to various handlers to a robot.

  This handler only responds to post events with a JSON post body. Its
  primary task is to separate out the context data from the events in the
  post body and dispatch all events in order. Once all events have been
  dispatched it serializes the context data and its associated operations
  as a response.
  """

  def __init__(self, robot):
    """Initializes self with a specific robot."""
    self._robot = robot

  def get(self):
    """Handles GET for debugging: events are passed via the query string.

    Useful for debugging, but event bundles tend to be rather big and
    often won't fit in a URL for more complex requests; POST is the real
    entry point.
    """
    events_json = self.request.get('events')
    if events_json:
      # BUG FIX: this used to assign the imported `events` MODULE to the
      # request body instead of the query-string value, so debug GETs
      # never worked.
      self.request.body = events_json
      self.post()

  def post(self):
    """Handles HTTP POST requests carrying a JSON event bundle."""
    json_body = self.request.body
    if not json_body:
      # TODO(davidbyttow): Log error?
      return
    # Redirect stdout to stderr while executing handlers. This way, any
    # stray "print" statements in bot code go to the error logs instead of
    # breaking the JSON response sent to the HTTP channel.
    saved_stdout, sys.stdout = sys.stdout, sys.stderr
    try:
      json_body = unicode(json_body, 'utf8')
      logging.info('Incoming: %s', json_body)
      json_response = self._robot.process_events(json_body)
      logging.info('Outgoing: %s', json_response)
    finally:
      # Restore stdout even if a handler raises, so later requests are not
      # left with a redirected stream.
      sys.stdout = saved_stdout
    # Build the response.
    self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
    self.response.out.write(json_response.encode('utf-8'))
def operation_error_handler(event, wavelet):
  """Default operation error handler, logging what went wrong."""
  if not isinstance(event, events.OperationError):
    return
  logging.error('Previously operation failed: id=%s, message: %s',
                event.operation_id, event.error_message)
def appengine_post(url, data, headers):
  """POSTs data via App Engine's urlfetch.

  Returns:
    A (status_code, content) tuple from the fetch result.
  """
  result = urlfetch.fetch(url=url,
                          payload=data,
                          method='POST',
                          headers=headers,
                          deadline=10)
  return result.status_code, result.content
class RobotVerifyTokenHandler(webapp.RequestHandler):
  """Handler for the token_verify request."""

  def __init__(self, robot):
    """Initializes self with a specific robot."""
    self._robot = robot

  def get(self):
    """Echoes the verification token, checking the st value when set."""
    token, st = self._robot.get_verification_token_info()
    logging.info('token=%s' % token)
    if token is None:
      self.error(404)
      self.response.out.write('No token set')
      return
    if st is not None and self.request.get('st') != st:
      self.response.out.write('Invalid st value passed %s != %s' % (st, self.request.get('st')))
      return
    self.response.out.write(token)
def create_robot_webapp(robot, debug=False, extra_handlers=None):
  """Returns an instance of webapp.WSGIApplication with robot handlers."""
  # App Engine instantiates handler classes with no arguments, so each
  # route maps to a lambda that closes over the robot.
  handlers = [
      ('.*/_wave/capabilities.xml',
       lambda: CapabilitiesHandler(robot.capabilities_xml,
                                   'application/xml')),
      ('.*/_wave/robot/profile',
       lambda: ProfileHandler(robot.profile_json,
                              'application/json')),
      ('.*/_wave/robot/jsonrpc',
       lambda: RobotEventHandler(robot)),
      ('.*/_wave/verify_token',
       lambda: RobotVerifyTokenHandler(robot)),
  ]
  if extra_handlers:
    handlers += extra_handlers
  return webapp.WSGIApplication(handlers, debug=debug)
def run(robot, debug=False, log_errors=True, extra_handlers=None):
  """Sets up the webapp handlers for this robot and starts listening.

  A robot is typically set up in three steps: instantiate and define the
  robot, register the handlers it is interested in, then call run() to
  install the webapp routes. For example:

    robot = Robot('Terminator',
                  image_url='http://www.sky.net/models/t800.png',
                  profile_url='http://www.sky.net/models/t800.html')
    robot.register_handler(WAVELET_PARTICIPANTS_CHANGED, KillParticipant)
    run(robot)

  Args:
    robot: the robot to run. It is modified to use App Engine's urlfetch
        for posting http.
    debug: optional; passed through to the webapp application to determine
        whether it should show debug info. Defaults to False.
    log_errors: optional; when True (the default) a handler is installed
        that logs OperationError events via App Engine logging.
    extra_handlers: optional list of tuples passed to the webapp to install
        more handlers, e.g. [('/about', AboutHandler)].
  """
  if log_errors:
    robot.register_handler(events.OperationError, operation_error_handler)
  # Route the robot's outgoing HTTP posts through urlfetch.
  robot.http_post = appengine_post
  run_wsgi_app(create_robot_webapp(robot, debug, extra_handlers))
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the blip module."""
import unittest
import blip
import element
import ops
import simplejson
# Canonical blip payload used as the fixture base for every test; individual
# tests override fields via new_blip(**overrides). The content starts with a
# newline (root-blip convention) and carries a gadget element at offset 14.
TEST_BLIP_DATA = {
    'childBlipIds': [],
    'content': '\nhello world!\nanother line',
    'contributors': ['robot@test.com', 'user@test.com'],
    'creator': 'user@test.com',
    'lastModifiedTime': 1000,
    'parentBlipId': None,
    'annotations': [{'range': {'start': 2, 'end': 3},
                     'name': 'key', 'value': 'val'}],
    'waveId': 'test.com!w+g3h3im',
    'waveletId': 'test.com!root+conv',
    'elements':{'14':{'type':'GADGET','properties':{'url':'http://a/b.xml'}}},
}
# Blip ids used by the tests below.
CHILD_BLIP_ID = 'b+42'
ROOT_BLIP_ID = 'b+43'
class TestBlip(unittest.TestCase):
"""Tests the primary data structures for the wave model."""
def assertBlipStartswith(self, expected, totest):
actual = totest.text[:len(expected)]
self.assertEquals(expected, actual)
def new_blip(self, **args):
"""Create a blip for testing."""
data = TEST_BLIP_DATA.copy()
data.update(args)
res = blip.Blip(data, self.all_blips, self.operation_queue)
self.all_blips[res.blip_id] = res
return res
def setUp(self):
self.all_blips = {}
self.operation_queue = ops.OperationQueue()
  def testBlipProperties(self):
    """Checks that every JSON-derived blip accessor exposes fixture values."""
    root = self.new_blip(blipId=ROOT_BLIP_ID,
                         childBlipIds=[CHILD_BLIP_ID])
    child = self.new_blip(blipId=CHILD_BLIP_ID,
                          parentBlipId=ROOT_BLIP_ID)
    self.assertEquals(ROOT_BLIP_ID, root.blip_id)
    self.assertEquals(set([CHILD_BLIP_ID]), root.child_blip_ids)
    self.assertEquals(set(TEST_BLIP_DATA['contributors']), root.contributors)
    self.assertEquals(TEST_BLIP_DATA['creator'], root.creator)
    self.assertEquals(TEST_BLIP_DATA['content'], root.text)
    self.assertEquals(TEST_BLIP_DATA['lastModifiedTime'],
                      root.last_modified_time)
    self.assertEquals(TEST_BLIP_DATA['parentBlipId'], root.parent_blip_id)
    self.assertEquals(TEST_BLIP_DATA['waveId'], root.wave_id)
    self.assertEquals(TEST_BLIP_DATA['waveletId'], root.wavelet_id)
    # Integer indexing returns the character or element at that offset.
    self.assertEquals(TEST_BLIP_DATA['content'][3], root[3])
    self.assertEquals(element.Gadget.class_type, root[14].type)
    self.assertEquals('http://a/b.xml', root[14].url)
    # .text indexing bypasses elements and sees raw characters.
    self.assertEquals('a', root.text[14])
    self.assertEquals(len(TEST_BLIP_DATA['content']), len(root))
    self.assertTrue(root.is_root())
    self.assertFalse(child.is_root())
    self.assertEquals(root, child.parent_blip)
  def testBlipSerialize(self):
    """Round-trips a blip through serialize() and checks field fidelity."""
    root = self.new_blip(blipId=ROOT_BLIP_ID,
                         childBlipIds=[CHILD_BLIP_ID])
    serialized = root.serialize()
    unserialized = blip.Blip(serialized, self.all_blips, self.operation_queue)
    self.assertEquals(root.blip_id, unserialized.blip_id)
    self.assertEquals(root.child_blip_ids, unserialized.child_blip_ids)
    self.assertEquals(root.contributors, unserialized.contributors)
    self.assertEquals(root.creator, unserialized.creator)
    self.assertEquals(root.text, unserialized.text)
    self.assertEquals(root.last_modified_time, unserialized.last_modified_time)
    self.assertEquals(root.parent_blip_id, unserialized.parent_blip_id)
    self.assertEquals(root.wave_id, unserialized.wave_id)
    self.assertEquals(root.wavelet_id, unserialized.wavelet_id)
    self.assertTrue(unserialized.is_root())
  def testDocumentOperations(self):
    """Exercises find/first/replace/range-delete/insert_after/at-insert."""
    # NOTE(review): the local name shadows the imported `blip` module
    # (pre-existing throughout this test class).
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    newlines = [x for x in blip.find('\n')]
    self.assertEquals(2, len(newlines))
    blip.first('world').replace('jupiter')
    bits = blip.text.split('\n')
    self.assertEquals(3, len(bits))
    self.assertEquals('hello jupiter!', bits[1])
    blip.range(2, 5).delete()
    self.assertBlipStartswith('\nho jupiter', blip)
    blip.first('ho').insert_after('la')
    self.assertBlipStartswith('\nhola jupiter', blip)
    blip.at(3).insert(' ')
    self.assertBlipStartswith('\nho la jupiter', blip)
  def testElementHandling(self):
    """Exercises element insert/find/delete and element offset tracking."""
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    url = 'http://www.test.com/image.png'
    org_len = len(blip)
    blip.append(element.Image(url=url))
    elems = [elem for elem in blip.find(element.Image, url=url)]
    self.assertEquals(1, len(elems))
    elem = elems[0]
    self.assertTrue(isinstance(elem, element.Image))
    # Inserting text before the element must shift its offset accordingly.
    blip.at(1).insert('twelve chars')
    self.assertTrue(blip.text.startswith('\ntwelve charshello'))
    elem = blip[org_len + 12].value()
    self.assertTrue(isinstance(elem, element.Image))
    # Deleting text before the element must shift it back.
    blip.first('twelve ').delete()
    self.assertTrue(blip.text.startswith('\nchars'))
    elem = blip[org_len + 12 - len('twelve ')].value()
    self.assertTrue(isinstance(elem, element.Image))
    # Replacing text with an element leaves a placeholder character.
    blip.first('chars').replace(element.Image(url=url))
    elems = [elem for elem in blip.find(element.Image, url=url)]
    self.assertEquals(2, len(elems))
    self.assertTrue(blip.text.startswith('\n hello'))
    elem = blip[1].value()
    self.assertTrue(isinstance(elem, element.Image))
  def testAnnotationHandling(self):
    """Covers annotate/clear_annotation extend, clip, split and names()."""
    key = 'style/fontWeight'
    def get_bold():
      # Helper: the first 'bold'-valued annotation under `key`, or None.
      for an in blip.annotations[key]:
        if an.value == 'bold':
          return an
      return None
    json = ('[{"range":{"start":3,"end":6},"name":"%s","value":"bold"}]'
            % key)
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json))
    self.assertEquals(1, len(blip.annotations))
    self.assertNotEqual(None, get_bold().value)
    self.assertTrue(key in blip.annotations)
    # extend the bold annotation by adding:
    blip.range(5, 8).annotate(key, 'bold')
    self.assertEquals(1, len(blip.annotations))
    self.assertEquals(8, get_bold().end)
    # clip by adding a same keyed:
    blip[4:12].annotate(key, 'italic')
    self.assertEquals(2, len(blip.annotations[key]))
    self.assertEquals(4, get_bold().end)
    # now split the italic one:
    blip.range(6, 7).clear_annotation(key)
    self.assertEquals(3, len(blip.annotations[key]))
    # test names and iteration
    self.assertEquals(1, len(blip.annotations.names()))
    self.assertEquals(3, len([x for x in blip.annotations]))
    blip[3: 5].annotate('foo', 'bar')
    self.assertEquals(2, len(blip.annotations.names()))
    self.assertEquals(4, len([x for x in blip.annotations]))
    blip[3: 5].clear_annotation('foo')
    # clear the whole thing
    blip.all().clear_annotation(key)
    # getting to the key should now throw an exception
    self.assertRaises(KeyError, blip.annotations.__getitem__, key)
  def testBlipOperations(self):
    """Checks that reply() and insert_inline_blip() register new blips."""
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    self.assertEquals(1, len(self.all_blips))
    otherblip = blip.reply()
    otherblip.append('hello world')
    self.assertEquals('hello world', otherblip.text)
    self.assertEquals(blip.blip_id, otherblip.parent_blip_id)
    self.assertEquals(2, len(self.all_blips))
    inline = blip.insert_inline_blip(3)
    self.assertEquals(blip.blip_id, inline.parent_blip_id)
    self.assertEquals(3, len(self.all_blips))
def testInsertInlineBlipCantInsertAtTheBeginning(self):
blip = self.new_blip(blipId=ROOT_BLIP_ID)
self.assertEquals(1, len(self.all_blips))
self.assertRaises(IndexError, blip.insert_inline_blip, 0)
self.assertEquals(1, len(self.all_blips))
def testDocumentModify(self):
blip = self.new_blip(blipId=ROOT_BLIP_ID)
blip.all().replace('a text with text and then some text')
blip[7].insert('text ')
blip.all('text').replace('thing')
self.assertEquals('a thing thing with thing and then some thing',
blip.text)
def testIteration(self):
blip = self.new_blip(blipId=ROOT_BLIP_ID)
blip.all().replace('aaa 012 aaa 345 aaa 322')
count = 0
prev = -1
for start, end in blip.all('aaa'):
count += 1
self.assertTrue(prev < start)
prev = start
self.assertEquals(3, count)
  def testBlipRefValue(self):
    """Exercises del/slice-assign/append and element update via blip refs."""
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    content = blip.text
    # Mirror each mutation on a plain string and compare.
    content = content[:4] + content[5:]
    del blip[4]
    self.assertEquals(content, blip.text)
    content = content[:2] + content[3:]
    del blip[2:3]
    self.assertEquals(content, blip.text)
    blip[2:3] = 'bike'
    content = content[:2] + 'bike' + content[3:]
    self.assertEquals(content, blip.text)
    url = 'http://www.test.com/image.png'
    blip.append(element.Image(url=url))
    self.assertEqual(url, blip.first(element.Image).url)
    url2 = 'http://www.test.com/another.png'
    blip[-1].update_element({'url': url2})
    self.assertEqual(url2, blip.first(element.Image).url)
    self.assertTrue(blip[3:5] == blip.text[3:5])
    blip.append('geheim')
    self.assertTrue(blip.first('geheim'))
    self.assertFalse(blip.first(element.Button))
    blip.append(element.Button(name='test1', value='Click'))
    button = blip.first(element.Button)
    button.update_element({'name': 'test2'})
    self.assertEqual('test2', button.name)
def testReplace(self):
  """A replace whose pattern has no match leaves the content untouched."""
  doc = self.new_blip(blipId=ROOT_BLIP_ID)
  doc.all().replace('\nxxxx')
  doc.all('yyy').replace('zzz')
  self.assertEqual('\nxxxx', doc.text)
def testDeleteRangeThatSpansAcrossAnnotationEndPoint(self):
  """Deleting past an annotation's end clips the annotation at the cut."""
  json = ('[{"range":{"start":1,"end":3},"name":"style","value":"bold"}]')
  blip = self.new_blip(blipId=ROOT_BLIP_ID,
                       annotations=simplejson.loads(json),
                       content='\nFoo bar.')
  # Delete [2, 4): overlaps the annotation's tail [1, 3).
  blip.range(2, 4).delete()
  self.assertEqual('\nF bar.', blip.text)
  self.assertEqual(1, blip.annotations['style'][0].start)
  self.assertEqual(2, blip.annotations['style'][0].end)
def testInsertBeforeAnnotationStartPoint(self):
  """Inserting at an annotation's start shifts the whole annotation right."""
  json = ('[{"range":{"start":4,"end":9},"name":"style","value":"bold"}]')
  blip = self.new_blip(blipId=ROOT_BLIP_ID,
                       annotations=simplejson.loads(json),
                       content='\nFoo bar.')
  # 5 characters inserted at offset 4 -> annotation shifts by 5.
  blip.at(4).insert('d and')
  self.assertEqual('\nFood and bar.', blip.text)
  self.assertEqual(9, blip.annotations['style'][0].start)
  self.assertEqual(14, blip.annotations['style'][0].end)
def testDeleteRangeInsideAnnotation(self):
  """Deleting inside an annotation shrinks the annotation by that amount."""
  json = ('[{"range":{"start":1,"end":5},"name":"style","value":"bold"}]')
  blip = self.new_blip(blipId=ROOT_BLIP_ID,
                       annotations=simplejson.loads(json),
                       content='\nFoo bar.')
  # Delete 2 characters fully contained in [1, 5) -> end moves 5 -> 3.
  blip.range(2, 4).delete()
  self.assertEqual('\nF bar.', blip.text)
  self.assertEqual(1, blip.annotations['style'][0].start)
  self.assertEqual(3, blip.annotations['style'][0].end)
def testReplaceInsideAnnotation(self):
  """Replacing inside an annotation grows/shrinks it by the size delta."""
  json = ('[{"range":{"start":1,"end":5},"name":"style","value":"bold"}]')
  blip = self.new_blip(blipId=ROOT_BLIP_ID,
                       annotations=simplejson.loads(json),
                       content='\nFoo bar.')
  # Replace 2 chars with 3 -> annotation end moves 5 -> 6.
  blip.range(2, 4).replace('ooo')
  self.assertEqual('\nFooo bar.', blip.text)
  self.assertEqual(1, blip.annotations['style'][0].start)
  self.assertEqual(6, blip.annotations['style'][0].end)
  # Replace 3 chars with 1 -> annotation end moves 6 -> 4.
  blip.range(2, 5).replace('o')
  self.assertEqual('\nFo bar.', blip.text)
  self.assertEqual(1, blip.annotations['style'][0].start)
  self.assertEqual(4, blip.annotations['style'][0].end)
def testReplaceSpanAnnotation(self):
  """Replacing a range spanning past an annotation clips the annotation."""
  json = ('[{"range":{"start":1,"end":4},"name":"style","value":"bold"}]')
  blip = self.new_blip(blipId=ROOT_BLIP_ID,
                       annotations=simplejson.loads(json),
                       content='\nFoo bar.')
  # Delete-by-replace of [2, 9): the annotation survives only up to the cut.
  blip.range(2, 9).replace('')
  self.assertEqual('\nF', blip.text)
  self.assertEqual(1, blip.annotations['style'][0].start)
  self.assertEqual(2, blip.annotations['style'][0].end)
def testSearchWithNoMatchShouldNotGenerateOperation(self):
  """A replace that matches nothing must not queue any operation."""
  doc = self.new_blip(blipId=ROOT_BLIP_ID)
  # Sanity check: the pattern really does not occur in the content.
  self.assertEqual(-1, doc.text.find(':('))
  self.assertEqual(0, len(self.operation_queue))
  doc.all(':(').replace(':)')
  self.assertEqual(0, len(self.operation_queue))
def testBlipsRemoveWithId(self):
  """_remove_with_id drops the blip and unlinks it from its parent."""
  blip_dict = {
      ROOT_BLIP_ID: self.new_blip(blipId=ROOT_BLIP_ID,
                                  childBlipIds=[CHILD_BLIP_ID]),
      CHILD_BLIP_ID: self.new_blip(blipId=CHILD_BLIP_ID,
                                   parentBlipId=ROOT_BLIP_ID)
  }
  blips = blip.Blips(blip_dict)
  blips._remove_with_id(CHILD_BLIP_ID)
  self.assertEqual(1, len(blips))
  # The parent's child list must no longer reference the removed blip.
  self.assertEqual(0, len(blips[ROOT_BLIP_ID].child_blip_ids))
def testAppendMarkup(self):
  """append_markup queues exactly one op and appends the parsed text."""
  doc = self.new_blip(blipId=ROOT_BLIP_ID, content='\nFoo bar.')
  doc.append_markup('<p><span>markup<span> content</p>')
  self.assertEqual(1, len(self.operation_queue))
  self.assertEqual('\nFoo bar.\nmarkup content', doc.text)
def testBundledAnnotations(self):
  """append() can carry annotations that apply to the appended text."""
  blip = self.new_blip(blipId=ROOT_BLIP_ID, content='\nFoo bar.')
  blip.append('not bold')
  blip.append('bold', bundled_annotations=[('style/fontWeight', 'bold')])
  # NOTE(review): 2 apparently counts the annotation types present here
  # (the fixture's plus the bundled one) — confirm against new_blip.
  self.assertEqual(2, len(blip.annotations))
  self.assertEqual('bold', blip.annotations['style/fontWeight'][0].value)
def testInlineBlipOffset(self):
  """A child referenced by an INLINE_BLIP element reports that offset."""
  offset = 14
  # Root blip holds an inline-blip element at `offset` pointing at the child.
  self.new_blip(blipId=ROOT_BLIP_ID,
                childBlipIds=[CHILD_BLIP_ID],
                elements={str(offset):
                              {'type': element.Element.INLINE_BLIP_TYPE,
                               'properties': {'id': CHILD_BLIP_ID}}})
  child = self.new_blip(blipId=CHILD_BLIP_ID,
                        parentBlipId=ROOT_BLIP_ID)
  self.assertEqual(offset, child.inline_blip_offset)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for operations that can be applied to the server.
Contains classes and utilities for creating operations that are to be
applied on the server.
"""
import errors
import random
import util
import sys
# Version of the wire protocol spoken by this client.
PROTOCOL_VERSION = '0.21'
# Operation Types (wire-format method names understood by the server).
WAVELET_APPEND_BLIP = 'wavelet.appendBlip'
WAVELET_SET_TITLE = 'wavelet.setTitle'
WAVELET_ADD_PARTICIPANT = 'wavelet.participant.add'
WAVELET_DATADOC_SET = 'wavelet.datadoc.set'
WAVELET_MODIFY_TAG = 'wavelet.modifyTag'
WAVELET_MODIFY_PARTICIPANT_ROLE = 'wavelet.modifyParticipantRole'
BLIP_CREATE_CHILD = 'blip.createChild'
BLIP_DELETE = 'blip.delete'
DOCUMENT_APPEND_MARKUP = 'document.appendMarkup'
DOCUMENT_INLINE_BLIP_INSERT = 'document.inlineBlip.insert'
DOCUMENT_MODIFY = 'document.modify'
ROBOT_CREATE_WAVELET = 'robot.createWavelet'
ROBOT_FETCH_WAVE = 'robot.fetchWave'
ROBOT_NOTIFY_CAPABILITIES_HASH = 'robot.notifyCapabilitiesHash'
class Operation(object):
  """A single generic operation destined for the server.

  The operation's payload is type dependent and carried in `params`.
  Using this class directly does not update any local blip state: for
  example, constructing a 'delete blip' operation will not remove the
  blip from the local context for this session. Prefer the OpBased model
  classes for that.
  """

  def __init__(self, method, opid, params):
    """Initializes this operation with contextual data.

    Args:
      method: Method to call or type of operation.
      opid: The id of the operation. Any callbacks will refer to these.
      params: An operation type dependent dictionary.
    """
    self.method = method
    self.id = opid
    self.params = params

  def __str__(self):
    return '%s[%s]%s' % (self.method, self.id, str(self.params))

  def set_param(self, param, value):
    """Sets a parameter and returns self for chaining."""
    self.params[param] = value
    return self

  def serialize(self, method_prefix=''):
    """Serialize the operation.

    Args:
      method_prefix: prefixed for each method name to allow for specifying
        a namespace.

    Returns:
      a dict representation of the operation.
    """
    prefix = method_prefix
    if prefix and not prefix.endswith('.'):
      prefix += '.'
    result = {}
    result['method'] = prefix + self.method
    result['id'] = self.id
    result['params'] = util.serialize(self.params)
    return result

  def set_optional(self, param, value):
    """Sets a parameter, skipping None and empty-string values.

    If value is None or "", this is a no op. Otherwise it delegates to
    set_param.
    """
    if value is None or value == '':
      return self
    return self.set_param(param, value)
class OperationQueue(object):
  """Wraps the queuing of operations using easily callable functions.

  The operation queue wraps single operations as functions and queues the
  resulting operations in-order. Typically there shouldn't be a need to
  call this directly unless operations are needed on entities outside
  of the scope of the robot. For example, to modify a blip that
  does not exist in the current context, you might specify the wave, wavelet
  and blip id to generate an operation.

  Any calls to this will not be reflected in the robot in any way.
  For example, calling wavelet_append_blip will not result in a new blip
  being added to the robot, only an operation to be applied on the
  server.
  """

  # Some class global counters:
  # Shared across all instances so operation ids stay unique per process.
  _next_operation_id = 1

  def __init__(self, proxy_for_id=None):
    # Ordered list of queued Operation objects.
    self.__pending = []
    # Sent with every serialize() in the leading notify operation.
    self._capability_hash = 0
    # When set, stamped onto every new operation as 'proxyingFor'.
    self._proxy_for_id = proxy_for_id

  def _new_blipdata(self, wave_id, wavelet_id, initial_content='',
                    parent_blip_id=None):
    """Creates JSON of the blip used for this session.

    The blip id is a temporary placeholder; the server assigns the real id.
    """
    temp_blip_id = 'TBD_%s_%s' % (wavelet_id,
                                  hex(random.randint(0, sys.maxint)))
    return {'waveId': wave_id,
            'waveletId': wavelet_id,
            'blipId': temp_blip_id,
            'content': initial_content,
            'parentBlipId': parent_blip_id}

  def _new_waveletdata(self, domain, participants):
    """Creates an ephemeral WaveletData instance used for this session.

    Args:
      domain: the domain to create the data for.
      participants: participants initially on the wavelet.

    Returns:
      Blipdata (for the rootblip), WaveletData.
    """
    wave_id = domain + '!TBD_%s' % hex(random.randint(0, sys.maxint))
    wavelet_id = domain + '!conv+root'
    root_blip_data = self._new_blipdata(wave_id, wavelet_id)
    participants = set(participants)
    wavelet_data = {'waveId': wave_id,
                    'waveletId': wavelet_id,
                    'rootBlipId': root_blip_data['blipId'],
                    'participants': participants}
    return root_blip_data, wavelet_data

  def __len__(self):
    return len(self.__pending)

  def __iter__(self):
    return self.__pending.__iter__()

  def clear(self):
    """Drops all pending operations."""
    self.__pending = []

  def proxy_for(self, proxy):
    """Return a view of this operation queue with the proxying for set to proxy.

    This method returns a new instance of an operation queue that shares the
    operation list, but has a different proxying_for_id set so the robot using
    this new queue will send out operations with the proxying_for field set.
    """
    res = OperationQueue()
    # Share the pending list (not a copy) so both views queue into one stream.
    res.__pending = self.__pending
    res._capability_hash = self._capability_hash
    res._proxy_for_id = proxy
    return res

  def set_capability_hash(self, capability_hash):
    """Sets the hash reported in the leading notify operation."""
    self._capability_hash = capability_hash

  def serialize(self):
    """Serialize the queue, prepending the capabilities-hash notification."""
    first = Operation(ROBOT_NOTIFY_CAPABILITIES_HASH,
                      '0',
                      {'capabilitiesHash': self._capability_hash,
                       'protocolVersion': PROTOCOL_VERSION})
    operations = [first] + self.__pending
    res = util.serialize(operations)
    return res

  def copy_operations(self, other_queue):
    """Copy the pending operations from other_queue into this one."""
    for op in other_queue:
      self.__pending.append(op)

  def new_operation(self, method, wave_id, wavelet_id, props=None, **kwprops):
    """Creates and adds a new operation to the operation list."""
    if props is None:
      props = {}
    props.update(kwprops)
    props['waveId'] = wave_id
    props['waveletId'] = wavelet_id
    if self._proxy_for_id:
      props['proxyingFor'] = self._proxy_for_id
    operation = Operation(method,
                          'op%s' % OperationQueue._next_operation_id,
                          props)
    self.__pending.append(operation)
    OperationQueue._next_operation_id += 1
    return operation

  def wavelet_append_blip(self, wave_id, wavelet_id, initial_content=''):
    """Appends a blip to a wavelet.

    Args:
      wave_id: The wave id owning the containing wavelet.
      wavelet_id: The wavelet id that this blip should be appended to.
      initial_content: optionally the content to start with

    Returns:
      JSON representing the information of the new blip.
    """
    blip_data = self._new_blipdata(wave_id, wavelet_id, initial_content)
    self.new_operation(WAVELET_APPEND_BLIP, wave_id,
                       wavelet_id, blipData=blip_data)
    return blip_data

  def wavelet_add_participant(self, wave_id, wavelet_id, participant_id):
    """Adds a participant to a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      participant_id: Id of the participant to add.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_ADD_PARTICIPANT, wave_id, wavelet_id,
                              participantId=participant_id)

  def wavelet_datadoc_set(self, wave_id, wavelet_id, name, data):
    """Sets a key/value pair on the data document of a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      name: The key name for this data.
      data: The value of the data to set.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_DATADOC_SET, wave_id, wavelet_id,
                              datadocName=name, datadocValue=data)

  def robot_create_wavelet(self, domain, participants=None, message=''):
    """Creates a new wavelet.

    Args:
      domain: the domain to create the wave in
      participants: initial participants on this wavelet or None if none
      message: an optional payload that is returned with the corresponding
        event.

    Returns:
      data for the root_blip, wavelet
    """
    if participants is None:
      participants = []
    blip_data, wavelet_data = self._new_waveletdata(domain, participants)
    op = self.new_operation(ROBOT_CREATE_WAVELET,
                            wave_id=wavelet_data['waveId'],
                            wavelet_id=wavelet_data['waveletId'],
                            waveletData=wavelet_data)
    op.set_optional('message', message)
    return blip_data, wavelet_data

  def robot_fetch_wave(self, wave_id, wavelet_id):
    """Requests a snapshot of the specified wave.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(ROBOT_FETCH_WAVE, wave_id, wavelet_id)

  def wavelet_set_title(self, wave_id, wavelet_id, title):
    """Sets the title of a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      title: The title to set.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_SET_TITLE, wave_id, wavelet_id,
                              waveletTitle=title)

  def wavelet_modify_participant_role(
      self, wave_id, wavelet_id, participant_id, role):
    """Modify the role of a participant on a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      participant_id: Id of the participant to modify.
      role: the new role.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_MODIFY_PARTICIPANT_ROLE, wave_id,
                              wavelet_id, participantId=participant_id,
                              participantRole=role)

  def wavelet_modify_tag(self, wave_id, wavelet_id, tag, modify_how=None):
    """Modifies a tag in a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      tag: The tag (a string).
      modify_how: (optional) how to apply the tag. The default is to add
        the tag. Specify 'remove' to remove. Specify None or 'add' to
        add.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_MODIFY_TAG, wave_id, wavelet_id,
                              name=tag).set_optional("modify_how", modify_how)

  def blip_create_child(self, wave_id, wavelet_id, blip_id):
    """Creates a child blip of another blip.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      JSON of blip for which further operations can be applied.
    """
    blip_data = self._new_blipdata(wave_id, wavelet_id, parent_blip_id=blip_id)
    self.new_operation(BLIP_CREATE_CHILD, wave_id, wavelet_id,
                       blipId=blip_id,
                       blipData=blip_data)
    return blip_data

  def blip_delete(self, wave_id, wavelet_id, blip_id):
    """Deletes the specified blip.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(BLIP_DELETE, wave_id, wavelet_id, blipId=blip_id)

  def document_append_markup(self, wave_id, wavelet_id, blip_id, content):
    """Appends content with markup to a document.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.
      content: The markup content to append.

    Returns:
      The operation created.
    """
    return self.new_operation(DOCUMENT_APPEND_MARKUP, wave_id, wavelet_id,
                              blipId=blip_id, content=content)

  def document_modify(self, wave_id, wavelet_id, blip_id):
    """Creates and queues a document modify operation

    The returned operation still needs to be filled with details before
    it makes sense.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(DOCUMENT_MODIFY,
                              wave_id,
                              wavelet_id,
                              blipId=blip_id)

  def document_inline_blip_insert(self, wave_id, wavelet_id, blip_id, position):
    """Inserts an inline blip at a specific location.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.
      position: The position in the document to insert the blip.

    Returns:
      JSON data for the blip that was created for further operations.
    """
    inline_blip_data = self._new_blipdata(wave_id, wavelet_id)
    inline_blip_data['parentBlipId'] = blip_id
    self.new_operation(DOCUMENT_INLINE_BLIP_INSERT, wave_id, wavelet_id,
                       blipId=blip_id,
                       index=position,
                       blipData=inline_blip_data)
    return inline_blip_data
| Python |
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Tests for google3.walkabout.externalagents.api.commandline_robot_runner."""
__author__ = 'douwe@google.com (Douwe Osinga)'
import StringIO
from google3.pyglib import app
from google3.pyglib import flags
from google3.testing.pybase import googletest
from google3.walkabout.externalagents.api import commandline_robot_runner
from google3.walkabout.externalagents.api import events
FLAGS = flags.FLAGS
# Canned wire-format JSON fixtures used to drive run_bot() in the test below.
BLIP_JSON = ('{"wdykLROk*13":'
             '{"lastModifiedTime":1242079608457,'
             '"contributors":["someguy@test.com"],'
             '"waveletId":"test.com!conv+root",'
             '"waveId":"test.com!wdykLROk*11",'
             '"parentBlipId":null,'
             '"version":3,'
             '"creator":"someguy@test.com",'
             '"content":"\\nContent!",'
             '"blipId":"wdykLROk*13",'
             '"annotations":[{"range":{"start":0,"end":1},'
             '"name":"user/e/otherguy@test.com","value":"Other"}],'
             '"elements":{},'
             '"childBlipIds":[]}'
             '}')
WAVELET_JSON = ('{"lastModifiedTime":1242079611003,'
                '"title":"A title",'
                '"waveletId":"test.com!conv+root",'
                '"rootBlipId":"wdykLROk*13",'
                '"dataDocuments":null,'
                '"creationTime":1242079608457,'
                '"waveId":"test.com!wdykLROk*11",'
                '"participants":["someguy@test.com","monty@appspot.com"],'
                '"creator":"someguy@test.com",'
                '"version":5}')
EVENTS_JSON = ('[{"timestamp":1242079611003,'
               '"modifiedBy":"someguy@test.com",'
               '"properties":{"participantsRemoved":[],'
               '"participantsAdded":["monty@appspot.com"]},'
               '"type":"WAVELET_PARTICIPANTS_CHANGED"}]')
# Complete payload in the shape the runner reads from its input stream.
TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % (
    BLIP_JSON, WAVELET_JSON, EVENTS_JSON)
class CommandlineRobotRunnerTest(googletest.TestCase):
  """Tests the command-line robot runner end to end on canned JSON."""

  def testSimpleFlow(self):
    """Feeds event JSON in and expects a setTitle operation in the output."""
    # Configure a handler for the participants-changed event via flags.
    FLAGS.eventdef_wavelet_participants_changed = 'x'
    flag = 'eventdef_' + events.WaveletParticipantsChanged.type.lower()
    setattr(FLAGS, flag, 'w.title="New title!"')
    input_stream = StringIO.StringIO(TEST_JSON)
    output_stream = StringIO.StringIO()
    commandline_robot_runner.run_bot(input_stream, output_stream)
    res = output_stream.getvalue()
    # The configured handler sets the wavelet title, so the emitted
    # operation bundle must contain a wavelet.setTitle op.
    self.assertTrue('wavelet.setTitle' in res)
def main(unused_argv):
  """Entry point for app.run(); delegates to the googletest runner."""
  googletest.main()


if __name__ == '__main__':
  app.run()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the ops module."""
import unittest
import ops
class TestOperation(unittest.TestCase):
  """Test case for Operation class."""

  def testFields(self):
    """The constructor stores method, id and params verbatim."""
    operation = ops.Operation(ops.WAVELET_SET_TITLE, 'opid02',
                              {'waveId': 'wavelet-id',
                               'title': 'a title'})
    self.assertEqual(ops.WAVELET_SET_TITLE, operation.method)
    self.assertEqual('opid02', operation.id)
    self.assertEqual(2, len(operation.params))

  def testConstructModifyTag(self):
    """modify_how is optional and only adds a parameter when supplied."""
    queue = ops.OperationQueue()
    operation = queue.wavelet_modify_tag('waveid', 'waveletid', 'tag')
    self.assertEqual(3, len(operation.params))
    operation = queue.wavelet_modify_tag(
        'waveid', 'waveletid', 'tag', modify_how='remove')
    self.assertEqual(4, len(operation.params))

  def testConstructRobotFetchWave(self):
    """A proxying queue stamps proxyingFor onto every operation."""
    queue = ops.OperationQueue('proxyid')
    operation = queue.robot_fetch_wave('wave1', 'wavelet1')
    self.assertEqual(3, len(operation.params))
    self.assertEqual('proxyid', operation.params['proxyingFor'])
    self.assertEqual('wave1', operation.params['waveId'])
    self.assertEqual('wavelet1', operation.params['waveletId'])
class TestOperationQueue(unittest.TestCase):
  """Test case for OperationQueue class."""

  def testSerialize(self):
    """serialize() prepends the capabilities-hash notify operation."""
    queue = ops.OperationQueue()
    queue.set_capability_hash('hash')
    queue.wavelet_modify_tag('waveid', 'waveletid', 'tag')
    serialized = queue.serialize()
    self.assertEqual(2, len(serialized))
    notify = serialized[0]
    self.assertEqual('robot.notifyCapabilitiesHash', notify['method'])
    self.assertEqual('hash', notify['params']['capabilitiesHash'])
    self.assertEqual(ops.PROTOCOL_VERSION,
                     notify['params']['protocolVersion'])
    self.assertEqual('wavelet.modifyTag', serialized[1]['method'])
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility library containing various helpers used by the API."""
import re
# Name of the method an object may define to take over its own serialization.
CUSTOM_SERIALIZE_METHOD_NAME = 'serialize'
# Matches one markup tag, capturing its contents (e.g. 'p' in '<p>',
# 'p class="x"' in '<p class="x">').
MARKUP_RE = re.compile(r'<([^>]*?)>')


def parse_markup(markup):
  """Parses a bit of markup into robot compatible text.

  For now this is a rough approximation: <p> and <br> tags become a
  newline; every other tag is stripped.

  Args:
    markup: string possibly containing HTML-like tags.

  Returns:
    The text with tags removed and <p>/<br> replaced by newlines.
  """
  def replace_tag(match):
    # The first word of the captured tag body is the tag name (drops
    # attributes, e.g. '<p class="x">' -> 'p').
    # NOTE: the previous `if not group.groups:` check tested the bound
    # method object itself (always truthy), so it was dead code; this
    # pattern always has exactly one (possibly empty) capture group.
    tag = match.group(1).split(' ', 1)[0]
    if tag in ('p', 'br'):
      return '\n'
    return ''
  return MARKUP_RE.sub(replace_tag, markup)
def is_iterable(inst):
  """Returns whether or not this is a list, tuple, set or dict.

  Note that this does not return true for strings (which, on Python 2,
  lack an __iter__ attribute).
  """
  return getattr(inst, '__iter__', None) is not None
def is_dict(inst):
  """Returns whether or not the specified instance is a dict.

  Detected structurally via the presence of an 'iteritems' attribute.
  """
  return getattr(inst, 'iteritems', None) is not None
def is_user_defined_new_style_class(obj):
  """Returns whether or not the specified instance is a user-defined type.

  Builtin instances live in the '__builtin__' module; anything else is
  treated as user defined.
  """
  module_name = type(obj).__module__
  return module_name != '__builtin__'
def lower_camel_case(s):
  """Converts a string to lower camel case.

  Examples:
    foo => foo
    foo_bar => fooBar
    foo__bar => fooBar
    foo_bar_baz => fooBarBaz

  Args:
    s: The string to convert to lower camel case.

  Returns:
    The lower camel cased string.
  """
  result = ''
  for fragment in s.split('_'):
    if result:
      result += fragment.capitalize()
    else:
      # Nothing accumulated yet (also covers leading underscores):
      # keep the fragment's original casing.
      result = fragment
  return result
def non_none_dict(d):
  """Return a copy of the dictionary without None values."""
  result = {}
  for key, value in d.items():
    if value is not None:
      result[key] = value
  return result
def _serialize_attributes(obj):
  """Serializes attributes of an instance.

  Iterates all attributes of an object and serializes the ones that are
  public (no leading underscore), non-None and not callable. Attribute
  names are converted to lower camel case for the wire format.

  Args:
    obj: The instance to serialize.

  Returns:
    The serialized object as a dict.
  """
  data = {}
  for attr_name in dir(obj):
    if not attr_name.startswith('_'):
      attr = getattr(obj, attr_name)
      if attr is not None and not callable(attr):
        data[lower_camel_case(attr_name)] = serialize(attr)
  return data
def _serialize_dict(d):
  """Invokes serialize on all of its key/value pairs.

  Keys are converted to lower camel case for the wire format.

  Args:
    d: The dict instance to serialize.

  Returns:
    The serialized dict.
  """
  return dict((lower_camel_case(k), serialize(v)) for k, v in d.items())
def serialize(obj):
  """Serializes any instance.

  User-defined instances are first checked for a custom serialize()
  method; when present and callable it takes over. Otherwise their
  public attributes are serialized. Dicts and other iterables are
  serialized element-wise; anything else passes through unchanged.

  Args:
    obj: The instance to serialize.

  Returns:
    The serialized object.
  """
  if is_user_defined_new_style_class(obj):
    custom = None
    if obj:
      custom = getattr(obj, CUSTOM_SERIALIZE_METHOD_NAME, None)
    if callable(custom):
      return custom()
    return _serialize_attributes(obj)
  if is_dict(obj):
    return _serialize_dict(obj)
  if is_iterable(obj):
    return [serialize(item) for item in obj]
  return obj
class StringEnum(object):
  """Enum like class that is configured with a list of values.

  Each supplied value becomes an attribute whose value is the string
  itself, so `StringEnum('FOO').FOO == 'FOO'`.
  """

  def __init__(self, *values):
    for value in values:
      setattr(self, value, value)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defines the ModuleTestRunnerClass."""
import unittest
class ModuleTestRunner(object):
  """Responsible for executing all test cases in a list of modules."""

  def __init__(self, module_list=None, module_test_settings=None):
    # modules: the modules whose TestCases should be run.
    # settings: mapping of attribute name -> value pushed onto each module
    # before its tests execute.
    self.modules = module_list or []
    self.settings = module_test_settings or {}

  def RunAllTests(self):
    """Executes all tests present in the list of modules."""
    runner = unittest.TextTestRunner()
    for module in self.modules:
      # Apply the configured settings to the module before running it.
      for setting, value in self.settings.iteritems():
        try:
          setattr(module, setting, value)
        except AttributeError:
          print '\nError running ' + str(setting)
      print '\nRunning all tests in module', module.__name__
      runner.run(unittest.defaultTestLoader.loadTestsFromModule(module))
| Python |
import simplejson
import cgi
class JSONFilter(object):
    """WSGI middleware that JSON-encodes the wrapped app's return value.

    A POST body with the configured mime type is parsed into
    environ['jsonfilter.json'] before the app runs. The response content
    type is adjusted for JSONP requests and for Opera.
    """
    def __init__(self, app, mime_type='text/x-json'):
        self.app = app              # the wrapped WSGI application
        self.mime_type = mime_type  # mime type treated as JSON input/output
    def __call__(self, environ, start_response):
        # Read JSON POST input to jsonfilter.json if matching mime type
        response = {'status': '200 OK', 'headers': []}
        def json_start_response(status, headers):
            # Capture the app's status/headers; emitted later with ours.
            response['status'] = status
            response['headers'].extend(headers)
        environ['jsonfilter.mime_type'] = self.mime_type
        if environ.get('REQUEST_METHOD', '') == 'POST':
            if environ.get('CONTENT_TYPE', '') == self.mime_type:
                # CONTENT_LENGTH may be missing/empty, in which case read()
                # is called with no arguments (read to EOF).
                args = [_ for _ in [environ.get('CONTENT_LENGTH')] if _]
                data = environ['wsgi.input'].read(*map(int, args))
                environ['jsonfilter.json'] = simplejson.loads(data)
        res = simplejson.dumps(self.app(environ, json_start_response))
        jsonp = cgi.parse_qs(environ.get('QUERY_STRING', '')).get('jsonp')
        if jsonp:
            # Wrap the payload in the requested JSONP callback.
            content_type = 'text/javascript'
            res = ''.join(jsonp + ['(', res, ')'])
        elif 'Opera' in environ.get('HTTP_USER_AGENT', ''):
            # Opera has bunk XMLHttpRequest support for most mime types
            content_type = 'text/plain'
        else:
            content_type = self.mime_type
        headers = [
            ('Content-type', content_type),
            ('Content-length', len(res)),
        ]
        headers.extend(response['headers'])
        start_response(response['status'], headers)
        return [res]
def factory(app, global_conf, **kw):
    """Paste filter factory: wraps *app* in a JSONFilter.

    global_conf is accepted (per the factory protocol) but unused.
    """
    json_filter = JSONFilter(app, **kw)
    return json_filter
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from scanner import make_scanner
try:
from _speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
# Public API of this module.
__all__ = ['JSONDecoder']
# Flags shared by all regular expressions compiled in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Return (nan, inf, -inf) doubles decoded from fixed bit patterns."""
    # 16 bytes: big-endian IEEE-754 bit patterns for NaN then +Infinity.
    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
    # The struct module in Python 2.4 would get frexp() out of range here
    # when an endian is specified in the format string. Fixed in Python 2.5+
    if sys.byteorder != 'big':
        # Byte-swap each 8-byte double into native little-endian order.
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf
# Module-level float constants used when parsing NaN/Infinity literals.
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """Subclass of ValueError with the following additional properties:

    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)
    """
    def __init__(self, msg, doc, pos, end=None):
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        if end is not None:
            # BUG FIX: was linecol(doc, pos), which made endlineno/endcolno
            # duplicate lineno/colno instead of locating *end* (contradicting
            # the class docstring); matches the upstream simplejson fix.
            self.endlineno, self.endcolno = linecol(doc, end)
        else:
            self.endlineno, self.endcolno = None, None
def linecol(doc, pos):
    """Return the (lineno, colno) of character offset *pos* in *doc*.

    Line numbers are 1-based. On the first line the column equals the
    raw offset; on later lines it is 1-based from the preceding newline.
    """
    lineno = doc.count('\n', 0, pos) + 1
    if lineno > 1:
        colno = pos - doc.rindex('\n', 0, pos)
    else:
        colno = pos
    return lineno, colno
def errmsg(msg, doc, pos, end=None):
    """Format a parse-error message with line/column context.

    Note that this function is called from _speedups.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
        msg, lineno, colno, endlineno, endcolno, pos, end)
# Literal names accepted for non-finite numbers.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}
# Matches a run of plain characters followed by '"', '\' or a control char.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Lookup table for simple (single-character) backslash escapes.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
# Encoding assumed for byte strings when the caller specifies none.
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                #msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the literal control character.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                # Combine high and low surrogates into one code point.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
object_pairs_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
pairs = []
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = {}
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end + 1
elif nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end)
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise JSONDecodeError("Expecting : delimiter", s, end)
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise JSONDecodeError("Expecting object", s, end)
pairs.append((key, value))
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting , delimiter", s, end - 1)
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end - 1)
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = dict(pairs)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise JSONDecodeError("Expecting object", s, end)
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting , delimiter", s, end)
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True,
object_pairs_hook=None):
"""
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
*strict* controls the parser's behavior when it encounters an
invalid control character in a string. The default setting of
``True`` means that unescaped control characters are parse errors, if
``False`` then control characters will be allowed in strings.
"""
self.encoding = encoding
self.object_hook = object_hook
self.object_pairs_hook = object_pairs_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise JSONDecodeError("Extra data", s, end, len(s))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode``
beginning with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise JSONDecodeError("No JSON object could be decoded", s, idx)
return obj, end
| Python |
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
all
except NameError:
def all(seq):
for elem in seq:
if not elem:
return False
return True
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return len(self)==len(other) and \
all(p==q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
| Python |
"""JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
object_pairs_hook = context.object_pairs_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict,
_scan_once, object_hook, object_pairs_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
| Python |
"""Implementation of JSONEncoder
"""
import re
try:
from _speedups import encode_basestring_ascii as \
c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from _speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
from decoder import PosInf
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return u'"' + ESCAPE.sub(replace, s) + u'"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
#return '\\u{0:04x}'.format(n)
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
#return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
if isinstance(indent, (int, long)):
indent = ' ' * indent
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from simplejson import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on
# the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
if (_one_shot and c_make_encoder is not None
and not self.indent and not self.sort_keys):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
...     raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.1.0'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
try:
from collections import OrderedDict
except ImportError:
from ordered_dict import OrderedDict
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is false then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize a JSON document read from *fp* to a Python object.

    *fp* is any ``.read()``-supporting file-like object containing a JSON
    document. The whole stream is consumed and handed to :func:`loads`.

    Keyword arguments are forwarded unchanged to :func:`loads`:

    - *encoding*: character encoding for ``str`` input (``'utf-8'`` by
      default; only supersets of ASCII work — pass :class:`unicode`
      otherwise).
    - *object_hook*: called with every decoded JSON object; its return
      value replaces the :class:`dict` (e.g. for JSON-RPC class hinting).
    - *object_pairs_hook*: called with an ordered list of pairs for every
      object literal; takes priority over *object_hook* (e.g.
      :func:`collections.OrderedDict` to preserve key order).
    - *parse_float*, *parse_int*: alternate parsers for JSON numbers
      (e.g. :class:`decimal.Decimal`).
    - *parse_constant*: called for ``'-Infinity'``, ``'Infinity'``,
      ``'NaN'``; may raise to reject non-strict JSON numbers.
    - *cls*: a custom ``JSONDecoder`` subclass to use.
    """
    document = fp.read()
    return loads(document,
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
        **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize *s* (a ``str`` or ``unicode`` JSON document) to a
    Python object.

    - *encoding*: character encoding for ``str`` input (``'utf-8'`` by
      default; only supersets of ASCII work — pass :class:`unicode`
      otherwise). Ignored for :class:`unicode` input.
    - *object_hook*: called with every decoded JSON object; its return
      value replaces the :class:`dict` (e.g. for JSON-RPC class hinting).
    - *object_pairs_hook*: called with an ordered list of pairs for every
      object literal; takes priority over *object_hook* (e.g.
      :func:`collections.OrderedDict` to preserve key order).
    - *parse_float*, *parse_int*: alternate parsers for JSON numbers
      (e.g. :class:`decimal.Decimal`, :class:`float`).
    - *parse_constant*: called for ``'-Infinity'``, ``'Infinity'``,
      ``'NaN'``; may raise to reject non-strict JSON numbers.
    - *cls*: a custom ``JSONDecoder`` subclass to use.
    """
    # Fast path: nothing customized, reuse the shared default decoder.
    # The checks are deliberately "is None" so that falsy-but-explicit
    # values still route through a fresh decoder.
    if (cls is None and encoding is None and object_hook is None and
            object_pairs_hook is None and parse_float is None and
            parse_int is None and parse_constant is None and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Fold the explicitly supplied hooks into kw, skipping unset ones so
    # the decoder's own defaults apply.
    overrides = (
        ('object_hook', object_hook),
        ('object_pairs_hook', object_pairs_hook),
        ('parse_float', parse_float),
        ('parse_int', parse_int),
        ('parse_constant', parse_constant),
    )
    for option, value in overrides:
        if value is not None:
            kw[option] = value
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch the optional C speedups on (True) or off (False).

    Rebinds the scanner/encoder entry points in the decoder, encoder and
    scanner submodules, then rebuilds the cached module-level default
    decoder and encoder so the fast paths pick up the new implementations.
    """
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    try:
        from simplejson._speedups import make_encoder as c_make_encoder
    except ImportError:
        c_make_encoder = None
    if enabled:
        # Prefer the C implementations, falling back to the pure-Python
        # versions where a particular speedup is unavailable.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        # Force the pure-Python implementations everywhere.
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    # Recreate the cached defaults so loads()/dumps() fast paths use the
    # newly selected scanner/encoder implementations.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import simplejson as json
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit(sys.argv[0] + " [infile [outfile]]")
try:
obj = json.load(infile, object_pairs_hook=json.OrderedDict)
except ValueError, e:
raise SystemExit(e)
json.dump(obj, outfile, sort_keys=True, indent=' ')
outfile.write('\n')
# Support "python -m simplejson.tool" style invocation from a shell.
if __name__ == '__main__':
    main()
| Python |
import cgi
import urllib
import time
import random
import urlparse
import hmac
import base64
VERSION = '1.0' # OAuth protocol version sent as oauth_version. Hi Blaine!
HTTP_METHOD = 'GET' # default HTTP method for new OAuthRequests
SIGNATURE_METHOD = 'PLAINTEXT' # default signature method name
# Generic exception class
class OAuthError(RuntimeError):
    """Raised for any OAuth protocol failure (missing parameter, bad
    signature, expired timestamp, reused nonce, ...)."""
    def __init__(self, message='OAuth error occurred.'):
        # Fix: pass the message to RuntimeError so str(e) and e.args carry
        # it (they were empty before); keep the legacy .message attribute
        # that existing callers read. Also fixes the "occured" typo in the
        # default message.
        RuntimeError.__init__(self, message)
        self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
    """Return a WWW-Authenticate header dict announcing OAuth for *realm*."""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
# url escape
def escape(s):
    """Percent-encode *s* for use in OAuth signatures and URLs.

    Unlike urllib.quote's default, '/' is escaped too (only '~' is kept
    literal), matching the RFC 3986 unreserved-character rules OAuth uses.
    """
    # escape '/' too
    return urllib.quote(s, safe='~')
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
    """Return the current time as whole seconds since the epoch (UTC)."""
    now = time.time()
    return int(now)
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
    """Return a pseudorandom nonce of *length* decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
    """Value object pairing a consumer key with its shared secret."""
    key = None     # consumer key identifying the application
    secret = None  # shared secret used when signing requests
    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
    # access tokens and request tokens
    key = None
    secret = None
    '''
    key = the token
    secret = the token secret
    '''
    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
    def to_string(self):
        """Serialize the token as a URL-encoded query fragment."""
        pair = {'oauth_token': self.key, 'oauth_token_secret': self.secret}
        return urllib.urlencode(pair)
    # return a token from something like:
    # oauth_token_secret=digg&oauth_token=digg
    @staticmethod
    def from_string(s):
        """Parse a token out of a query string; inverse of to_string()."""
        parsed = cgi.parse_qs(s, keep_blank_values=False)
        return OAuthToken(parsed['oauth_token'][0],
                          parsed['oauth_token_secret'][0])
    def __str__(self):
        return self.to_string()
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
    '''
    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
    ... any additional parameters, as defined by the Service Provider.
    '''
    parameters = None # oauth parameters
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION
    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}
    def set_parameter(self, parameter, value):
        """Set (or overwrite) a single request parameter."""
        self.parameters[parameter] = value
    def get_parameter(self, parameter):
        """Return the named parameter; raise OAuthError when absent."""
        try:
            return self.parameters[parameter]
        except:
            # NOTE(review): bare except also converts non-KeyError
            # failures (e.g. unhashable parameter) into OAuthError.
            raise OAuthError('Parameter not found: %s' % parameter)
    def _get_timestamp_nonce(self):
        """Return the (oauth_timestamp, oauth_nonce) pair."""
        return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')
    # get any non-oauth parameters
    def get_nonoauth_parameters(self):
        """Return a dict of parameters that are not OAuth protocol ones.

        NOTE(review): the find() test drops any key *containing*
        'oauth_' anywhere, not only keys prefixed with it.
        """
        parameters = {}
        for k, v in self.parameters.iteritems():
            # ignore oauth parameters
            if k.find('oauth_') < 0:
                parameters[k] = v
        return parameters
    # serialize as a header for an HTTPAuth request
    def to_header(self, realm=''):
        """Serialize all parameters into an OAuth Authorization header."""
        auth_header = 'OAuth realm="%s"' % realm
        # add the oauth parameters
        if self.parameters:
            for k, v in self.parameters.iteritems():
                auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}
    # serialize as post data for a POST request
    def to_postdata(self):
        """Serialize all parameters as an escaped k=v&k=v body."""
        return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems())
    # serialize as a url for a GET request
    def to_url(self):
        """Serialize as a full GET URL: normalized base + query string."""
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
    # return a string that consists of all the parameters that need to be signed
    def get_normalized_parameters(self):
        """Return the sorted, escaped 'k=v&k=v' string used for signing.

        NOTE(review): this mutates self.parameters by deleting any
        existing oauth_signature entry (params aliases the dict).
        """
        params = self.parameters
        try:
            # exclude the signature if it exists
            del params['oauth_signature']
        except:
            pass
        key_values = params.items()
        # sort lexicographically, first after key, then after value
        key_values.sort()
        # combine key value pairs in string and escape
        return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values)
    # just uppercases the http method
    def get_normalized_http_method(self):
        """Return the HTTP method uppercased, as required for signing."""
        return self.http_method.upper()
    # parses the url and rebuilds it to be scheme://host/path
    def get_normalized_http_url(self):
        """Return the URL reduced to scheme://host/path (no query/fragment)."""
        parts = urlparse.urlparse(self.http_url)
        url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path
        return url_string
    # set the signature parameter to the result of build_signature
    def sign_request(self, signature_method, consumer, token):
        """Compute and attach oauth_signature_method and oauth_signature."""
        # set the signature method
        self.set_parameter('oauth_signature_method', signature_method.get_name())
        # set the signature
        self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))
    def build_signature(self, signature_method, consumer, token):
        """Delegate signature construction to the given signature method."""
        # call the build signature method within the signature method
        return signature_method.build_signature(self, consumer, token)
    @staticmethod
    def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
        """Build an OAuthRequest by merging parameters from the
        Authorization header, the query string and the URL query, in that
        order (later sources overwrite earlier ones)."""
        # combine multiple parameter sources
        if parameters is None:
            parameters = {}
        # headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # check that the authorization header is OAuth
            # NOTE(review): str.index raises ValueError when 'OAuth' is
            # absent instead of returning -1 (that would be find()).
            if auth_header.index('OAuth') > -1:
                try:
                    # get the parameters from the header
                    header_params = OAuthRequest._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
        # GET or POST query string
        if query_string:
            query_params = OAuthRequest._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = OAuthRequest._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return OAuthRequest(http_method, http_url, parameters)
        return None
    @staticmethod
    def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a consumer request, filling in the standard OAuth
        parameters (key, timestamp, nonce, version); caller-supplied
        *parameters* take precedence over the generated defaults."""
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': oauth_consumer.key,
            'oauth_timestamp': generate_timestamp(),
            'oauth_nonce': generate_nonce(),
            'oauth_version': OAuthRequest.version,
        }
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
        return OAuthRequest(http_method, http_url, parameters)
    @staticmethod
    def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a token-authorization request, optionally carrying an
        escaped oauth_callback."""
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = escape(callback)
        return OAuthRequest(http_method, http_url, parameters)
    # util function: turn Authorization: header into parameters, has to do some unescaping
    @staticmethod
    def _split_header(header):
        """Parse an Authorization header value into a parameter dict.

        NOTE(review): splits naively on ',' — a quoted value containing a
        comma would be broken apart.
        """
        params = {}
        parts = header.split(',')
        for param in parts:
            # ignore realm parameter
            if param.find('OAuth realm') > -1:
                continue
            # remove whitespace
            param = param.strip()
            # split key-value
            param_parts = param.split('=', 1)
            # remove quotes and unescape the value
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params
    # util function: turn url string into parameters, has to do some unescaping
    @staticmethod
    def _split_url_string(param_str):
        """Parse a query string into a dict, keeping only the first value
        for each key and unescaping it."""
        parameters = cgi.parse_qs(param_str, keep_blank_values=False)
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
    """Validates incoming OAuth requests against an OAuthDataStore.

    Checks protocol version, consumer, token, nonce, timestamp and
    signature, and mediates the request-token / access-token exchange.
    """
    timestamp_threshold = 300 # in seconds, five minutes
    version = VERSION
    signature_methods = None
    data_store = None
    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}
    def set_data_store(self, oauth_data_store):
        """Replace the backing data store.

        Fix: previously assigned the undefined name 'data_store', raising
        NameError whenever this setter was called.
        """
        self.data_store = oauth_data_store
    def get_data_store(self):
        """Return the backing data store."""
        return self.data_store
    def add_signature_method(self, signature_method):
        """Register a signature method under its canonical name."""
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods
    # process a request_token request
    # returns the request token on success
    def fetch_request_token(self, oauth_request):
        """Process a request_token request; return the request token."""
        try:
            # get the request token for authorization
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # no token required for the initial token request
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            self._check_signature(oauth_request, consumer, None)
            # fetch a new token
            token = self.data_store.fetch_request_token(consumer)
        return token
    # process an access_token request
    # returns the access token on success
    def fetch_access_token(self, oauth_request):
        """Exchange a verified request token for an access token."""
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # get the request token
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(consumer, token)
        return new_token
    # verify an api call, checks all the parameters
    def verify_request(self, oauth_request):
        """Verify a protected-resource call.

        Returns (consumer, access token, non-oauth parameters).
        """
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # get the access token
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters
    # authorize a request token
    def authorize_token(self, token, user):
        """Mark a request token as authorized by *user* via the data store."""
        return self.data_store.authorize_request_token(token, user)
    # get the callback url
    def get_callback(self, oauth_request):
        """Return the oauth_callback parameter (raises OAuthError if absent)."""
        return oauth_request.get_parameter('oauth_callback')
    # optional support for the authenticate header
    def build_authenticate_header(self, realm=''):
        """Return a WWW-Authenticate header dict for a 401 response."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
    # verify the correct version request for this server
    def _get_version(self, oauth_request):
        """Return the request's oauth_version, defaulting when absent;
        raise OAuthError for an unsupported version."""
        try:
            version = oauth_request.get_parameter('oauth_version')
        except OAuthError:
            # parameter absent: assume the default protocol version
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version
    # figure out the signature with some defaults
    def _get_signature_method(self, oauth_request):
        """Resolve the signature method object for this request."""
        try:
            signature_method = oauth_request.get_parameter('oauth_signature_method')
        except OAuthError:
            signature_method = SIGNATURE_METHOD
        try:
            # get the signature method object
            signature_method = self.signature_methods[signature_method]
        except KeyError:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
        return signature_method
    def _get_consumer(self, oauth_request):
        """Look up and return the consumer; raise OAuthError when unknown."""
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        if not consumer_key:
            raise OAuthError('Invalid consumer key.')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer
    # try to find the token for the provided request token key
    def _get_token(self, oauth_request, token_type='access'):
        """Look up the oauth_token of *token_type*; raise when unknown."""
        token_field = oauth_request.get_parameter('oauth_token')
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token
    def _check_signature(self, oauth_request, consumer, token):
        """Validate timestamp, nonce and signature; raise OAuthError on
        any failure."""
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except OAuthError:
            raise OAuthError('Missing signature.')
        # validate the signature
        valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
            raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
        # (dropped a dead trailing build_signature call whose result was
        # discarded)
    def _check_timestamp(self, timestamp):
        # verify that timestamp is recentish
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))
    def _check_nonce(self, consumer, token, nonce):
        # verify that the nonce is uniqueish
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))
# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
    """Abstract client that executes OAuth requests for one consumer/token.

    Concrete subclasses implement the fetch/access methods against a real
    HTTP transport.
    """
    consumer = None
    token = None
    def __init__(self, oauth_consumer, oauth_token):
        self.consumer = oauth_consumer
        self.token = oauth_token
    def get_consumer(self):
        """Return the OAuthConsumer this client acts for."""
        return self.consumer
    def get_token(self):
        """Return the current OAuthToken (request or access)."""
        return self.token
    def fetch_request_token(self, oauth_request):
        # -> OAuthToken
        raise NotImplementedError
    def fetch_access_token(self, oauth_request):
        # -> OAuthToken
        raise NotImplementedError
    def access_resource(self, oauth_request):
        # -> some protected resource
        raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):
    """Abstract persistence interface for consumers, tokens and nonces.

    NOTE(review): OAuthServer calls lookup_token(token_type, token) and
    lookup_nonce(consumer, token, nonce) — fewer arguments than these
    signatures declare; implementations should match the server's calls.
    """
    def lookup_consumer(self, key):
        # -> OAuthConsumer
        raise NotImplementedError
    def lookup_token(self, oauth_consumer, token_type, token_token):
        # -> OAuthToken
        raise NotImplementedError
    def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
        # -> OAuthToken
        raise NotImplementedError
    def fetch_request_token(self, oauth_consumer):
        # -> OAuthToken
        raise NotImplementedError
    def fetch_access_token(self, oauth_consumer, oauth_token):
        # -> OAuthToken
        raise NotImplementedError
    def authorize_request_token(self, oauth_token, user):
        # -> OAuthToken
        raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
    """Strategy base class: one concrete subclass per signature method."""
    def get_name(self):
        # -> str
        raise NotImplementedError
    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        # -> str key, str raw
        raise NotImplementedError
    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        # -> str
        raise NotImplementedError
    def check_signature(self, oauth_request, consumer, token, signature):
        """Return True when *signature* matches the one we would build."""
        return self.build_signature(oauth_request, consumer, token) == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    """HMAC-SHA1 signature method (OAuth Core 1.0, section 9.2)."""
    def get_name(self):
        """Return the oauth_signature_method identifier."""
        return 'HMAC-SHA1'
    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return (key, raw): the HMAC key and the signature base string."""
        sig = (
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        )
        # Key is consumer secret '&' token secret; the token part is empty
        # for the initial request-token call.
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw
    def build_signature(self, oauth_request, consumer, token):
        """Return the base64-encoded HMAC-SHA1 digest of the base string."""
        # build the base signature string
        key, raw = self.build_signature_base_string(oauth_request, consumer, token)
        # hmac object
        try:
            import hashlib # 2.5
            hashed = hmac.new(key, raw, hashlib.sha1)
        except ImportError:
            # Fix: the former bare except also swallowed TypeError raised
            # by hmac.new itself and silently retried with the legacy sha
            # module, masking real errors. Only the missing-module case
            # should fall back.
            import sha # deprecated
            hashed = hmac.new(key, raw, sha)
        # calculate the digest base 64
        return base64.b64encode(hashed.digest())
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signature method: the signature is just the secrets."""
    def get_name(self):
        """Return the oauth_signature_method identifier."""
        return 'PLAINTEXT'
    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return escaped consumer secret + '&' + escaped token secret
        (token part empty when there is no token yet)."""
        parts = [escape(consumer.secret), '&']
        if token:
            parts.append(escape(token.secret))
        return ''.join(parts)
    def build_signature(self, oauth_request, consumer, token):
        """For PLAINTEXT the signature is the base string itself."""
        return self.build_signature_base_string(oauth_request, consumer, token)
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the wavelet module."""
import unittest
import blip
import element
import ops
import wavelet
import simplejson
# Address of the robot under test; used as creator and sole participant.
ROBOT_NAME = 'robot@appspot.com'

# Wavelet fixture mirroring the wire-format dict the server would send.
TEST_WAVELET_DATA = {
    'creator': ROBOT_NAME,
    'creationTime': 100,
    'lastModifiedTime': 101,
    'participants': [ROBOT_NAME],
    'participantsRoles': {ROBOT_NAME: wavelet.Participants.ROLE_FULL},
    'rootBlipId': 'blip-1',
    'title': 'Title',
    'waveId': 'test.com!w+g3h3im',
    'waveletId': 'test.com!root+conv',
    'tags': ['tag1', 'tag2'],
}

# Root blip fixture for the wavelet above (ids/times kept consistent).
TEST_BLIP_DATA = {
    'blipId': TEST_WAVELET_DATA['rootBlipId'],
    'childBlipIds': [],
    'content': '\ntesting',
    'contributors': [TEST_WAVELET_DATA['creator'], 'robot@google.com'],
    'creator': TEST_WAVELET_DATA['creator'],
    'lastModifiedTime': TEST_WAVELET_DATA['lastModifiedTime'],
    'parentBlipId': None,
    'waveId': TEST_WAVELET_DATA['waveId'],
    'elements': {},
    'waveletId': TEST_WAVELET_DATA['waveletId'],
}
class TestWavelet(unittest.TestCase):
  """Tests the wavelet class."""

  def setUp(self):
    """Builds a single-blip wavelet fixture shared by every test."""
    self.operation_queue = ops.OperationQueue()
    self.all_blips = {}
    self.blip = blip.Blip(TEST_BLIP_DATA,
                          self.all_blips,
                          self.operation_queue)
    self.all_blips[self.blip.blip_id] = self.blip
    self.wavelet = wavelet.Wavelet(TEST_WAVELET_DATA,
                                   self.all_blips,
                                   None,
                                   self.operation_queue)
    self.wavelet.robot_address = ROBOT_NAME

  def testWaveletProperties(self):
    """Checks that constructor data is exposed through the properties."""
    w = self.wavelet
    self.assertEquals(TEST_WAVELET_DATA['creator'], w.creator)
    self.assertEquals(TEST_WAVELET_DATA['creationTime'], w.creation_time)
    self.assertEquals(TEST_WAVELET_DATA['lastModifiedTime'],
                      w.last_modified_time)
    self.assertEquals(len(TEST_WAVELET_DATA['participants']),
                      len(w.participants))
    self.assertTrue(TEST_WAVELET_DATA['participants'][0] in w.participants)
    self.assertEquals(TEST_WAVELET_DATA['rootBlipId'], w.root_blip.blip_id)
    self.assertEquals(TEST_WAVELET_DATA['title'], w.title)
    self.assertEquals(TEST_WAVELET_DATA['waveId'], w.wave_id)
    self.assertEquals(TEST_WAVELET_DATA['waveletId'], w.wavelet_id)
    # domain is derived from the wave id 'test.com!w+...'.
    self.assertEquals('test.com', w.domain)

  def testWaveletMethods(self):
    """Exercises reply/delete, data documents and proxy_for."""
    w = self.wavelet
    reply = w.reply()
    self.assertEquals(2, len(w.blips))
    w.delete(reply)
    self.assertEquals(1, len(w.blips))
    self.assertEquals(0, len(w.data_documents))
    self.wavelet.data_documents['key'] = 'value'
    # NOTE(review): assert_ is a deprecated alias of assertTrue.
    self.assert_('key' in w.data_documents)
    self.assertEquals(1, len(w.data_documents))
    for key in w.data_documents:
      self.assertEquals(key, 'key')
    self.assertEquals(1, len(w.data_documents.keys()))
    # Assigning None removes the data document.
    self.wavelet.data_documents['key'] = None
    self.assertEquals(0, len(w.data_documents))
    num_participants = len(w.participants)
    w.proxy_for('proxy').reply()
    self.assertEquals(2, len(w.blips))
    # check that the new proxy for participant was added
    self.assertEquals(num_participants + 1, len(w.participants))
    # With a proxying robot address already set, a further proxy_for must
    # not add another participant, only another blip.
    w._robot_address = ROBOT_NAME.replace('@', '+proxy@')
    w.proxy_for('proxy').reply()
    self.assertEquals(num_participants + 1, len(w.participants))
    self.assertEquals(3, len(w.blips))

  def testSetTitle(self):
    """Setting the title rewrites the first line of the root blip."""
    self.blip._content = '\nOld title\n\nContent'
    # The UTF-8 byte string '\xd0\xb0\xd0\xb1\xd0\xb2' decodes to
    # u'\u0430\u0431\u0432', as asserted on the blip content below.
    self.wavelet.title = 'New title \xd0\xb0\xd0\xb1\xd0\xb2'
    self.assertEquals(1, len(self.operation_queue))
    self.assertEquals('wavelet.setTitle',
                      self.operation_queue.serialize()[1]['method'])
    self.assertEquals(u'\nNew title \u0430\u0431\u0432\n\nContent',
                      self.blip._content)

  def testSetTitleAdjustRootBlipWithOneLineProperly(self):
    """A one-line root blip gains a trailing newline when titled."""
    self.blip._content = '\nOld title'
    self.wavelet.title = 'New title'
    self.assertEquals(1, len(self.operation_queue))
    self.assertEquals('wavelet.setTitle',
                      self.operation_queue.serialize()[1]['method'])
    self.assertEquals('\nNew title\n', self.blip._content)

  def testSetTitleAdjustEmptyRootBlipProperly(self):
    """An empty root blip is filled with the title plus a newline."""
    self.blip._content = '\n'
    self.wavelet.title = 'New title'
    self.assertEquals(1, len(self.operation_queue))
    self.assertEquals('wavelet.setTitle',
                      self.operation_queue.serialize()[1]['method'])
    self.assertEquals('\nNew title\n', self.blip._content)

  def testTags(self):
    """Tags behave like an ordered, duplicate-free collection."""
    w = self.wavelet
    self.assertEquals(2, len(w.tags))
    w.tags.append('tag3')
    self.assertEquals(3, len(w.tags))
    # Appending an existing tag is a no-op.
    w.tags.append('tag3')
    self.assertEquals(3, len(w.tags))
    w.tags.remove('tag1')
    self.assertEquals(2, len(w.tags))
    self.assertEquals('tag2', w.tags[0])

  def testParticipantRoles(self):
    """Roles can be read back and updated per participant."""
    w = self.wavelet
    self.assertEquals(wavelet.Participants.ROLE_FULL,
                      w.participants.get_role(ROBOT_NAME))
    w.participants.set_role(ROBOT_NAME, wavelet.Participants.ROLE_READ_ONLY)
    self.assertEquals(wavelet.Participants.ROLE_READ_ONLY,
                      w.participants.get_role(ROBOT_NAME))

  def testSerialize(self):
    """A wavelet with varied elements serializes to JSON without error."""
    self.blip.append(element.Gadget('http://test.com', {'a': 3}))
    self.wavelet.title = 'A wavelet title'
    self.blip.append(element.Image(url='http://www.google.com/logos/clickortreat1.gif',
                                   width=320, height=118))
    self.blip.append(element.Attachment(caption='fake', data='fake data'))
    self.blip.append(element.Line(line_type='li', indent='2'))
    self.blip.append('bulleted!')
    self.blip.append(element.Installer(
        'http://wave-skynet.appspot.com/public/extensions/areyouin/manifest.xml'))
    self.wavelet.proxy_for('proxy').reply().append('hi from douwe')
    inlineBlip = self.blip.insert_inline_blip(5)
    inlineBlip.append('hello again!')
    serialized = self.wavelet.serialize()
    serialized = simplejson.dumps(serialized)
    self.assertTrue(serialized.find('test.com') > 0)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the generic robot classes.
This module provides the Robot class and RobotListener interface,
as well as some helper functions for web requests and responses.
"""
import base64
import logging
import sys
try:
__import__('google3') # setup internal test environment
except ImportError:
pass
import simplejson
import blip
import events
import ops
import util
import wavelet
import errors
# We only import oauth when we need it; populated lazily by
# Robot.setup_oauth().
oauth = None

# Profile page advertised when the robot author supplies none.
DEFAULT_PROFILE_URL = (
    'http://code.google.com/apis/wave/extensions/robots/python-tutorial.html')
class Robot(object):
"""Robot metadata class.
This class holds on to basic robot information like the name and profile.
It also maintains the list of event handlers and cron jobs and
dispatches events to the appropriate handlers.
"""
  def __init__(self, name, image_url='', profile_url=DEFAULT_PROFILE_URL):
    """Initializes self with robot information.

    Args:
      name: The name of the robot
      image_url: (optional) url of an image that should be used as the avatar
        for this robot.
      profile_url: (optional) url of a webpage with more information about
        this robot.
    """
    self._handlers = {}            # event type -> list of handler payloads
    self._name = name
    self._verification_token = None
    self._st = None
    self._consumer_key = None      # oauth credentials, set by setup_oauth()
    self._consumer_secret = None
    self._server_rpc_base = None   # rpc gateway url, set by setup_oauth()
    self._profile_handler = None   # set by register_profile_handler()
    self._image_url = image_url
    self._profile_url = profile_url
    self._capability_hash = 0      # rolling hash updated by register_handler()
  @property
  def name(self):
    """Returns the name of the robot (as passed to the constructor)."""
    return self._name
  @property
  def image_url(self):
    """Returns the URL of the avatar image (may be the empty string)."""
    return self._image_url
  @property
  def profile_url(self):
    """Returns the URL of an info page for the robot."""
    return self._profile_url
def http_post(self, url, data, headers):
"""Execute an http post.
Monkey patch this method to use something other than
the default urllib.
Args:
url: to post to
body: post body
headers: extra headers to pass along
Returns:
response_code, returned_page
"""
import urllib2
req = urllib2.Request(url,
data=data,
headers=headers)
try:
f = urllib2.urlopen(req)
return f.code, f.read()
except urllib2.URLError, e:
return e.code, e.read()
  def get_verification_token_info(self):
    """Returns the verification token and ST parameter.

    Both values default to None until set_verification_token_info() is
    called.
    """
    return self._verification_token, self._st
  def capabilities_hash(self):
    """Return the capabilities hash as a hex string.

    The hash is accumulated in register_handler(); hex() yields the
    '0x...' representation.
    """
    return hex(self._capability_hash)
def register_handler(self, event_class, handler, context=None, filter=None):
"""Registers a handler on a specific event type.
Multiple handlers may be registered on a single event type and are
guaranteed to be called in order of registration.
The handler takes two arguments, the event object and the corresponding
wavelet.
Args:
event_class: An event to listen for from the classes defined in the
events module.
handler: A function handler which takes two arguments, the wavelet for
the event and the event object.
context: The context to provide for this handler.
filter: Depending on the event, a filter can be specified that restricts
for which values the event handler will be called from the server.
Valuable to restrict the amount of traffic send to the robot.
"""
payload = (handler, event_class, context, filter)
self._handlers.setdefault(event_class.type, []).append(payload)
if type(context) == list:
context = ','.join(context)
self._capability_hash = (self._capability_hash * 13 +
hash(event_class.type) +
hash(context) +
hash(filter)) & 0xfffffff
def set_verification_token_info(self, token, st=None):
  """Set the verification token used in the ownership verification.

  /wave/robot/register starts this process up and will produce this token.

  Args:
    token: the token provided by /wave/robot/register.
    st: optional parameter to verify the request for the token came from
      the wave server.
  """
  self._verification_token = token
  self._st = st
def setup_oauth(self, consumer_key, consumer_secret,
                server_rpc_base='http://gmodules.com/api/rpc'):
  """Configure this robot to use the oauth'd json rpc.

  Args:
    consumer_key: consumer key received from the verification process.
    consumer_secret: secret received from the verification process.
    server_rpc_base: url of the rpc gateway to use. Specify None for default.
      For wave preview, http://gmodules.com/api/rpc should be used.
      For wave sandbox, http://sandbox.gmodules.com/api/rpc should be used.
  """
  # Import oauth inline and using __import__ for pyexe compatibility
  # when oauth is not installed; bind it into the module-level name so
  # make_rpc can see it.
  global oauth
  __import__('waveapi.oauth')
  oauth = sys.modules['waveapi.oauth']
  self._server_rpc_base = server_rpc_base
  self._consumer_key = consumer_key
  self._consumer_secret = consumer_secret
  self._oauth_signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
  self._oauth_consumer = oauth.OAuthConsumer(self._consumer_key,
                                             self._consumer_secret)
def register_profile_handler(self, handler):
  """Sets the profile handler for this robot.

  The profile handler will be called when a profile is needed. The handler
  gets passed the name for which a profile is needed or None for the
  robot itself. A dictionary with keys for name, imageUrl and
  profileUrl should be returned.

  Args:
    handler: a one-argument callable as described above.
  """
  self._profile_handler = handler
def _hash(self, value):
"""return b64encoded sha1 hash of value."""
try:
hashlib = __import__('hashlib') # 2.5
hashed = hashlib.sha1(value)
except ImportError:
import sha # deprecated
hashed = sha.sha(value)
return base64.b64encode(hashed.digest())
def make_rpc(self, operations):
  """Make an rpc call, submitting the specified operations.

  Args:
    operations: a single operation, a list of operations, or an
      ops.OperationQueue to submit.

  Returns:
    The json-decoded server response.

  Raises:
    errors.Error: if setup_oauth has not been called first.
    IOError: if the server responds with a non-200 status.
  """
  if not oauth or not self._oauth_consumer.key:
    raise errors.Error('OAuth has not been configured')
  # Normalize a single operation into a list; an OperationQueue already
  # serializes as a sequence of operations.
  if (not type(operations) == list and
      not isinstance(operations, ops.OperationQueue)):
    operations = [operations]
  rpcs = [op.serialize(method_prefix='wave') for op in operations]
  post_body = simplejson.dumps(rpcs)
  # oauth_body_hash ties the signature to this exact request payload.
  body_hash = self._hash(post_body)
  params = {
      'oauth_consumer_key': 'google.com:' + self._oauth_consumer.key,
      'oauth_timestamp': oauth.generate_timestamp(),
      'oauth_nonce': oauth.generate_nonce(),
      'oauth_version': oauth.OAuthRequest.version,
      'oauth_body_hash': body_hash,
  }
  oauth_request = oauth.OAuthRequest.from_request('POST',
                                                  self._server_rpc_base,
                                                  parameters=params)
  oauth_request.sign_request(self._oauth_signature_method,
                             self._oauth_consumer,
                             None)
  code, content = self.http_post(
      url=oauth_request.to_url(),
      data=post_body,
      headers={'Content-Type': 'application/json'})
  logging.info('Active URL: %s' % oauth_request.to_url())
  logging.info('Active Outgoing: %s' % post_body)
  if code != 200:
    logging.info(oauth_request.to_url())
    logging.info(content)
    raise IOError('HttpError ' + str(code))
  return simplejson.loads(content)
def _first_rpc_result(self, result):
"""result is returned from make_rpc. Get the first data record
or throw an exception if it was an error."""
if type(result) == list:
result = result[0]
error = result.get('error')
if error:
raise errors.Error('RPC Error' + str(error['code'])
+ ': ' + error['message'])
data = result.get('data')
if data:
return data
raise errors.Error('RPC Error: No data record.')
def capabilities_xml(self):
  """Return this robot's capabilities as an XML string.

  The document lists every registered event handler (with its optional
  context and filter), the OAuth consumer key if one is configured, the
  current capability hash and the wire protocol version.
  """
  lines = []
  for capability, payloads in self._handlers.items():
    for payload in payloads:
      handler, event_class, context, filter = payload
      line = ' <w:capability name="%s"' % capability
      if context:
        # Context may have been registered as a list; flatten for the wire.
        if type(context) == list:
          context = ','.join(context)
        line += ' context="%s"' % context
      if filter:
        line += ' filter="%s"' % filter
      line += '/>\n'
      lines.append(line)
  if self._consumer_key:
    oauth_tag = '<w:consumer_key>%s</w:consumer_key>\n' % self._consumer_key
  else:
    oauth_tag = ''
  return ('<?xml version="1.0"?>\n'
          '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
          '<w:version>%s</w:version>\n'
          '%s'
          '<w:protocolversion>%s</w:protocolversion>\n'
          '<w:capabilities>\n'
          '%s'
          '</w:capabilities>\n'
          '</w:robot>\n') % (self.capabilities_hash(),
                             oauth_tag,
                             ops.PROTOCOL_VERSION,
                             '\n'.join(lines))
def profile_json(self, name=None):
  """Returns a JSON representation of the profile.

  This method is called both for the basic profile of the robot and to
  get a proxying for profile, in which case name is set. By default
  the information supplied at registration is returned.

  Use register_profile_handler to override this default behavior.

  Args:
    name: the proxying-for id a profile is needed for, or None for the
      robot itself.
  """
  if self._profile_handler:
    data = self._profile_handler(name)
  else:
    data = {'name': self.name,
            'imageUrl': self.image_url,
            'profileUrl': self.profile_url}
  return simplejson.dumps(data)
def _wavelet_from_json(self, json, pending_ops):
  """Construct a wavelet from the passed json.

  The json should either contain a wavelet and a blips record that
  define those respective object. The returned wavelet
  will be constructed using the passed pending_ops
  OperationQueue.

  Alternatively the json can be the result of a previous
  wavelet.serialize() call. In that case the blips will
  be contained in the wavelet record.

  Args:
    json: a dict or json string in one of the shapes described above.
    pending_ops: the ops.OperationQueue the new wavelet should queue on.

  Returns:
    A wavelet.Wavelet holding only the blips that belong to it.
  """
  if isinstance(json, basestring):
    json = simplejson.loads(json)
  blips = {}
  for blip_id, raw_blip_data in json['blips'].items():
    blips[blip_id] = blip.Blip(raw_blip_data, blips, pending_ops)
  # Accept the three historical payload shapes.
  if 'wavelet' in json:
    raw_wavelet_data = json['wavelet']
  elif 'waveletData' in json:
    raw_wavelet_data = json['waveletData']
  else:
    raw_wavelet_data = json
  # Keep only the blips that actually belong to this wavelet.
  wavelet_blips = {}
  wavelet_id = raw_wavelet_data['waveletId']
  wave_id = raw_wavelet_data['waveId']
  for blip_id, instance in blips.items():
    if instance.wavelet_id == wavelet_id and instance.wave_id == wave_id:
      wavelet_blips[blip_id] = instance
  result = wavelet.Wavelet(raw_wavelet_data, wavelet_blips, self, pending_ops)
  robot_address = json.get('robotAddress')
  if robot_address:
    result.robot_address = robot_address
  return result
def process_events(self, json):
  """Process an incoming set of events encoded as json.

  Builds the wavelet the events refer to, dispatches each event to every
  handler registered for its type (in registration order), and returns
  the operations the handlers queued up, serialized as json.
  """
  parsed = simplejson.loads(json)
  pending_ops = ops.OperationQueue()
  event_wavelet = self._wavelet_from_json(parsed, pending_ops)
  for event_data in parsed['events']:
    for payload in self._handlers.get(event_data['type'], []):
      handler, event_class, context, filter = payload
      event = event_class(event_data, event_wavelet)
      handler(event, event_wavelet)
  # Echo the capability hash so the server can detect stale registrations.
  pending_ops.set_capability_hash(self.capabilities_hash())
  return simplejson.dumps(pending_ops.serialize())
def new_wave(self, domain, participants=None, message='', proxy_for_id=None,
             submit=False):
  """Create a new wave with the initial participants on it.

  A new wave is returned with its own operation queue. It the
  responsibility of the caller to make sure this wave gets
  submitted to the server, either by calling robot.submit() or
  by calling .submit_with() on the returned wave.

  Args:
    domain: the domain to create the wavelet on. This should
      in general correspond to the domain of the incoming
      wavelet. (wavelet.domain). Exceptions are situations
      where the robot is calling new_wave outside of an
      event or when the server is handling multiple domains.
    participants: initial participants on the wave. The robot
      as the creator of the wave is always added.
    message: a string that will be passed back to the robot
      when the WAVELET_CREATOR event is fired. This is a
      lightweight way to pass around state.
    proxy_for_id: the proxying information set on the wave's
      operation queue.
    submit: if true, use the active gateway to make a round
      trip to the server. This will return immediately an
      actual waveid/waveletid and blipId for the root blip.
  """
  operation_queue = ops.OperationQueue(proxy_for_id)
  # Non-string messages are serialized so they survive the round trip.
  if not isinstance(message, basestring):
    message = simplejson.dumps(message)
  blip_data, wavelet_data = operation_queue.robot_create_wavelet(
      domain=domain,
      participants=participants,
      message=message)
  blips = {}
  root_blip = blip.Blip(blip_data, blips, operation_queue)
  blips[root_blip.blip_id] = root_blip
  created = wavelet.Wavelet(wavelet_data,
                            blips=blips,
                            robot=self,
                            operation_queue=operation_queue)
  if submit:
    # Round trip to the server to obtain real ids, then rebuild the
    # local objects with them.
    result = self._first_rpc_result(self.submit(created))
    if type(result) == list:
      result = result[0]
    # Currently, data is sometimes wrapped in an outer 'data'
    # Remove these 2 lines when that is no longer an issue.
    if 'data' in result and len(result) == 2:
      result = result['data']
    if 'blipId' in result:
      blip_data['blipId'] = result['blipId']
      wavelet_data['rootBlipId'] = result['blipId']
    for field in 'waveId', 'waveletId':
      if field in result:
        wavelet_data[field] = result[field]
        blip_data[field] = result[field]
    blips = {}
    root_blip = blip.Blip(blip_data, blips, operation_queue)
    blips[root_blip.blip_id] = root_blip
    created = wavelet.Wavelet(wavelet_data,
                              blips=blips,
                              robot=self,
                              operation_queue=operation_queue)
  return created
def fetch_wavelet(self, wave_id, wavelet_id, proxy_for_id=None):
  """Use the REST interface to fetch a wave and return it.

  The returned wavelet contains a snapshot of the state of the
  wavelet at that point. It can be used to modify the wavelet,
  but the wavelet might change in between, so treat carefully.

  Also note that the wavelet returned has its own operation
  queue. It the responsibility of the caller to make sure this
  wavelet gets submitted to the server, either by calling
  robot.submit() or by calling .submit_with() on the returned
  wavelet.

  Args:
    wave_id: id of the wave to fetch.
    wavelet_id: id of the wavelet within that wave.
    proxy_for_id: optional proxying information for the new queue.
  """
  operation_queue = ops.OperationQueue(proxy_for_id)
  operation_queue.robot_fetch_wave(wave_id, wavelet_id)
  result = self._first_rpc_result(self.make_rpc(operation_queue))
  # The fetch queue is consumed; give the wavelet a fresh queue.
  return self._wavelet_from_json(result, ops.OperationQueue(proxy_for_id))
def blind_wavelet(self, json, proxy_for_id=None):
  """Construct a blind wave from a json string.

  Call this method if you have a snapshot of a wave that you
  want to operate on outside of an event. Since the wave might
  have changed since you last saw it, you should take care to
  submit operations that are as safe as possible.

  Args:
    json: a json object or string containing at least a key
      wavelet defining the wavelet and a key blips defining the
      blips in the view.
    proxy_for_id: the proxying information that will be set on the wavelet's
      operation queue.

  Returns:
    A new wavelet with its own operation queue. It the
    responsibility of the caller to make sure this wavelet gets
    submitted to the server, either by calling robot.submit() or
    by calling .submit_with() on the returned wavelet.
  """
  return self._wavelet_from_json(json, ops.OperationQueue(proxy_for_id))
def submit(self, wavelet_to_submit):
  """Submit the pending operations associated with wavelet_to_submit.

  Typically the wavelet will be the result of fetch_wavelet, blind_wavelet
  or new_wave.

  Returns:
    The server response from make_rpc; the wavelet's queue is cleared
    after a successful submit.
  """
  pending = wavelet_to_submit.get_operation_queue()
  res = self.make_rpc(pending)
  pending.clear()
  logging.info('submit returned:%s', res)
  return res
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Declares the api package."""
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains various API-specific exception classes.
This module contains various specific exception classes that are raised by
the library back to the client.
"""
class Error(Exception):
  """Base library error type; all waveapi-specific errors derive from it."""
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines event types that are sent from the wave server.
This module defines all of the event types currently supported by the wave
server. Each event type is sub classed from Event and has its own
properties depending on the type.
"""
class Context(object):
  """Specifies constants representing different context requests.

  These values are passed when registering handlers to request which
  blips should be shipped along with an event.
  """
  #: Requests the root blip.
  ROOT = 'ROOT'
  #: Requests the parent blip of the event blip.
  PARENT = 'PARENT'
  #: Requests the siblings blip of the event blip.
  SIBLINGS = 'SIBLINGS'
  #: Requests the child blips of the event blip.
  CHILDREN = 'CHILDREN'
  #: Requests the event blip itself.
  SELF = 'SELF'
  #: Requests all of the blips of the event wavelet.
  ALL = 'ALL'
class Event(object):
  """Object describing a single event received from the wave server.

  Attributes:
    modified_by: Participant id that caused this event.
    timestamp: Timestamp that this event occurred on the server.
    type: Type string of this event.
    properties: Dictionary of all extra properties. Typically the derived
        event type should have these explicitly set as attributes, but
        experimental features might appear in properties before that.
    blip_id: The blip_id of the blip for blip related events or the root
        blip for wavelet related events.
    blip: If available, the blip with id equal to the events blip_id.
    proxying_for: If available, the proxyingFor id of the robot that caused
        the event.
  """

  def __init__(self, json, wavelet):
    """Inits this event with JSON data.

    Args:
      json: JSON data from Wave server.
      wavelet: the wavelet the event occurred on, used to resolve blip.
    """
    self.raw_data = json
    self.type = json.get('type')
    self.modified_by = json.get('modifiedBy')
    self.timestamp = json.get('timestamp', 0)
    self.proxying_for = json.get('proxyingFor')
    self.properties = json.get('properties', {})
    self.blip_id = self.properties.get('blipId')
    self.blip = wavelet.blips.get(self.blip_id)
class WaveletBlipCreated(Event):
  """Event triggered when a new blip is created.

  Attributes:
    new_blip_id: The id of the newly created blip.
    new_blip: If in context, the actual new blip (otherwise None).
  """
  type = 'WAVELET_BLIP_CREATED'

  def __init__(self, json, wavelet):
    super(WaveletBlipCreated, self).__init__(json, wavelet)
    self.new_blip_id = self.properties['newBlipId']
    # None when the new blip is outside the supplied context.
    self.new_blip = wavelet.blips.get(self.new_blip_id)
class WaveletBlipRemoved(Event):
  """Event triggered when a blip is removed.

  Attributes:
    removed_blip_id: the id of the removed blip.
    removed_blip: if in context, the removed blip (otherwise None).
  """
  type = 'WAVELET_BLIP_REMOVED'

  def __init__(self, json, wavelet):
    super(WaveletBlipRemoved, self).__init__(json, wavelet)
    self.removed_blip_id = self.properties['removedBlipId']
    # None when the removed blip is outside the supplied context.
    self.removed_blip = wavelet.blips.get(self.removed_blip_id)
class WaveletParticipantsChanged(Event):
  """Event triggered when the participants on a wave change.

  Attributes:
    participants_added: List of participants added.
    participants_removed: List of participants removed.
  """
  type = 'WAVELET_PARTICIPANTS_CHANGED'

  def __init__(self, json, wavelet):
    super(WaveletParticipantsChanged, self).__init__(json, wavelet)
    self.participants_added = self.properties['participantsAdded']
    self.participants_removed = self.properties['participantsRemoved']
class WaveletSelfAdded(Event):
  """Event triggered when the robot is added to the wavelet."""
  type = 'WAVELET_SELF_ADDED'
class WaveletSelfRemoved(Event):
  """Event triggered when the robot is removed from the wavelet."""
  type = 'WAVELET_SELF_REMOVED'
class WaveletTitleChanged(Event):
  """Event triggered when the title of the wavelet has changed.

  Attributes:
    title: The new title.
  """
  type = 'WAVELET_TITLE_CHANGED'

  def __init__(self, json, wavelet):
    super(WaveletTitleChanged, self).__init__(json, wavelet)
    self.title = self.properties['title']
class BlipContributorsChanged(Event):
  """Event triggered when the contributors to this blip change.

  Attributes:
    contributors_added: List of contributors that were added.
    contributors_removed: List of contributors that were removed.
  """
  type = 'BLIP_CONTRIBUTORS_CHANGED'

  def __init__(self, json, wavelet):
    super(BlipContributorsChanged, self).__init__(json, wavelet)
    # Correctly spelled attributes, matching the documented names.
    self.contributors_added = self.properties['contributorsAdded']
    self.contributors_removed = self.properties['contributorsRemoved']
    # Backward-compatible aliases: the original code misspelled these as
    # 'contibutors_*', and existing callers may rely on that spelling.
    self.contibutors_added = self.contributors_added
    self.contibutors_removed = self.contributors_removed
class BlipSubmitted(Event):
  """Event triggered when a blip is submitted."""
  type = 'BLIP_SUBMITTED'
class DocumentChanged(Event):
  """Event triggered when a document is changed.

  This event is fired after any changes in the document and should be used
  carefully to keep the amount of traffic to the robot reasonable. Use
  filters where appropriate.
  """
  type = 'DOCUMENT_CHANGED'
class FormButtonClicked(Event):
  """Event triggered when a form button is clicked.

  Attributes:
    button_name: The name of the button that was clicked.
  """
  type = 'FORM_BUTTON_CLICKED'

  def __init__(self, json, wavelet):
    super(FormButtonClicked, self).__init__(json, wavelet)
    self.button_name = self.properties['buttonName']
class GadgetStateChanged(Event):
  """Event triggered when the state of a gadget changes.

  Attributes:
    index: The index of the gadget that changed in the document.
    old_state: The old state of the gadget.
  """
  type = 'GADGET_STATE_CHANGED'

  def __init__(self, json, wavelet):
    super(GadgetStateChanged, self).__init__(json, wavelet)
    self.index = self.properties['index']
    self.old_state = self.properties['oldState']
class AnnotatedTextChanged(Event):
  """Event triggered when text with an annotation has changed.

  This is mainly useful in combination with a filter on the
  name of the annotation.

  Attributes:
    name: The name of the annotation.
    value: The value of the annotation that changed (may be None).
  """
  type = 'ANNOTATED_TEXT_CHANGED'

  def __init__(self, json, wavelet):
    super(AnnotatedTextChanged, self).__init__(json, wavelet)
    self.name = self.properties['name']
    self.value = self.properties.get('value')
class OperationError(Event):
  """Triggered when an error on the server occurred for an operation.

  Attributes:
    operation_id: The operation id of the failing operation.
    error_message: More information as to what went wrong.
  """
  type = 'OPERATION_ERROR'

  def __init__(self, json, wavelet):
    super(OperationError, self).__init__(json, wavelet)
    self.operation_id = self.properties['operationId']
    self.error_message = self.properties['message']
class WaveletCreated(Event):
  """Triggered when a new wavelet is created.

  This event is only triggered if the robot creates a new
  wavelet and can be used to initialize the newly created wave.
  wavelets created by other participants remain invisible
  to the robot until the robot is added to the wave in
  which case WaveletSelfAdded is triggered.

  Attributes:
    message: Whatever string was passed into the new_wave
        call as message (if any).
  """
  type = 'WAVELET_CREATED'

  def __init__(self, json, wavelet):
    super(WaveletCreated, self).__init__(json, wavelet)
    self.message = self.properties['message']
class WaveletFetched(Event):
  """Triggered when a new wavelet is fetched.

  This event is triggered after a robot requests to
  see another wavelet. The robot has to be on the other
  wavelet already.

  Attributes:
    message: Whatever string was passed into the new_wave
        call as message (if any).
  """
  type = 'WAVELET_FETCHED'

  def __init__(self, json, wavelet):
    super(WaveletFetched, self).__init__(json, wavelet)
    self.message = self.properties['message']
class WaveletTagsChanged(Event):
  """Event triggered when the tags on a wavelet change."""
  # The original defined an __init__ that only delegated to Event.__init__
  # with the same arguments; the inherited constructor is identical, so the
  # redundant override is removed.
  type = 'WAVELET_TAGS_CHANGED'
def is_event(cls):
  """Returns whether the passed class is a concrete event.

  A concrete event is a subclass of Event that declares a 'type' string.
  Non-class values (which make issubclass raise TypeError) are not events.
  """
  try:
    return issubclass(cls, Event) and hasattr(cls, 'type')
  except TypeError:
    # cls was not a class at all.
    return False
# Collect every concrete event class defined in this module, so callers can
# register handlers for all known event types at once.
ALL = [item for item in globals().copy().values() if is_event(item)]
| Python |
#! /usr/bin/python
# -*- coding: UTF-8 -*-
from notifiy.robot import create_robot
# Script entry point: instantiate and start serving the notifiy wave robot.
if __name__ == '__main__':
  create_robot()
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from notifiy.home import Home
from notifiy.proc import Process
from notifiy.proc_phone import PhoneProcess
from notifiy.receive_email import ReceiveEmail
# WSGI entry point: map each URL pattern to its App Engine request handler.
if __name__ == "__main__":
  run_wsgi_app(webapp.WSGIApplication([ ('/', Home),
                                        ('/proc/.*', Process),
                                        ('/phone/.*', PhoneProcess),
                                        ('/_ah/mail/.+', ReceiveEmail) ]))
| Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class Index(webapp.RequestHandler):
  """Redirects any GET request to the index.html under the requested path."""

  def get(self):
    self.redirect('%s/index.html' % self.request.path)
# WSGI entry point: every URL is served by the redirecting Index handler.
if __name__ == "__main__":
  run_wsgi_app(webapp.WSGIApplication([ ('.*', Index), ]))
| Python |
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
# Matches the 12-hex-digit short changeset hash that starts each line of
# `hg annotate -c` output.
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
'''
Runs an external process and returns its output as a list of lines.
@param args: the arguments to run
'''
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
universal_newlines = True,
shell = False)
output = process.communicate()[0]
return output.splitlines()
def FillMercurialRevisions(filename, parsed_file):
  '''
  Fills the revs attribute of all strings in the given parsed file with
  a list of revisions that touched the lines corresponding to that string.

  @param filename: the name of the file to get history for
  @param parsed_file: the parsed file to modify

  Raises:
    ValueError: if hg annotate produces a line we cannot parse.
  '''
  # Take output of hg annotate to get the revision of each line.
  output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
  # Map line number -> revision (index 0 is a placeholder; lines are 1-based).
  line_revs = ['dummy']
  for line in output_lines:
    rev_match = REVISION_REGEX.match(line)
    if not rev_match:
      # The original raised a plain string, which is a TypeError on
      # Python >= 2.6; raise a real exception instead.
      raise ValueError('Unexpected line of output from hg: %s' % line)
    line_revs.append(rev_match.group('hash'))
  # 'entry' instead of the original 'str', which shadowed the builtin.
  for entry in parsed_file.itervalues():
    # Collect the revisions that touched each line of this string's span.
    revs = [line_revs[line_number]
            for line_number in range(entry['startLine'], entry['endLine'] + 1)]
    # Merge with any revisions that were already there
    # (for explicit revision specification).
    if 'revs' in entry:
      revs += entry['revs']
    # Assign the revisions to the string.
    entry['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
  '''
  Tells whether a revision superceeds another.

  This essentially means that the older revision is an ancestor of the newer
  one.

  This also returns True if the two revisions are the same.

  @param filename: the file whose mercurial history is consulted
  @param rev1: the revision that may be superceeding the other
  @param rev2: the revision that may be superceeded
  @return: True if rev1 superceeds rev2 or they're the same
  '''
  if rev1 == rev2:
    return True
  # Ask hg for every ancestor of rev1 that touched this file; rev2 is
  # superseded iff it appears among them.
  args = ['hg', 'log', '-r', 'ancestors(%s)' % rev1, '--template', '{node|short}\n', filename]
  output_lines = _GetOutputLines(args)
  return rev2 in output_lines
def NewestRevision(filename, rev1, rev2):
  '''
  Return whichever of two revisions is closest to the head of the repository.

  If neither revision is an ancestor of the other, either one may be
  returned.

  @param filename: the file whose history is consulted
  @param rev1: the first revision
  @param rev2: the second revision
  '''
  return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0]
print 'Commands are:'
print ' cleanup'
print ' translate'
print ' validate'
sys.exit(1)
def Translate(languages):
  '''
  Asks the user to interactively translate any missing or outdated strings
  from the files for the given languages.

  @param languages: the languages to translate
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()
  missing = validator.missing_in_lang()
  outdated = validator.outdated_in_lang()
  for lang in languages:
    # A language with no recorded issues (notably the master 'en') has no
    # entry in these maps; the original indexing raised KeyError here.
    # list() also lets us concatenate: missing values are frozensets,
    # while outdated values are lists.
    untranslated = (list(missing.get(lang, [])) +
                    list(outdated.get(lang, [])))
    if len(untranslated) == 0:
      continue
    translator = mytracks.translate.Translator(lang)
    translator.Translate(untranslated)
def Validate(languages):
  '''
  Computes and displays errors in the string files for the given languages.

  @param languages: the languages to compute for
  @return: the number of errors found (0 when all files are OK)
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()
  error_count = 0
  if (validator.valid()):
    print 'All files OK'
  else:
    for lang, missing in validator.missing_in_master().iteritems():
      print 'Missing in master, present in %s: %s:' % (lang, str(missing))
      error_count = error_count + len(missing)
    for lang, missing in validator.missing_in_lang().iteritems():
      print 'Missing in %s, present in master: %s:' % (lang, str(missing))
      error_count = error_count + len(missing)
    for lang, outdated in validator.outdated_in_lang().iteritems():
      print 'Outdated in %s: %s:' % (lang, str(outdated))
      error_count = error_count + len(outdated)
  return error_count
if __name__ == '__main__':
argv = sys.argv
argc = len(argv)
if argc < 2:
Usage()
languages = mytracks.files.GetAllLanguageFiles()
if argc == 3:
langs = set(argv[2:])
if not langs.issubset(languages):
raise 'Language(s) not found'
# Filter just to the languages specified
languages = dict((lang, lang_file)
for lang, lang_file in languages.iteritems()
if lang in langs or lang == 'en' )
cmd = argv[1]
if cmd == 'translate':
Translate(languages)
elif cmd == 'validate':
error_count = Validate(languages)
else:
Usage()
error_count = 0
print '%d errors found.' % error_count
| Python |
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
  '''
  Interactive translation prompt for a single language.

  TODO: implement
  '''

  def __init__(self, language):
    '''
    Constructor.

    @param language: the language code this translator works on
    '''
    self._language = language

  def Translate(self, string_names):
    # Placeholder implementation: just echo the names needing translation.
    print string_names
'''
Module which compares languague files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
def __init__(self, languages):
'''
Builds a strings file validator.
Params:
@param languages: a dictionary mapping each language to its corresponding directory
'''
self._langs = {}
self._master = None
self._language_paths = languages
parser = StringsParser()
for lang, lang_dir in languages.iteritems():
filename = os.path.join(lang_dir, 'strings.xml')
parsed_file = parser.Parse(filename)
mytracks.history.FillMercurialRevisions(filename, parsed_file)
if lang == 'en':
self._master = parsed_file
else:
self._langs[lang] = parsed_file
self._Reset()
def Validate(self):
'''
Computes whether all the data in the files for the given languages is valid.
'''
self._Reset()
self._ValidateMissingKeys()
self._ValidateOutdatedKeys()
def valid(self):
return (len(self._missing_in_master) == 0 and
len(self._missing_in_lang) == 0 and
len(self._outdated_in_lang) == 0)
def missing_in_master(self):
return self._missing_in_master
def missing_in_lang(self):
return self._missing_in_lang
def outdated_in_lang(self):
return self._outdated_in_lang
def _Reset(self):
# These are maps from language to string name list
self._missing_in_master = {}
self._missing_in_lang = {}
self._outdated_in_lang = {}
def _ValidateMissingKeys(self):
'''
Computes whether there are missing keys on either side.
'''
master_keys = frozenset(self._master.iterkeys())
for lang, file in self._langs.iteritems():
keys = frozenset(file.iterkeys())
missing_in_master = keys - master_keys
missing_in_lang = master_keys - keys
if len(missing_in_master) > 0:
self._missing_in_master[lang] = missing_in_master
if len(missing_in_lang) > 0:
self._missing_in_lang[lang] = missing_in_lang
def _ValidateOutdatedKeys(self):
'''
Computers whether any of the language keys are outdated with relation to the
master keys.
'''
for lang, file in self._langs.iteritems():
outdated = []
for key, str in file.iteritems():
# Get all revisions that touched master and language files for this
# string.
master_str = self._master[key]
master_revs = master_str['revs']
lang_revs = str['revs']
if not master_revs or not lang_revs:
print 'WARNING: No revision for %s in %s' % (key, lang)
continue
master_file = os.path.join(self._language_paths['en'], 'strings.xml')
lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
# Assume that the repository has a single head (TODO: check that),
# and as such there is always one revision which superceeds all others.
master_rev = reduce(
lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
master_revs)
lang_rev = reduce(
lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
lang_revs)
# If the master version is newer than the lang version
if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
outdated.append(key)
if len(outdated) > 0:
self._outdated_in_lang[lang] = outdated
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Resource directory of the MyTracks Android project, relative to its root.
MYTRACKS_RES_DIR = 'MyTracks/res'
# Android resource directory holding the master (English) strings.
ANDROID_MASTER_VALUES = 'values'
# Glob mask matching the per-language Android resource directories.
ANDROID_VALUES_MASK = 'values-*'
def GetMyTracksDir():
  '''
  Returns the directory in which the MyTracks directory is located.

  Walks upwards from the current working directory until a directory
  containing MYTRACKS_RES_DIR is found.

  Raises:
    ValueError: if no enclosing directory contains the project.
  '''
  path = os.getcwd()
  while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
    # Go up one level; when the split no longer changes the path we have
    # reached the filesystem root. (The original compared against '/',
    # which never terminates on Windows drive roots, and raised a plain
    # string, which is a TypeError on Python >= 2.6.)
    parent = os.path.split(path)[0]
    if parent == path:
      raise ValueError('Not in My Tracks project')
    path = parent
  return path
def GetAllLanguageFiles():
  '''
  Returns a mapping from all found languages to their respective directories.

  The master language 'en' maps to the ANDROID_MASTER_VALUES directory;
  every other language code is derived from its values-<lang> directory name.

  Raises:
    ValueError: if no language directories or no master directory exist.
  '''
  mytracks_path = GetMyTracksDir()
  res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
  language_dirs = glob(res_dir)
  master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR,
                            ANDROID_MASTER_VALUES)
  if len(language_dirs) == 0:
    # The original raised plain strings here (TypeError on Python >= 2.6).
    raise ValueError('No languages found!')
  if not os.path.isdir(master_dir):
    raise ValueError('Couldn\'t find master file')
  # 'lang_dir' instead of the original 'dir', which shadowed the builtin.
  language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0],
                      lang_dir)
                     for lang_dir in language_dirs]
  language_tuples.append(('en', master_dir))
  return dict(language_tuples)
| Python |
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
'''
Parser for string XML files.
This object is not thread-safe and should be used for parsing a single file at
a time, only.
'''
def Parse(self, file):
  '''
  Parses the given file and returns a dictionary mapping keys to an object
  with attributes for that key, such as the value, start/end line and explicit
  revisions.

  In addition to the standard XML format of the strings file, this parser
  supports an annotation inside comments, in one of these formats:
  <!-- KEEP_PARENT name="bla" -->
  <!-- KEEP_PARENT name="bla" rev="123456789012" -->
  Such an annotation indicates that we're explicitly inheriting form the
  master file (and the optional revision says that this decision is compatible
  with the master file up to that revision).

  @param file: the name of the file to parse
  @return: dict mapping each string name to its parsed attributes
  '''
  self._Reset()
  # Unfortunately expat is the only parser that will give us line numbers
  self._xml_parser = ParserCreate()
  self._xml_parser.StartElementHandler = self._StartElementHandler
  self._xml_parser.EndElementHandler = self._EndElementHandler
  self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
  self._xml_parser.CommentHandler = self._CommentHandler
  file_obj = open(file)
  try:
    self._xml_parser.ParseFile(file_obj)
  finally:
    # Close the handle even when expat raises; the original leaked it.
    file_obj.close()
  return self._all_strings
def _Reset(self):
self._currentString = None
self._currentStringName = None
self._currentStringValue = None
self._all_strings = {}
def _StartElementHandler(self, name, attrs):
if name != 'string':
return
if 'name' not in attrs:
return
assert not self._currentString
assert not self._currentStringName
self._currentString = {
'startLine' : self._xml_parser.CurrentLineNumber,
}
if 'rev' in attrs:
self._currentString['revs'] = [attrs['rev']]
self._currentStringName = attrs['name']
self._currentStringValue = ''
def _EndElementHandler(self, name):
if name != 'string':
return
assert self._currentString
assert self._currentStringName
self._currentString['value'] = self._currentStringValue
self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
self._all_strings[self._currentStringName] = self._currentString
self._currentString = None
self._currentStringName = None
self._currentStringValue = None
def _CharacterDataHandler(self, data):
if not self._currentString:
return
self._currentStringValue += data
_KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
re.MULTILINE | re.DOTALL)
def _CommentHandler(self, data):
keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
if not keep_parent_match:
return
name = keep_parent_match.group('name')
self._all_strings[name] = {
'keepParent' : True,
'startLine' : self._xml_parser.CurrentLineNumber,
'endLine' : self._xml_parser.CurrentLineNumber
}
rev = keep_parent_match.group('rev')
if rev:
self._all_strings[name]['revs'] = [rev] | Python |
#
# Generate and deploy the jar and associated files to the Sonatype maven repository.
#
import os, re, tempfile, subprocess #, sys, datetime, zipfile
# Location of the source file that defines the current version
VERSION_FILE = '../src/com/caverock/androidsvg/SVG.java'
# Regex capturing the version number from the VERSION constant in SVG.java
VERSION_RE = '\sVERSION\s*=\s*"([\d.]+)"'
# Source pom file
ORIG_POM_FILE = 'src-pom.xml'
# Placeholder in the pom file where the version number is inserted (used as a
# regex pattern; the doubled braces are treated literally by Python's re)
POM_VERSION_RE = '{{VERSION}}'
# The jar file to be deployed
JAR_FILE = '../bin/androidsvg.jar'
# The dummy sources and javadoc jars
SOURCES_JAR_FILE = 'androidsvg-sources.jar'
JAVADOC_JAR_FILE = 'androidsvg-javadoc.jar'
def main():
# Get the current version number of the library
libraryVersion = get_current_version()
go = raw_input('\nDo maven deploy for version '+libraryVersion+'? (y/N): ')
if not go in ['Y','y']:
exit()
# Get GPG passphrase
#passphrase = raw_input('GPG passphrase: ')
#if passphrase == '':
# print "Exiting: need passphrase."
# exit()
# Create a temporary file to hold the generated pom file
print 'Creating POM file for this version...'
tempPomFile = tempfile.NamedTemporaryFile(suffix='.pom.xml', delete=False)
#print tempPomFile.name
# Write out a new pom file with the version number set to the latest version
srcPomFile = read(ORIG_POM_FILE)
tempPomFile.write(re.sub(POM_VERSION_RE, libraryVersion, srcPomFile))
tempPomFile.close()
# Sign and deploy the artifact
print '\nSigning and deploying artifact...'
basecmd = 'mvn gpg:sign-and-deploy-file'
basecmd += ' -DpomFile=' + tempPomFile.name
basecmd += ' -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/'
basecmd += ' -DrepositoryId=sonatype-nexus-staging'
#basecmd += ' -Dpassphrase=' + passphrase
cmd = basecmd
cmd += ' -Dfile=' + os.path.realpath(JAR_FILE)
print cmd
os.system(cmd)
# Sign and deploy the dummy sources
print '\nSigning and deploying sources jar...'
cmd = basecmd
cmd += ' -Dfile=' + os.path.realpath(SOURCES_JAR_FILE)
cmd += ' -Dclassifier=sources'
print cmd
os.system(cmd)
# Sign and deploy the dummy javadoc
print '\nSigning and deploying javadoc jar...'
cmd = basecmd
cmd += ' -Dfile=' + os.path.realpath(JAVADOC_JAR_FILE)
cmd += ' -Dclassifier=javadoc'
print cmd
os.system(cmd)
# Done
print '\nDone!'
def read(src):
    """Return the entire binary content of the file at path src.

    The path is resolved with os.path.realpath so that relative paths and
    symlinks behave the same way as in the deploy commands.
    """
    # BUG FIX: the old version shadowed the builtins 'file' and 'str' and
    # leaked the handle if read() raised; a with-statement closes it reliably.
    with open(os.path.realpath(src), "rb") as fh:
        return fh.read()
def get_current_version():
    """Return the library version parsed from SVG.java, or '' when absent."""
    source = read(VERSION_FILE)
    match = re.search(VERSION_RE, source)
    if not match:
        return ""
    return match.group(1)
def error(msg):
print "ERROR: "+ msg
exit()
if __name__ == "__main__":
main()
| Python |
import Download
# Seed URL list for the crawl.
pagelist = ['http://www.economist.com/']

print("Starting ....")
# NOTE(review): the constructor argument appears unused configuration --
# confirm against Download.crawler.
crawler = Download.crawler('')
crawler.crawl(pagelist)
| Python |
import urllib2
from BeautifulSoup import BeautifulSoup
from urlparse import urljoin
class crawler(object):
"""docstring for crawler"""
ignorewords=set(['the','of','to','and','a','in','is','it'])
index=dict()
indexFileName="F:\Programming\Sublime\Web Crawler\Index.Txt"
def __init__(self, arg):
super(crawler, self).__init__()
self.arg = arg
self.loadIndex()
return
def crawl(self,pages,depth=2):
temp=[]
temp.extend(pages)
print("begin of loop0")
for i in range(depth):
print("Crawling ..."+str(i))
print("Pages Length"+str(len(temp)))
newpages=[]
print("begin of loop1")
for page in temp:
print("Looking for links in "+page)
try:
c=urllib2.urlopen(page)
except:
print "Could not open %s" % page
continue
soup=BeautifulSoup(c.read( ))
self.addtoindex(page,soup)
links=soup('a')
print("begin of loop2")
for link in links:
if ('href' in dict(link.attrs)):
url=urljoin(page,link['href'])
print("Checking " + url)
if url.find("'")!=-1:
continue
url=url.split('#')[0] # remove location portion
if url[0:4]=='http' and not self.isindexed(url):
newpages.append(url)
print("end of loop2")
print("New pages Length:" + str(len(newpages)))
temp=newpages
print("end of loop1")
self.dbcommit()
def addtoindex(self,page,soup) :
print ("Indexing "+page)
self.index[page]=len(self.index)+1
def addlinkref(self,page,url,linkText):
print('')
def dbcommit(self):
print("Saving index to " + self.indexFileName)
f=open(self.indexFileName,'w')
for page in self.index.keys():
print(page+"\n")
f.write(page + "\n")
f.close()
def isindexed(self,url):
return (self.index.has_key(url))
def loadIndex(self):
print("Loading Index ...")
try:
f=open(self.indexFileName,'r')
except:
f=open(self.indexFileName,'r+')
print("Warning: Index was empty")
lines = (line.rstrip() for line in f)
lines = (line for line in lines if line)
for line in lines:
self.index[line]=len(self.index)+1
f.close()
return
| Python |
import urllib
import re
urlArg = 'http://www.yahoo.com'
filehandle = urllib.urlopen(urlArg)
for lines in filehandle.readlines():
m = re.search('http\://[\w\./]+/\w+',lines)
if m:
print m.group(0)
filehandle.close()
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main program for Rietveld.
This is also a template for running a Django app under Google App
Engine, especially when using a newer version of Django than provided
in the App Engine standard library.
The site-specific code is all in other files: urls.py, models.py,
views.py, settings.py.
"""
# Standard Python imports.
import os
import sys
import logging
# Log a message each time this module get loaded.
logging.info('Loading %s, app version = %s',
__name__, os.getenv('CURRENT_VERSION_ID'))
import appengine_config
# AppEngine imports.
from google.appengine.ext.webapp import util
# Import webapp.template. This makes most Django setup issues go away.
from google.appengine.ext.webapp import template
# Import various parts of Django.
import django.core.handlers.wsgi
import django.core.signals
import django.db
import django.dispatch.dispatcher
import django.forms
def log_exception(*args, **kwds):
    """Django signal handler: log the exception currently being handled."""
    exc_class, exc_value = sys.exc_info()[:2]
    logging.exception(
        'Exception in request: %s: %s', exc_class.__name__, exc_value)
# Log all exceptions detected by Django.
django.core.signals.got_request_exception.connect(log_exception)
# Unregister Django's default rollback event handler; Rietveld handles the
# datastore itself, so Django's DB rollback must not run.
django.core.signals.got_request_exception.disconnect(
    django.db._rollback_on_exception)
# Create a Django application for WSGI.
application = django.core.handlers.wsgi.WSGIHandler()
def real_main():
    """Main program: serve the Django WSGI application via App Engine's
    CGI/WSGI bridge."""
    # Run the WSGI CGI handler with that application.
    util.run_wsgi_app(application)
def profile_main():
"""Main program for profiling."""
import cProfile
import pstats
import StringIO
prof = cProfile.Profile()
prof = prof.runctx('real_main()', globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
# stats.strip_dirs() # Don't; too many modules are named __init__.py.
stats.sort_stats('time') # 'time', 'cumulative' or 'calls'
stats.print_stats() # Optional arg: how many to print
# The rest is optional.
# stats.print_callees()
# stats.print_callers()
print '\n<hr>'
print '<h1>Profile</h1>'
print '<pre>'
print stream.getvalue()[:1000000]
print '</pre>'
# Set this to profile_main to enable profiling.
main = real_main

if __name__ == '__main__':
    # Entry point when App Engine executes this module as a CGI script.
    main()
| Python |
# Removes duplicate nicknames (issue99).
#
# To run this script:
# - Make sure App Engine library (incl. yaml) is in PYTHONPATH.
# - Make sure that the remote API is included in app.yaml.
# - Run "tools/appengine_console.py APP_ID".
# - Import this module.
# - update_accounts.run() updates accounts.
# - Use the other two functions to fetch accounts or find duplicates
# without any changes to the datastore.
from google.appengine.ext import db
from codereview import models
def fetch_accounts():
    """Fetch every Account entity, grouped by lower-cased nickname.

    Returns:
      dict mapping lower_nickname -> list of Account entities sharing it.
    """
    query = models.Account.all()
    accounts = {}
    results = query.fetch(100)
    while results:
        last = None
        for account in results:
            if account.lower_nickname in accounts:
                accounts[account.lower_nickname].append(account)
            else:
                accounts[account.lower_nickname] = [account]
            last = account
        if last is None:
            break
        # Key-based pagination: fetch the next page after the last seen key.
        # NOTE(review): no explicit .order('__key__') is set, so this relies
        # on the datastore's default key ordering -- confirm this holds.
        results = models.Account.all().filter('__key__ >',
                                              last.key()).fetch(100)
    return accounts
def find_duplicates(accounts):
    """Consume the accounts mapping and return the accounts needing renames.

    Within each bucket of accounts sharing a lower-cased nickname, the first
    entry keeps its name (it's the lucky one); every later one gets a numeric
    suffix appended.
    """
    to_update = []
    while accounts:
        _, bucket = accounts.popitem()
        if len(bucket) <= 1:
            continue
        suffix = 0
        for account in bucket[1:]:
            suffix += 1
            account.nickname = '%s%d' % (account.nickname, suffix)
            account.lower_nickname = account.nickname.lower()
            account.fresh = True  # makes the UI show "change nickname..."
            to_update.append(account)
    return to_update
def run():
accounts = fetch_accounts()
print '%d accounts fetched' % len(accounts)
tbd = find_duplicates(accounts)
print 'Updating %d accounts' % len(tbd)
db.put(tbd)
print 'Updated accounts:'
for account in tbd:
print ' %s' % account.email
| Python |
"""Configuration."""
import logging
import os
import re
from google.appengine.ext.appstats import recording
logging.info('Loading %s from %s', __name__, __file__)
# Custom webapp middleware to add Appstats.
def webapp_add_wsgi_middleware(app):
    """Wrap the WSGI application with the Appstats recording middleware."""
    wrapped = recording.appstats_wsgi_middleware(app)
    return wrapped
# Custom Appstats path normalization.
def appstats_normalize_path(path):
    """Collapse request paths into a small set of Appstats bucket names.

    User pages, diff/patch views and RSS feeds each map to a fixed bucket;
    any remaining digits are replaced with 'X'.
    """
    for prefix, bucket in (('/user/', '/user/X'),
                           ('/user_popup/', '/user_popup/X')):
        if path.startswith(prefix):
            return bucket
    for marker, bucket in (('/diff/', '/X/diff/...'),
                           ('/diff2/', '/X/diff2/...'),
                           ('/patch/', '/X/patch/...')):
        if marker in path:
            return bucket
    if path.startswith('/rss/'):
        slash = path.find('/', 5)
        if slash > 0:
            return path[:slash] + '/X'
    return re.sub(r'\d+', 'X', path)
# Segregate Appstats by runtime (python vs. python27) so the two runtimes
# don't share recorded data under one namespace.
appstats_KEY_NAMESPACE = '__appstats_%s__' % os.getenv('APPENGINE_RUNTIME')

# Django 1.2+ requires DJANGO_SETTINGS_MODULE environment variable to be set
# http://code.google.com/appengine/docs/python/tools/libraries.html#Django
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

# NOTE: All "main" scripts must import webapp.template before django.
| Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utils."""
import os
from google.appengine.ext import testbed
from django.test import TestCase as _TestCase
FILES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files')
class TestCase(_TestCase):
    """Customized Django TestCase.

    This class disables the setup of Django features that are not
    available on App Engine (e.g. fixture loading). And it initializes
    the Testbed class provided by the App Engine SDK.
    """

    def _fixture_setup(self):  # defined in django.test.TestCase
        # Fixture loading needs a real Django database; no-op on App Engine.
        pass

    def _fixture_teardown(self):  # defined in django.test.TestCase
        pass

    def setUp(self):
        super(TestCase, self).setUp()
        # Fresh stubbed-out datastore and users service for every test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_user_stub()

    def tearDown(self):
        self.testbed.deactivate()
        super(TestCase, self).tearDown()

    def login(self, email):
        """Logs in a user identified by email."""
        # The users API stub reads the current user from this variable.
        os.environ['USER_EMAIL'] = email

    def logout(self):
        """Logs the user out."""
        os.environ['USER_EMAIL'] = ''
def load_file(fname):
    """Read the named fixture file from FILES_DIR and return its content."""
    # BUG FIX: the old version never closed the file handle.
    with open(os.path.join(FILES_DIR, fname)) as fh:
        return fh.read()
| Python |
#!/usr/bin/env python
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test case runner."""
import os
import re
import sys
import unittest
TESTS_DIR = os.path.dirname(__file__)
def collect_test_modules():
"""Collects and yields test modules."""
for fname in os.listdir(TESTS_DIR):
if not re.match(r'test_.*\.py$', fname):
continue
try:
yield __import__(fname[:-3])
except ImportError, err:
sys.stderr.write('Failed to import %s: %s\n' % (fname, err))
raise StopIteration
def setup_test_env(sdk_path):
    """Sets up App Engine/Django test environment.

    Args:
      sdk_path: filesystem path of the App Engine SDK checkout.
    """
    # Make the application package and the SDK importable; insertion order
    # matters so the app's own modules win over same-named SDK modules.
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
    sys.path.insert(0, sdk_path)
    import dev_appserver
    dev_appserver.fix_sys_path()
    # google.appengine.ext.testbed.Testbed should set SERVER_SOFTWARE
    # and APPLICATION_ID environment variables, but we need them
    # earlier when Django import settings.py.
    os.environ['SERVER_SOFTWARE'] = 'DevTestrunner'  # used in settings.py
    os.environ['APPLICATION_ID'] = 'test-codereview'  # used in settings.py
    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
    # Provide a dummy value for REQUEST_ID_HASH in environment. This is
    # needed for now to make appstats happy (see comments on
    # http://codereview.appspot.com/5305060/).
    os.environ['REQUEST_ID_HASH'] = 'testing'
    # Pin the Django version bundled with the SDK.
    from google.appengine.dist import use_library
    use_library('django', '1.2')
def main():
    """Build one test suite from all collected test modules and run it."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for module in collect_test_modules():
        suite.addTests(loader.loadTestsFromModule(module))
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
if __name__ == '__main__':
    # Default to a sibling ../google_appengine checkout when no SDK path is
    # given on the command line; otherwise require exactly one argument.
    if len(sys.argv) != 2:
        sdk_path = os.path.join('..', 'google_appengine')
        if not os.path.exists(os.path.join(sdk_path, 'dev_appserver.py')):
            sys.stderr.write('usage: %s SDK_PATH\n' % sys.argv[0])
            sys.exit(1)
    else:
        sdk_path = sys.argv[1]
    setup_test_env(sdk_path)
    main()
| Python |
# Re-puts entities of a given type, to set newly added properties.
#
# To run this script:
# - Make sure App Engine library (incl. yaml) is in PYTHONPATH.
# - Make sure that the remote API is included in app.yaml.
# - Run "tools/appengine_console.py APP_ID".
# - Import this module.
# - Import models from codereview.
# - update_entities.run(models.Issue) updates issues.
import logging
from google.appengine.ext import db
from codereview import models
import urllib2
def run(model_class, batch_size=100, last_key=None):
    """Re-put all entities of model_class in key order, batch by batch.

    Re-putting makes the datastore fill in newly added properties with
    their defaults.  Fetch batches shrink on timeouts, and puts are retried
    until they succeed.

    Args:
      model_class: the db.Model subclass whose entities to rewrite.
      batch_size: initial number of entities fetched per round trip.
      last_key: resume after this key (e.g. from a previous partial run).
    """
    while True:
        q = model_class.all()
        if last_key:
            q.filter('__key__ >', last_key)
        q.order('__key__')
        this_batch_size = batch_size
        # Inner loop: fetch one batch, halving the batch size on timeout.
        while True:
            try:
                try:
                    batch = q.fetch(this_batch_size)
                except urllib2.URLError, err:
                    # The remote API surfaces deadline problems as URLError;
                    # translate timeouts into db.Timeout so a single handler
                    # below covers both cases.
                    if 'timed out' in str(err):
                        raise db.Timeout
                    else:
                        raise
                break
            except db.Timeout:
                logging.warn("Query timed out, retrying")
                if this_batch_size == 1:
                    # Even a single entity does not fit the deadline; give up.
                    logging.critical("Unable to update entities, aborting")
                    return
                this_batch_size //= 2
        if not batch:
            break
        # Retry the put until it succeeds; keys is non-empty afterwards.
        keys = None
        while not keys:
            try:
                keys = db.put(batch)
            except db.Timeout:
                logging.warn("Put timed out, retrying")
        last_key = keys[-1]
        print "Updated %d records" % (len(keys),)
| Python |
#!/usr/bin/env python
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import code
import getpass
import logging
import optparse
import os
import re
import sys
ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
LIB = os.path.join(ROOT, '..', 'google_appengine', 'lib')
sys.path.insert(0, os.path.join(ROOT, '..', 'google_appengine'))
sys.path.append(os.path.join(LIB, 'django_1_2'))
sys.path.append(os.path.join(LIB, 'fancy_urllib'))
sys.path.append(os.path.join(LIB, 'simplejson'))
sys.path.append(os.path.join(LIB, 'webob'))
sys.path.append(os.path.join(LIB, 'yaml', 'lib'))
sys.path.append(ROOT)
from google.appengine.ext.remote_api import remote_api_stub
import yaml
def default_auth_func():
    """Prompt for credentials, preferring $EMAIL_ADDRESS for the username."""
    user = os.environ.get('EMAIL_ADDRESS')
    if not user:
        user = raw_input('Username:')
    else:
        print('User: %s' % user)
    password = getpass.getpass('Password:')
    return user, password
def smart_auth_func():
    """Try $EMAIL_ADDRESS plus a local .pwd file before prompting."""
    try:
        email = os.environ['EMAIL_ADDRESS']
        password = open('.pwd').readline().strip()
        return email, password
    except (KeyError, IOError):
        # Either piece is missing; fall back to interactive prompts.
        return default_auth_func()
def default_app_id(directory):
    """Return the 'application' id declared in directory's app.yaml."""
    # BUG FIX: the old version never closed the file handle.
    with open(os.path.join(directory, 'app.yaml')) as fh:
        # NOTE(review): yaml.load without an explicit safe Loader is unsafe on
        # untrusted input; app.yaml is developer-controlled here, but consider
        # yaml.safe_load.
        return yaml.load(fh)['application']
def setup_env(app_id, host=None, auth_func=None):
    """Setup remote access to a GAE instance.

    Args:
      app_id: App Engine application id to connect to.
      host: hostname to contact; defaults to <app_id>.appspot.com.
      auth_func: callable returning (email, password); defaults to
        smart_auth_func.

    Returns:
      dict of names to expose in the interactive console.
    """
    auth_func = auth_func or smart_auth_func
    host = host or '%s.appspot.com' % app_id
    # These imports look unused, but they are deliberately pulled into this
    # function's locals() so they become console symbols below.
    # pylint: disable=W0612
    from google.appengine.api import memcache
    from google.appengine.api.users import User
    from google.appengine.ext import db
    remote_api_stub.ConfigureRemoteDatastore(
        app_id, '/_ah/remote_api', auth_func, host)
    # Initialize environment.
    os.environ['SERVER_SOFTWARE'] = ''
    import appengine_config
    # Create shortcuts.
    import codereview
    from codereview import models, views
    # Symbols presented to the user: every local except the two deleted below.
    # (Renaming any local in this function changes the console's namespace.)
    predefined_vars = locals().copy()
    del predefined_vars['appengine_config']
    del predefined_vars['auth_func']
    # Load all the models (classes whose names start LikeThis).
    for i in dir(models):
        if re.match(r'[A-Z][a-z]', i[:2]):
            predefined_vars[i] = getattr(models, i)
    return predefined_vars
def main():
    """Parse arguments and drop into an interactive remote-API console."""
    parser = optparse.OptionParser()
    parser.add_option('-v', '--verbose', action='count')
    options, args = parser.parse_args()

    # Positional args: [app_id [host]]; app_id defaults to the local app.yaml.
    app_id = args[0] if args else default_app_id(ROOT)
    host = args[1] if len(args) > 1 else None

    level = logging.DEBUG if options.verbose else logging.ERROR
    logging.basicConfig(level=level)

    predefined_vars = setup_env(app_id, host)
    prompt = (
        'App Engine interactive console for "%s".\n'
        'Available symbols:\n'
        ' %s\n') % (app_id, ', '.join(sorted(predefined_vars)))
    code.interact(prompt, None, predefined_vars)
if __name__ == '__main__':
sys.exit(main())
| Python |
#!/usr/bin/env python
import os
import sys
from optparse import HelpFormatter
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT)
import upload
class GCWikiHelpFormatter (HelpFormatter):
    """Format help with wiki markup for Google Code.

    Headings open a <dl> definition list; the next heading closes the
    previous list before starting its own.
    """

    def __init__(self,
                 indent_increment=2,
                 max_help_position=24,
                 width=None,
                 short_first=1):
        HelpFormatter.__init__(
            self, indent_increment, max_help_position, width, short_first)
        self._dl_open = False  # True while a <dl> is awaiting its </dl>

    def indent(self):
        self._pending = 'INDENT'
        HelpFormatter.indent(self)

    def dedent(self):
        self._pending = 'DEDENT'
        HelpFormatter.dedent(self)

    def format_usage(self, usage):
        return "*Usage summary:* `%s`\n" % usage

    def format_heading(self, heading):
        closer = '\n</dl>\n' if self._dl_open else ''
        bars = '=' * (self.current_indent + 2)
        self._dl_open = True
        return "%s%s %s %s\n<dl>\n" % (closer, bars, heading, bars)

    def format_option(self, option):
        parts = ['<dt>`%s`</dt>\n' % self.option_strings[option]]
        if option.help:
            parts.append('<dd>%s</dd>\n' % self.expand_default(option))
        return ''.join(parts)
def main():
upload.parser.formatter = GCWikiHelpFormatter()
print HEADER
print upload.parser.format_option_help()
print '</dl>' # TODO: Formatter should do this
print FOOTER
print
HEADER = """#summary upload.py usage and options.
<wiki:comment>
THIS PAGE IS AUTOGENERATED. DO NOT EDIT.
To update this page run tools/uploadopts2wiki.py
</wiki:comment>
= upload.py Usage =
[http://codereview.appspot.com/static/upload.py upload.py] is a tool
for uploading diffs from a version control system to the codereview app.
*Usage summary:*
{{{upload.py [options] [-- diff_options]}}}
Diff options are passed to the diff command of the underlying system.
*Supported version control systems:*
* Git
* Mercurial
* Subversion
* Perforce
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
FOOTER = """\
==== Running upload.py from perforce GUIs ====
You can right click on a perforce changelist and create a new Rietveld code review by adding a custom tool with the following settings:
Application: python Arguments: /PATH/TO/upload.py -s MY_SERVER --p4_changelist %p --p4_port $p --p4_user $u --p4_client $c Start In: empty. Check "Add to applicable context menus", "Run tool in terminal window" (or system equivalent), and "Ignore P4CONFIG files".
Replace /PATH/TO/ with the location of upload.py, and MY_SERVER with the rietveld code review server. See screenshot [http://alexmccarthy.net/Rietveld%20-%20P4V%20Custom%20Tool%20Settings.png here].
"""
if __name__ == '__main__':
main()
| Python |
# Copyright 2008-2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Top-level URL mappings for Rietveld."""
# NOTE: Must import *, since Django looks for things here, e.g. handler500.
from django.conf.urls.defaults import *
# If you don't want to run Rietveld from the root level, add the
# subdirectory as shown in the following example:
#
# url(r'subpath/', include('codereview.urls')),
#
# Delegate every URL to the codereview application's own urls.py.
urlpatterns = patterns(
    '',
    url(r'', include('codereview.urls')),
)
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal Django settings."""
import os
from google.appengine.api import app_identity
# Banner for e.g. planned downtime announcements
## SPECIAL_BANNER = """\
## Rietveld will be down for maintenance on
## Thursday November 17
## from
## <a href="http://www.timeanddate.com/worldclock/fixedtime.html?iso=20111117T17&ah=6">
## 17:00 - 23:00 UTC
## </a>
## """
APPEND_SLASH = False
# True when running on the dev server.
# NOTE(review): raises KeyError if SERVER_SOFTWARE is unset; assumed to always
# be present under App Engine -- confirm for any non-GAE import of this module.
DEBUG = os.environ['SERVER_SOFTWARE'].startswith('Dev')
INSTALLED_APPS = (
    'codereview',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.http.ConditionalGetMiddleware',
    'codereview.middleware.AddUserToRequestMiddleware',
    'codereview.middleware.PropagateExceptionMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
)
TEMPLATE_DEBUG = DEBUG
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), 'templates'),
)
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
)
# Keep uploads in memory; App Engine has no writable local filesystem.
FILE_UPLOAD_HANDLERS = (
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
FILE_UPLOAD_MAX_MEMORY_SIZE = 1048576  # 1 MB
MEDIA_URL = '/static/'
# The reply-by-email address is derived from the application id.
appid = app_identity.get_application_id()
RIETVELD_INCOMING_MAIL_ADDRESS = ('reply@%s.appspotmail.com' % appid)
RIETVELD_INCOMING_MAIL_MAX_SIZE = 500 * 1024  # 500K
RIETVELD_REVISION = '<unknown>'
try:
    # Deployment writes the current VCS revision into a REVISION file next to
    # this module; fall back to '<unknown>' when it is absent (e.g. dev).
    _revision_file = open(
        os.path.join(os.path.dirname(__file__), 'REVISION'))
    try:
        RIETVELD_REVISION = _revision_file.read()
    finally:
        # BUG FIX: the old one-liner leaked the file handle.
        _revision_file.close()
except IOError:
    # Narrowed from a bare 'except:'; only a missing/unreadable file is
    # an expected condition here.
    pass
# Path of the upload.py script served to users for download.
UPLOAD_PY_SOURCE = os.path.join(os.path.dirname(__file__), 'upload.py')

# Default values for patch rendering
DEFAULT_CONTEXT = 10  # lines of context shown around a diff chunk
DEFAULT_COLUMN_WIDTH = 80
MIN_COLUMN_WIDTH = 3
MAX_COLUMN_WIDTH = 2000
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine data model (schema) definition for Rietveld."""
import logging
import md5
import os
import re
import time
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import db
from django.conf import settings
from codereview import patching
from codereview import utils
from codereview.exceptions import FetchError
CONTEXT_CHOICES = (3, 10, 25, 50, 75, 100)
### GQL query cache ###
# Maps full GQL query strings to their compiled db.GqlQuery objects.
_query_cache = {}


def gql(cls, clause, *args, **kwds):
    """Return a query object, from the cache if possible.

    Args:
      cls: a db.Model subclass.
      clause: a query clause, e.g. 'WHERE draft = TRUE'.
      *args, **kwds: positional and keyword arguments to be bound to the query.

    Returns:
      A db.GqlQuery instance corresponding to the query with *args and
      **kwds bound to the query.
    """
    query_string = 'SELECT * FROM %s %s' % (cls.kind(), clause)
    try:
        query = _query_cache[query_string]
    except KeyError:
        query = db.GqlQuery(query_string)
        _query_cache[query_string] = query
    query.bind(*args, **kwds)
    return query
### Issues, PatchSets, Patches, Contents, Comments, Messages ###
class Issue(db.Model):
    """The major top-level entity.

    It has one or more PatchSets as its descendants.
    """

    subject = db.StringProperty(required=True)
    description = db.TextProperty()
    #: in Subversion - repository path (URL) for files in patch set
    base = db.StringProperty()
    #: if True then base files for patches were uploaded with upload.py
    #: (if False - then Rietveld attempts to download them from server)
    local_base = db.BooleanProperty(default=False)
    repo_guid = db.StringProperty()
    owner = db.UserProperty(auto_current_user_add=True, required=True)
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now=True)
    reviewers = db.ListProperty(db.Email)
    cc = db.ListProperty(db.Email)
    closed = db.BooleanProperty(default=False)
    private = db.BooleanProperty(default=False)
    # Cached count of non-draft comments; None for entities created before
    # this property existed (see num_comments below).
    n_comments = db.IntegerProperty()

    # Per-instance memo for is_starred; None means "not computed yet".
    _is_starred = None

    @property
    def is_starred(self):
        """Whether the current user has this issue starred."""
        if self._is_starred is not None:
            return self._is_starred
        account = Account.current_user_account
        self._is_starred = account is not None and self.key().id() in account.stars
        return self._is_starred

    def user_can_edit(self, user):
        """Return true if the given user has permission to edit this issue."""
        # Only the issue owner may edit.
        return user == self.owner

    @property
    def edit_allowed(self):
        """Whether the current user can edit this issue."""
        account = Account.current_user_account
        if account is None:
            # Anonymous users may never edit.
            return False
        return self.user_can_edit(account.user)

    def update_comment_count(self, n):
        """Increment the n_comments property by n.

        If n_comments is None, compute the count through a query. (This
        is a transitional strategy while the database contains Issues
        created using a previous version of the schema.)
        """
        if self.n_comments is None:
            self.n_comments = self._get_num_comments()
        self.n_comments += n

    @property
    def num_comments(self):
        """The number of non-draft comments for this issue.

        This is almost an alias for self.n_comments, except that if
        n_comments is None, it is computed through a query, and stored,
        using n_comments as a cache.
        """
        if self.n_comments is None:
            self.n_comments = self._get_num_comments()
        return self.n_comments

    def _get_num_comments(self):
        """Helper to compute the number of comments through a query."""
        return gql(Comment,
                   'WHERE ANCESTOR IS :1 AND draft = FALSE',
                   self).count()

    # Per-instance memo for num_drafts; None means "not computed yet".
    _num_drafts = None

    @property
    def num_drafts(self):
        """The number of draft comments on this issue for the current user.

        The value is expensive to compute, so it is cached.
        """
        if self._num_drafts is None:
            account = Account.current_user_account
            if account is None:
                # Anonymous users have no drafts.
                self._num_drafts = 0
            else:
                query = gql(Comment,
                            'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE',
                            self, account.user)
                self._num_drafts = query.count()
        return self._num_drafts
class PatchSet(db.Model):
    """A set of patchset uploaded together.

    This is a descendant of an Issue and has Patches as descendants.
    """

    issue = db.ReferenceProperty(Issue)  # == parent
    message = db.StringProperty()
    # Raw uploaded patch data for the whole set.
    data = db.BlobProperty()
    url = db.LinkProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now=True)
    n_comments = db.IntegerProperty(default=0)

    def update_comment_count(self, n):
        """Increment the n_comments property by n."""
        self.n_comments = self.num_comments + n

    @property
    def num_comments(self):
        """The number of non-draft comments for this issue.

        This is almost an alias for self.n_comments, except that if
        n_comments is None, 0 is returned.
        """
        # For older patchsets n_comments is None.
        return self.n_comments or 0
class Message(db.Model):
    """A copy of a message sent out in email.

    This is a descendant of an Issue.
    """

    issue = db.ReferenceProperty(Issue)  # == parent
    subject = db.StringProperty()
    sender = db.EmailProperty()
    recipients = db.ListProperty(db.Email)
    date = db.DateTimeProperty(auto_now_add=True)
    text = db.TextProperty()
    draft = db.BooleanProperty(default=False)
    in_reply_to = db.SelfReferenceProperty()

    # Lazily computed caches for the approval/disapproval properties.
    _approval = None
    _disapproval = None

    def find(self, text):
        """Returns True when the message says text and is not written by the issue owner."""
        # Must not be issue owner.
        if self.issue.owner.email() == self.sender:
            return False
        # Must contain text in a line that doesn't start with '>'.
        for line in self.text.lower().splitlines():
            if not line.strip().startswith('>') and text in line:
                return True
        return False

    @property
    def approval(self):
        """Is True when the message represents an approval of the review."""
        if self._approval is None:
            says_lgtm = self.find('lgtm')
            says_not_lgtm = self.find('not lgtm')
            self._approval = says_lgtm and not says_not_lgtm
        return self._approval

    @property
    def disapproval(self):
        """Is True when the message represents a disapproval of the review."""
        if self._disapproval is None:
            self._disapproval = self.find('not lgtm')
        return self._disapproval
class Content(db.Model):
    """The content of a text file.

    This is a descendant of a Patch.
    """

    # parent => Patch
    text = db.TextProperty()
    data = db.BlobProperty()
    # Checksum over text or data depending on the type of this content.
    checksum = db.TextProperty()
    is_uploaded = db.BooleanProperty(default=False)
    is_bad = db.BooleanProperty(default=False)
    file_too_large = db.BooleanProperty(default=False)

    @property
    def lines(self):
        """The text split into lines, retaining line endings."""
        body = self.text
        return body.splitlines(True) if body else []
class Patch(db.Model):
"""A single patch, i.e. a set of changes to a single file.
This is a descendant of a PatchSet.
"""
patchset = db.ReferenceProperty(PatchSet) # == parent
filename = db.StringProperty()
status = db.StringProperty() # 'A', 'A +', 'M', 'D' etc
text = db.TextProperty()
content = db.ReferenceProperty(Content)
patched_content = db.ReferenceProperty(Content, collection_name='patch2_set')
is_binary = db.BooleanProperty(default=False)
# Ids of patchsets that have a different version of this file.
delta = db.ListProperty(int)
delta_calculated = db.BooleanProperty(default=False)
_lines = None
@property
def lines(self):
"""The patch split into lines, retaining line endings.
The value is cached.
"""
if self._lines is not None:
return self._lines
if not self.text:
lines = []
else:
lines = self.text.splitlines(True)
self._lines = lines
return lines
_property_changes = None
@property
def property_changes(self):
"""The property changes split into lines.
The value is cached.
"""
if self._property_changes != None:
return self._property_changes
self._property_changes = []
match = re.search('^Property changes on.*\n'+'_'*67+'$', self.text,
re.MULTILINE)
if match:
self._property_changes = self.text[match.end():].splitlines()
return self._property_changes
_num_added = None
@property
def num_added(self):
"""The number of line additions in this patch.
The value is cached.
"""
if self._num_added is None:
self._num_added = self.count_startswith('+') - 1
return self._num_added
_num_removed = None
@property
def num_removed(self):
"""The number of line removals in this patch.
The value is cached.
"""
if self._num_removed is None:
self._num_removed = self.count_startswith('-') - 1
return self._num_removed
_num_chunks = None
@property
def num_chunks(self):
"""The number of 'chunks' in this patch.
A chunk is a block of lines starting with '@@'.
The value is cached.
"""
if self._num_chunks is None:
self._num_chunks = self.count_startswith('@@')
return self._num_chunks
_num_comments = None
@property
def num_comments(self):
"""The number of non-draft comments for this patch.
The value is cached.
"""
if self._num_comments is None:
self._num_comments = gql(Comment,
'WHERE patch = :1 AND draft = FALSE',
self).count()
return self._num_comments
_num_drafts = None
@property
def num_drafts(self):
"""The number of draft comments on this patch for the current user.
The value is expensive to compute, so it is cached.
"""
if self._num_drafts is None:
account = Account.current_user_account
if account is None:
self._num_drafts = 0
else:
query = gql(Comment,
'WHERE patch = :1 AND draft = TRUE AND author = :2',
self, account.user)
self._num_drafts = query.count()
return self._num_drafts
def count_startswith(self, prefix):
"""Returns the number of lines with the specified prefix."""
return len([l for l in self.lines if l.startswith(prefix)])
def get_content(self):
"""Get self.content, or fetch it if necessary.
This is the content of the file to which this patch is relative.
Returns:
a Content instance.
Raises:
FetchError: If there was a problem fetching it.
"""
try:
if self.content is not None:
if self.content.is_bad:
msg = 'Bad content. Try to upload again.'
logging.warn('Patch.get_content: %s', msg)
raise FetchError(msg)
if self.content.is_uploaded and self.content.text == None:
msg = 'Upload in progress.'
logging.warn('Patch.get_content: %s', msg)
raise FetchError(msg)
else:
return self.content
except db.Error:
# This may happen when a Content entity was deleted behind our back.
self.content = None
content = self.fetch_base()
content.put()
self.content = content
self.put()
return content
def get_patched_content(self):
"""Get self.patched_content, computing it if necessary.
This is the content of the file after applying this patch.
Returns:
a Content instance.
Raises:
FetchError: If there was a problem fetching the old content.
"""
try:
if self.patched_content is not None:
return self.patched_content
except db.Error:
# This may happen when a Content entity was deleted behind our back.
self.patched_content = None
old_lines = self.get_content().text.splitlines(True)
logging.info('Creating patched_content for %s', self.filename)
chunks = patching.ParsePatchToChunks(self.lines, self.filename)
new_lines = []
for _, _, new in patching.PatchChunks(old_lines, chunks):
new_lines.extend(new)
text = db.Text(''.join(new_lines))
patched_content = Content(text=text, parent=self)
patched_content.put()
self.patched_content = patched_content
self.put()
return patched_content
@property
def no_base_file(self):
"""Returns True iff the base file is not available."""
return self.content and self.content.file_too_large
def fetch_base(self):
"""Fetch base file for the patch.
Returns:
A models.Content instance.
Raises:
FetchError: For any kind of problem fetching the content.
"""
rev = patching.ParseRevision(self.lines)
if rev is not None:
if rev == 0:
# rev=0 means it's a new file.
return Content(text=db.Text(u''), parent=self)
# AppEngine can only fetch URLs that db.Link() thinks are OK,
# so try converting to a db.Link() here.
try:
base = db.Link(self.patchset.issue.base)
except db.BadValueError:
msg = 'Invalid base URL for fetching: %s' % self.patchset.issue.base
logging.warn(msg)
raise FetchError(msg)
url = utils.make_url(base, self.filename, rev)
logging.info('Fetching %s', url)
try:
result = urlfetch.fetch(url)
except urlfetch.Error, err:
msg = 'Error fetching %s: %s: %s' % (url, err.__class__.__name__, err)
logging.warn('FetchBase: %s', msg)
raise FetchError(msg)
if result.status_code != 200:
msg = 'Error fetching %s: HTTP status %s' % (url, result.status_code)
logging.warn('FetchBase: %s', msg)
raise FetchError(msg)
return Content(text=utils.to_dbtext(utils.unify_linebreaks(result.content)),
parent=self)
class Comment(db.Model):
    """A Comment for a specific line of a specific file.

    This is a descendant of a Patch.
    """

    patch = db.ReferenceProperty(Patch)  # == parent
    message_id = db.StringProperty()  # == key_name
    author = db.UserProperty(auto_current_user_add=True)
    date = db.DateTimeProperty(auto_now=True)
    # Line number the comment is attached to.
    lineno = db.IntegerProperty()
    text = db.TextProperty()
    # True when the comment is on the left (old) side of the diff.
    left = db.BooleanProperty()
    draft = db.BooleanProperty(required=True, default=True)

    # Derived, in-memory attributes set by complete(); never persisted.
    buckets = None
    shorttext = None

    def complete(self):
        """Set the shorttext and buckets attributes."""
        # TODO(guido): Turn these into caching properties instead.

        # The strategy for buckets is that we want groups of lines that
        # start with > to be quoted (and not displayed by
        # default). Whitespace-only lines are not considered either quoted
        # or not quoted. Same goes for lines that go like "On ... user
        # wrote:".
        cur_bucket = []
        quoted = None
        self.buckets = []

        def _Append():
            # Flush the accumulated lines into a Bucket, tagged with the
            # current quoted state.  Reads cur_bucket/quoted via closure.
            if cur_bucket:
                self.buckets.append(Bucket(text="\n".join(cur_bucket),
                                           quoted=bool(quoted)))

        lines = self.text.splitlines()
        for line in lines:
            if line.startswith("On ") and line.endswith(":"):
                # Attribution line ("On ... wrote:"): neutral, changes nothing.
                pass
            elif line.startswith(">"):
                # Quoted line; close a preceding non-quoted bucket first.
                if quoted is False:
                    _Append()
                    cur_bucket = []
                quoted = True
            elif line.strip():
                # Non-blank, non-quoted line; close a preceding quoted bucket.
                if quoted is True:
                    _Append()
                    cur_bucket = []
                quoted = False
            # Every line (including neutral ones) joins the current bucket.
            cur_bucket.append(line)

        _Append()  # Flush the final bucket.

        self.shorttext = self.text.lstrip()[:50].rstrip()
        # Grab the first 50 chars from the first non-quoted bucket
        for bucket in self.buckets:
            if not bucket.quoted:
                self.shorttext = bucket.text.lstrip()[:50].rstrip()
                break
class Bucket(db.Model):
    """A 'Bucket' of text.

    A comment may consist of multiple text buckets, some of which may be
    collapsed by default (when they represent quoted text).

    NOTE: This entity is never written to the database.  See Comment.complete().
    """
    # TODO(guido): Flesh this out.

    # Lines of the bucket joined with '\n' (see Comment.complete()).
    text = db.TextProperty()
    # True when the bucket holds quoted material (lines starting with '>').
    quoted = db.BooleanProperty()
### Repositories and Branches ###
class Repository(db.Model):
    """A specific Subversion repository."""

    name = db.StringProperty(required=True)
    url = db.LinkProperty(required=True)
    owner = db.UserProperty(auto_current_user_add=True)
    guid = db.StringProperty()  # global unique repository id

    def __str__(self):
        # Human-readable representation: just the repository name.
        return self.name
class Branch(db.Model):
    """A trunk, branch, or a tag in a specific Subversion repository."""

    repo = db.ReferenceProperty(Repository, required=True)
    # Cache repo.name as repo_name, to speed up set_branch_choices()
    # in views.IssueBaseForm.
    repo_name = db.StringProperty()
    # Kind of branch; restricted to the three values below.
    category = db.StringProperty(required=True,
                                 choices=('*trunk*', 'branch', 'tag'))
    name = db.StringProperty(required=True)
    url = db.LinkProperty(required=True)
    owner = db.UserProperty(auto_current_user_add=True)
### Accounts ###
class Account(db.Model):
    """Maps a user or email address to a user-selected nickname, and more.

    Nicknames do not have to be unique.

    The default nickname is generated from the email address by
    stripping the first '@' sign and everything after it. The email
    should not be empty nor should it start with '@' (AssertionError
    error is raised if either of these happens).

    This also holds a list of ids of starred issues. The expectation
    that you won't have more than a dozen or so starred issues (a few
    hundred in extreme cases) and the memory used up by a list of
    integers of that size is very modest, so this is an efficient
    solution. (If someone found a use case for having thousands of
    starred issues we'd have to think of a different approach.)
    """

    user = db.UserProperty(auto_current_user_add=True, required=True)
    email = db.EmailProperty(required=True)  # key == <email>
    nickname = db.StringProperty(required=True)
    default_context = db.IntegerProperty(default=settings.DEFAULT_CONTEXT,
                                         choices=CONTEXT_CHOICES)
    default_column_width = db.IntegerProperty(
        default=settings.DEFAULT_COLUMN_WIDTH)
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now=True)
    stars = db.ListProperty(int)  # Issue ids of all starred issues
    fresh = db.BooleanProperty()
    uploadpy_hint = db.BooleanProperty(default=True)
    notify_by_email = db.BooleanProperty(default=True)
    notify_by_chat = db.BooleanProperty(default=False)

    # Current user's Account.  Updated by middleware.AddUserToRequestMiddleware.
    current_user_account = None

    # Lower-cased copies maintained by put() for case-insensitive lookups.
    lower_email = db.StringProperty()
    lower_nickname = db.StringProperty()
    xsrf_secret = db.BlobProperty()

    # Note that this doesn't get called when doing multi-entity puts.
    def put(self):
        """Store the entity, refreshing the lower-cased lookup fields."""
        self.lower_email = str(self.email).lower()
        self.lower_nickname = self.nickname.lower()
        super(Account, self).put()

    @classmethod
    def get_account_for_user(cls, user):
        """Get the Account for a user, creating a default one if needed."""
        email = user.email()
        assert email
        key = '<%s>' % email
        # Since usually the account already exists, first try getting it
        # without the transaction implied by get_or_insert().
        account = cls.get_by_key_name(key)
        if account is not None:
            return account
        nickname = cls.create_nickname_for_user(user)
        return cls.get_or_insert(key, user=user, email=email, nickname=nickname,
                                 fresh=True)

    @classmethod
    def create_nickname_for_user(cls, user):
        """Returns a unique nickname for a user."""
        name = nickname = user.email().split('@', 1)[0]
        next_char = chr(ord(nickname[0].lower())+1)
        # One range query fetches all nicknames starting with the same
        # first letter, so the uniqueness loop below needs no more queries.
        existing_nicks = [account.lower_nickname
                          for account in cls.gql(('WHERE lower_nickname >= :1 AND '
                                                  'lower_nickname < :2'),
                                                 nickname.lower(), next_char)]
        suffix = 0
        while nickname.lower() in existing_nicks:
            suffix += 1
            nickname = '%s%d' % (name, suffix)
        return nickname

    @classmethod
    def get_nickname_for_user(cls, user):
        """Get the nickname for a user."""
        return cls.get_account_for_user(user).nickname

    @classmethod
    def get_account_for_email(cls, email):
        """Get the Account for an email address, or return None."""
        assert email
        key = '<%s>' % email
        return cls.get_by_key_name(key)

    @classmethod
    def get_accounts_for_emails(cls, emails):
        """Get the Accounts for each of a list of email addresses."""
        return cls.get_by_key_name(['<%s>' % email for email in emails])

    @classmethod
    def get_by_key_name(cls, key, **kwds):
        """Override db.Model.get_by_key_name() to use cached value if possible."""
        if not kwds and cls.current_user_account is not None:
            if key == cls.current_user_account.key().name():
                return cls.current_user_account
        return super(Account, cls).get_by_key_name(key, **kwds)

    @classmethod
    def get_multiple_accounts_by_email(cls, emails):
        """Get multiple accounts.  Returns a dict by email."""
        results = {}
        keys = []
        for email in emails:
            if cls.current_user_account and email == cls.current_user_account.email:
                results[email] = cls.current_user_account
            else:
                keys.append('<%s>' % email)
        if keys:
            accounts = cls.get_by_key_name(keys)
            for account in accounts:
                if account is not None:
                    results[account.email] = account
        return results

    @classmethod
    def get_nickname_for_email(cls, email, default=None):
        """Get the nickname for an email address, possibly a default.

        If default is None a generic nickname is computed from the email
        address.

        Args:
          email: email address.
          default: If given and no account is found, returned as the default value.
        Returns:
          Nickname for given email.
        """
        account = cls.get_account_for_email(email)
        if account is not None and account.nickname:
            return account.nickname
        if default is not None:
            return default
        return email.replace('@', '_')

    @classmethod
    def get_account_for_nickname(cls, nickname):
        """Get the first Account with this nickname, or None.

        (Docstring fixed: .get() returns a single entity, not a list.)
        """
        assert nickname
        assert '@' not in nickname
        return cls.all().filter('lower_nickname =', nickname.lower()).get()

    @classmethod
    def get_email_for_nickname(cls, nickname):
        """Turn a nickname into an email address.

        If the nickname is not unique or does not exist, this returns None.
        """
        account = cls.get_account_for_nickname(nickname)
        if account is None:
            return None
        return account.email

    def user_has_selected_nickname(self):
        """Return True if the user picked the nickname.

        Normally this returns 'not self.fresh', but if that property is
        None, we assume that if the created and modified timestamp are
        within 2 seconds, the account is fresh (i.e. the user hasn't
        selected a nickname yet).  We then also update self.fresh, so it
        is used as a cache and may even be written back if we're lucky.
        """
        if self.fresh is None:
            delta = self.created - self.modified
            # Simulate delta = abs(delta)
            if delta.days < 0:
                delta = -delta
            self.fresh = (delta.days == 0 and delta.seconds < 2)
        return not self.fresh

    _drafts = None

    @property
    def drafts(self):
        """A list of issue ids that have drafts by this user.

        This is cached in memcache.
        """
        if self._drafts is None:
            if self._initialize_drafts():
                self._save_drafts()
        return self._drafts

    def update_drafts(self, issue, have_drafts=None):
        """Update the user's draft status for this issue.

        Args:
          issue: an Issue instance.
          have_drafts: optional bool forcing the draft status.  By default,
            issue.num_drafts is inspected (which may query the datastore).

        The Account is written to the datastore if necessary.
        """
        dirty = False
        if self._drafts is None:
            dirty = self._initialize_drafts()
        # Renamed from 'id', which shadowed the builtin.
        issue_id = issue.key().id()
        if have_drafts is None:
            have_drafts = bool(issue.num_drafts)  # Beware, this may do a query.
        if have_drafts:
            if issue_id not in self._drafts:
                self._drafts.append(issue_id)
                dirty = True
        else:
            if issue_id in self._drafts:
                self._drafts.remove(issue_id)
                dirty = True
        if dirty:
            self._save_drafts()

    def _initialize_drafts(self):
        """Initialize self._drafts from scratch.

        This mostly exists as a schema conversion utility.

        Returns:
          True if the user should call self._save_drafts(), False if not.
        """
        drafts = memcache.get('user_drafts:' + self.email)
        if drafts is not None:
            self._drafts = drafts
            ##logging.info('HIT: %s -> %s', self.email, self._drafts)
            return False
        # We're looking for the Issue key id.  The ancestry of comments goes:
        # Issue -> PatchSet -> Patch -> Comment.
        issue_ids = set(comment.key().parent().parent().parent().id()
                        for comment in gql(Comment,
                                           'WHERE author = :1 AND draft = TRUE',
                                           self.user))
        self._drafts = list(issue_ids)
        ##logging.info('INITIALIZED: %s -> %s', self.email, self._drafts)
        return True

    def _save_drafts(self):
        """Save self._drafts to memcache (1 hour TTL)."""
        ##logging.info('SAVING: %s -> %s', self.email, self._drafts)
        memcache.set('user_drafts:' + self.email, self._drafts, 3600)

    def get_xsrf_token(self, offset=0):
        """Return an XSRF token for the current user."""
        # This code assumes that
        # self.user.email() == users.get_current_user().email()
        current_user = users.get_current_user()
        if self.user.user_id() != current_user.user_id():
            # Mainly for Google Account plus conversion.
            logging.info('Updating user_id for %s from %s to %s' % (
                self.user.email(), self.user.user_id(), current_user.user_id()))
            self.user = current_user
            self.put()
        if not self.xsrf_secret:
            self.xsrf_secret = os.urandom(8)
            self.put()
        m = md5.new(self.xsrf_secret)
        email_str = self.lower_email
        if isinstance(email_str, unicode):
            email_str = email_str.encode('utf-8')
        # Bug fix: hash the UTF-8-encoded email prepared above.  Previously
        # the raw self.lower_email was hashed, leaving the encoding dead
        # code and risking a UnicodeEncodeError for non-ASCII emails.
        m.update(email_str)
        when = int(time.time()) // 3600 + offset
        m.update(str(when))
        return m.hexdigest()
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import md5
from django.contrib.syndication.feeds import Feed
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.utils.feedgenerator import Atom1Feed
from codereview import library
from codereview import models
class BaseFeed(Feed):
    """Common base for all Rietveld syndication feeds.

    Django's Feed framework calls the methods below per feed / per item.
    Items may be models.Issue, models.PatchSet or models.Message entities.
    """

    title = 'Code Review'
    description = 'Rietveld: Code Review Tool hosted on Google App Engine'
    feed_type = Atom1Feed

    def link(self):
        # Feed-level link: the site index page.
        return reverse('codereview.views.index')

    def author_name(self):
        # Feed-level author name.
        return 'rietveld'

    def item_guid(self, item):
        # Stable GUID derived from the item's datastore key.
        return 'urn:md5:%s' % (md5.new(str(item.key())).hexdigest())

    def item_link(self, item):
        # Choose a URL depending on the item's entity type.
        if isinstance(item, models.PatchSet):
            if item.data is not None:
                return reverse('codereview.views.download',
                               args=[item.issue.key().id(), item.key().id()])
            else:
                # Patch set is too large, only the splitted diffs are available.
                return reverse('codereview.views.show', args=[item.parent_key().id()])
        if isinstance(item, models.Message):
            # Deep-link to the message anchor on the issue page.
            return '%s#msg-%s' % (reverse('codereview.views.show',
                                          args=[item.issue.key().id()]),
                                  item.key())
        return reverse('codereview.views.show', args=[item.key().id()])

    def item_title(self, item):
        # NOTE(review): looks like a placeholder string -- confirm intended.
        return 'the title'

    def item_author_name(self, item):
        # Resolve the responsible user's nickname per entity type.
        if isinstance(item, models.Issue):
            return library.get_nickname(item.owner, True)
        if isinstance(item, models.PatchSet):
            return library.get_nickname(item.issue.owner, True)
        if isinstance(item, models.Message):
            return library.get_nickname(item.sender, True)
        return 'Rietveld'

    def item_pubdate(self, item):
        # Pick the timestamp most meaningful for each entity type.
        if isinstance(item, models.Issue):
            return item.modified
        if isinstance(item, models.PatchSet):
            # Use created, not modified, so that commenting on
            # a patch set does not bump its place in the RSS feed.
            return item.created
        if isinstance(item, models.Message):
            return item.date
        return None
class BaseUserFeed(BaseFeed):
    """Base class for feeds that are scoped to one user, looked up by nickname."""

    def get_object(self, bits):
        """Returns the account for the requested user feed.

        bits is a list of URL path elements. The first element of this list
        should be the user's nickname. A 404 is raised if the list is empty or
        has more than one element or if a user with that nickname
        doesn't exist.
        """
        if len(bits) != 1:
            raise ObjectDoesNotExist
        obj = bits[0]
        account = models.Account.get_account_for_nickname('%s' % obj)
        if account is None:
            raise ObjectDoesNotExist
        return account
class ReviewsFeed(BaseUserFeed):
    """Feed of open issues on which the requested user is a reviewer."""

    title = 'Code Review - All issues I have to review'

    def items(self, obj):
        # use_email=True binds the account's email (not its User object)
        # to the :1 placeholder.
        return _rss_helper(obj.email, 'closed = FALSE AND reviewers = :1',
                           use_email=True)
class ClosedFeed(BaseUserFeed):
    """Feed of the requested user's closed issues."""

    title = "Code Review - Reviews closed by me"

    def items(self, obj):
        return _rss_helper(obj.email, 'closed = TRUE AND owner = :1')
class MineFeed(BaseUserFeed):
    """Feed of the requested user's open issues."""

    title = 'Code Review - My issues'

    def items(self, obj):
        return _rss_helper(obj.email, 'closed = FALSE AND owner = :1')
class AllFeed(BaseFeed):
    """Feed of all open, non-private issues, most recently modified first."""

    title = 'Code Review - All issues'

    def items(self):
        query = models.Issue.gql('WHERE closed = FALSE AND private = FALSE '
                                 'ORDER BY modified DESC')
        # RSS_LIMIT caps the number of entries (module-level constant).
        return query.fetch(RSS_LIMIT)
class OneIssueFeed(BaseFeed):
    """Feed for a single issue: its patch sets and messages, oldest first."""

    def link(self):
        return reverse('codereview.views.index')

    def get_object(self, bits):
        """Resolve /<issue-id>/ to an Issue; raise ObjectDoesNotExist (404) otherwise."""
        if len(bits) != 1:
            raise ObjectDoesNotExist
        obj = models.Issue.get_by_id(int(bits[0]))
        if obj:
            return obj
        raise ObjectDoesNotExist

    def title(self, obj):
        return 'Code review - Issue %d: %s' % (obj.key().id(), obj.subject)

    def items(self, obj):
        # Renamed local from 'all', which shadowed the builtin.
        entries = list(obj.patchset_set) + list(obj.message_set)
        entries.sort(key=self.item_pubdate)
        return entries
### RSS feeds ###

# Maximum number of issues returned by any single RSS/Atom feed query.
RSS_LIMIT = 20
def _rss_helper(email, query_string, use_email=False):
    """Fetch up to RSS_LIMIT non-private issues matching query_string.

    Args:
      email: email address identifying the Account.
      query_string: GQL WHERE-clause fragment containing a ':1' placeholder.
      use_email: if True, bind the account's email to :1; otherwise bind
        the account's User object.

    Returns:
      A list of Issue entities; empty if no such account exists.
    """
    account = models.Account.get_account_for_email(email)
    if account is None:
        return []
    # Conditional expression replaces the fragile 'cond and a or b' idiom,
    # which would silently fall through to account.user if account.email
    # were ever falsy.
    bind_value = account.email if use_email else account.user
    query = models.Issue.gql('WHERE %s AND private = FALSE '
                             'ORDER BY modified DESC' % query_string,
                             bind_value)
    return query.fetch(RSS_LIMIT)
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Intra-region diff utilities.
Intra-region diff highlights the blocks of code which have been changed or
deleted within a region. So instead of highlighting the whole region marked as
changed, the user can see what exactly was changed within that region.
Terminology:
'region' is a list of consecutive code lines.
'word' is the unit of intra-region diff. Its definition is arbitrary based on
what we think as to be a good unit of difference between two regions.
'block' is a small section of code within a region. It can span multiple
lines. There can be multiple non overlapping blocks within a region. A block
can potentially span the whole region.
The blocks have two representations. One is of the format (offset1, offset2,
size) which is returned by the SequenceMatcher to indicate a match of
length 'size' starting at offset1 in the first/old line and starting at offset2
in the second/new line. We convert this representation to a pair of tuples i.e.
(offset1, size) and (offset2, size) for rendering each side of the diff
separately. This latter representation is also more efficient for doing
compaction of adjacent blocks which reduces the size of the HTML markup. See
CompactBlocks for more details.
SequenceMatcher always returns one special matching block at the end with
contents (len(line1), len(line2), 0). We retain this special block as it
simplifies for loops in rendering the last non-matching block. All functions
which deal with the sequence of blocks assume presence of the special block at
the end of the sequence and retain it.
"""
import cgi
import difflib
import re
# Tag to begin a diff chunk.  '%s' is filled with a CSS class name.
BEGIN_TAG = "<span class=\"%s\">"

# Tag to end a diff block.
END_TAG = "</span>"

# Tag used for visual tab indication.
TAB_TAG = "<span class=\"visualtab\">»</span>"

# Color scheme to govern the display properties of diff blocks and matching
# blocks. Each value e.g. 'oldlight' corresponds to a CSS style.
# Keys are region kinds; inner keys are block roles (matching text, differing
# text, background).
# NOTE(review): 'newmove' reuses the plain 'new' styles -- confirm intended.
COLOR_SCHEME = {
    'old': {
        'match': 'oldlight',
        'diff': 'olddark',
        'bckgrnd': 'oldlight',
    },
    'new': {
        'match': 'newlight',
        'diff': 'newdark',
        'bckgrnd': 'newlight',
    },
    'oldmove': {
        'match': 'movelight',
        'diff': 'oldmovedark',
        'bckgrnd': 'movelight'
    },
    'newmove': {
        'match': 'newlight',
        'diff': 'newdark',
        'bckgrnd': 'newlight'
    },
}

# Regular expressions to tokenize lines. Default is 'd'.
EXPRS = {
    'a': r'(\w+|[^\w\s]+|\s+)',
    'b': r'([A-Za-z0-9]+|[^A-Za-z0-9])',
    'c': r'([A-Za-z0-9_]+|[^A-Za-z0-9_])',
    'd': r'([^\W_]+|[\W_])',
}

# Maximum total characters in old and new lines for doing intra-region diffs.
# Intra-region diff for larger regions is hard to comprehend and wastes CPU
# time.
MAX_TOTAL_LEN = 10000
def _ExpandTabs(text, column, tabsize, mark_tabs=False):
"""Expand tab characters in a string into spaces.
Args:
text: a string containing tab characters.
column: the initial column for the first character in text
tabsize: tab stops occur at columns that are multiples of tabsize
mark_tabs: if true, leave a tab character as the first character
of the expansion, so that the caller can find where
the tabs were.
Note that calling _ExpandTabs with mark_tabs=True is not idempotent.
"""
expanded = ""
while True:
tabpos = text.find("\t")
if tabpos < 0:
break
fillwidth = tabsize - (tabpos + column) % tabsize
column += tabpos + fillwidth
if mark_tabs:
fill = "\t" + " " * (fillwidth - 1)
else:
fill = " " * fillwidth
expanded += text[0:tabpos] + fill
text = text[tabpos+1:]
return expanded + text
def Break(text, offset=0, limit=80, brk="\n ", tabsize=8, mark_tabs=False):
    """Break text into lines.

    Break text, which begins at column offset, each time it reaches
    column limit.

    To break the text, insert brk, which does not count toward
    the column count of the next line and is assumed to be valid HTML.

    During the text breaking process, replaces tabs with spaces up
    to the next column that is a multiple of tabsize.
    If mark_tabs is true, replace the first space of each expanded
    tab with TAB_TAG.

    Input and output are assumed to be in UTF-8; the computation is done
    in Unicode. (Still not good enough if zero-width characters are
    present.) If the input is not valid UTF-8, then the encoding is
    passed through, potentially breaking up multi-byte characters.

    We pass the line through cgi.escape before returning it.

    A trailing newline is always stripped from the input first.
    """
    assert tabsize > 0, tabsize
    if text.endswith("\n"):
        text = text[:-1]
    try:
        # Decode to unicode so column arithmetic counts characters, not bytes.
        text = unicode(text, "utf-8")
    except:
        # Invalid UTF-8 (or already-unicode input): pass bytes through as-is.
        pass
    # Expand all tabs.
    # If mark_tabs is true, we retain one \t character as a marker during
    # expansion so that we later replace it with an HTML snippet.
    text = _ExpandTabs(text, offset, tabsize, mark_tabs)
    # Perform wrapping.
    if len(text) > limit - offset:
        # First segment is shorter (limit - offset); the rest use the full limit.
        parts, text = [text[0:limit-offset]], text[limit-offset:]
        while len(text) > limit:
            parts.append(text[0:limit])
            text = text[limit:]
        parts.append(text)
        # HTML-escape each part separately so brk (raw HTML) is not escaped.
        text = brk.join([cgi.escape(p) for p in parts])
    else:
        text = cgi.escape(text)
    # Colorize tab markers
    text = text.replace("\t", TAB_TAG)
    if isinstance(text, unicode):
        # Return UTF-8 bytes to match the input convention.
        return text.encode("utf-8", "replace")
    return text
def CompactBlocks(blocks):
    """Compacts adjacent code blocks.

    In many cases 2 adjacent blocks can be merged into one. This allows
    to do some further processing on those blocks.

    Args:
      blocks: [(offset1, size), ...]

    Returns:
      A list with the same structure as the input with adjacent blocks
      merged. However, the last block (which is always assumed to have
      a zero size) is never merged. For example, the input
      [(0, 2), (2, 8), (10, 5), (15, 0)]
      will produce the output [(0, 15), (15, 0)].
    """
    if len(blocks) == 1:
        return blocks
    merged = [blocks[0]]
    # The trailing sentinel block is excluded here and re-appended below.
    for start, size in blocks[1:-1]:
        prev_start, prev_size = merged[-1]
        if prev_start + prev_size == start:
            # Contiguous with the previous block: extend it in place.
            merged[-1] = (prev_start, prev_size + size)
        else:
            merged.append((start, size))
    merged.append(blocks[-1])
    return merged
def FilterBlocks(blocks, filter_func):
    """Gets rid of any blocks if filter_func evaluates false for them.

    Args:
      blocks: [(offset1, offset2, size), ...]; must have at least 1 entry
      filter_func: a boolean function taking a single argument of the form
        (offset1, offset2, size)

    Returns:
      A list with the same structure with entries for which filter_func()
      returns false removed. However, the last block is always included.
    """
    kept = []
    for candidate in blocks[:-1]:
        if filter_func(candidate):
            kept.append(candidate)
    # The trailing 'special' block is retained unconditionally.
    kept.append(blocks[-1])
    return kept
def GetDiffParams(expr='d', min_match_ratio=0.6, min_match_size=2, dbg=False):
    """Returns a tuple of various parameters which affect intra region diffs.

    Args:
      expr: regular expression id (a key of EXPRS) used to identify 'words'
        in the intra region diff
      min_match_ratio: minimum similarity between regions to qualify for
        intra region diff
      min_match_size: the smallest matching block size to use; blocks
        smaller than this are ignored
      dbg: to turn on generation of debugging information for the diff

    Returns:
      4-tuple (expr, min_match_ratio, min_match_size, dbg) suitable for
      passing to functions like WordDiff and IntraLineDiff.
    """
    assert expr in EXPRS
    assert min_match_size in xrange(1, 5)
    # Chained comparison is equivalent to the original two-clause 'and'.
    assert 0.0 < min_match_ratio < 1.0
    return (expr, min_match_ratio, min_match_size, dbg)
def CanDoIRDiff(old_lines, new_lines):
    """Tells if it would be worth computing the intra region diff.

    Calculating IR diff is costly and is usually helpful only for small
    regions. We use a heuristic: if the total number of characters exceeds
    MAX_TOTAL_LEN we assume it is not worth computing the IR diff.

    Args:
      old_lines: an array of strings containing old text
      new_lines: an array of strings containing new text

    Returns:
      True if we think it is worth computing IR diff for the region defined
      by old_lines and new_lines, False otherwise.

    TODO: Let GetDiffParams handle MAX_TOTAL_LEN param also.
    """
    total_chars = 0
    for region in (old_lines, new_lines):
        for line in region:
            total_chars += len(line)
    return total_chars <= MAX_TOTAL_LEN
def WordDiff(line1, line2, diff_params):
    """Returns blocks with positions indicating word level diffs.

    Args:
      line1: string representing the left part of the diff
      line2: string representing the right part of the diff
      diff_params: return value of GetDiffParams

    Returns:
      A tuple (blocks, ratio) where:
        blocks: [(offset1, offset2, size), ...] such that
                line1[offset1:offset1+size] == line2[offset2:offset2+size]
                and the last block is always (len(line1), len(line2), 0)
        ratio: a float giving the diff ratio computed by SequenceMatcher.
    """
    match_expr, min_match_ratio, min_match_size, _ = diff_params
    exp = EXPRS[match_expr]
    # Strings may have been left undecoded up to now. Assume UTF-8.
    try:
        line1 = unicode(line1, "utf8")
    except:
        # Already unicode, or undecodable bytes; keep the value as-is.
        pass
    try:
        line2 = unicode(line2, "utf8")
    except:
        pass
    # Tokenize both lines into 'words' as defined by the chosen regexp.
    a = re.findall(exp, line1, re.U)
    b = re.findall(exp, line2, re.U)
    s = difflib.SequenceMatcher(None, a, b)
    matching_blocks = s.get_matching_blocks()
    ratio = s.ratio()
    # Don't show intra region diffs if both lines are too different and there is
    # more than one block of difference. If there is only one change then we
    # still show the intra region diff regardless of how different the blocks
    # are.
    # Note: We compare len(matching_blocks) with 3 because one block of change
    # results in 2 matching blocks. We add the one special block and we get 3
    # matching blocks per one block of change.
    if ratio < min_match_ratio and len(matching_blocks) > 3:
        return ([(0, 0, 0)], ratio)
    # For now convert to character level blocks because we already have
    # the code to deal with folding across lines for character blocks.
    # Create arrays lena an lenb which have cumulative word lengths
    # corresponding to word positions in a and b
    lena = []
    last = 0
    for w in a:
        lena.append(last)
        last += len(w)
    lenb = []
    last = 0
    for w in b:
        lenb.append(last)
        last += len(w)
    # Sentinel entries so the character extent of the last word run can be
    # computed with lena[s1+blen] below.
    lena.append(len(line1))
    lenb.append(len(line2))
    # Convert to character blocks
    blocks = []
    for s1, s2, blen in matching_blocks[:-1]:
        apos = lena[s1]
        bpos = lenb[s2]
        block_len = lena[s1+blen] - apos
        blocks.append((apos, bpos, block_len))
    # Recreate the special block.
    blocks.append((len(line1), len(line2), 0))
    # Filter any matching blocks which are smaller than the desired threshold.
    # We don't remove matching blocks with only a newline character as doing so
    # results in showing the matching newline character as non matching which
    # doesn't look good.
    blocks = FilterBlocks(blocks, lambda b: (b[2] >= min_match_size or
                                             line1[b[0]:b[0]+b[2]] == '\n'))
    return (blocks, ratio)
def IntraLineDiff(line1, line2, diff_params, diff_func=WordDiff):
    """Compute intraline diff blocks for a pair of lines.

    Args:
      line1: string representing the left part of the diff
      line2: string representing the right part of the diff
      diff_params: return value of GetDiffParams
      diff_func: a function whose signature matches that of WordDiff()

    Returns:
      A 3-tuple (blocks1, blocks2, ratio): blocks1/blocks2 are arrays of
      (start_pos, length) tuples for line1/line2 respectively, and ratio
      is the similarity reported by diff_func.
    """
    raw_blocks, ratio = diff_func(line1, line2, diff_params)
    left = []
    right = []
    # Split each 3-tuple block into its per-side (start, length) form.
    for start1, start2, length in raw_blocks:
        left.append((start1, length))
        right.append((start2, length))
    return (left, right, ratio)
def DumpDiff(blocks, line1, line2):
"""Helper function to debug diff related problems.
Args:
blocks: [(offset1, offset2, size), ...]
line1: string representing the left part of the diff
line2: string representing the right part of the diff
"""
for offset1, offset2, size in blocks:
print offset1, offset2, size
print offset1, size, ": ", line1[offset1:offset1+size]
print offset2, size, ": ", line2[offset2:offset2+size]
def RenderIntraLineDiff(blocks, line, tag, dbg_info=None, limit=80, indent=5,
                        tabsize=8, mark_tabs=False):
    """Renders the diff blocks returned by IntraLineDiff function.

    Args:
      blocks: [(start_pos, size), ...]
      line: line of code on which the blocks are to be rendered.
      tag: 'new' or 'old' to control the color scheme.
      dbg_info: a string that holds debugging information header. Debug
                information is rendered only if dbg_info is not None.
      limit: folding limit to be passed to the Break function.
      indent: indentation size to be passed to the Break function.
      tabsize: tab stops occur at columns that are multiples of tabsize
      mark_tabs: if True, mark the first character of each expanded tab visually

    Returns:
      A 3-tuple (rendered_line, has_newline, debug_info): the rendered
      version of the input 'line', whether the line has a matching newline
      character, and the (possibly extended) debug string.
    """
    res = ""
    prev_start, prev_len = 0, 0
    has_newline = False
    debug_info = dbg_info
    if dbg_info:
        debug_info += "\nBlock Count: %d\nBlocks: " % (len(blocks) - 1)
    # Each iteration renders the non-matching gap that precedes the current
    # matching block, then the matching block itself.
    for curr_start, curr_len in blocks:
        if dbg_info and curr_len > 0:
            debug_info += Break(
                "\n(%d, %d):|%s|" %
                (curr_start, curr_len, line[curr_start:curr_start+curr_len]),
                limit, indent, tabsize, mark_tabs)
        res += FoldBlock(line, prev_start + prev_len, curr_start, limit, indent,
                         tag, 'diff', tabsize, mark_tabs)
        res += FoldBlock(line, curr_start, curr_start + curr_len, limit, indent,
                         tag, 'match', tabsize, mark_tabs)
        # TODO: This test should be out of loop rather than inside. Once we
        # filter out some junk from blocks (e.g. some empty blocks) we should do
        # this test only on the last matching block.
        if line[curr_start:curr_start+curr_len].endswith('\n'):
            has_newline = True
        prev_start, prev_len = curr_start, curr_len
    return (res, has_newline, debug_info)
def FoldBlock(src, start, end, limit, indent, tag, btype, tabsize=8,
              mark_tabs=False):
    """Folds and renders a block.

    Args:
      src: line of code
      start: starting position of the block within 'src'.
      end: ending position of the block within 'src'.
      limit: folding limit
      indent: indentation to use for folding.
      tag: 'new' or 'old' to control the color scheme.
      btype: block type i.e. 'match' or 'diff' to control the color scheme.
      tabsize: tab stops occur at columns that are multiples of tabsize
      mark_tabs: if True, mark the first character of each expanded tab visually

    Returns:
      A string representing the rendered block (may be empty).
    """
    text = src[start:end]
    # We ignore newlines because we do newline management ourselves.
    # Any other new lines with at the end will be stripped off by the Break
    # method.
    if start >= end or text == '\n':
        return ""
    fbegin, lend, nl_plus_indent = GetTags(tag, btype, indent)
    # 'bol' is beginning of line.
    # The text we care about begins at byte offset start
    # but if there are tabs it will have a larger column
    # offset. Use len(_ExpandTabs()) to find out how many
    # columns the starting prefix occupies.
    offset_from_bol = len(_ExpandTabs(src[0:start], 0, tabsize)) % limit
    # At a fold point Break must close the open span, emit the newline plus
    # indentation, and reopen a new span.
    brk = lend + nl_plus_indent + fbegin
    text = Break(text, offset_from_bol, limit, brk, tabsize, mark_tabs)
    if text:
        text = fbegin + text + lend
    # If this is the first block of the line and this is not the first line then
    # insert newline + indent.
    if offset_from_bol == 0 and not start == 0:
        text = nl_plus_indent + text
    return text
def GetTags(tag, btype, indent):
    """Return the markup fragments used when rendering diff blocks.

    Args:
      tag: a key from COLOR_SCHEME
      btype: 'match' or 'diff'
      indent: indentation to use

    Returns:
      A 3-tuple (begin_tag, end_tag, newline_plus_indent_block).
    """
    assert tag in COLOR_SCHEME
    assert btype in ('match', 'diff')
    scheme = COLOR_SCHEME[tag]
    begin_tag = BEGIN_TAG % scheme[btype]
    nl_plus_indent = '\n'
    if indent > 0:
        # Indentation is rendered inside its own background-colored span.
        nl_plus_indent += ((BEGIN_TAG % scheme['bckgrnd']) +
                           cgi.escape(" " * indent) + END_TAG)
    return begin_tag, END_TAG, nl_plus_indent
def ConvertToSingleLine(lines):
"""Transforms a sequence of strings into a single line.
Returns the state that can be used to reconstruct the original lines with
the newline separators placed at the original place.
Args:
lines: sequence of strings
Returns:
Returns (single_line, state) tuple. 'state' shouldn't be modified by the
caller. It is only used to pass to other functions which will do certain
operations on this state.
'state' is an array containing a dictionary for each item in lines. Each
dictionary has two elements 'pos' and 'blocks'. 'pos' is the end position
of each line in the final converted string. 'blocks' is an array of blocks
for each line of code. These blocks are added using MarkBlock function.
"""
state = []
total_length = 0
for l in lines:
total_length += len(l)
# TODO: Use a tuple instead.
state.append({'pos': total_length, # the line split point
'blocks': [], # blocks which belong to this line
})
result = "".join(lines)
assert len(state) == len(lines)
return (result, state)
def MarkBlock(state, begin, end):
    """Mark block [begin, end) on a region without crossing line boundaries.

    Operates on the state returned by ConvertToSingleLine().  If the block
    spans several of the original lines it is split into one sub-block per
    line, so no recorded block ever crosses a line boundary.

    Args:
      state: the state returned by ConvertToSingleLine; modified in place.
      begin: beginning of the block.
      end: end of the block (exclusive).

    Returns:
      None.
    """
    # TODO: Make sure already existing blocks don't overlap
    while begin != end:
        prev_end = 0
        advanced = False
        for entry in state:
            line_end = entry['pos']
            if prev_end <= begin < line_end:
                if end < line_end:
                    # The block fits entirely on this line.
                    entry['blocks'].append((begin, end))
                    return
                # The block spills past this line: record the on-line part
                # and continue with the remainder.
                entry['blocks'].append((begin, line_end))
                begin = line_end
                advanced = True
                break
            prev_end = line_end
        if not advanced:
            # begin lies outside every line; nothing more to record.
            return
def GetBlocks(state):
    """Return per-line blocks recorded in 'state', line-relative.

    Args:
      state: the state returned by ConvertToSingleLine().

    Returns:
      A list with one entry per line in the region; each entry is a list of
      (start_pos, length) tuples whose start is relative to that line's
      beginning, terminated by an end-marker block (line_length, 0).
    """
    per_line = []
    line_start = 0
    for entry in state:
        line_end = entry['pos']
        # Re-base absolute (begin, end) pairs to (offset-in-line, length).
        converted = [(b - line_start, e - b) for (b, e) in entry['blocks']]
        # Terminating end-marker block for this line.
        converted.append((line_end - line_start, 0))
        per_line.append(converted)
        line_start = line_end
    return per_line
def IntraRegionDiff(old_lines, new_lines, diff_params):
    """Compute the intra region diff between two regions.

    Args:
      old_lines: array of strings
      new_lines: array of strings
      diff_params: return value of GetDiffParams

    Returns:
      A 3-tuple (old_blocks, new_blocks, ratio) with per-line matching
      blocks for the old and new sides plus the similarity ratio.
    """
    old_text, old_state = ConvertToSingleLine(old_lines)
    new_text, new_state = ConvertToSingleLine(new_lines)
    old_raw, new_raw, ratio = IntraLineDiff(old_text, new_text, diff_params)
    # Fold the single-line blocks back onto the original line structure.
    for state, raw_blocks in ((old_state, old_raw), (new_state, new_raw)):
        for start, size in raw_blocks:
            MarkBlock(state, start, start + size)
    return (GetBlocks(old_state), GetBlocks(new_state), ratio)
def NormalizeBlocks(blocks, line):
    """Normalize the block representation of an intra line diff.

    One diff has multiple valid representations; difflib sometimes places
    an all-whitespace matching run differently on similar lines (e.g. when
    two already-indented lines are indented by one more space, the
    non-matching space may be reported at either end of the run), which
    renders confusingly.  To keep this cheap only one case is handled:
    when a matching block and the non-matching gap that follows it consist
    entirely of spaces, the matching block is slid to the end of the run.
    With S a space and () marking a match:

      SSSS(SS)(ABCD) => SSSS(SS)(ABCD)
      (SS)SSSS(ABCD) => SSSS(SS)(ABCD)
      (SSSS)SS(ABCD) => SS(SSSS)(ABCD)

    Args:
      blocks: array of (offset, len) matching blocks defined on 'line';
              anything between them is considered non-matching.
      line: the text string on which the blocks are defined.

    Returns:
      An array of (offset, len) tuples representing the same diff in
      normalized form.
    """
    normalized = []
    prev_start, prev_len = blocks[0]
    for curr_start, curr_len in blocks[1:]:
        # Length of the non-matching gap between the previous match and the
        # current one.  Note curr_start - prev_start == prev_len + gap_len,
        # so line[prev_start:curr_start] is match_text + gap_text.
        gap_len = curr_start - (prev_start + prev_len)
        between = line[prev_start:curr_start]
        if prev_len > 0 and gap_len > 0 and between == ' ' * len(between):
            # All spaces: slide the matching block toward the end.
            normalized.append((prev_start + gap_len, prev_len))
        else:
            # Keep the matching block where it is.
            normalized.append((prev_start, prev_len))
        prev_start, prev_len = curr_start, curr_len
    normalized.append(blocks[-1])
    assert len(normalized) == len(blocks)
    return normalized
def RenderIntraRegionDiff(lines, diff_blocks, tag, ratio, limit=80, indent=5,
                          tabsize=8, mark_tabs=False, dbg=False):
    """Render one side of an intra region diff.

    Args:
      lines: list of strings representing source code in the region
      diff_blocks: blocks returned for this region by IntraRegionDiff()
      tag: 'new' or 'old'
      ratio: similarity ratio returned by the diff computing function
      limit: folding limit
      indent: indentation size
      tabsize: tab stops occur at columns that are multiples of tabsize
      mark_tabs: if True, mark the first character of each expanded tab
      dbg: indicates if debug information should be rendered

    Returns:
      A list with the rendered version of each item in 'lines'.
    """
    dbg_info = ('Ratio: %.1f' % ratio) if dbg else None
    rendered = []
    for src_line, line_blocks in zip(lines, diff_blocks):
        # Clean up the raw blocks before rendering each line.
        cleaned = CompactBlocks(NormalizeBlocks(line_blocks, src_line))
        rendered.append(RenderIntraLineDiff(cleaned,
                                            src_line,
                                            tag,
                                            dbg_info=dbg_info,
                                            limit=limit,
                                            indent=indent,
                                            tabsize=tabsize,
                                            mark_tabs=mark_tabs))
    assert len(rendered) == len(lines)
    return rendered
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to read and apply a unified diff without forking patch(1).
For a discussion of the unified diff format, see my blog on Artima:
http://www.artima.com/weblogs/viewpost.jsp?thread=164293
"""
import difflib
import logging
import re
import sys
_CHUNK_RE = re.compile(r"""
@@
\s+
-
(?: (\d+) (?: , (\d+) )?)
\s+
\+
(?: (\d+) (?: , (\d+) )?)
\s+
@@
""", re.VERBOSE)
def PatchLines(old_lines, patch_lines, name="<patch>"):
    """Patch old_lines with a unified diff read from patch_lines.

    This only reads unified diffs; the header lines are ignored.

    Yields (tag, old, new) tuples where old and new are lists of lines.
    The tag either starts with "error" or is a tag from difflib: "equal",
    "insert", "delete", "replace".  After "error" is yielded, no more
    tuples are yielded.  Consecutive "equal" tuples are possible.
    """
    parsed = ParsePatchToChunks(patch_lines, name)
    if parsed is not None:
        return PatchChunks(old_lines, parsed)
    # Parsing failed; surface a single error tuple.
    return iter([("error: ParsePatchToChunks failed", [], [])])
def PatchChunks(old_lines, chunks):
    """Patch old_lines with parsed chunks.

    Yields (tag, old, new) tuples where old and new are lists of lines.
    The tag either starts with "error" or is a tag from difflib: "equal",
    "insert", "delete", "replace".  After "error" is yielded, no more
    tuples are yielded.  Consecutive "equal" tuples are possible.
    """
    if not chunks:
        # The patch is a no-op.
        yield ('equal', old_lines, old_lines)
        return

    cursor = 0
    for (start, stop), _, old_chunk, new_chunk in chunks:
        # Emit the untouched span preceding this chunk.
        unchanged = old_lines[cursor:start]
        if unchanged:
            yield ('equal', unchanged, unchanged)
        cursor = start
        # Check that the patch matches the target file.
        if old_lines[start:stop] != old_chunk:
            logging.warn("mismatch:%s.%s.", old_lines[start:stop], old_chunk)
            yield ('error: old chunk mismatch', old_lines[start:stop], old_chunk)
            return
        # TODO(guido): ParsePatch knows the diff details, but throws the info away
        matcher = difflib.SequenceMatcher(None, old_chunk, new_chunk)
        for tag, i1, i2, j1, j2 in matcher.get_opcodes():
            yield (tag, old_chunk[i1:i2], new_chunk[j1:j2])
        cursor = stop

    # Copy the final matching span if any.
    trailing = old_lines[cursor:]
    if trailing:
        yield ('equal', trailing, trailing)
def ParseRevision(lines):
    """Extract the revision number from the raw lines of a patch.

    Only the first ten lines are inspected, and scanning stops at the
    first chunk header (a line starting with '@').

    Returns 0 (new file) if no revision number was found.
    """
    for line in lines[:10]:
        if line.startswith('@'):
            # Chunk data begins; no revision information past this point.
            return 0
        match = re.match(r'---\s.*\(.*\s(\d+)\)\s*$', line)
        if match is not None:
            return int(match.group(1))
    return 0
# Marker line emitted by diff when a file's last line has no trailing
# newline; the parser strips the newline from the preceding chunk line.
_NO_NEWLINE_MESSAGE = "\\ No newline at end of file"
def ParsePatchToChunks(lines, name="<patch>"):
    """Parses a patch from a list of lines.

    Return a list of chunks, where each chunk is a tuple:
      old_range, new_range, old_lines, new_lines

    Returns a list of chunks (possibly empty); or None if there's a problem.
    """
    lineno = 0
    raw_chunk = []           # (tag, text) pairs collected for the current chunk
    chunks = []
    old_range = new_range = None
    old_last = new_last = 0  # end indices of the previous chunk, for order checks
    in_prelude = True
    for line in lines:
        lineno += 1
        if in_prelude:
            # Skip leading lines until after we've seen one starting with '+++'
            if line.startswith("+++"):
                in_prelude = False
            continue
        match = _CHUNK_RE.match(line)
        if match:
            if raw_chunk:
                # Process the lines in the previous chunk
                old_chunk = []
                new_chunk = []
                for tag, rest in raw_chunk:
                    if tag in (" ", "-"):
                        old_chunk.append(rest)
                    if tag in (" ", "+"):
                        new_chunk.append(rest)
                # Check consistency
                old_i, old_j = old_range
                new_i, new_j = new_range
                if len(old_chunk) != old_j - old_i or len(new_chunk) != new_j - new_i:
                    logging.warn("%s:%s: previous chunk has incorrect length",
                                 name, lineno)
                    return None
                chunks.append((old_range, new_range, old_chunk, new_chunk))
                raw_chunk = []
            # Parse the @@ header
            old_ln, old_n, new_ln, new_n = match.groups()
            # Omitted counts default to 1, per the unified diff format.
            old_ln, old_n, new_ln, new_n = map(long,
                                               (old_ln, old_n or 1,
                                                new_ln, new_n or 1))
            # Convert the numbers to list indices we can use
            if old_n == 0:
                old_i = old_ln
            else:
                old_i = old_ln - 1
            old_j = old_i + old_n
            old_range = old_i, old_j
            if new_n == 0:
                new_i = new_ln
            else:
                new_i = new_ln - 1
            new_j = new_i + new_n
            new_range = new_i, new_j
            # Check header consistency with previous header
            if old_i < old_last or new_i < new_last:
                logging.warn("%s:%s: chunk header out of order: %r",
                             name, lineno, line)
                return None
            if old_i - old_last != new_i - new_last:
                logging.warn("%s:%s: inconsistent chunk header: %r",
                             name, lineno, line)
                return None
            old_last = old_j
            new_last = new_j
        else:
            tag, rest = line[0], line[1:]
            if tag in (" ", "-", "+"):
                raw_chunk.append((tag, rest))
            elif line.startswith(_NO_NEWLINE_MESSAGE):
                # TODO(guido): need to check that no more lines follow for this file
                if raw_chunk:
                    last_tag, last_rest = raw_chunk[-1]
                    if last_rest.endswith("\n"):
                        # The file really ends without a newline: drop it.
                        raw_chunk[-1] = (last_tag, last_rest[:-1])
            else:
                # Only log if it's a non-blank line. Blank lines we see a lot.
                if line and line.strip():
                    logging.warn("%s:%d: indecypherable input: %r", name, lineno, line)
                if chunks or raw_chunk:
                    break  # Trailing garbage isn't so bad
                return None
    if raw_chunk:
        # Process the lines in the last chunk
        old_chunk = []
        new_chunk = []
        for tag, rest in raw_chunk:
            if tag in (" ", "-"):
                old_chunk.append(rest)
            if tag in (" ", "+"):
                new_chunk.append(rest)
        # Check consistency
        old_i, old_j = old_range
        new_i, new_j = new_range
        if len(old_chunk) != old_j - old_i or len(new_chunk) != new_j - new_i:
            print >> sys.stderr, ("%s:%s: last chunk has incorrect length" %
                                  (name, lineno))
            return None
        chunks.append((old_range, new_range, old_chunk, new_chunk))
        raw_chunk = []
    return chunks
def ParsePatchToLines(lines):
    """Annotate every line of a unified diff with its line numbers.

    Returns None on error, otherwise a list of 3-tuples:
      (old_line_no, new_line_no, line)
    A line number can be 0 if it doesn't exist in the old/new file.
    """
    # TODO: can we share some of this code with ParsePatchToChunks?
    result = []
    in_prelude = True
    for line in lines:
        if in_prelude:
            # Header lines up to and including the '+++' line.
            result.append((0, 0, line))
            if line.startswith("+++"):
                in_prelude = False
        elif line.startswith("@"):
            result.append((0, 0, line))
            match = _CHUNK_RE.match(line)
            if match is None:
                logging.warn("ParsePatchToLines match failed on %s", line)
                return None
            groups = match.groups()
            old_ln = int(groups[0])
            new_ln = int(groups[2])
        else:
            first = line[0]
            if first == "-":
                result.append((old_ln, 0, line))
                old_ln += 1
            elif first == "+":
                result.append((0, new_ln, line))
                new_ln += 1
            elif first == " ":
                result.append((old_ln, new_ln, line))
                old_ln += 1
                new_ln += 1
            elif line.startswith(_NO_NEWLINE_MESSAGE):
                continue
            else:
                # Something else: could be property changes etc.
                result.append((0, 0, line))
    return result
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""URL mappings for the codereview package."""
# NOTE: Must import *, since Django looks for things here, e.g. handler500.
from django.conf.urls.defaults import *
import django.views.defaults
from codereview import feeds
# URL routes handled by codereview.views; ordering matters only within
# overlapping prefixes.
urlpatterns = patterns(
    'codereview.views',
    # Issue list pages.
    (r'^$', 'index'),
    (r'^all$', 'all'),
    (r'^mine$', 'mine'),
    (r'^starred$', 'starred'),
    # Issue creation and upload.
    (r'^new$', 'new'),
    (r'^upload$', 'upload'),
    # Single-issue pages and actions.
    (r'^(\d+)$', 'show', {}, 'show_bare_issue_number'),
    (r'^(\d+)/(show)?$', 'show'),
    (r'^(\d+)/add$', 'add'),
    (r'^(\d+)/edit$', 'edit'),
    (r'^(\d+)/delete$', 'delete'),
    (r'^(\d+)/close$', 'close'),
    (r'^(\d+)/mail$', 'mailissue'),
    (r'^(\d+)/publish$', 'publish'),
    # Raw diff/patch downloads and per-patch views.
    (r'^download/issue(\d+)_(\d+)\.diff', 'download'),
    (r'^download/issue(\d+)_(\d+)_(\d+)\.diff', 'download_patch'),
    (r'^(\d+)/patch/(\d+)/(\d+)$', 'patch'),
    (r'^(\d+)/image/(\d+)/(\d+)/(\d+)$', 'image'),
    # Side-by-side diff views; *_prefix routes exist only for reverse().
    (r'^(\d+)/diff/(\d+)/(.+)$', 'diff'),
    (r'^(\d+)/diff2/(\d+):(\d+)/(.+)$', 'diff2'),
    (r'^(\d+)/diff_skipped_lines/(\d+)/(\d+)/(\d+)/(\d+)/([tba])/(\d+)$',
     'diff_skipped_lines'),
    (r'^(\d+)/diff_skipped_lines/(\d+)/(\d+)/$',
     django.views.defaults.page_not_found, {}, 'diff_skipped_lines_prefix'),
    (r'^(\d+)/diff2_skipped_lines/(\d+):(\d+)/(\d+)/(\d+)/(\d+)/([tba])/(\d+)$',
     'diff2_skipped_lines'),
    (r'^(\d+)/diff2_skipped_lines/(\d+):(\d+)/(\d+)/$',
     django.views.defaults.page_not_found, {}, 'diff2_skipped_lines_prefix'),
    # Endpoints used by the upload tool.
    (r'^(\d+)/upload_content/(\d+)/(\d+)$', 'upload_content'),
    (r'^(\d+)/upload_patch/(\d+)$', 'upload_patch'),
    (r'^(\d+)/upload_complete/(\d+)?$', 'upload_complete'),
    (r'^(\d+)/description$', 'description'),
    (r'^(\d+)/fields', 'fields'),
    (r'^(\d+)/star$', 'star'),
    (r'^(\d+)/unstar$', 'unstar'),
    (r'^(\d+)/draft_message$', 'draft_message'),
    # Read-only API endpoints.
    (r'^api/(\d+)/?$', 'api_issue'),
    (r'^api/(\d+)/(\d+)/?$', 'api_patchset'),
    (r'^user/(.+)$', 'show_user'),
    (r'^inline_draft$', 'inline_draft'),
    # Repository / branch administration.
    (r'^repos$', 'repos'),
    (r'^repo_new$', 'repo_new'),
    (r'^repo_init$', 'repo_init'),
    (r'^branch_new/(\d+)$', 'branch_new'),
    (r'^branch_edit/(\d+)$', 'branch_edit'),
    (r'^branch_delete/(\d+)$', 'branch_delete'),
    # Account and settings pages.
    (r'^settings$', 'settings'),
    (r'^account_delete$', 'account_delete'),
    (r'^migrate_entities$', 'migrate_entities'),
    (r'^user_popup/(.+)$', 'user_popup'),
    (r'^(\d+)/patchset/(\d+)$', 'patchset'),
    (r'^(\d+)/patchset/(\d+)/delete$', 'delete_patchset'),
    (r'^account$', 'account'),
    (r'^use_uploadpy$', 'use_uploadpy'),
    # App Engine inbound services (XMPP chat and incoming mail).
    (r'^_ah/xmpp/message/chat/', 'incoming_chat'),
    (r'^_ah/mail/(.*)', 'incoming_mail'),
    (r'^xsrf_token$', 'xsrf_token'),
    # patching upload.py on the fly
    (r'^static/upload.py$', 'customized_upload_py'),
    (r'^search$', 'search'),
    # Task-queue worker endpoints.
    (r'^tasks/calculate_delta$', 'calculate_delta'),
    (r'^tasks/migrate_entities$', 'task_migrate_entities'),
    )

# Feed identifiers exposed under /rss/<name>.
feed_dict = {
    'reviews': feeds.ReviewsFeed,
    'closed': feeds.ClosedFeed,
    'mine' : feeds.MineFeed,
    'all': feeds.AllFeed,
    'issue' : feeds.OneIssueFeed,
    }

urlpatterns += patterns(
    '',
    (r'^rss/(?P<url>.*)$', 'django.contrib.syndication.views.feed',
     {'feed_dict': feed_dict}),
    )
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom middleware. Some of this may be generally useful."""
import logging
from google.appengine.api import users
from google.appengine.runtime import apiproxy_errors
from google.appengine.runtime import DeadlineExceededError
from django.conf import settings
from django.http import Http404, HttpResponse
from django.template import Context, loader
from codereview import models
class AddUserToRequestMiddleware(object):
    """Add a user object and a user_is_admin flag to each request."""

    def process_request(self, request):
        """Attach the current user/admin flag and refresh the Account cache."""
        current_user = users.get_current_user()
        request.user = current_user
        request.user_is_admin = users.is_current_user_admin()
        # Update the cached value of the current user's Account.
        if current_user is None:
            account = None
        else:
            account = models.Account.get_account_for_user(current_user)
        models.Account.current_user_account = account
class PropagateExceptionMiddleware(object):
    """Catch exceptions, log them and return a friendly error message.

    Disables itself in DEBUG mode.
    """

    def _text_requested(self, request):
        """Returns True if a text/plain response is requested."""
        # We could use a better heuristics that takes multiple
        # media_ranges and quality factors into account. For now we return
        # True iff 'text/plain' is the only media range the request
        # accepts.
        media_ranges = request.META.get('HTTP_ACCEPT', '').split(',')
        return len(media_ranges) == 1 and media_ranges[0] == 'text/plain'

    def process_exception(self, request, exception):
        """Map an unhandled exception to a friendly 500/503 response.

        Returns None (letting Django proceed normally) in DEBUG mode and
        for Http404, otherwise an HttpResponse in the format the client
        requested (text/plain or rendered HTML).
        """
        if settings.DEBUG or isinstance(exception, Http404):
            return None
        if isinstance(exception, apiproxy_errors.CapabilityDisabledError):
            msg = ('Rietveld: App Engine is undergoing maintenance. '
                   'Please try again in a while.')
            status = 503
        elif isinstance(exception, (DeadlineExceededError, MemoryError)):
            # Fixed: the two fragments previously concatenated without a
            # space, producing "moment.Please".
            msg = ('Rietveld is too hungry at the moment. '
                   'Please try again in a while.')
            status = 503
        else:
            msg = 'Unhandled exception.'
            status = 500
        logging.exception('%s: ' % exception.__class__.__name__)
        technical = '%s [%s]' % (exception, exception.__class__.__name__)
        if self._text_requested(request):
            content = '%s\n\n%s\n' % (msg, technical)
            content_type = 'text/plain'
        else:
            tpl = loader.get_template('exception.html')
            ctx = Context({'msg': msg, 'technical': technical})
            content = tpl.render(ctx)
            content_type = 'text/html'
        return HttpResponse(content, status=status, content_type=content_type)
| Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of helper functions."""
import urlparse
from google.appengine.ext import db
from codereview.exceptions import FetchError
def make_url(base, filename, rev):
"""Helper to construct the URL to fetch.
Args:
base: The base property of the Issue to which the Patch belongs.
filename: The filename property of the Patch instance.
rev: Revision number, or None for head revision.
Returns:
A URL referring to the given revision of the file.
"""
scheme, netloc, path, _, _, _ = urlparse.urlparse(base)
if netloc.endswith(".googlecode.com"):
# Handle Google code repositories
if rev is None:
raise FetchError("Can't access googlecode.com without a revision")
if not path.startswith("/svn/"):
raise FetchError( "Malformed googlecode.com URL (%s)" % base)
path = path[5:] # Strip "/svn/"
url = "%s://%s/svn-history/r%d/%s/%s" % (scheme, netloc, rev,
path, filename)
return url
elif netloc.endswith("sourceforge.net") and rev is not None:
if path.strip().endswith("/"):
path = path.strip()[:-1]
else:
path = path.strip()
splitted_path = path.split("/")
url = "%s://%s/%s/!svn/bc/%d/%s/%s" % (scheme, netloc,
"/".join(splitted_path[1:3]), rev,
"/".join(splitted_path[3:]),
filename)
return url
# Default for viewvc-based URLs (svn.python.org)
url = base
if not url.endswith('/'):
url += '/'
url += filename
if rev is not None:
url += '?rev=%s' % rev
return url
def to_dbtext(text):
    """Coerce a string into a db.Text instance.

    Args:
      text: a string (str or unicode).

    Returns:
      A db.Text instance.
    """
    if isinstance(text, unicode):
        # A TypeError is raised if text is unicode and an encoding is given.
        return db.Text(text)
    try:
        return db.Text(text, encoding='utf-8')
    except UnicodeDecodeError:
        # Not valid UTF-8; latin-1 accepts any byte sequence.
        return db.Text(text, encoding='latin-1')
def unify_linebreaks(text):
    """Return 'text' with all line breaks (CRLF, CR) converted to LF.

    Args:
      text: a string.

    Returns:
      A string with all line breaks converted to LF.
    """
    # Collapse CRLF first so a lone-CR pass cannot double-convert it.
    without_crlf = text.replace('\r\n', '\n')
    return without_crlf.replace('\r', '\n')
| Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exception classes."""
class RietveldError(Exception):
    """Base class for all exceptions in this application.

    Callers can catch this one type to handle every error deliberately
    raised by the codereview package.
    """
class FetchError(RietveldError):
    """Exception raised when fetching of remote files fails.

    Also raised for repository URLs that cannot be turned into a valid
    fetch URL (see helpers that build revision URLs).
    """
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Django template library for Rietveld."""
import cgi
from google.appengine.api import memcache
from google.appengine.api import users
import django.template
import django.utils.safestring
from django.core.urlresolvers import reverse
from codereview import models
register = django.template.Library()
user_cache = {}
def get_links_for_users(user_emails):
  """Return a dictionary of email->link to user page and fill caches.

  Lookups go through three tiers in order of cost: the module-level
  user_cache dict, then memcache, then the datastore.  Results found in
  a slower tier are written back to the faster tiers.

  Args:
    user_emails: iterable of email address strings.
  Returns:
    Dict mapping every input email to an HTML fragment: an escaped
    nickname, wrapped in a profile link when the account has selected
    a nickname.
  """
  link_dict = {}
  remaining_emails = set(user_emails)
  # initialize with email usernames (fallback used when no tier answers)
  for email in remaining_emails:
    nick = email.split('@', 1)[0]
    link_dict[email] = cgi.escape(nick)
  # look in the local cache
  for email in remaining_emails:
    if email in user_cache:
      link_dict[email] = user_cache[email]
  remaining_emails = remaining_emails - set(user_cache)
  if not remaining_emails:
    return link_dict
  # then look in memcache
  memcache_results = memcache.get_multi(remaining_emails,
                                        key_prefix="show_user:")
  for email in memcache_results:
    link_dict[email] = memcache_results[email]
    user_cache[email] = memcache_results[email]
  remaining_emails = remaining_emails - set(memcache_results)
  if not remaining_emails:
    return link_dict
  # and finally hit the datastore
  accounts = models.Account.get_accounts_for_emails(remaining_emails)
  for account in accounts:
    if account and account.user_has_selected_nickname:
      ret = ('<a href="%s" onMouseOver="M_showUserInfoPopup(this)">%s</a>' %
             (reverse('codereview.views.show_user', args=[account.nickname]),
              cgi.escape(account.nickname)))
      link_dict[account.email] = ret
  # Write every datastore-resolved entry (including the plain escaped
  # fallback for accounts without a chosen nickname) back to memcache
  # for 300 seconds and to the in-process cache.
  datastore_results = dict((e, link_dict[e]) for e in remaining_emails)
  memcache.set_multi(datastore_results, 300, key_prefix='show_user:')
  user_cache.update(datastore_results)
  return link_dict
def get_link_for_user(email):
  """Return the profile-page link fragment for a single email address."""
  return get_links_for_users([email])[email]
@register.filter
def show_user(email, arg=None, _autoescape=None, _memcache_results=None):
  """Render a link to the user's dashboard, with text being the nickname."""
  if isinstance(email, users.User):
    email = email.email()
  # Unless the filter argument suppresses it, the signed-in user's own
  # address renders as the literal string 'me'.
  current = users.get_current_user() if not arg else None
  if current is not None and email == current.email():
    return 'me'
  return django.utils.safestring.mark_safe(get_link_for_user(email))
@register.filter
def show_users(email_list, arg=None):
  """Render list of links to each user's dashboard."""
  emails = [addr.email() if isinstance(addr, users.User) else addr
            for addr in email_list]
  links = get_links_for_users(emails)
  if not arg:
    current = users.get_current_user()
    if current is not None:
      links[current.email()] = 'me'
  return django.utils.safestring.mark_safe(', '.join(
      links[email] for email in email_list))
class UrlAppendViewSettingsNode(django.template.Node):
  """Django template tag that appends context and column_width parameter.

  This tag should be used after any URL that requires view settings.

  Example:
    <a href='{%url /foo%}{%urlappend_view_settings%}'>

  The tag reads the current column width and context from the template
  context; when present they are rendered as a '?param1&param2' query
  string, otherwise an empty string is returned.
  """

  def __init__(self):
    super(UrlAppendViewSettingsNode, self).__init__()
    self.view_context = django.template.Variable('context')
    self.view_colwidth = django.template.Variable('column_width')

  def render(self, context):
    """Returns a HTML fragment."""
    query_parts = []
    try:
      ctx = self.view_context.resolve(context)
    except django.template.VariableDoesNotExist:
      ctx = -1
    # None means 'whole file'; a positive int selects that many lines.
    if ctx is None:
      query_parts.append('context=')
    elif isinstance(ctx, int) and ctx > 0:
      query_parts.append('context=%d' % ctx)
    try:
      colwidth = self.view_colwidth.resolve(context)
    except django.template.VariableDoesNotExist:
      colwidth = None
    if colwidth is not None:
      query_parts.append('column_width=%d' % colwidth)
    if not query_parts:
      return ''
    return '?%s' % '&'.join(query_parts)
@register.tag
def urlappend_view_settings(_parser, _token):
  """Template-tag entry point; delegates to UrlAppendViewSettingsNode."""
  return UrlAppendViewSettingsNode()
def get_nickname(email, never_me=False, request=None):
  """Return a nickname for an email address.

  If 'never_me' is True, 'me' is not returned if 'email' belongs to the
  current logged in user. If 'request' is a HttpRequest, it is used to
  cache the nickname returned by models.Account.get_nickname_for_email().
  """
  if isinstance(email, users.User):
    email = email.email()
  if not never_me:
    user = request.user if request is not None else users.get_current_user()
    if user is not None and email == user.email():
      return 'me'
  if request is None:
    return models.Account.get_nickname_for_email(email)
  # _nicknames is injected into request as a per-request cache.
  # TODO(maruel): Use memcache instead.
  # Access to a protected member _nicknames of a client class
  # pylint: disable=W0212
  cache = getattr(request, '_nicknames', None)
  if cache is None:
    cache = request._nicknames = {}
  if email not in cache:
    cache[email] = models.Account.get_nickname_for_email(email)
  return cache[email]
class NicknameNode(django.template.Node):
  """Template node rendering the nickname for an email address.

  The result is cached when a HttpRequest is available in a 'request'
  template variable.

  The tag takes one or two arguments: the template variable holding the
  email address, and an optional flag which, when truthy, prevents 'me'
  from ever being rendered.

  Example usage:
    {% cached_nickname msg.sender %}
    {% cached_nickname msg.sender True %}
  """

  def __init__(self, email_address, never_me=''):
    """Constructor.

    'email_address' is the name of the template variable that holds an
    email address. If 'never_me' evaluates to True, 'me' won't be returned.
    """
    super(NicknameNode, self).__init__()
    self.email_address = django.template.Variable(email_address)
    self.never_me = bool(never_me.strip())
    # Toggled by the 'nicknames' tag to render a list of addresses.
    self.is_multi = False

  def render(self, context):
    try:
      resolved = self.email_address.resolve(context)
    except django.template.VariableDoesNotExist:
      return ''
    request = context.get('request')
    if not self.is_multi:
      return get_nickname(resolved, self.never_me, request)
    return ', '.join(
        get_nickname(addr, self.never_me, request) for addr in resolved)
@register.tag
def nickname(_parser, token):
  """Almost the same as nickname filter but the result is cached."""
  parts = token.split_contents()
  if len(parts) == 3:
    _, email_address, never_me = parts
  elif len(parts) == 2:
    _, email_address = parts
    never_me = ''
  else:
    raise django.template.TemplateSyntaxError(
        "%r requires exactly one or two arguments" % token.contents.split()[0])
  return NicknameNode(email_address, never_me)
@register.tag
def nicknames(parser, token):
  """Wrapper for nickname tag with is_multi flag enabled."""
  multi_node = nickname(parser, token)
  multi_node.is_multi = True
  return multi_node
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views for Rietveld."""
import binascii
import datetime
import email # see incoming_mail()
import email.utils
import logging
import md5
import mimetypes
import os
import random
import re
import urllib
from cStringIO import StringIO
from xml.etree import ElementTree
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.api import xmpp
from google.appengine.ext import db
from google.appengine.runtime import DeadlineExceededError
from google.appengine.runtime import apiproxy_errors
from django import forms
# Import settings as django_settings to avoid name conflict with settings().
from django.conf import settings as django_settings
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
import django.template
from django.template import RequestContext
from django.utils import encoding
from django.utils import simplejson
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from codereview import engine
from codereview import library
from codereview import models
from codereview import patching
from codereview import utils
from codereview.exceptions import FetchError
# Add our own custom template tags library.
django.template.add_to_builtins('codereview.library')
### Constants ###
# True when running under the local development server (dev_appserver).
IS_DEV = os.environ['SERVER_SOFTWARE'].startswith('Dev')  # Development server

# Maximum forms fields length
MAX_SUBJECT = 100
MAX_DESCRIPTION = 10000
MAX_URL = 2083
MAX_REVIEWERS = 1000
MAX_CC = 2000
MAX_MESSAGE = 10000
MAX_FILENAME = 255
# Upper bound for datastore key strings passed through hidden form fields.
MAX_DB_KEY_LENGTH = 1000
### Form classes ###
class AccountInput(forms.TextInput):
  """Text input widget with jQuery autocompletion of account names."""

  # Associates the necessary css/js files for the control. See
  # http://docs.djangoproject.com/en/dev/topics/forms/media/.
  #
  # Don't forget to place {{formname.media}} into html header
  # when using this html control.
  class Media:
    css = {
      'all': ('autocomplete/jquery.autocomplete.css',)
    }
    js = (
      'autocomplete/lib/jquery.js',
      'autocomplete/lib/jquery.bgiframe.min.js',
      'autocomplete/lib/jquery.ajaxQueue.js',
      'autocomplete/jquery.autocomplete.js'
    )

  def render(self, name, value, attrs=None):
    """Render the text input plus a script wiring up autocompletion.

    Autocompletion is only attached when a user is signed in; the
    completion source URL is the account() view.
    """
    output = super(AccountInput, self).render(name, value, attrs)
    if models.Account.current_user_account is not None:
      # TODO(anatoli): move this into .js media for this form
      data = {'name': name, 'url': reverse(account),
              'multiple': 'true'}
      # Widgets default to accepting multiple comma-separated accounts;
      # an explicit attrs={'multiple': False} disables that.
      if self.attrs.get('multiple', True) == False:
        data['multiple'] = 'false'
      output += mark_safe(u'''
      <script type="text/javascript">
          jQuery("#id_%(name)s").autocomplete("%(url)s", {
          max: 10,
          highlight: false,
          multiple: %(multiple)s,
          multipleSeparator: ", ",
          scroll: true,
          scrollHeight: 300,
          matchContains: true,
          formatResult : function(row) {
            return row[0].replace(/ .+/gi, '');
          }
          });
      </script>''' % data)
    return output
class IssueBaseForm(forms.Form):
  """Shared fields for creating/editing an issue with a repository base."""

  subject = forms.CharField(max_length=MAX_SUBJECT,
                            widget=forms.TextInput(attrs={'size': 60}))
  description = forms.CharField(required=False,
                                max_length=MAX_DESCRIPTION,
                                widget=forms.Textarea(attrs={'cols': 60}))
  branch = forms.ChoiceField(required=False, label='Base URL')
  base = forms.CharField(required=False,
                         max_length=MAX_URL,
                         widget=forms.TextInput(attrs={'size': 60}))
  reviewers = forms.CharField(required=False,
                              max_length=MAX_REVIEWERS,
                              widget=AccountInput(attrs={'size': 60}))
  cc = forms.CharField(required=False,
                       max_length=MAX_CC,
                       label = 'CC',
                       widget=AccountInput(attrs={'size': 60}))
  private = forms.BooleanField(required=False, initial=False)

  def set_branch_choices(self, base=None):
    """Populate the 'branch' choice field from the Branch entities.

    If 'base' matches a branch URL, that branch becomes the initial
    selection; otherwise the first branch seen is used.
    """
    branches = models.Branch.all()
    bound_field = self['branch']
    choices = []
    default = None
    for b in branches:
      # Backfill the denormalized repo_name on older entities.
      if not b.repo_name:
        b.repo_name = b.repo.name
        b.put()
      pair = (b.key(), '%s - %s - %s' % (b.repo_name, b.category, b.name))
      choices.append(pair)
      if default is None and (base is None or b.url == base):
        default = b.key()
    choices.sort(key=lambda pair: pair[1].lower())
    choices.insert(0, ('', '[See Base]'))
    bound_field.field.choices = choices
    if default is not None:
      self.initial['branch'] = default

  def get_base(self):
    """Return the base URL from 'base' or the selected branch, or None.

    Records a form error on 'base' when neither source yields a URL.
    """
    base = self.cleaned_data.get('base')
    if not base:
      key = self.cleaned_data['branch']
      if key:
        branch = models.Branch.get(key)
        if branch is not None:
          base = branch.url
    if not base:
      self.errors['base'] = ['You must specify a base']
    return base or None
class NewForm(IssueBaseForm):
  """Form for creating a new issue from an uploaded or fetched diff."""

  data = forms.FileField(required=False)
  url = forms.URLField(required=False,
                       max_length=MAX_URL,
                       widget=forms.TextInput(attrs={'size': 60}))
  send_mail = forms.BooleanField(required=False, initial=True)
class AddForm(forms.Form):
  """Form for adding a new patch set to an existing issue."""

  message = forms.CharField(max_length=MAX_SUBJECT,
                            widget=forms.TextInput(attrs={'size': 60}))
  data = forms.FileField(required=False)
  url = forms.URLField(required=False,
                       max_length=MAX_URL,
                       widget=forms.TextInput(attrs={'size': 60}))
  reviewers = forms.CharField(max_length=MAX_REVIEWERS, required=False,
                              widget=AccountInput(attrs={'size': 60}))
  send_mail = forms.BooleanField(required=False, initial=True)
class UploadForm(forms.Form):
  """Form used by the upload.py script to create or update an issue."""

  subject = forms.CharField(max_length=MAX_SUBJECT)
  description = forms.CharField(max_length=MAX_DESCRIPTION, required=False)
  content_upload = forms.BooleanField(required=False)
  separate_patches = forms.BooleanField(required=False)
  base = forms.CharField(max_length=MAX_URL, required=False)
  data = forms.FileField(required=False)
  issue = forms.IntegerField(required=False)
  reviewers = forms.CharField(max_length=MAX_REVIEWERS, required=False)
  cc = forms.CharField(max_length=MAX_CC, required=False)
  private = forms.BooleanField(required=False, initial=False)
  send_mail = forms.BooleanField(required=False)
  base_hashes = forms.CharField(required=False)
  repo_guid = forms.CharField(required=False, max_length=MAX_URL)

  def clean_base(self):
    """Require a base URL unless file contents are uploaded separately."""
    base = self.cleaned_data.get('base')
    if not base and not self.cleaned_data.get('content_upload', False):
      # Use the call form of raise: the Python-2-only
      # 'raise E, msg' statement form is invalid in Python 3.
      raise forms.ValidationError('Base URL is required.')
    return self.cleaned_data.get('base')

  def get_base(self):
    """Return the cleaned base URL (may be empty for content uploads)."""
    return self.cleaned_data.get('base')
class UploadContentForm(forms.Form):
  """Form used by upload.py to send the content of a single file."""

  filename = forms.CharField(max_length=MAX_FILENAME)
  status = forms.CharField(required=False, max_length=20)
  checksum = forms.CharField(max_length=32)
  file_too_large = forms.BooleanField(required=False)
  is_binary = forms.BooleanField(required=False)
  is_current = forms.BooleanField(required=False)

  def clean(self):
    # Check presence of 'data'. We cannot use FileField because
    # it disallows empty files.
    super(UploadContentForm, self).clean()
    # Bug fix: the original condition ('not self.files and ...') only
    # fired when self.files was empty, so a non-empty upload lacking the
    # 'data' key slipped through and get_uploaded_content() raised
    # KeyError.  Test membership directly instead.
    if 'data' not in self.files:
      raise forms.ValidationError('No content uploaded.')
    return self.cleaned_data

  def get_uploaded_content(self):
    """Return the raw bytes of the uploaded file."""
    return self.files['data'].read()
class UploadPatchForm(forms.Form):
  """Form used by upload.py to send one patch of a separate-patch upload."""

  filename = forms.CharField(max_length=MAX_FILENAME)
  content_upload = forms.BooleanField(required=False)

  def get_uploaded_patch(self):
    """Return the raw bytes of the uploaded patch."""
    return self.files['data'].read()
class EditForm(IssueBaseForm):
  """Form for editing an issue that has a repository base URL."""

  closed = forms.BooleanField(required=False)
class EditLocalBaseForm(forms.Form):
  """Form for editing an issue whose base was uploaded (no base URL)."""

  subject = forms.CharField(max_length=MAX_SUBJECT,
                            widget=forms.TextInput(attrs={'size': 60}))
  description = forms.CharField(required=False,
                                max_length=MAX_DESCRIPTION,
                                widget=forms.Textarea(attrs={'cols': 60}))
  reviewers = forms.CharField(required=False,
                              max_length=MAX_REVIEWERS,
                              widget=AccountInput(attrs={'size': 60}))
  cc = forms.CharField(required=False,
                       max_length=MAX_CC,
                       label = 'CC',
                       widget=AccountInput(attrs={'size': 60}))
  private = forms.BooleanField(required=False, initial=False)
  closed = forms.BooleanField(required=False)

  def get_base(self):
    # Local-base issues carry their own content; there is no base URL.
    return None
class RepoForm(forms.Form):
  """Form for adding a repository."""

  name = forms.CharField()
  url = forms.URLField()
  guid = forms.CharField(required=False)
class BranchForm(forms.Form):
  """Form for adding a branch to a repository."""

  category = forms.CharField(
      widget=forms.Select(choices=[(ch, ch)
                                   for ch in models.Branch.category.choices]))
  name = forms.CharField()
  url = forms.URLField()
class PublishForm(forms.Form):
  """Form for publishing review comments (issue owner variant)."""

  subject = forms.CharField(max_length=MAX_SUBJECT,
                            widget=forms.TextInput(attrs={'size': 60}))
  reviewers = forms.CharField(required=False,
                              max_length=MAX_REVIEWERS,
                              widget=AccountInput(attrs={'size': 60}))
  cc = forms.CharField(required=False,
                       max_length=MAX_CC,
                       label = 'CC',
                       widget=AccountInput(attrs={'size': 60}))
  send_mail = forms.BooleanField(required=False)
  message = forms.CharField(required=False,
                            max_length=MAX_MESSAGE,
                            widget=forms.Textarea(attrs={'cols': 60}))
  message_only = forms.BooleanField(required=False,
                                    widget=forms.HiddenInput())
  no_redirect = forms.BooleanField(required=False,
                                   widget=forms.HiddenInput())
  # Datastore key of the message being replied to, if any.
  in_reply_to = forms.CharField(required=False,
                                max_length=MAX_DB_KEY_LENGTH,
                                widget=forms.HiddenInput())
class MiniPublishForm(forms.Form):
  """Form for publishing review comments (non-owner variant, no subject)."""

  reviewers = forms.CharField(required=False,
                              max_length=MAX_REVIEWERS,
                              widget=AccountInput(attrs={'size': 60}))
  cc = forms.CharField(required=False,
                       max_length=MAX_CC,
                       label = 'CC',
                       widget=AccountInput(attrs={'size': 60}))
  send_mail = forms.BooleanField(required=False)
  message = forms.CharField(required=False,
                            max_length=MAX_MESSAGE,
                            widget=forms.Textarea(attrs={'cols': 60}))
  message_only = forms.BooleanField(required=False,
                                    widget=forms.HiddenInput())
  no_redirect = forms.BooleanField(required=False,
                                   widget=forms.HiddenInput())
# Choices for the diff-context selector; '' (empty) means show the whole file.
FORM_CONTEXT_VALUES = [(x, '%d lines' % x) for x in models.CONTEXT_CHOICES]
FORM_CONTEXT_VALUES.append(('', 'Whole file'))
class SettingsForm(forms.Form):
  """Form for the per-account settings page (nickname, display, notify)."""

  nickname = forms.CharField(max_length=30)
  context = forms.IntegerField(
      widget=forms.Select(choices=FORM_CONTEXT_VALUES),
      required=False,
      label='Context')
  column_width = forms.IntegerField(
      initial=django_settings.DEFAULT_COLUMN_WIDTH,
      min_value=django_settings.MIN_COLUMN_WIDTH,
      max_value=django_settings.MAX_COLUMN_WIDTH)
  notify_by_email = forms.BooleanField(required=False,
                                       widget=forms.HiddenInput())
  notify_by_chat = forms.BooleanField(
      required=False,
      help_text='You must accept the invite for this to work.')

  def clean_nickname(self):
    """Validate the chosen nickname and ensure it is unique.

    Raises forms.ValidationError for illegal characters, bad
    whitespace, the reserved name 'me', or a nickname already taken by
    a different account.
    """
    nickname = self.cleaned_data.get('nickname')
    # Check for allowed characters
    match = re.match(r'[\w\.\-_\(\) ]+$', nickname, re.UNICODE|re.IGNORECASE)
    if not match:
      raise forms.ValidationError('Allowed characters are letters, digits, '
                                  '".-_()" and spaces.')
    # Check for sane whitespaces
    if re.search(r'\s{2,}', nickname):
      raise forms.ValidationError('Use single spaces between words.')
    if len(nickname) != len(nickname.strip()):
      raise forms.ValidationError('Leading and trailing whitespaces are '
                                  'not allowed.')
    if nickname.lower() == 'me':
      raise forms.ValidationError('Choose a different nickname.')
    # Look for existing nicknames
    accounts = list(models.Account.gql('WHERE lower_nickname = :1',
                                       nickname.lower()))
    for account in accounts:
      # The current account keeping its own nickname is not a conflict.
      if account.key() == models.Account.current_user_account.key():
        continue
      raise forms.ValidationError('This nickname is already in use.')
    return nickname
class MigrateEntitiesForm(forms.Form):
  """Form for migrating entities from a previous email address."""

  account = forms.CharField(label='Your previous email address')
  # Current user; must be injected via set_user() before validation.
  _user = None

  def set_user(self, user):
    """Sets the _user attribute.

    A user object is needed for validation. This method has to be
    called before is_valid() is called to allow us to validate if a
    email address given in account belongs to the same user.
    """
    self._user = user

  def clean_account(self):
    """Verifies that an account with this emails exists and returns it.

    This method is executed by Django when Form.is_valid() is called.
    """
    if self._user is None:
      raise forms.ValidationError('No user given.')
    account = models.Account.get_account_for_email(self.cleaned_data['account'])
    if account is None:
      raise forms.ValidationError('No such email.')
    if account.user.email() == self._user.email():
      raise forms.ValidationError(
          'Nothing to do. This is your current email address.')
    # Same Google account re-registered under a new email keeps its
    # user_id; a mismatch means the address belongs to someone else.
    if account.user.user_id() != self._user.user_id():
      raise forms.ValidationError(
          'This email address isn\'t related to your account.')
    return account
class SearchForm(forms.Form):
  """Form backing the issue search endpoint; most fields are hidden inputs."""

  format = forms.ChoiceField(
      required=False,
      choices=(
          ('html', 'html'),
          ('json', 'json')),
      widget=forms.HiddenInput(attrs={'value': 'html'}))
  keys_only = forms.BooleanField(
      required=False,
      widget=forms.HiddenInput(attrs={'value': 'False'}))
  with_messages = forms.BooleanField(
      required=False,
      widget=forms.HiddenInput(attrs={'value': 'False'}))
  # Datastore query cursor for paging through results.
  cursor = forms.CharField(
      required=False,
      widget=forms.HiddenInput(attrs={'value': ''}))
  limit = forms.IntegerField(
      required=False,
      min_value=1,
      max_value=1000,
      initial=10,
      widget=forms.HiddenInput(attrs={'value': '10'}))
  closed = forms.NullBooleanField(required=False)
  owner = forms.CharField(required=False,
                          max_length=MAX_REVIEWERS,
                          widget=AccountInput(attrs={'size': 60,
                                                     'multiple': False}))
  reviewer = forms.CharField(required=False,
                             max_length=MAX_REVIEWERS,
                             widget=AccountInput(attrs={'size': 60,
                                                        'multiple': False}))
  repo_guid = forms.CharField(required=False, max_length=MAX_URL,
                              label="Repository ID")
  base = forms.CharField(required=False, max_length=MAX_URL)
  private = forms.NullBooleanField(required=False)
  created_before = forms.DateTimeField(required=False, label='Created before')
  created_after = forms.DateTimeField(
      required=False, label='Created on or after')
  modified_before = forms.DateTimeField(required=False, label='Modified before')
  modified_after = forms.DateTimeField(
      required=False, label='Modified on or after')

  def _clean_accounts(self, key):
    """Cleans up autocomplete field.

    The input is validated to be zero or one name/email and it's
    validated that the users exists.

    Args:
      key: the field name.

    Returns an User instance or raises ValidationError.
    """
    accounts = filter(None,
                      (x.strip()
                       for x in self.cleaned_data.get(key, '').split(',')))
    if len(accounts) > 1:
      raise forms.ValidationError('Only one user name is allowed.')
    elif not accounts:
      return None
    account = accounts[0]
    # Values containing '@' are emails; anything else is a nickname.
    if '@' in account:
      acct = models.Account.get_account_for_email(account)
    else:
      acct = models.Account.get_account_for_nickname(account)
    if not acct:
      raise forms.ValidationError('Unknown user')
    return acct.user

  def clean_owner(self):
    # Owner is kept as a users.User instance (or None).
    return self._clean_accounts('owner')

  def clean_reviewer(self):
    # Reviewer is reduced to a plain email string (or None).
    user = self._clean_accounts('reviewer')
    if user:
      return user.email()
### Exceptions ###
class InvalidIncomingEmailError(Exception):
  """Raised by the incoming mail handler when a problem occurs."""
### Helper functions ###
# Counter displayed (by respond()) below) on every page showing how
# many requests the current incarnation has handled, not counting
# redirects. Rendered by templates/base.html.
# NOTE: per-instance and in-memory only; resets on each new instance.
counter = 0
def respond(request, template, params=None):
  """Helper to render a response, passing standard stuff to the response.

  Args:
    request: The request object.
    template: The template name; '.html' is appended automatically.
    params: A dict giving the template parameters; modified in-place.

  Returns:
    Whatever render_to_response(template, params) returns.

  Raises:
    Whatever render_to_response(template, params) raises.
  """
  global counter
  counter += 1
  if params is None:
    params = {}
  must_choose_nickname = False
  uploadpy_hint = False
  if request.user is not None:
    account = models.Account.current_user_account
    must_choose_nickname = not account.user_has_selected_nickname()
    uploadpy_hint = account.uploadpy_hint
  # Standard parameters available to every template.
  params['request'] = request
  params['counter'] = counter
  params['user'] = request.user
  params['is_admin'] = request.user_is_admin
  params['is_dev'] = IS_DEV
  params['media_url'] = django_settings.MEDIA_URL
  params['special_banner'] = getattr(django_settings, 'SPECIAL_BANNER', None)
  full_path = request.get_full_path().encode('utf-8')
  if request.user is None:
    params['sign_in'] = users.create_login_url(full_path)
  else:
    params['sign_out'] = users.create_logout_url(full_path)
    account = models.Account.current_user_account
    if account is not None:
      # XSRF token is only meaningful for signed-in users.
      params['xsrf_token'] = account.get_xsrf_token()
  params['must_choose_nickname'] = must_choose_nickname
  params['uploadpy_hint'] = uploadpy_hint
  params['rietveld_revision'] = django_settings.RIETVELD_REVISION
  try:
    return render_to_response(template, params,
                              context_instance=RequestContext(request))
  finally:
    library.user_cache.clear() # don't want this sticking around
def _random_bytes(n):
"""Helper returning a string of random bytes of given length."""
return ''.join(map(chr, (random.randrange(256) for i in xrange(n))))
def _clean_int(value, default, min_value=None, max_value=None):
  """Helper to cast value to int and to clip it to min or max_value.

  Args:
    value: Any value (preferably something that can be casted to int).
    default: Default value to be used when type casting fails.
    min_value: Minimum allowed value (default: None).
    max_value: Maximum allowed value (default: None).

  Returns:
    An integer between min_value and max_value.
  """
  if not isinstance(value, (int, long)):
    try:
      value = int(value)
    except (TypeError, ValueError):
      value = default
  # Clamp to the permitted range, when bounds are given.
  if min_value is not None and value < min_value:
    value = min_value
  if max_value is not None and value > max_value:
    value = max_value
  return value
def _can_view_issue(user, issue):
  """Return True if 'user' may view 'issue'.

  Public issues are visible to everyone; private issues only to the
  owner, reviewers, and CC'd addresses.
  """
  if user is None:
    return not issue.private
  if not issue.private or issue.owner == user:
    return True
  user_email = db.Email(user.email().lower())
  return user_email in issue.cc or user_email in issue.reviewers
def _notify_issue(request, issue, message):
  """Try sending an XMPP (chat) message.

  Args:
    request: The request object.
    issue: Issue whose owner, reviewers, CC are to be notified.
    message: Text of message to send, e.g. 'Created'.

  The current user and the issue's subject and URL are appended to the message.

  Returns:
    True if the message was (apparently) delivered, False if not.
  """
  iid = issue.key().id()
  emails = [issue.owner.email()]
  if issue.reviewers:
    emails.extend(issue.reviewers)
  if issue.cc:
    emails.extend(issue.cc)
  accounts = models.Account.get_multiple_accounts_by_email(emails)
  # Only notify accounts that opted into chat notifications; their
  # email address doubles as the XMPP JID.
  jids = []
  for account in accounts.itervalues():
    logging.debug('email=%r,chat=%r', account.email, account.notify_by_chat)
    if account.notify_by_chat:
      jids.append(account.email)
  if not jids:
    logging.debug('No XMPP jids to send to for issue %d', iid)
    return True  # Nothing to do.
  jids_str = ', '.join(jids)
  logging.debug('Sending XMPP for issue %d to %s', iid, jids_str)
  sender = '?'
  if models.Account.current_user_account:
    sender = models.Account.current_user_account.nickname
  elif request.user:
    sender = request.user.email()
  message = '%s by %s: %s\n%s' % (message,
                                  sender,
                                  issue.subject,
                                  request.build_absolute_uri(
                                      reverse(show, args=[iid])))
  try:
    sts = xmpp.send_message(jids, message)
  except Exception, err:
    logging.exception('XMPP exception %s sending for issue %d to %s',
                      err, iid, jids_str)
    return False
  else:
    # send_message returns one status code per JID; success only when
    # every recipient reported NO_ERROR.
    if sts == [xmpp.NO_ERROR] * len(jids):
      logging.info('XMPP message sent for issue %d to %s', iid, jids_str)
      return True
    else:
      logging.error('XMPP error %r sending for issue %d to %s',
                    sts, iid, jids_str)
      return False
class HttpTextResponse(HttpResponse):
  """HttpResponse that always uses the text/plain UTF-8 content type."""

  def __init__(self, *args, **kwargs):
    # Force the content type regardless of what the caller passed.
    kwargs['content_type'] = 'text/plain; charset=utf-8'
    super(HttpTextResponse, self).__init__(*args, **kwargs)
class HttpHtmlResponse(HttpResponse):
  """HttpResponse that always uses the text/html UTF-8 content type."""

  def __init__(self, *args, **kwargs):
    # Force the content type regardless of what the caller passed.
    kwargs['content_type'] = 'text/html; charset=utf-8'
    super(HttpHtmlResponse, self).__init__(*args, **kwargs)
### Decorators for request handlers ###
def post_required(func):
  """Decorator that returns an error unless request.method == 'POST'."""
  def wrapper(request, *args, **kwds):
    if request.method == 'POST':
      return func(request, *args, **kwds)
    return HttpTextResponse('This requires a POST request.', status=405)
  return wrapper
def login_required(func):
  """Decorator that redirects to the login page if you're not logged in."""
  def wrapper(request, *args, **kwds):
    if request.user is not None:
      return func(request, *args, **kwds)
    login_url = users.create_login_url(
        request.get_full_path().encode('utf-8'))
    return HttpResponseRedirect(login_url)
  return wrapper
def xsrf_required(func):
  """Decorator to check XSRF token.

  This only checks if the method is POST; it lets other method go
  through unchallenged.  Apply after @login_required and (if
  applicable) @post_required.  This decorator is mutually exclusive
  with @upload_required.
  """
  def xsrf_wrapper(request, *args, **kwds):
    if request.method == 'POST':
      post_token = request.POST.get('xsrf_token')
      if not post_token:
        return HttpTextResponse('Missing XSRF token.', status=403)
      account = models.Account.current_user_account
      if not account:
        return HttpTextResponse('Must be logged in for XSRF check.', status=403)
      xsrf_token = account.get_xsrf_token()
      if post_token != xsrf_token:
        # Try the previous hour's token, so a form rendered just before
        # the hourly rotation still validates.
        xsrf_token = account.get_xsrf_token(-1)
        if post_token != xsrf_token:
          # Echo the posted data back so the user can recover what they
          # typed before reloading.
          msg = [u'Invalid XSRF token.']
          if request.POST:
            msg.extend([u'',
                        u'However, this was the data posted to the server:',
                        u''])
            for key in request.POST:
              msg.append(u'%s: %s' % (key, request.POST[key]))
            msg.extend([u'', u'-'*10,
                        u'Please reload the previous page and post again.'])
          return HttpTextResponse(u'\n'.join(msg), status=403)
    return func(request, *args, **kwds)
  return xsrf_wrapper
def upload_required(func):
  """Decorator for POST requests from the upload.py script.

  Right now this is for documentation only, but eventually we should
  change this to insist on a special header that JavaScript cannot
  add, to prevent XSRF attacks on these URLs.  This decorator is
  mutually exclusive with @xsrf_required.
  """
  # Intentionally a no-op for now; see docstring.
  return func
def admin_required(func):
  """Decorator that insists that you're logged in as administratior."""
  def wrapper(request, *args, **kwds):
    # Anonymous users are sent to the login page first.
    if request.user is None:
      login_url = users.create_login_url(
          request.get_full_path().encode('utf-8'))
      return HttpResponseRedirect(login_url)
    if not request.user_is_admin:
      return HttpTextResponse(
          'You must be admin in for this function', status=403)
    return func(request, *args, **kwds)
  return wrapper
def issue_required(func):
  """Decorator that processes the issue_id handler argument."""
  def wrapper(request, issue_id, *args, **kwds):
    issue = models.Issue.get_by_id(int(issue_id))
    if issue is None:
      return HttpTextResponse(
          'No issue exists with that id (%s)' % issue_id, status=404)
    if issue.private:
      # Private issues require sign-in plus explicit access.
      if request.user is None:
        login_url = users.create_login_url(
            request.get_full_path().encode('utf-8'))
        return HttpResponseRedirect(login_url)
      if not _can_view_issue(request.user, issue):
        return HttpTextResponse(
            'You do not have permission to view this issue', status=403)
    request.issue = issue
    return func(request, *args, **kwds)
  return wrapper
def user_key_required(func):
  """Decorator that processes the user handler argument."""
  def wrapper(request, user_key, *args, **kwds):
    user_key = urllib.unquote(user_key)
    # Keys containing '@' are email addresses; otherwise a nickname.
    if '@' in user_key:
      request.user_to_show = users.User(user_key)
      return func(request, *args, **kwds)
    account = models.Account.get_account_for_nickname(user_key)
    if not account:
      logging.info("account not found for nickname %s" % user_key)
      return HttpTextResponse(
          'No user found with that key (%s)' % urllib.quote(user_key),
          status=404)
    request.user_to_show = account.user
    return func(request, *args, **kwds)
  return wrapper
def owner_required(func):
  """Decorator that insists you own the issue.

  It must appear after issue_required or equivalent, like patchset_required.
  """
  @login_required
  def wrapper(request, *args, **kwds):
    if request.issue.owner == request.user:
      return func(request, *args, **kwds)
    return HttpTextResponse('You do not own this issue', status=403)
  return wrapper
def issue_owner_required(func):
  """Decorator that processes the issue_id argument and insists you own it."""
  # Simply stacks issue_required and owner_required around func.
  @issue_required
  @owner_required
  def wrapper(request, *args, **kwds):
    return func(request, *args, **kwds)
  return wrapper
def issue_editor_required(func):
  """Decorator that processes the issue_id argument and insists the user has
  permission to edit it."""
  @login_required
  @issue_required
  def wrapper(request, *args, **kwds):
    if request.issue.user_can_edit(request.user):
      return func(request, *args, **kwds)
    return HttpTextResponse(
        'You do not have permission to edit this issue', status=403)
  return wrapper
def patchset_required(func):
  """Decorator that processes the patchset_id argument."""
  @issue_required
  def wrapper(request, patchset_id, *args, **kwds):
    patchset = models.PatchSet.get_by_id(int(patchset_id),
                                         parent=request.issue)
    if patchset is None:
      return HttpTextResponse(
          'No patch set exists with that id (%s)' % patchset_id, status=404)
    # Attach the already-loaded issue to avoid a redundant fetch.
    patchset.issue = request.issue
    request.patchset = patchset
    return func(request, *args, **kwds)
  return wrapper
def patchset_owner_required(func):
  """Decorator that processes the patchset_id argument and insists you own the
  issue."""
  # Simply stacks patchset_required and owner_required around func.
  @patchset_required
  @owner_required
  def wrapper(request, *args, **kwds):
    return func(request, *args, **kwds)
  return wrapper
def patch_required(func):
  """Decorator that processes the patch_id argument.

  Sets request.patch (with .patchset backlink) or returns a 404.
  """
  @patchset_required
  def wrapper(request, patch_id, *args, **kwds):
    found = models.Patch.get_by_id(int(patch_id), parent=request.patchset)
    if found is None:
      return HttpTextResponse(
          'No patch exists with that id (%s/%s)' %
          (request.patchset.key().id(), patch_id),
          status=404)
    found.patchset = request.patchset
    request.patch = found
    return func(request, *args, **kwds)
  return wrapper
def patch_filename_required(func):
  """Decorator that resolves request.patch from a filename argument.

  Falls back to treating the argument as a numeric patch id for old URLs;
  renders diff_missing.html when nothing matches.
  """
  @patchset_required
  def wrapper(request, patch_filename, *args, **kwds):
    found = models.Patch.gql('WHERE patchset = :1 AND filename = :2',
                             request.patchset, patch_filename).get()
    if found is None and patch_filename.isdigit():
      # It could be an old URL which has a patch ID instead of a filename
      found = models.Patch.get_by_id(int(patch_filename),
                                     parent=request.patchset)
    if found is None:
      return respond(request, 'diff_missing.html',
                     {'issue': request.issue,
                      'patchset': request.patchset,
                      'patch': None,
                      'patchsets': request.issue.patchset_set,
                      'filename': patch_filename})
    found.patchset = request.patchset
    request.patch = found
    return func(request, *args, **kwds)
  return wrapper
def image_required(func):
  """Decorator that processes the image argument.

  Attributes set on the request:
    content: a Content entity.
    mime_type: guessed from the patch filename.
  Redirects to a blank image when the content or mime type is unusable.
  """
  @patch_required
  def wrapper(request, image_type, *args, **kwds):
    # "0" selects the base content, "1" the patched content; any other
    # value leaves content unset and falls through to the blank image.
    if image_type == "0":
      content = request.patch.content
    elif image_type == "1":
      content = request.patch.patched_content
    else:
      content = None
    if not content or not content.data:
      return HttpResponseRedirect(django_settings.MEDIA_URL + "blank.jpg")
    request.mime_type = mimetypes.guess_type(request.patch.filename)[0]
    if not request.mime_type or not request.mime_type.startswith('image/'):
      return HttpResponseRedirect(django_settings.MEDIA_URL + "blank.jpg")
    request.content = content
    return func(request, *args, **kwds)
  return wrapper
def json_response(func):
  """Decorator that converts into JSON any returned value that is not an
  HttpResponse. It handles `pretty` URL parameter to tune JSON response for
  either performance or readability."""
  def wrapper(request, *args, **kwds):
    result = func(request, *args, **kwds)
    if isinstance(result, HttpResponse):
      return result
    pretty = request.REQUEST.get('pretty', '0').lower()
    if pretty in ('1', 'true', 'on'):
      encoded = simplejson.dumps(result, indent=' ', sort_keys=True)
    else:
      # Compact separators for the default (performance) mode.
      encoded = simplejson.dumps(result, separators=(',', ':'))
    return HttpResponse(encoded,
                        content_type='application/json; charset=utf-8')
  return wrapper
### Request handlers ###
def index(request):
  """/ - Show a list of review issues"""
  # Anonymous visitors get the public list; signed-in users their dashboard.
  if request.user is None:
    return all(request, index_call=True)
  return mine(request)
# Default number of issues shown per page in paginated listings.
DEFAULT_LIMIT = 10
def _url(path, **kwargs):
"""Format parameters for query string.
Args:
path: Path of URL.
kwargs: Keyword parameters are treated as values to add to the query
parameter of the URL. If empty no query parameters will be added to
path and '?' omitted from the URL.
"""
if kwargs:
encoded_parameters = urllib.urlencode(kwargs)
if path.endswith('?'):
# Trailing ? on path. Append parameters to end.
return '%s%s' % (path, encoded_parameters)
elif '?' in path:
# Append additional parameters to existing query parameters.
return '%s&%s' % (path, encoded_parameters)
else:
# Add query parameters to path with no query parameters.
return '%s?%s' % (path, encoded_parameters)
else:
return path
def _inner_paginate(request, issues, template, extra_template_params):
  """Render one page of an issue listing.

  Takes care of the private bit: issues the requesting user may not view
  are filtered out before rendering.

  Args:
    request: Request containing offset and limit parameters.
    issues: Issues to be displayed.
    template: Name of template that renders issue page.
    extra_template_params: Dictionary of extra parameters to pass to page
      rendering.

  Returns:
    Response for sending back to browser.
  """
  visible = [issue for issue in issues
             if _can_view_issue(request.user, issue)]
  _optimize_draft_counts(visible)
  _load_users_for_issues(visible)
  params = dict(issues=visible, limit=None, newest=None, prev=None,
                next=None, nexttext='', first='', last='')
  if extra_template_params:
    params.update(extra_template_params)
  return respond(request, template, params)
def _paginate_issues(page_url,
                     request,
                     query,
                     template,
                     extra_nav_parameters=None,
                     extra_template_params=None):
  """Display paginated list of issues.

  Args:
    page_url: Base URL of issue page that is being paginated. Typically
      generated by calling 'reverse' with a name and arguments of a view
      function.
    request: Request containing offset and limit parameters.
    query: Query over issues.
    template: Name of template that renders issue page.
    extra_nav_parameters: Dictionary of extra parameters to append to the
      navigation links.
    extra_template_params: Dictionary of extra parameters to pass to page
      rendering.

  Returns:
    Response for sending back to browser.
  """
  offset = _clean_int(request.GET.get('offset'), 0, 0)
  limit = _clean_int(request.GET.get('limit'), DEFAULT_LIMIT, 1, 100)
  nav_parameters = {'limit': str(limit)}
  if extra_nav_parameters is not None:
    nav_parameters.update(extra_nav_parameters)
  params = {
      'limit': limit,
      'first': offset + 1,
      'nexttext': 'Older',
  }
  # Fetch one more to see if there should be a 'next' link
  issues = query.fetch(limit + 1, offset)
  if len(issues) > limit:
    del issues[limit:]
    params['next'] = _url(page_url, offset=offset + limit, **nav_parameters)
  # Index of the last issue on this page, or None when the page holds at
  # most one issue.  (Replaces the fragile `cond and a or b` idiom with an
  # explicit conditional expression.)
  params['last'] = offset + len(issues) if len(issues) > 1 else None
  if offset > 0:
    params['prev'] = _url(page_url, offset=max(0, offset - limit),
                          **nav_parameters)
  if offset > limit:
    params['newest'] = _url(page_url, **nav_parameters)
  if extra_template_params:
    params.update(extra_template_params)
  return _inner_paginate(request, issues, template, params)
def _paginate_issues_with_cursor(page_url,
                                 request,
                                 query,
                                 limit,
                                 template,
                                 extra_nav_parameters=None,
                                 extra_template_params=None):
  """Display paginated list of issues using a cursor instead of offset.

  Args:
    page_url: Base URL of issue page that is being paginated. Typically
      generated by calling 'reverse' with a name and arguments of a view
      function.
    request: Request containing offset and limit parameters.
    query: Query over issues.
    limit: Maximum number of issues to return.
    template: Name of template that renders issue page.
    extra_nav_parameters: Dictionary of extra parameters to append to the
      navigation links.
    extra_template_params: Dictionary of extra parameters to pass to page
      rendering.

  Returns:
    Response for sending back to browser.
  """
  issues = query.fetch(limit)
  nav_parameters = {}
  if extra_nav_parameters:
    nav_parameters.update(extra_nav_parameters)
  # The cursor is read *after* the fetch above, so it points just past the
  # last issue of this page and becomes the 'next' link's starting point.
  nav_parameters['cursor'] = query.cursor()
  params = {
    'limit': limit,
    'cursor': nav_parameters['cursor'],
    'nexttext': 'Newer',
  }
  # Fetch one more to see if there should be a 'next' link. Do it in a separate
  # request so we have a valid cursor.
  if query.fetch(1):
    params['next'] = _url(page_url, **nav_parameters)
  if extra_template_params:
    params.update(extra_template_params)
  return _inner_paginate(request, issues, template, params)
def all(request, index_call=False):
  """/all - Show a list of up to DEFAULT_LIMIT recent issues."""
  closed_arg = request.GET.get('closed', '')
  if closed_arg in ('0', 'false'):
    closed = False
  elif closed_arg in ('1', 'true'):
    closed = True
  else:
    # For the index page we display only open issues by default;
    # elsewhere the default is no open/closed filter at all.
    closed = False if index_call else None
  nav_parameters = {}
  query = models.Issue.all().filter('private =', False)
  if closed is not None:
    nav_parameters['closed'] = int(closed)
    # Return only open (or only closed) issues.
    query.filter('closed =', closed)
  query.order('-modified')
  return _paginate_issues(reverse(all),
                          request,
                          query,
                          'all.html',
                          extra_nav_parameters=nav_parameters,
                          extra_template_params=dict(closed=closed))
def _optimize_draft_counts(issues):
  """Force _num_drafts to zero for issues that are known to have no drafts.

  This inspects the drafts attribute of the current user's Account
  instance, and forces the draft count to zero of those issues in the
  list that aren't mentioned there.

  If there is no current user, all draft counts are forced to 0.

  Args:
    issues: list of model.Issue instances.
  """
  account = models.Account.current_user_account
  issue_ids = account.drafts if account is not None else None
  for issue in issues:
    if issue_ids is None or issue.key().id() not in issue_ids:
      issue._num_drafts = 0
@login_required
def mine(request):
  """/mine - Show a list of issues created by the current user."""
  # Reuse the generic per-user dashboard with the logged-in user as subject.
  request.user_to_show = request.user
  return _show_user(request)
@login_required
def starred(request):
  """/starred - Show a list of issues starred by the current user."""
  stars = models.Account.current_user_account.stars
  issues = []
  if stars:
    # Batch-get the starred issues, dropping deleted and non-viewable ones.
    issues = [issue for issue in models.Issue.get_by_id(stars)
              if issue is not None
              and _can_view_issue(request.user, issue)]
  _load_users_for_issues(issues)
  _optimize_draft_counts(issues)
  return respond(request, 'starred.html', {'issues': issues})
def _load_users_for_issues(issues):
  """Load all user links for a list of issues in one go.

  Collects every email address that can appear on an issue row (owner,
  reviewers, CC) and primes the user-link cache with one batch call.
  """
  # A set of distinct addresses suffices: the old dict kept per-address
  # counts that were never read by anyone.
  emails = set()
  for issue in issues:
    emails.update(issue.reviewers)
    emails.update(issue.cc)
    emails.add(issue.owner.email())
  library.get_links_for_users(list(emails))
@user_key_required
def show_user(request):
  """/user - Show the user's dashboard"""
  # user_key_required has already resolved the key into
  # request.user_to_show; the shared helper does the rest.
  return _show_user(request)
def _show_user(request):
  """Render the dashboard for request.user_to_show.

  Builds separate lists: issues with this user's draft comments, issues
  owned, issues to review, recently closed issues and issues CC'ed, then
  renders user.html.
  """
  user = request.user_to_show
  if user == request.user:
    # Find issues the current user has draft comments on so they can be
    # listed separately and excluded from the other lists below.
    query = models.Comment.all().filter('draft =', True)
    query = query.filter('author =', request.user).fetch(100)
    # Comment -> Patch -> PatchSet -> Issue: walk three ancestors up.
    draft_keys = set(d.parent_key().parent().parent() for d in query)
    draft_issues = models.Issue.get(draft_keys)
  else:
    draft_issues = draft_keys = []
  # Open issues owned by the user.
  my_issues = [
    issue for issue in db.GqlQuery(
      'SELECT * FROM Issue '
      'WHERE closed = FALSE AND owner = :1 '
      'ORDER BY modified DESC '
      'LIMIT 100',
      user)
    if issue.key() not in draft_keys and _can_view_issue(request.user, issue)]
  # Open issues where the user is listed as a reviewer (but not owner).
  review_issues = [
    issue for issue in db.GqlQuery(
      'SELECT * FROM Issue '
      'WHERE closed = FALSE AND reviewers = :1 '
      'ORDER BY modified DESC '
      'LIMIT 100',
      user.email().lower())
    if (issue.key() not in draft_keys and issue.owner != user
        and _can_view_issue(request.user, issue))]
  # Issues owned by the user and closed within the last week.
  closed_issues = [
    issue for issue in db.GqlQuery(
      'SELECT * FROM Issue '
      'WHERE closed = TRUE AND modified > :1 AND owner = :2 '
      'ORDER BY modified DESC '
      'LIMIT 100',
      datetime.datetime.now() - datetime.timedelta(days=7),
      user)
    if issue.key() not in draft_keys and _can_view_issue(request.user, issue)]
  # Open issues where the user is CC'ed (but not owner).
  cc_issues = [
    issue for issue in db.GqlQuery(
      'SELECT * FROM Issue '
      'WHERE closed = FALSE AND cc = :1 '
      'ORDER BY modified DESC '
      'LIMIT 100',
      user.email())
    if (issue.key() not in draft_keys and issue.owner != user
        and _can_view_issue(request.user, issue))]
  all_issues = my_issues + review_issues + closed_issues + cc_issues
  _load_users_for_issues(all_issues)
  _optimize_draft_counts(all_issues)
  return respond(request, 'user.html',
                 {'email': user.email(),
                  'my_issues': my_issues,
                  'review_issues': review_issues,
                  'closed_issues': closed_issues,
                  'cc_issues': cc_issues,
                  'draft_issues': draft_issues,
                  })
@login_required
@xsrf_required
def new(request):
  """/new - Upload a new patch set.

  GET shows a blank form, POST processes it.
  """
  if request.method != 'POST':
    form = NewForm()
    form.set_branch_choices()
    return respond(request, 'new.html', {'form': form})
  form = NewForm(request.POST, request.FILES)
  form.set_branch_choices()
  issue, _ = _make_new(request, form)
  if issue is None:
    # Validation failed; re-render the form with its errors.
    return respond(request, 'new.html', {'form': form})
  return HttpResponseRedirect(reverse(show, args=[issue.key().id()]))
@login_required
@xsrf_required
def use_uploadpy(request):
  """Show an intermediate page about upload.py."""
  if request.method != 'POST':
    return respond(request, 'use_uploadpy.html')
  if 'disable_msg' in request.POST:
    # Remember that the user no longer wants to see the hint.
    models.Account.current_user_account.uploadpy_hint = False
    models.Account.current_user_account.put()
  if 'download' in request.POST:
    target = reverse(customized_upload_py)
  else:
    target = reverse(new)
  return HttpResponseRedirect(target)
@post_required
@upload_required
def upload(request):
  """/upload - Like new() or add(), but from the upload.py script.

  This generates a text/plain response: the first line reports success or
  failure, optional following lines carry the patchset id and the list of
  base files the script still has to upload.
  """
  if request.user is None:
    if IS_DEV:
      request.user = users.User(request.POST.get('user', 'test@example.com'))
    else:
      return HttpTextResponse('Login required', status=401)
  # Check against old upload.py usage.  Only obsolete versions of the script
  # send 'num_parts'; the previous `request.POST.get('num_parts') > 1` only
  # rejected them because Python 2 orders any str above any int — i.e. it
  # effectively tested for the parameter's *presence*.  Make that explicit
  # (this also avoids a TypeError under Python 3 semantics).
  if request.POST.get('num_parts') is not None:
    return HttpTextResponse('Upload.py is too old, get the latest version.')
  form = UploadForm(request.POST, request.FILES)
  issue = None
  patchset = None
  if form.is_valid():
    issue_id = form.cleaned_data['issue']
    if issue_id:
      # An issue id was supplied: add a patchset to an existing issue.
      action = 'updated'
      issue = models.Issue.get_by_id(issue_id)
      if issue is None:
        form.errors['issue'] = ['No issue exists with that id (%s)' %
                                issue_id]
      elif issue.local_base and not form.cleaned_data.get('content_upload'):
        form.errors['issue'] = ['Base files upload required for that issue.']
        issue = None
      else:
        if request.user != issue.owner:
          form.errors['user'] = ['You (%s) don\'t own this issue (%s)' %
                                 (request.user, issue_id)]
          issue = None
        else:
          patchset = _add_patchset_from_form(request, issue, form, 'subject',
                                             emails_add_only=True)
          if not patchset:
            issue = None
    else:
      # No issue id: create a brand new issue.
      action = 'created'
      issue, patchset = _make_new(request, form)
  if issue is None:
    msg = 'Issue creation errors: %s' % repr(form.errors)
  else:
    msg = ('Issue %s. URL: %s' %
           (action,
            request.build_absolute_uri(
              reverse('show_bare_issue_number', args=[issue.key().id()]))))
    if (form.cleaned_data.get('content_upload') or
        form.cleaned_data.get('separate_patches')):
      # Extend the response message: 2nd line is patchset id.
      msg += "\n%d" % patchset.key().id()
      if form.cleaned_data.get('content_upload'):
        # Extend the response: additional lines are the expected filenames.
        issue.local_base = True
        issue.put()
        # Parse "checksum:filename|checksum:filename|..." into a mapping.
        base_hashes = {}
        for file_info in form.cleaned_data.get('base_hashes').split("|"):
          if not file_info:
            break
          checksum, filename = file_info.split(":", 1)
          base_hashes[filename] = checksum
        content_entities = []
        new_content_entities = []
        patches = list(patchset.patch_set)
        existing_patches = {}
        patchsets = list(issue.patchset_set)
        if len(patchsets) > 1:
          # Only check the last uploaded patchset for speed.
          last_patch_set = patchsets[-2].patch_set
          patchsets = None  # Reduce memory usage.
          for opatch in last_patch_set:
            if opatch.content:
              existing_patches[opatch.filename] = opatch
        for patch in patches:
          content = None
          # Check if the base file is already uploaded in another patchset.
          if (patch.filename in base_hashes and
              patch.filename in existing_patches and
              (base_hashes[patch.filename] ==
               existing_patches[patch.filename].content.checksum)):
            content = existing_patches[patch.filename].content
            patch.status = existing_patches[patch.filename].status
            patch.is_binary = existing_patches[patch.filename].is_binary
          if not content:
            content = models.Content(is_uploaded=True, parent=patch)
            new_content_entities.append(content)
          content_entities.append(content)
        existing_patches = None  # Reduce memory usage.
        if new_content_entities:
          db.put(new_content_entities)
        for patch, content_entity in zip(patches, content_entities):
          patch.content = content_entity
          id_string = patch.key().id()
          if content_entity not in new_content_entities:
            # Base file not needed since we reused a previous upload. Send its
            # patch id in case it's a binary file and the new content needs to
            # be uploaded. We mark this by prepending 'nobase' to the id.
            id_string = "nobase_" + str(id_string)
          msg += "\n%s %s" % (id_string, patch.filename)
        db.put(patches)
  return HttpTextResponse(msg)
@post_required
@patch_required
@upload_required
def upload_content(request):
  """/<issue>/upload_content/<patchset>/<patch> - Upload base file contents.

  Used by upload.py to upload base files.  Stores either the base or the
  current ("patched") content of request.patch after verifying a checksum.
  """
  form = UploadContentForm(request.POST, request.FILES)
  if not form.is_valid():
    return HttpTextResponse(
        'ERROR: Upload content errors:\n%s' % repr(form.errors))
  if request.user is None:
    if IS_DEV:
      # Development server: trust the 'user' POST parameter.
      request.user = users.User(request.POST.get('user', 'test@example.com'))
    else:
      return HttpTextResponse('Error: Login required', status=401)
  if request.user != request.issue.owner:
    return HttpTextResponse('ERROR: You (%s) don\'t own this issue (%s).' %
                            (request.user, request.issue.key().id()))
  patch = request.patch
  patch.status = form.cleaned_data['status']
  patch.is_binary = form.cleaned_data['is_binary']
  patch.put()
  if form.cleaned_data['is_current']:
    # The upload carries the *patched* content, stored in a new entity.
    if patch.patched_content:
      return HttpTextResponse('ERROR: Already have current content.')
    content = models.Content(is_uploaded=True, parent=patch)
    content.put()
    patch.patched_content = content
    patch.put()
  else:
    content = patch.content
  if form.cleaned_data['file_too_large']:
    content.file_too_large = True
  else:
    data = form.get_uploaded_content()
    # NOTE(review): uses the legacy `md5` module (pre-hashlib); worth
    # migrating to hashlib.md5 when the runtime allows — confirm imports.
    checksum = md5.new(data).hexdigest()
    if checksum != request.POST.get('checksum'):
      # Reject corrupted uploads but remember the entity as bad.
      content.is_bad = True
      content.put()
      return HttpTextResponse('ERROR: Checksum mismatch.')
    if patch.is_binary:
      content.data = data
    else:
      content.text = utils.to_dbtext(utils.unify_linebreaks(data))
    content.checksum = checksum
  content.put()
  return HttpTextResponse('OK')
@post_required
@patchset_required
@upload_required
def upload_patch(request):
  """/<issue>/upload_patch/<patchset> - Upload patch to patchset.

  Used by upload.py to upload a patch when the diff is too large to upload
  all together.
  """
  if request.user is None:
    if not IS_DEV:
      return HttpTextResponse('Error: Login required', status=401)
    # Development server: trust the 'user' POST parameter.
    request.user = users.User(request.POST.get('user', 'test@example.com'))
  if request.user != request.issue.owner:
    return HttpTextResponse(
        'ERROR: You (%s) don\'t own this issue (%s).' %
        (request.user, request.issue.key().id()))
  form = UploadPatchForm(request.POST, request.FILES)
  if not form.is_valid():
    return HttpTextResponse(
        'ERROR: Upload patch errors:\n%s' % repr(form.errors))
  patchset = request.patchset
  if patchset.data:
    return HttpTextResponse(
        'ERROR: Can\'t upload patches to patchset with data.')
  patch_text = utils.to_dbtext(
      utils.unify_linebreaks(form.get_uploaded_patch()))
  patch = models.Patch(patchset=patchset,
                       text=patch_text,
                       filename=form.cleaned_data['filename'],
                       parent=patchset)
  patch.put()
  if form.cleaned_data.get('content_upload'):
    # A base-content upload will follow; create its placeholder entity now.
    content = models.Content(is_uploaded=True, parent=patch)
    content.put()
    patch.content = content
    patch.put()
  return HttpTextResponse('OK\n' + str(patch.key().id()))
@post_required
@issue_owner_required
@upload_required
def upload_complete(request, patchset_id=None):
  """/<issue>/upload_complete/<patchset> - Patchset upload is complete.
     /<issue>/upload_complete/ - used when no base files are uploaded.

  The following POST parameters are handled:
   - send_mail: If 'yes', a notification mail will be send.
   - attach_patch: If 'yes', the patches will be attached to the mail.
  """
  if patchset_id is not None:
    patchset = models.PatchSet.get_by_id(int(patchset_id),
                                         parent=request.issue)
    if patchset is None:
      # NOTE(review): responds 403 where patchset_required uses 404 —
      # presumably deliberate for upload.py; confirm before changing.
      return HttpTextResponse(
          'No patch set exists with that id (%s)' % patchset_id, status=403)
    # Add delta calculation task.
    taskqueue.add(url=reverse(calculate_delta),
                  params={'key': str(patchset.key())},
                  queue_name='deltacalculation')
  else:
    patchset = None
  # Check for completeness
  errors = []
  if request.issue.local_base and patchset is not None:
    query = patchset.patch_set.filter('is_binary =', False)
    query = query.filter('status =', None)  # all uploaded file have a status
    if query.count() > 0:
      errors.append('Base files missing.')
  # Create (and send) a message if needed.
  if request.POST.get('send_mail') == 'yes' or request.POST.get('message'):
    msg = _make_message(request, request.issue, request.POST.get('message', ''),
                        send_mail=(request.POST.get('send_mail', '') == 'yes'))
    msg.put()
    _notify_issue(request, request.issue, 'Mailed')
  if errors:
    msg = ('The following errors occured:\n%s\n'
           'Try to upload the changeset again.'
           % '\n'.join(errors))
    status = 500
  else:
    msg = 'OK'
    status = 200
  return HttpTextResponse(msg, status=status)
class EmptyPatchSet(Exception):
  """Exception used inside _make_new() to break out of the transaction.

  Raising it inside db.run_in_transaction() aborts (rolls back) the
  transaction; _make_new() catches it and records a form error instead.
  """
def _make_new(request, form):
  """Creates new issue and fill relevant fields from given form data.

  Sends notification about created issue (if requested with send_mail param).

  Returns (Issue, PatchSet) or (None, None) when validation fails.
  """
  if not form.is_valid():
    return (None, None)
  data_url = _get_data_url(form)
  if data_url is None:
    return (None, None)
  data, url, separate_patches = data_url
  reviewers = _get_emails(form, 'reviewers')
  if not form.is_valid() or reviewers is None:
    return (None, None)
  cc = _get_emails(form, 'cc')
  if not form.is_valid():
    return (None, None)
  base = form.get_base()
  if base is None:
    return (None, None)
  def txn():
    # Create the Issue and its first PatchSet atomically; a diff that
    # yields no patches aborts the whole transaction via EmptyPatchSet.
    issue = models.Issue(subject=form.cleaned_data['subject'],
                         description=form.cleaned_data['description'],
                         base=base,
                         repo_guid=form.cleaned_data.get('repo_guid', None),
                         reviewers=reviewers,
                         cc=cc,
                         private=form.cleaned_data.get('private', False),
                         n_comments=0)
    issue.put()
    patchset = models.PatchSet(issue=issue, data=data, url=url, parent=issue)
    patchset.put()
    if not separate_patches:
      patches = engine.ParsePatchSet(patchset)
      if not patches:
        raise EmptyPatchSet  # Abort the transaction
      db.put(patches)
    return issue, patchset
  try:
    issue, patchset = db.run_in_transaction(txn)
  except EmptyPatchSet:
    errkey = url and 'url' or 'data'
    form.errors[errkey] = ['Patch set contains no recognizable patches']
    return (None, None)
  if form.cleaned_data.get('send_mail'):
    msg = _make_message(request, issue, '', '', True)
    msg.put()
    _notify_issue(request, issue, 'Created')
  return (issue, patchset)
def _get_data_url(form):
  """Helper for _make_new() above and add() below.

  Args:
    form: Django form object.

  Returns:
    3-tuple (data, url, separate_patches), or None on error (the error is
    recorded in form.errors).
      data: the diff content, if available.
      url: the url of the diff, if given.
      separate_patches: True iff the patches will be uploaded separately for
        each file.
  """
  cleaned_data = form.cleaned_data
  data = cleaned_data['data']
  url = cleaned_data.get('url')
  separate_patches = cleaned_data.get('separate_patches')
  # Exactly one diff source must be supplied: inline data, a URL, or a
  # promise of separately uploaded patches.
  if not (data or url or separate_patches):
    form.errors['data'] = ['You must specify a URL or upload a file (< 1 MB).']
    return None
  if data and url:
    form.errors['data'] = ['You must specify either a URL or upload a file '
                           'but not both.']
    return None
  if separate_patches and (data or url):
    form.errors['data'] = ['If the patches will be uploaded separately later, '
                           'you can\'t send some data or a url.']
    return None
  if data is not None:
    data = db.Blob(utils.unify_linebreaks(data.read()))
    url = None
  elif url:
    try:
      fetch_result = urlfetch.fetch(url)
    except Exception, err:
      # urlfetch raises a variety of errors; report them all as form errors.
      form.errors['url'] = [str(err)]
      return None
    if fetch_result.status_code != 200:
      form.errors['url'] = ['HTTP status code %s' % fetch_result.status_code]
      return None
    data = db.Blob(utils.unify_linebreaks(fetch_result.content))
  return data, url, separate_patches
@post_required
@issue_owner_required
@xsrf_required
def add(request):
  """/<issue>/add - Add a new PatchSet to an existing Issue."""
  issue = request.issue
  form = AddForm(request.POST, request.FILES)
  patchset = _add_patchset_from_form(request, issue, form)
  if not patchset:
    # Validation failed; re-render the issue page with the form errors.
    return show(request, issue.key().id(), form)
  return HttpResponseRedirect(reverse(show, args=[issue.key().id()]))
def _add_patchset_from_form(request, issue, form, message_key='message',
                            emails_add_only=False):
  """Helper for add() and upload().

  Args:
    request: Django Request object.
    issue: Issue the new PatchSet is attached to.
    form: validated form carrying the diff and email fields.
    message_key: cleaned_data key holding the patchset message.
    emails_add_only: if True, merge reviewers/cc into the existing lists
      instead of replacing them.

  Returns the new PatchSet, or None on failure (with form.errors set).
  """
  # TODO(guido): use a transaction like in _make_new(); may be share more code?
  if form.is_valid():
    data_url = _get_data_url(form)
  if not form.is_valid():
    return None
  if request.user != issue.owner:
    # This check is done at each call site but check again as a safety measure.
    return None
  data, url, separate_patches = data_url
  message = form.cleaned_data[message_key]
  patchset = models.PatchSet(issue=issue, message=message, data=data, url=url,
                             parent=issue)
  patchset.put()
  if not separate_patches:
    patches = engine.ParsePatchSet(patchset)
    if not patches:
      # Nothing recognizable in the diff: undo the patchset by hand
      # (no transaction here, unlike _make_new()).
      patchset.delete()
      errkey = url and 'url' or 'data'
      form.errors[errkey] = ['Patch set contains no recognizable patches']
      return None
    db.put(patches)
  if emails_add_only:
    emails = _get_emails(form, 'reviewers')
    if not form.is_valid():
      return None
    issue.reviewers += [reviewer for reviewer in emails
                        if reviewer not in issue.reviewers]
    emails = _get_emails(form, 'cc')
    if not form.is_valid():
      return None
    issue.cc += [cc for cc in emails if cc not in issue.cc]
  else:
    issue.reviewers = _get_emails(form, 'reviewers')
    issue.cc = _get_emails(form, 'cc')
  issue.put()
  if form.cleaned_data.get('send_mail'):
    msg = _make_message(request, issue, message, '', True)
    msg.put()
    _notify_issue(request, issue, 'Updated')
  return patchset
def _get_emails(form, label):
  """Helper to return the list of reviewers, or None for error."""
  raw = form.cleaned_data.get(label)
  if not raw:
    # No value entered: that is valid, just nobody listed.
    return []
  return _get_emails_from_raw(raw.split(','), form=form, label=label)
def _get_emails_from_raw(raw_emails, form=None, label=None):
  """Resolve raw reviewer/cc strings into a de-duplicated db.Email list.

  Each entry may be a bare nickname (resolved through Account) or an email
  address.  Returns None on the first invalid entry, recording the error
  on `form` under `label` when a form is given.
  """
  emails = []
  for email in raw_emails:
    email = email.strip()
    if email:
      try:
        if '@' not in email:
          # No '@' sign: treat the entry as a nickname.
          account = models.Account.get_account_for_nickname(email)
          if account is None:
            raise db.BadValueError('Unknown user: %s' % email)
          db_email = db.Email(account.user.email().lower())
        elif email.count('@') != 1:
          raise db.BadValueError('Invalid email address: %s' % email)
        else:
          _, tail = email.split('@')
          if '.' not in tail:
            # The domain part must contain a dot to look like a real host.
            raise db.BadValueError('Invalid email address: %s' % email)
          db_email = db.Email(email.lower())
      except db.BadValueError, err:
        if form:
          form.errors[label] = [unicode(err)]
        return None
      if db_email not in emails:
        emails.append(db_email)
  return emails
def _calculate_delta(patch, patchset_id, patchsets):
  """Calculates which files in earlier patchsets this file differs from.

  Args:
    patch: The file to compare.
    patchset_id: The file's patchset's key id.
    patchsets: A list of existing patchsets (in creation order).

  Returns:
    A list of patchset ids.
  """
  delta = []
  if patch.no_base_file:
    return delta
  for other in patchsets:
    if patchset_id == other.key().id():
      # Only patchsets older than ours matter; stop at our own.
      break
    if not hasattr(other, 'parsed_patches'):
      other.parsed_patches = None  # cache variable for already parsed patches
    if other.data or other.parsed_patches:
      # Loading all the Patch entities in every PatchSet takes too long
      # (DeadLineExceeded) and consumes a lot of memory (MemoryError) so instead
      # just parse the patchset's data. Note we can only do this if the
      # patchset was small enough to fit in the data property.
      if other.parsed_patches is None:
        # PatchSet.data is stored as db.Blob (str). Try to convert it
        # to unicode so that Python doesn't need to do this conversion
        # when comparing text and patch.text, which is db.Text
        # (unicode).
        try:
          other.parsed_patches = engine.SplitPatch(other.data.decode('utf-8'))
        except UnicodeDecodeError:  # Fallback to str - unicode comparison.
          other.parsed_patches = engine.SplitPatch(other.data)
        other.data = None  # Reduce memory usage.
      for filename, text in other.parsed_patches:
        if filename == patch.filename:
          if text != patch.text:
            delta.append(other.key().id())
          break
      else:
        # We could not find the file in the previous patchset. It must
        # be new wrt that patchset.
        delta.append(other.key().id())
    else:
      # other (patchset) is too big to hold all the patches inside itself, so
      # we need to go to the datastore. Use the index to see if there's a
      # patch against our current file in other.
      query = models.Patch.all()
      query.filter("filename =", patch.filename)
      query.filter("patchset =", other.key())
      other_patches = query.fetch(100)
      if other_patches and len(other_patches) > 1:
        logging.info("Got %s patches with the same filename for a patchset",
                     len(other_patches))
      for op in other_patches:
        if op.text != patch.text:
          delta.append(other.key().id())
        # Only the first result is examined; the loop exits unconditionally.
        break
      else:
        # We could not find the file in the previous patchset. It must
        # be new wrt that patchset.
        delta.append(other.key().id())
  return delta
def _get_patchset_info(request, patchset_id):
  """ Returns a list of patchsets for the issue.

  Also annotates each patchset with draft counts and, for the selected
  patchset, its patches with comment/draft counts and delta links.

  Args:
    request: Django Request object.
    patchset_id: The id of the patchset that the caller is interested in. This
      is the one that we generate delta links to if they're not available. We
      can't generate for all patchsets because it would take too long on issues
      with many patchsets. Passing in None is equivalent to doing it for the
      last patchset.

  Returns:
    A 3-tuple of (issue, patchsets, HttpResponse).
    If HttpResponse is not None, further processing should stop and it should
    be returned.
  """
  issue = request.issue
  patchsets = list(issue.patchset_set.order('created'))
  response = None
  if not patchset_id and patchsets:
    # Default to the most recent patchset.
    patchset_id = patchsets[-1].key().id()
  if request.user:
    drafts = list(models.Comment.gql('WHERE ANCESTOR IS :1 AND draft = TRUE'
                                     ' AND author = :2',
                                     issue, request.user))
  else:
    drafts = []
  comments = list(models.Comment.gql('WHERE ANCESTOR IS :1 AND draft = FALSE',
                                     issue))
  issue.draft_count = len(drafts)
  for c in drafts:
    c.ps_key = c.patch.patchset.key()
  patchset_id_mapping = {}  # Maps from patchset id to its ordering number.
  for patchset in patchsets:
    patchset_id_mapping[patchset.key().id()] = len(patchset_id_mapping) + 1
    patchset.n_drafts = sum(c.ps_key == patchset.key() for c in drafts)
    patchset.patches = None
    patchset.parsed_patches = None
    if patchset_id == patchset.key().id():
      patchset.patches = list(patchset.patch_set.order('filename'))
      try:
        # 'attempt' counts DeadlineExceeded retries (see except clause).
        attempt = _clean_int(request.GET.get('attempt'), 0, 0)
        if attempt < 0:
          response = HttpTextResponse('Invalid parameter', status=404)
          break
        for patch in patchset.patches:
          pkey = patch.key()
          patch._num_comments = sum(c.parent_key() == pkey for c in comments)
          patch._num_drafts = sum(c.parent_key() == pkey for c in drafts)
          if not patch.delta_calculated:
            if attempt > 2:
              # Too many patchsets or files and we're not able to generate the
              # delta links. Instead of giving a 500, try to render the page
              # without them.
              patch.delta = []
            else:
              # Compare each patch to the same file in earlier patchsets to see
              # if they differ, so that we can generate the delta patch urls.
              # We do this once and cache it after. It's specifically not done
              # on upload because we're already doing too much processing there.
              # NOTE: this function will clear out patchset.data to reduce
              # memory so don't ever call patchset.put() after calling it.
              patch.delta = _calculate_delta(patch, patchset_id, patchsets)
              patch.delta_calculated = True
              # A multi-entity put would be quicker, but it fails when the
              # patches have content that is large. App Engine throws
              # RequestTooLarge. This way, although not as efficient, allows
              # multiple refreshes on an issue to get things done, as opposed to
              # an all-or-nothing approach.
              patch.put()
          # Reduce memory usage: if this patchset has lots of added/removed
          # files (i.e. > 100) then we'll get MemoryError when rendering the
          # response. Each Patch entity is using a lot of memory if the files
          # are large, since it holds the entire contents. Call num_chunks and
          # num_drafts first though since they depend on text.
          # These are 'active' properties and have side-effects when looked up.
          # pylint: disable=W0104
          patch.num_chunks
          patch.num_drafts
          patch.num_added
          patch.num_removed
          patch.text = None
          patch._lines = None
          patch.parsed_deltas = []
          for delta in patch.delta:
            patch.parsed_deltas.append([patchset_id_mapping[delta], delta])
      except DeadlineExceededError:
        logging.exception('DeadlineExceededError in _get_patchset_info')
        if attempt > 2:
          response = HttpTextResponse(
              'DeadlineExceededError - create a new issue.')
        else:
          # Redirect to ourselves with an incremented attempt counter.
          response = HttpResponseRedirect('%s?attempt=%d' %
                                          (request.path, attempt + 1))
        break
  # Reduce memory usage (see above comment).
  for patchset in patchsets:
    patchset.parsed_patches = None
  return issue, patchsets, response
@issue_required
def show(request, form=None):
  """/<issue> - Show an issue."""
  issue, patchsets, response = _get_patchset_info(request, None)
  if response:
    return response
  if not form:
    form = AddForm(initial={'reviewers': ', '.join(issue.reviewers)})
  last_patchset = first_patch = None
  if patchsets:
    last_patchset = patchsets[-1]
    if last_patchset.patches:
      first_patch = last_patchset.patches[0]
  messages = []
  has_draft_message = False
  for msg in issue.message_set.order('date'):
    if not msg.draft:
      messages.append(msg)
    elif request.user and msg.sender == request.user.email():
      # A draft message only counts when it belongs to the viewer.
      has_draft_message = True
  return respond(request, 'issue.html',
                 {'issue': issue, 'patchsets': patchsets,
                  'messages': messages, 'form': form,
                  'last_patchset': last_patchset,
                  'num_patchsets': len(patchsets),
                  'first_patch': first_patch,
                  'has_draft_message': has_draft_message,
                  })
@patchset_required
def patchset(request):
  """/patchset/<key> - Returns patchset information."""
  ps = request.patchset
  issue, patchsets, response = _get_patchset_info(request, ps.key().id())
  if response:
    return response
  # Prefer the copy returned by _get_patchset_info, which has extra
  # attributes (deltas etc.) filled in.
  wanted_id = ps.key().id()
  for candidate in patchsets:
    if candidate.key().id() == wanted_id:
      ps = candidate
  return respond(request, 'patchset.html',
                 {'issue': issue,
                  'patchset': ps,
                  'patchsets': patchsets,
                  })
@login_required
def account(request):
  """/account/?q=blah&limit=10&timestamp=blah - Used for autocomplete.

  Returns a plain-text list of 'email (nickname)' lines matching the
  search string in ?q=, capped at ?limit= (10-100, default 10).
  """
  # NOTE: the parameter is named 'prop' (not 'property') to avoid
  # shadowing the Python builtin.
  def searchAccounts(prop, domain, added, response):
    """Appends accounts whose lower_<prop> starts with the query.

    Keys of accounts already emitted are tracked in *added* so later
    passes don't produce duplicates.
    """
    query = request.GET.get('q').lower()
    limit = _clean_int(request.GET.get('limit'), 10, 10, 100)
    accounts = models.Account.all()
    # Prefix match: [query, query + U+FFFD) covers every string that
    # starts with the query.
    accounts.filter("lower_%s >= " % prop, query)
    accounts.filter("lower_%s < " % prop, query + u"\ufffd")
    accounts.order("lower_%s" % prop)
    for account in accounts:
      if account.key() in added:
        continue
      # When a domain is given, restrict matches to that domain.
      if domain and not account.email.endswith(domain):
        continue
      if len(added) >= limit:
        break
      added.add(account.key())
      response += '%s (%s)\n' % (account.email, account.nickname)
    return added, response

  added = set()
  response = ''
  domain = os.environ['AUTH_DOMAIN']
  if domain != 'gmail.com':
    # 'gmail.com' is the value AUTH_DOMAIN is set to if the app is running
    # on appspot.com and shouldn't prioritize the custom domain.
    added, response = searchAccounts("email", domain, added, response)
    added, response = searchAccounts("nickname", domain, added, response)
  added, response = searchAccounts("nickname", "", added, response)
  added, response = searchAccounts("email", "", added, response)
  return HttpTextResponse(response)
@issue_editor_required
@xsrf_required
def edit(request):
  """/<issue>/edit - Edit an issue.

  GET renders the form pre-filled from the issue; POST validates the form,
  applies the changes and redirects back to the issue page.
  """
  issue = request.issue
  base = issue.base
  # Issues with a local base (uploaded contents, no repository URL) use a
  # form that does not let the user change the base.
  if issue.local_base:
    form_cls = EditLocalBaseForm
  else:
    form_cls = EditForm
  if request.method != 'POST':
    # Show nicknames instead of raw emails where an Account exists.
    reviewers = [models.Account.get_nickname_for_email(reviewer,
                                                       default=reviewer)
                 for reviewer in issue.reviewers]
    ccs = [models.Account.get_nickname_for_email(cc, default=cc)
           for cc in issue.cc]
    form = form_cls(initial={'subject': issue.subject,
                             'description': issue.description,
                             'base': base,
                             'reviewers': ', '.join(reviewers),
                             'cc': ', '.join(ccs),
                             'closed': issue.closed,
                             'private': issue.private,
                             })
    if not issue.local_base:
      form.set_branch_choices(base)
    return respond(request, 'edit.html', {'issue': issue, 'form': form})
  form = form_cls(request.POST)
  if not issue.local_base:
    form.set_branch_choices()
  # Each helper below may add validation errors to the form, so is_valid()
  # is re-checked before every dependent step instead of once up front.
  if form.is_valid():
    reviewers = _get_emails(form, 'reviewers')
  if form.is_valid():
    cc = _get_emails(form, 'cc')
  if form.is_valid() and not issue.local_base:
    base = form.get_base()
  if not form.is_valid():
    return respond(request, 'edit.html', {'issue': issue, 'form': form})
  cleaned_data = form.cleaned_data
  was_closed = issue.closed
  issue.subject = cleaned_data['subject']
  issue.description = cleaned_data['description']
  issue.closed = cleaned_data['closed']
  issue.private = cleaned_data.get('private', False)
  base_changed = (issue.base != base)
  issue.base = base
  issue.reviewers = reviewers
  issue.cc = cc
  if base_changed:
    # Cached patched contents were computed against the old base; drop them.
    for patchset in issue.patchset_set:
      db.run_in_transaction(_delete_cached_contents, list(patchset.patch_set))
  issue.put()
  # Pick the notification verb from the open/closed state transition.
  if issue.closed == was_closed:
    message = 'Edited'
  elif issue.closed:
    message = 'Closed'
  else:
    message = 'Reopened'
  _notify_issue(request, issue, message)
  return HttpResponseRedirect(reverse(show, args=[issue.key().id()]))
def _delete_cached_contents(patch_set):
  """Transactional helper for edit() to delete cached contents."""
  # TODO(guido): No need to do this in a transaction.
  patches = []
  contents = []
  for patch in patch_set:
    # Either reference may raise db.Error when the entity is gone;
    # treat that the same as having no cached content.
    try:
      content = patch.content
    except db.Error:
      content = None
    try:
      patched_content = patch.patched_content
    except db.Error:
      patched_content = None
    contents.extend(c for c in (content, patched_content) if c is not None)
    patch.content = None
    patch.patched_content = None
    patches.append(patch)
  if contents:
    logging.info("Deleting %d contents", len(contents))
    db.delete(contents)
  if patches:
    logging.info("Updating %d patches", len(patches))
    db.put(patches)
@post_required
@issue_owner_required
@xsrf_required
def delete(request):
  """/<issue>/delete - Delete an issue. There is no way back."""
  issue = request.issue
  # Collect the issue plus every descendant entity type, then delete all
  # of them in one batch.
  tbd = [issue]
  for cls in (models.PatchSet, models.Patch, models.Comment,
              models.Message, models.Content):
    tbd.extend(cls.gql('WHERE ANCESTOR IS :1', issue))
  db.delete(tbd)
  _notify_issue(request, issue, 'Deleted')
  return HttpResponseRedirect(reverse(mine))
@post_required
@patchset_owner_required
@xsrf_required
def delete_patchset(request):
  """/<issue>/patch/<patchset>/delete - Delete a patchset.
  There is no way back.
  """
  issue = request.issue
  ps_delete = request.patchset
  ps_id = ps_delete.key().id()
  # Patches in later patchsets may hold a delta reference to the doomed
  # patchset; collect them so the transaction can scrub those references.
  referencing_patches = [
      patch
      for patchset in issue.patchset_set.filter('created >', ps_delete.created)
      for patch in patchset.patch_set
      if patch.delta_calculated and ps_id in patch.delta]
  db.run_in_transaction(_patchset_delete, ps_delete, referencing_patches)
  _notify_issue(request, issue, 'Patchset deleted')
  return HttpResponseRedirect(reverse(show, args=[issue.key().id()]))
def _patchset_delete(ps_delete, patches):
  """Transactional helper for delete_patchset.

  Args:
    ps_delete: The patchset to be deleted.
    patches: Patches that have delta against patches of ps_delete.
  """
  patchset_id = ps_delete.key().id()
  # Scrub references to the doomed patchset from other patches' deltas.
  modified = []
  for patch in patches:
    patch.delta.remove(patchset_id)
    modified.append(patch)
  if modified:
    db.put(modified)
  # Delete the patchset together with its descendant entities.
  doomed = [ps_delete]
  for cls in (models.Patch, models.Comment):
    doomed.extend(cls.gql('WHERE ANCESTOR IS :1', ps_delete))
  db.delete(doomed)
@post_required
@issue_editor_required
@xsrf_required
def close(request):
  """/<issue>/close - Close an issue."""
  issue = request.issue
  issue.closed = True
  # The request may carry a replacement description alongside the close.
  if request.method == 'POST':
    new_description = request.POST.get('description')
    if new_description:
      issue.description = new_description
  issue.put()
  _notify_issue(request, issue, 'Closed')
  return HttpTextResponse('Closed')
@post_required
@issue_required
@upload_required
def mailissue(request):
  """/<issue>/mail - Send mail for an issue.

  This URL is deprecated and shouldn't be used anymore. However,
  older versions of upload.py or wrapper scripts still may use it.
  """
  # Only the owner may trigger mail, except on the dev server.
  if request.issue.owner != request.user and not IS_DEV:
    return HttpTextResponse('Login required', status=401)
  issue = request.issue
  msg = _make_message(request, issue, '', '', True)
  msg.put()
  _notify_issue(request, issue, 'Mailed')
  return HttpTextResponse('OK')
@patchset_required
def download(request):
  """/download/<issue>_<patchset>.diff - Download a patch set."""
  data = request.patchset.data
  if data is None:
    return HttpTextResponse(
        'Patch set (%s) is too large.' % request.patchset.key().id(),
        status=404)
  user_agent = request.META.get('HTTP_USER_AGENT')
  if user_agent and 'MSIE' in user_agent:
    # Add 256+ bytes of padding to prevent XSS attacks on Internet Explorer.
    prefix = ('='*67 + '\n') * 4
  else:
    prefix = ''
  return HttpTextResponse(prefix + data)
@issue_required
@upload_required
def description(request):
  """/<issue>/description - Gets/Sets an issue's description.

  Used by upload.py or similar scripts.
  """
  # GET: return the current description as plain text.
  if request.method != 'POST':
    return HttpTextResponse(request.issue.description or "")
  # POST: only editors may change it, except on the dev server.
  if not request.issue.user_can_edit(request.user) and not IS_DEV:
    return HttpTextResponse('Login required', status=401)
  issue = request.issue
  issue.description = request.POST.get('description')
  issue.put()
  _notify_issue(request, issue, 'Changed')
  return HttpTextResponse('')
@issue_required
@upload_required
@json_response
def fields(request):
  """/<issue>/fields - Gets/Sets fields on the issue.

  Used by upload.py or similar scripts for partial updates of the issue
  without a patchset..
  """
  # Only recognizes a few fields for now.
  if request.method != 'POST':
    requested = request.GET.getlist('field')
    response = {}
    if 'reviewers' in requested:
      response['reviewers'] = request.issue.reviewers or []
    if 'description' in requested:
      response['description'] = request.issue.description
    if 'subject' in requested:
      response['subject'] = request.issue.subject
    return response
  # POST: only editors may update fields, except on the dev server.
  if not request.issue.user_can_edit(request.user) and not IS_DEV:
    return HttpTextResponse('Login required', status=401)
  updates = simplejson.loads(request.POST.get('fields'))
  issue = request.issue
  if 'description' in updates:
    issue.description = updates['description']
  if 'reviewers' in updates:
    issue.reviewers = _get_emails_from_raw(updates['reviewers'])
  if 'subject' in updates:
    issue.subject = updates['subject']
  issue.put()
  _notify_issue(request, issue, 'Changed')
  return HttpTextResponse('')
@patch_required
def patch(request):
  """/<issue>/patch/<patchset>/<patch> - View a raw patch."""
  # Delegate to the shared helper; 'patch' is its default nav_type.
  return patch_helper(request, nav_type='patch')
def patch_helper(request, nav_type='patch'):
  """Returns a unified diff.

  Args:
    request: Django Request object.
    nav_type: the navigation used in the url (i.e. patch/diff/diff2). Normally
      the user looks at either unified or side-by-side diffs at one time, going
      through all the files in the same mode. However, if side-by-side is not
      available for some files, we temporarily switch them to unified view,
      then switch them back when we can. This way they don't miss any files.

  Returns:
    Whatever respond() returns.
  """
  _add_next_prev(request.patchset, request.patch)
  request.patch.nav_type = nav_type
  parsed_lines = patching.ParsePatchToLines(request.patch.lines)
  if parsed_lines is None:
    return HttpTextResponse("Can't parse the patch to lines", status=404)
  rendered_rows = engine.RenderUnifiedTableRows(request, parsed_lines)
  template_args = {
      'patch': request.patch,
      'patchset': request.patchset,
      'view_style': 'patch',
      'rows': rendered_rows,
      'issue': request.issue,
      'context': _clean_int(request.GET.get('context'), -1),
      'column_width': _clean_int(request.GET.get('column_width'), None),
  }
  return respond(request, 'patch.html', template_args)
@image_required
def image(request):
  """/<issue>/content/<patchset>/<patch>/<content> - Return patch's content."""
  response = HttpResponse(request.content.data, content_type=request.mime_type)
  # Sanitize the filename for the Content-Disposition header.
  safe_name = re.sub(r'[^\w\.]', '_',
                     request.patch.filename.encode('ascii', 'replace'))
  response['Content-Disposition'] = 'attachment; filename="%s"' % safe_name
  response['Cache-Control'] = 'no-cache, no-store'
  return response
@patch_required
def download_patch(request):
  """/download/issue<issue>_<patchset>_<patch>.diff - Download patch."""
  patch_text = request.patch.text
  return HttpTextResponse(patch_text)
def _issue_as_dict(issue, messages, request=None):
  """Converts an issue into a dict."""
  result = {
      'owner': library.get_nickname(issue.owner, True, request),
      'owner_email': issue.owner.email(),
      'modified': str(issue.modified),
      'created': str(issue.created),
      'closed': issue.closed,
      'cc': issue.cc,
      'reviewers': issue.reviewers,
      'patchsets': [p.key().id() for p in issue.patchset_set.order('created')],
      'description': issue.description,
      'subject': issue.subject,
      'issue': issue.key().id(),
      'base_url': issue.base,
      'private': issue.private,
  }
  if messages:
    # Messages are optional because they require an extra datastore query.
    def message_to_dict(m):
      return {
          'sender': m.sender,
          'recipients': m.recipients,
          'date': str(m.date),
          'text': m.text,
          'approval': m.approval,
          'disapproval': m.disapproval,
      }
    result['messages'] = [
        message_to_dict(m)
        for m in models.Message.gql('WHERE ANCESTOR IS :1', issue)]
  return result
def _patchset_as_dict(patchset, request=None):
  """Converts a patchset into a dict."""
  issue = patchset.issue
  values = {
      'patchset': patchset.key().id(),
      'issue': issue.key().id(),
      'owner': library.get_nickname(issue.owner, True, request),
      'owner_email': issue.owner.email(),
      'message': patchset.message,
      'url': patchset.url,
      'created': str(patchset.created),
      'modified': str(patchset.modified),
      'num_comments': patchset.num_comments,
      'files': {},
  }
  files = values['files']
  for patch in models.Patch.gql("WHERE patchset = :1", patchset):
    # num_comments and num_drafts are left out for performance reason:
    # they cause a datastore query on first access. They could be added
    # optionally if the need ever arises.
    files[patch.filename] = {
        'id': patch.key().id(),
        'is_binary': patch.is_binary,
        'no_base_file': patch.no_base_file,
        'num_added': patch.num_added,
        'num_chunks': patch.num_chunks,
        'num_removed': patch.num_removed,
        'status': patch.status,
        'property_changes': '\n'.join(patch.property_changes),
    }
  return values
@issue_required
@json_response
def api_issue(request):
  """/api/<issue> - Gets issue's data as a JSON-encoded dictionary."""
  # Messages are included only when ?messages=true is passed explicitly.
  want_messages = request.GET.get('messages', '').lower() == 'true'
  return _issue_as_dict(request.issue, want_messages, request)
@patchset_required
@json_response
def api_patchset(request):
  """/api/<issue>/<patchset> - Gets an issue's patchset data as a JSON-encoded
  dictionary.
  """
  return _patchset_as_dict(request.patchset, request)
def _get_context_for_user(request):
  """Returns the context setting for a user.

  The value is validated against models.CONTEXT_CHOICES.
  If an invalid value is found, the value is overwritten with
  django_settings.DEFAULT_CONTEXT.
  """
  get_param = request.GET.get('context') or None
  if 'context' in request.GET and get_param is None:
    # User wants to see whole file. No further processing is needed.
    return None
  # Fall back on the signed-in user's preference, else the site default.
  if request.user:
    default_context = models.Account.current_user_account.default_context
  else:
    default_context = django_settings.DEFAULT_CONTEXT
  context = _clean_int(get_param, default_context)
  if context is not None and context not in models.CONTEXT_CHOICES:
    context = django_settings.DEFAULT_CONTEXT
  return context
def _get_column_width_for_user(request):
  """Returns the column width setting for a user."""
  # Signed-in users have a stored preference; others get the site default.
  if request.user:
    default = models.Account.current_user_account.default_column_width
  else:
    default = django_settings.DEFAULT_COLUMN_WIDTH
  return _clean_int(request.GET.get('column_width'), default,
                    django_settings.MIN_COLUMN_WIDTH,
                    django_settings.MAX_COLUMN_WIDTH)
@patch_filename_required
def diff(request):
"""/<issue>/diff/<patchset>/<patch> - View a patch as a side-by-side diff"""
if request.patch.no_base_file:
# Can't show side-by-side diff since we don't have the base file. Show the
# unified diff instead.
return patch_helper(request, 'diff')
patchset = request.patchset
patch = request.patch
patchsets = list(request.issue.patchset_set.order('created'))
context = _get_context_for_user(request)
column_width = _get_column_width_for_user(request)
if patch.is_binary:
rows = None
else:
try:
rows = _get_diff_table_rows(request, patch, context, column_width)
except FetchError, err:
return HttpTextResponse(str(err), status=404)
_add_next_prev(patchset, patch)
return respond(request, 'diff.html',
{'issue': request.issue,
'patchset': patchset,
'patch': patch,
'view_style': 'diff',
'rows': rows,
'context': context,
'context_values': models.CONTEXT_CHOICES,
'column_width': column_width,
'patchsets': patchsets,
})
def _get_diff_table_rows(request, patch, context, column_width):
  """Helper function that returns rendered rows for a patch.

  Args:
    request: Django Request object.
    patch: The patch to render.
    context: Number of context lines, or None (whole file).
    column_width: Maximum width for each diff column.

  Returns:
    A list of rendered table rows.

  Raises:
    FetchError if patch parsing or download of base files fails.
  """
  chunks = patching.ParsePatchToChunks(patch.lines, patch.filename)
  if chunks is None:
    raise FetchError('Can\'t parse the patch to chunks')
  # Possible FetchErrors are handled in diff() and diff_skipped_lines().
  content = request.patch.get_content()
  rows = list(engine.RenderDiffTableRows(request, content.lines,
                                         chunks, patch,
                                         context=context,
                                         colwidth=column_width))
  # A trailing None row indicates the base content is suspect; drop the
  # sentinel and discard the cached content so it gets refetched.
  if rows and rows[-1] is None:
    del rows[-1]
    # Get rid of content, which may be bad
    if content.is_uploaded and content.text != None:
      # Don't delete uploaded content, otherwise get_content()
      # will fetch it.
      content.is_bad = True
      content.text = None
      content.put()
    else:
      content.delete()
    request.patch.content = None
    request.patch.put()
  return rows
@patch_required
@json_response
def diff_skipped_lines(request, id_before, id_after, where, column_width):
"""/<issue>/diff/<patchset>/<patch> - Returns a fragment of skipped lines.
*where* indicates which lines should be expanded:
'b' - move marker line to bottom and expand above
't' - move marker line to top and expand below
'a' - expand all skipped lines
"""
patch = request.patch
if where == 'a':
context = None
else:
context = _get_context_for_user(request) or 100
column_width = _clean_int(column_width, django_settings.DEFAULT_COLUMN_WIDTH,
django_settings.MIN_COLUMN_WIDTH,
django_settings.MAX_COLUMN_WIDTH)
try:
rows = _get_diff_table_rows(request, patch, None, column_width)
except FetchError, err:
return HttpTextResponse('Error: %s; please report!' % err, status=500)
return _get_skipped_lines_response(rows, id_before, id_after, where, context)
# there's no easy way to put a control character into a regex, so brute-force it
# this is all control characters except \r, \n, and \t
_badchars_re = re.compile(
r'[\000\001\002\003\004\005\006\007\010\013\014\016\017'
r'\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037]')
def _strip_invalid_xml(s):
"""Remove control chars other than \r\n\t from a string to be put in XML."""
if _badchars_re.search(s):
return ''.join(c for c in s if c >= ' ' or c in '\r\n\t')
else:
return s
def _get_skipped_lines_response(rows, id_before, id_after, where, context):
  """Helper function that returns response data for skipped lines.

  Args:
    rows: Rendered HTML table rows for the whole diff.
    id_before: Row id of the marker's upper bound.
    id_after: Row id of the marker's lower bound.
    where: 'b' (expand below), 't' (expand above) or 'a' (expand all).
    context: Max lines to expand on each side, or None for unlimited.

  Returns:
    A nested list structure ([node attrs, [[cell attrs, text], ...]]) that
    the JavaScript side reassembles into table rows.
  """
  response_rows = []
  id_before_start = int(id_before)
  id_after_end = int(id_after)
  # Compute the window of row ids to include; None means no bound.
  if context is not None:
    id_before_end = id_before_start+context
    id_after_start = id_after_end-context
  else:
    id_before_end = id_after_start = None
  for row in rows:
    # Only rows carrying a pair-<n> id participate in expansion.
    m = re.match('^<tr( name="hook")? id="pair-(?P<rowcount>\d+)">', row)
    if m:
      curr_id = int(m.groupdict().get("rowcount"))
      # expand below marker line
      if (where == 'b'
          and curr_id > id_after_start and curr_id <= id_after_end):
        response_rows.append(row)
      # expand above marker line
      elif (where == 't'
            and curr_id >= id_before_start and curr_id < id_before_end):
        response_rows.append(row)
      # expand all skipped lines
      elif (where == 'a'
            and curr_id >= id_before_start and curr_id <= id_after_end):
        response_rows.append(row)
      if context is not None and len(response_rows) >= 2*context:
        break
  # Create a usable structure for the JS part
  response = []
  # Strip control characters first so the fragment parses as XML.
  response_rows = [_strip_invalid_xml(r) for r in response_rows]
  dom = ElementTree.parse(StringIO('<div>%s</div>' % "".join(response_rows)))
  for node in dom.getroot().getchildren():
    content = [[x.items(), x.text] for x in node.getchildren()]
    response.append([node.items(), content])
  return response
def _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,
                    column_width, patch_filename=None):
  """Helper function that returns objects for diff2 views.

  Returns either a dict with keys patch_left, patch_right, ps_left,
  ps_right and rows, or an HttpTextResponse describing the error.
  """
  ps_left = models.PatchSet.get_by_id(int(ps_left_id), parent=request.issue)
  if ps_left is None:
    return HttpTextResponse(
        'No patch set exists with that id (%s)' % ps_left_id, status=404)
  ps_left.issue = request.issue
  ps_right = models.PatchSet.get_by_id(int(ps_right_id), parent=request.issue)
  if ps_right is None:
    return HttpTextResponse(
        'No patch set exists with that id (%s)' % ps_right_id, status=404)
  ps_right.issue = request.issue
  if patch_id is not None:
    patch_right = models.Patch.get_by_id(int(patch_id), parent=ps_right)
  else:
    patch_right = None
  if patch_right is not None:
    patch_right.patchset = ps_right
    if patch_filename is None:
      patch_filename = patch_right.filename
  # Now find the corresponding patch in ps_left
  patch_left = models.Patch.gql('WHERE patchset = :1 AND filename = :2',
                                ps_left, patch_filename).get()
  # Left lines: the file's patched content in ps_left; if the file is not
  # in ps_left, fall back to the right patch's base content, else empty.
  if patch_left:
    try:
      new_content_left = patch_left.get_patched_content()
    except FetchError, err:
      return HttpTextResponse(str(err), status=404)
    lines_left = new_content_left.lines
  elif patch_right:
    lines_left = patch_right.get_content().lines
  else:
    lines_left = []
  # Right lines: mirror image of the fallback logic above.
  if patch_right:
    try:
      new_content_right = patch_right.get_patched_content()
    except FetchError, err:
      return HttpTextResponse(str(err), status=404)
    lines_right = new_content_right.lines
  elif patch_left:
    lines_right = patch_left.get_content().lines
  else:
    lines_right = []
  rows = engine.RenderDiff2TableRows(request,
                                     lines_left, patch_left,
                                     lines_right, patch_right,
                                     context=context,
                                     colwidth=column_width)
  rows = list(rows)
  # The renderer may yield a trailing None row; drop it.
  if rows and rows[-1] is None:
    del rows[-1]
  return dict(patch_left=patch_left, patch_right=patch_right,
              ps_left=ps_left, ps_right=ps_right, rows=rows)
@issue_required
def diff2(request, ps_left_id, ps_right_id, patch_filename):
  """/<issue>/diff2/... - View the delta between two different patch sets."""
  context = _get_context_for_user(request)
  column_width = _get_column_width_for_user(request)
  # Resolve the right-hand patch by filename within the right patchset.
  ps_right = models.PatchSet.get_by_id(int(ps_right_id), parent=request.issue)
  patch_right = None
  if ps_right:
    patch_right = models.Patch.gql('WHERE patchset = :1 AND filename = :2',
                                   ps_right, patch_filename).get()
  if patch_right:
    patch_id = patch_right.key().id()
  elif patch_filename.isdigit():
    # Perhaps it's an ID that's passed in, based on the old URL scheme.
    patch_id = int(patch_filename)
  else:  # patch doesn't exist in this patchset
    patch_id = None
  data = _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,
                         column_width, patch_filename)
  # _get_diff2_data returns an HttpResponse on error; pass it through
  # (redirects excepted).
  if isinstance(data, HttpResponse) and data.status_code != 302:
    return data
  patchsets = list(request.issue.patchset_set.order('created'))
  if data["patch_right"]:
    _add_next_prev2(data["ps_left"], data["ps_right"], data["patch_right"])
  return respond(request, 'diff2.html',
                 {'issue': request.issue,
                  'ps_left': data["ps_left"],
                  'patch_left': data["patch_left"],
                  'ps_right': data["ps_right"],
                  'patch_right': data["patch_right"],
                  'rows': data["rows"],
                  'patch_id': patch_id,
                  'context': context,
                  'context_values': models.CONTEXT_CHOICES,
                  'column_width': column_width,
                  'patchsets': patchsets,
                  'filename': patch_filename,
                  })
@issue_required
@json_response
def diff2_skipped_lines(request, ps_left_id, ps_right_id, patch_id,
                        id_before, id_after, where, column_width):
  """/<issue>/diff2/... - Returns a fragment of skipped lines"""
  column_width = _clean_int(column_width, django_settings.DEFAULT_COLUMN_WIDTH,
                            django_settings.MIN_COLUMN_WIDTH,
                            django_settings.MAX_COLUMN_WIDTH)
  # 'a' means expand everything; otherwise use the user's context (min. 100).
  context = None if where == 'a' else (_get_context_for_user(request) or 100)
  # Render with a huge context so the requested rows are all present.
  data = _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, 10000,
                         column_width)
  if isinstance(data, HttpResponse) and data.status_code != 302:
    return data
  return _get_skipped_lines_response(data["rows"], id_before, id_after,
                                     where, context)
def _get_comment_counts(account, patchset):
  """Helper to get comment counts for all patches in a single query.

  The helper returns two dictionaries comments_by_patch and
  drafts_by_patch with patch key as key and comment count as
  value. Patches without comments or drafts are not present in those
  dictionaries.
  """
  # A key-only query won't work because we need to fetch the patch key
  # in the for loop further down.
  comments_by_patch = {}
  drafts_by_patch = {}
  # Get all comment counts with one query rather than one per patch.
  comment_query = models.Comment.all()
  comment_query.ancestor(patchset)
  for comment in comment_query:
    pkey = models.Comment.patch.get_value_for_datastore(comment)
    if not comment.draft:
      comments_by_patch[pkey] = comments_by_patch.get(pkey, 0) + 1
    elif account and comment.author == account.user:
      drafts_by_patch[pkey] = drafts_by_patch.get(pkey, 0) + 1
  return comments_by_patch, drafts_by_patch
def _add_next_prev(patchset, patch):
  """Helper to add .next and .prev attributes to a patch object.

  Also sets .prev_with_comment / .next_with_comment, and fills
  patchset.patches as a side effect.
  """
  patch.prev = patch.next = None
  patches = models.Patch.all().filter('patchset =', patchset.key()).order(
      'filename').fetch(1000)
  patchset.patches = patches  # Required to render the jump to select.
  comments_by_patch, drafts_by_patch = _get_comment_counts(
      models.Account.current_user_account, patchset)
  last_patch = None
  next_patch = None
  last_patch_with_comment = None
  next_patch_with_comment = None
  found_patch = False
  # Single pass in filename order: patches seen before the current one
  # feed the prev* slots, patches after it feed the next* slots.
  for p in patches:
    if p.filename == patch.filename:
      found_patch = True
      continue
    # Pre-populate the cached counts so p.num_comments / p.num_drafts
    # below don't each trigger a datastore query.
    p._num_comments = comments_by_patch.get(p.key(), 0)
    p._num_drafts = drafts_by_patch.get(p.key(), 0)
    if not found_patch:
      last_patch = p
      if p.num_comments > 0 or p.num_drafts > 0:
        last_patch_with_comment = p
    else:
      if next_patch is None:
        next_patch = p
      if p.num_comments > 0 or p.num_drafts > 0:
        next_patch_with_comment = p
        # safe to stop scanning now because the next with out a comment
        # will already have been filled in by some earlier patch
        break
  patch.prev = last_patch
  patch.next = next_patch
  patch.prev_with_comment = last_patch_with_comment
  patch.next_with_comment = next_patch_with_comment
def _add_next_prev2(ps_left, ps_right, patch_right):
  """Helper to add .next and .prev attributes to a patch object.

  diff2 variant of _add_next_prev: the *_with_comment neighbours must also
  have a delta against ps_left so the diff2 link is meaningful.
  """
  patch_right.prev = patch_right.next = None
  patches = list(models.Patch.gql("WHERE patchset = :1 ORDER BY filename",
                                  ps_right))
  ps_right.patches = patches  # Required to render the jump to select.
  n_comments, n_drafts = _get_comment_counts(
      models.Account.current_user_account, ps_right)
  last_patch = None
  next_patch = None
  last_patch_with_comment = None
  next_patch_with_comment = None
  found_patch = False
  # Single pass in filename order: patches before the current one feed the
  # prev* slots, patches after it feed the next* slots.
  for p in patches:
    if p.filename == patch_right.filename:
      found_patch = True
      continue
    # Pre-populate the cached counts so p.num_comments / p.num_drafts
    # below don't each trigger a datastore query.
    p._num_comments = n_comments.get(p.key(), 0)
    p._num_drafts = n_drafts.get(p.key(), 0)
    if not found_patch:
      last_patch = p
      if ((p.num_comments > 0 or p.num_drafts > 0) and
          ps_left.key().id() in p.delta):
        last_patch_with_comment = p
    else:
      if next_patch is None:
        next_patch = p
      if ((p.num_comments > 0 or p.num_drafts > 0) and
          ps_left.key().id() in p.delta):
        next_patch_with_comment = p
        # safe to stop scanning now because the next with out a comment
        # will already have been filled in by some earlier patch
        break
  patch_right.prev = last_patch
  patch_right.next = next_patch
  patch_right.prev_with_comment = last_patch_with_comment
  patch_right.next_with_comment = next_patch_with_comment
@post_required
def inline_draft(request):
"""/inline_draft - Ajax handler to submit an in-line draft comment.
This wraps _inline_draft(); all exceptions are logged and cause an
abbreviated response indicating something went wrong.
Note: creating or editing draft comments is *not* XSRF-protected,
because it is not unusual to come back after hours; the XSRF tokens
time out after 1 or 2 hours. The final submit of the drafts for
others to view *is* XSRF-protected.
"""
try:
return _inline_draft(request)
except Exception, err:
logging.exception('Exception in inline_draft processing:')
# TODO(guido): return some kind of error instead?
# Return HttpResponse for now because the JS part expects
# a 200 status code.
return HttpHtmlResponse(
'<font color="red">Error: %s; please report!</font>' %
err.__class__.__name__)
def _inline_draft(request):
  """Helper to submit an in-line draft comment.

  Reads the comment's location (issue/patchset/patch/lineno/side) from
  request.POST, creates, updates or deletes the draft, and returns the
  rendered comment thread for that line.
  """
  # TODO(guido): turn asserts marked with XXX into errors
  # Don't use @login_required, since the JS doesn't understand redirects.
  if not request.user:
    # Don't log this, spammers have started abusing this.
    return HttpTextResponse('Not logged in')
  snapshot = request.POST.get('snapshot')
  assert snapshot in ('old', 'new'), repr(snapshot)
  left = (snapshot == 'old')
  side = request.POST.get('side')
  assert side in ('a', 'b'), repr(side)  # Display left (a) or right (b)
  issue_id = int(request.POST['issue'])
  issue = models.Issue.get_by_id(issue_id)
  assert issue  # XXX
  # The patchset/patch ids may come directly or, for diff2 views, from the
  # side-specific ps_left/ps_right (patch_left/patch_right) fields.
  patchset_id = int(request.POST.get('patchset') or
                    request.POST[side == 'a' and 'ps_left' or 'ps_right'])
  patchset = models.PatchSet.get_by_id(int(patchset_id), parent=issue)
  assert patchset  # XXX
  patch_id = int(request.POST.get('patch') or
                 request.POST[side == 'a' and 'patch_left' or 'patch_right'])
  patch = models.Patch.get_by_id(int(patch_id), parent=patchset)
  assert patch  # XXX
  text = request.POST.get('text')
  lineno = int(request.POST['lineno'])
  message_id = request.POST.get('message_id')
  comment = None
  if message_id:
    # Editing an existing draft: it must exist, still be a draft, and
    # belong to the requesting user; otherwise start a fresh comment.
    comment = models.Comment.get_by_key_name(message_id, parent=patch)
    if comment is None or not comment.draft or comment.author != request.user:
      comment = None
      message_id = None
  if not message_id:
    # Prefix with 'z' to avoid key names starting with digits.
    message_id = 'z' + binascii.hexlify(_random_bytes(16))
  if not text.rstrip():
    # An empty text submission deletes the draft (if any).
    if comment is not None:
      assert comment.draft and comment.author == request.user
      comment.delete()  # Deletion
      comment = None
      # Re-query the comment count.
      models.Account.current_user_account.update_drafts(issue)
  else:
    if comment is None:
      comment = models.Comment(key_name=message_id, parent=patch)
    comment.patch = patch
    comment.lineno = lineno
    comment.left = left
    comment.text = db.Text(text)
    comment.message_id = message_id
    comment.put()
    # The actual count doesn't matter, just that there's at least one.
    models.Account.current_user_account.update_drafts(issue, 1)
  # Collect all published comments on this line plus the user's own drafts.
  query = models.Comment.gql(
      'WHERE patch = :patch AND lineno = :lineno AND left = :left '
      'ORDER BY date',
      patch=patch, lineno=lineno, left=left)
  comments = list(c for c in query if not c.draft or c.author == request.user)
  if comment is not None and comment.author is None:
    # Show anonymous draft even though we don't save it
    comments.append(comment)
  if not comments:
    return HttpTextResponse(' ')
  for c in comments:
    c.complete()
  return render_to_response('inline_comment.html',
                            {'user': request.user,
                             'patch': patch,
                             'patchset': patchset,
                             'issue': issue,
                             'comments': comments,
                             'lineno': lineno,
                             'snapshot': snapshot,
                             'side': side,
                             },
                            context_instance=RequestContext(request))
def _get_affected_files(issue, full_diff=False):
  """Helper to return a list of affected files from the latest patchset.

  Args:
    issue: Issue instance.
    full_diff: If true, include the entire diff even if it exceeds 100 lines.

  Returns:
    2-tuple containing a list of affected files, and the diff contents if it
    is less than 100 lines (otherwise the second item is an empty string).
  """
  files = []
  modified_count = 0
  diff = ''
  patchsets = list(issue.patchset_set.order('created'))
  if patchsets:
    latest = patchsets[-1]
    for patch in latest.patch_set.order('filename'):
      status_prefix = (patch.status + ' ') if patch.status else ''
      files.append(status_prefix + patch.filename)
      # No point in loading patches if the patchset is too large for email.
      if full_diff or modified_count < 100:
        modified_count += patch.num_added + patch.num_removed
    if full_diff or modified_count < 100:
      diff = latest.data
  return files, diff
def _get_mail_template(request, issue, full_diff=False):
  """Helper to return the template and context for an email.

  If this is the first email sent by the owner, a template that lists the
  reviewers, description and files is used.
  """
  context = {}
  template = 'mails/comment.txt'
  if request.user == issue.owner:
    prior_messages = db.GqlQuery(
        'SELECT * FROM Message WHERE ANCESTOR IS :1 AND sender = :2',
        issue, db.Email(request.user.email()))
    if prior_messages.count(1) == 0:
      # First message from the owner: use the full review template.
      template = 'mails/review.txt'
      files, patch = _get_affected_files(issue, full_diff)
      context.update({'files': files, 'patch': patch, 'base': issue.base})
  return template, context
@login_required
@issue_required
@xsrf_required
def publish(request):
  """ /<issue>/publish - Publish draft comments and send mail.

  GET renders the publish form pre-filled with reviewers/CC nicknames,
  the draft message text and a preview of the user's draft comments.
  POST validates the form, updates the issue's reviewer/CC lists,
  publishes the draft comments and sends the notification message.
  """
  issue = request.issue
  # The owner gets the full form (subject, reviewers editable); other
  # users get the reduced MiniPublishForm.
  if request.user == issue.owner:
    form_class = PublishForm
  else:
    form_class = MiniPublishForm
  draft_message = None
  if not request.POST.get('message_only', None):
    query = models.Message.gql(('WHERE issue = :1 AND sender = :2 '
                                'AND draft = TRUE'), issue,
                               request.user.email())
    draft_message = query.get()
  if request.method != 'POST':
    # GET: build the pre-filled form and a preview of draft comments.
    reviewers = issue.reviewers[:]
    cc = issue.cc[:]
    if request.user != issue.owner and (request.user.email()
                                        not in issue.reviewers):
      reviewers.append(request.user.email())
      if request.user.email() in cc:
        cc.remove(request.user.email())
    reviewers = [models.Account.get_nickname_for_email(reviewer,
                                                       default=reviewer)
                 for reviewer in reviewers]
    # NOTE(review): the comprehension variable `cc` shadows (and, in
    # Python 2, clobbers) the `cc` list it iterates -- harmless here
    # because `cc` is not used again on this path, but fragile.
    ccs = [models.Account.get_nickname_for_email(cc, default=cc) for cc in cc]
    tbd, comments = _get_draft_comments(request, issue, True)
    preview = _get_draft_details(request, comments)
    if draft_message is None:
      msg = ''
    else:
      msg = draft_message.text
    form = form_class(initial={'subject': issue.subject,
                               'reviewers': ', '.join(reviewers),
                               'cc': ', '.join(ccs),
                               'send_mail': True,
                               'message': msg,
                               })
    return respond(request, 'publish.html', {'form': form,
                                             'issue': issue,
                                             'preview': preview,
                                             'draft_message': draft_message,
                                             })
  # POST: validate and publish.
  form = form_class(request.POST)
  if not form.is_valid():
    return respond(request, 'publish.html', {'form': form, 'issue': issue})
  if request.user == issue.owner:
    issue.subject = form.cleaned_data['subject']
  if form.is_valid() and not form.cleaned_data.get('message_only', False):
    reviewers = _get_emails(form, 'reviewers')
  else:
    reviewers = issue.reviewers
    if request.user != issue.owner and request.user.email() not in reviewers:
      reviewers.append(db.Email(request.user.email()))
  if form.is_valid() and not form.cleaned_data.get('message_only', False):
    cc = _get_emails(form, 'cc')
  else:
    cc = issue.cc
    # The user is in the reviewer list, remove them from CC if they're there.
    if request.user.email() in cc:
      cc.remove(request.user.email())
  # _get_emails() may have added errors to the form; re-check validity.
  if not form.is_valid():
    return respond(request, 'publish.html', {'form': form, 'issue': issue})
  issue.reviewers = reviewers
  issue.cc = cc
  if not form.cleaned_data.get('message_only', False):
    tbd, comments = _get_draft_comments(request, issue)
  else:
    tbd = []
    comments = []
  issue.update_comment_count(len(comments))
  tbd.append(issue)
  if comments:
    logging.warn('Publishing %d comments', len(comments))
  msg = _make_message(request, issue,
                      form.cleaned_data['message'],
                      comments,
                      form.cleaned_data['send_mail'],
                      draft=draft_message,
                      in_reply_to=form.cleaned_data.get('in_reply_to'))
  tbd.append(msg)
  # Persist everything collected above (issue, patchsets, comments, msg).
  for obj in tbd:
    db.put(obj)
  _notify_issue(request, issue, 'Comments published')
  # There are now no comments here (modulo race conditions)
  models.Account.current_user_account.update_drafts(issue, 0)
  if form.cleaned_data.get('no_redirect', False):
    return HttpTextResponse('OK')
  return HttpResponseRedirect(reverse(show, args=[issue.key().id()]))
def _encode_safely(s):
"""Helper to turn a unicode string into 8-bit bytes."""
if isinstance(s, unicode):
s = s.encode('utf-8')
return s
def _get_draft_comments(request, issue, preview=False):
  """Helper to return objects to put() and a list of draft comments.

  If preview is True, the list of objects to put() is empty to avoid changes
  to the datastore.

  Args:
    request: Django Request object.
    issue: Issue instance.
    preview: Preview flag (default: False).

  Returns:
    2-tuple (put_objects, comments).
  """
  comments = []
  tbd = []
  # XXX Should request all drafts for this issue once, now we can.
  for patchset in issue.patchset_set.order('created'):
    ps_comments = list(models.Comment.gql(
        'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE',
        patchset, request.user))
    if ps_comments:
      # Preload all patches of this patchset keyed by datastore key so
      # each comment can be linked without a per-comment fetch.
      patches = dict((p.key(), p) for p in patchset.patch_set)
      for p in patches.itervalues():
        p.patchset = patchset
      for c in ps_comments:
        # Marking draft=False is what "publishes" the comment.
        c.draft = False
        # Get the patch key value without loading the patch entity.
        # NOTE: Unlike the old version of this code, this is the
        # recommended and documented way to do this!
        pkey = models.Comment.patch.get_value_for_datastore(c)
        if pkey in patches:
          patch = patches[pkey]
          c.patch = patch
      if not preview:
        tbd.append(ps_comments)
        patchset.update_comment_count(len(ps_comments))
        tbd.append(patchset)
      # Sort: by file, right side before left, then line number and date.
      ps_comments.sort(key=lambda c: (c.patch.filename, not c.left,
                                      c.lineno, c.date))
      comments += ps_comments
  return tbd, comments
def _patchlines2cache(patchlines, left):
"""Helper that converts return value of ParsePatchToLines for caching.
Each line in patchlines is (old_line_no, new_line_no, line). When
comment is on the left we store the old_line_no, otherwise
new_line_no.
"""
if left:
it = ((old, line) for old, _, line in patchlines)
else:
it = ((new, line) for _, new, line in patchlines)
return dict(it)
def _get_draft_details(request, comments):
  """Helper to display comments with context in the email message.

  Builds a plain-text rendering of the given comments, one "File ..."
  header per (patch, side) group followed by "file:line: context / text"
  entries, each prefixed with an absolute URL into the diff view.
  """
  last_key = None
  output = []
  linecache = {}  # Maps (c.patch.key(), c.left) to mapping (lineno, line)
  # NOTE(review): modified_patches is never appended to, so the db.put()
  # at the bottom never runs; fetch_base_failed is set but never read.
  # Both look like vestiges of an older version of this code.
  modified_patches = []
  fetch_base_failed = False
  for c in comments:
    if (c.patch.key(), c.left) != last_key:
      # New (patch, side) group: emit a file header and (re)build the
      # line cache used to show one line of context per comment.
      url = request.build_absolute_uri(
        reverse(diff, args=[request.issue.key().id(),
                            c.patch.patchset.key().id(),
                            c.patch.filename]))
      output.append('\n%s\nFile %s (%s):' % (url, c.patch.filename,
                                             c.left and "left" or "right"))
      last_key = (c.patch.key(), c.left)
      patch = c.patch
      if patch.no_base_file:
        # No base file: derive line contents from the patch text itself.
        linecache[last_key] = _patchlines2cache(
          patching.ParsePatchToLines(patch.lines), c.left)
      else:
        try:
          if c.left:
            old_lines = patch.get_content().text.splitlines(True)
            linecache[last_key] = dict(enumerate(old_lines, 1))
          else:
            new_lines = patch.get_patched_content().text.splitlines(True)
            linecache[last_key] = dict(enumerate(new_lines, 1))
        except FetchError:
          # Fall back to the patch text when the base can't be fetched.
          linecache[last_key] = _patchlines2cache(
            patching.ParsePatchToLines(patch.lines), c.left)
          fetch_base_failed = True
    context = linecache[last_key].get(c.lineno, '').strip()
    url = request.build_absolute_uri(
      '%s#%scode%d' % (reverse(diff, args=[request.issue.key().id(),
                                           c.patch.patchset.key().id(),
                                           c.patch.filename]),
                       c.left and "old" or "new",
                       c.lineno))
    output.append('\n%s\n%s:%d: %s\n%s' % (url, c.patch.filename, c.lineno,
                                           context, c.text.rstrip()))
  if modified_patches:
    db.put(modified_patches)
  return '\n'.join(output)
def _make_message(request, issue, message, comments=None, send_mail=False,
                  draft=None, in_reply_to=None):
  """Helper to create a Message instance and optionally send an email.

  Args:
    request: Django Request object.
    issue: Issue the message belongs to.
    message: The message body text typed by the user.
    comments: Optional list of published Comment instances to render.
    send_mail: When True, also deliver the message via App Engine mail.
    draft: An existing draft Message to reuse instead of creating one.
    in_reply_to: Optional key of the Message this one replies to.

  Returns:
    The (unsaved) Message instance; the caller is responsible for put().
  """
  attach_patch = request.POST.get("attach_patch") == "yes"
  template, context = _get_mail_template(request, issue, full_diff=attach_patch)
  # Decide who should receive mail
  my_email = db.Email(request.user.email())
  to = [db.Email(issue.owner.email())] + issue.reviewers
  cc = issue.cc[:]
  if django_settings.RIETVELD_INCOMING_MAIL_ADDRESS:
    cc.append(db.Email(django_settings.RIETVELD_INCOMING_MAIL_ADDRESS))
  reply_to = to + cc
  if my_email in to and len(to) > 1:  # send_mail() wants a non-empty to list
    to.remove(my_email)
  if my_email in cc:
    cc.remove(my_email)
  issue_id = issue.key().id()
  subject = '%s (issue %d)' % (issue.subject, issue_id)
  patch = None
  if attach_patch:
    subject = 'PATCH: ' + subject
    if 'patch' in context:
      # Pull the diff out of the template context; it is attached to the
      # mail instead of rendered inline.
      patch = context['patch']
      del context['patch']
  if issue.message_set.count(1) > 0:
    subject = 'Re: ' + subject
  if comments:
    details = _get_draft_details(request, comments)
  else:
    details = ''
  message = message.replace('\r\n', '\n')
  text = ((message.strip() + '\n\n' + details.strip())).strip()
  if draft is None:
    msg = models.Message(issue=issue,
                         subject=subject,
                         sender=my_email,
                         recipients=reply_to,
                         text=db.Text(text),
                         parent=issue)
  else:
    # Promote the existing draft to a published message.
    msg = draft
    msg.subject = subject
    msg.recipients = reply_to
    msg.text = db.Text(text)
    msg.draft = False
    msg.date = datetime.datetime.now()
  if in_reply_to:
    try:
      msg.in_reply_to = models.Message.get(in_reply_to)
      replied_issue_id = msg.in_reply_to.issue.key().id()
      if replied_issue_id != issue_id:
        logging.warn('In-reply-to Message is for a different issue: '
                     '%s instead of %s', replied_issue_id, issue_id)
        msg.in_reply_to = None
    except (db.KindError, db.BadKeyError):
      logging.warn('Invalid in-reply-to Message or key given: %s', in_reply_to)
  if send_mail:
    # Limit the list of files in the email to approximately 200
    if 'files' in context and len(context['files']) > 210:
      num_trimmed = len(context['files']) - 200
      del context['files'][200:]
      context['files'].append('[[ %d additional files ]]' % num_trimmed)
    url = request.build_absolute_uri(reverse(show, args=[issue.key().id()]))
    reviewer_nicknames = ', '.join(library.get_nickname(rev_temp, True,
                                                        request)
                                   for rev_temp in issue.reviewers)
    cc_nicknames = ', '.join(library.get_nickname(cc_temp, True, request)
                             for cc_temp in cc)
    my_nickname = library.get_nickname(request.user, True, request)
    reply_to = ', '.join(reply_to)
    description = (issue.description or '').replace('\r\n', '\n')
    home = request.build_absolute_uri(reverse(index))
    context.update({'reviewer_nicknames': reviewer_nicknames,
                    'cc_nicknames': cc_nicknames,
                    'my_nickname': my_nickname, 'url': url,
                    'message': message, 'details': details,
                    'description': description, 'home': home,
                    })
    # Make sure every str value in the context is decodable before it
    # reaches the template engine.
    for key, value in context.iteritems():
      if isinstance(value, str):
        try:
          encoding.force_unicode(value)
        except UnicodeDecodeError:
          logging.error('Key %s is not valid unicode. value: %r' % (key, value))
          # The content failed to be decoded as utf-8. Enforce it as ASCII.
          context[key] = value.decode('ascii', 'replace')
    body = django.template.loader.render_to_string(
        template, context, context_instance=RequestContext(request))
    logging.warn('Mail: to=%s; cc=%s', ', '.join(to), ', '.join(cc))
    send_args = {'sender': my_email,
                 'to': [_encode_safely(address) for address in to],
                 'subject': _encode_safely(subject),
                 'body': _encode_safely(body),
                 'reply_to': _encode_safely(reply_to)}
    if cc:
      send_args['cc'] = [_encode_safely(address) for address in cc]
    if patch:
      send_args['attachments'] = [('issue_%s_patch.diff' % issue.key().id(),
                                   patch)]
    # Retry transient mail API deadline errors up to 3 times.
    attempts = 0
    while True:
      try:
        mail.send_mail(**send_args)
        break
      except apiproxy_errors.DeadlineExceededError:
        # apiproxy_errors.DeadlineExceededError is raised when the
        # deadline of an API call is reached (e.g. for mail it's
        # something about 5 seconds). It's not the same as the lethal
        # runtime.DeadlineExeededError.
        attempts += 1
        if attempts >= 3:
          raise
    if attempts:
      logging.warning("Retried sending email %s times", attempts)
  return msg
@post_required
@login_required
@xsrf_required
@issue_required
def star(request):
  """Add a star to an Issue.

  Stores the issue's numeric id in the current account's `stars` list
  (if not already present) and re-renders the star widget.
  """
  account = models.Account.current_user_account
  account.user_has_selected_nickname()  # This will preserve account.fresh.
  if account.stars is None:
    account.stars = []
  # Renamed from `id` to avoid shadowing the builtin.
  issue_id = request.issue.key().id()
  if issue_id not in account.stars:
    account.stars.append(issue_id)
    account.put()
  return respond(request, 'issue_star.html', {'issue': request.issue})
@post_required
@login_required
@issue_required
@xsrf_required
def unstar(request):
  """Remove the star from an Issue.

  Filters the issue's numeric id out of the current account's `stars`
  list (if present) and re-renders the star widget.
  """
  account = models.Account.current_user_account
  account.user_has_selected_nickname()  # This will preserve account.fresh.
  if account.stars is None:
    account.stars = []
  # Renamed from `id` to avoid shadowing the builtin.
  issue_id = request.issue.key().id()
  if issue_id in account.stars:
    account.stars[:] = [i for i in account.stars if i != issue_id]
    account.put()
  return respond(request, 'issue_star.html', {'issue': request.issue})
@login_required
@issue_required
def draft_message(request):
  """/<issue>/draft_message - Retrieve, modify and delete draft messages.

  Note: creating or editing draft messages is *not* XSRF-protected,
  because it is not unusual to come back after hours; the XSRF tokens
  time out after 1 or 2 hours. The final submit of the drafts for
  others to view *is* XSRF-protected.
  """
  query = models.Message.gql(('WHERE issue = :1 AND sender = :2 '
                              'AND draft = TRUE'),
                             request.issue, request.user.email())
  # get() returns None when there is no matching entity; this replaces
  # the old count()-then-get() pattern, which issued two datastore RPCs.
  draft_message = query.get()
  if request.method == 'GET':
    return _get_draft_message(draft_message)
  elif request.method == 'POST':
    return _post_draft_message(request, draft_message)
  elif request.method == 'DELETE':
    return _delete_draft_message(draft_message)
  return HttpTextResponse('An error occurred.', status=500)
def _get_draft_message(draft):
  """Handles GET requests to /<issue>/draft_message.

  Arguments:
    draft: A Message instance or None.

  Returns the content of a draft message or an empty string if draft is None.
  """
  if draft is None:
    return HttpTextResponse('')
  return HttpTextResponse(draft.text)
def _post_draft_message(request, draft):
  """Handles POST requests to /<issue>/draft_message.

  If draft is None a new message is created.

  Arguments:
    request: The current request.
    draft: A Message instance or None.

  Returns the saved draft text as a plain-text response.
  """
  if draft is None:
    draft = models.Message(issue=request.issue, parent=request.issue,
                           sender=request.user.email(), draft=True)
  # NOTE(review): if the 'reviewmsg' field is absent this stores None as
  # the text -- presumably the client always sends it; confirm.
  draft.text = request.POST.get('reviewmsg')
  draft.put()
  return HttpTextResponse(draft.text)
def _delete_draft_message(draft):
  """Handles DELETE requests to /<issue>/draft_message.

  Deletes a draft message.

  Arguments:
    draft: A Message instance or None.

  Returns 'OK' regardless of whether a draft existed (idempotent delete).
  """
  if draft is not None:
    draft.delete()
  return HttpTextResponse('OK')
@json_response
def search(request):
  """/search - Search for issues or patchset.

  Accepts both GET (renders the HTML search form when invalid/empty) and
  POST. Results are returned as HTML pagination or, for other formats,
  as a dict (JSON via @json_response) with a datastore cursor for
  continuation.

  Returns HTTP 500 if the corresponding index is missing.
  """
  if request.method == 'GET':
    form = SearchForm(request.GET)
    if not form.is_valid() or not request.GET:
      return respond(request, 'search.html', {'form': form})
  else:
    form = SearchForm(request.POST)
    if not form.is_valid():
      return HttpTextResponse('Invalid arguments', status=400)
  logging.info('%s' % form.cleaned_data)
  keys_only = form.cleaned_data['keys_only'] or False
  format = form.cleaned_data['format'] or 'html'
  limit = form.cleaned_data['limit']
  with_messages = form.cleaned_data['with_messages']
  if format == 'html':
    keys_only = False
    limit = limit or DEFAULT_LIMIT
  else:
    # Default limit scales inversely with how expensive each result is.
    if not limit:
      if keys_only:
        # It's a fast query.
        limit = 1000
      elif with_messages:
        # It's an heavy query.
        limit = 10
      else:
        limit = 100
  q = models.Issue.all(keys_only=keys_only)
  if form.cleaned_data['cursor']:
    q.with_cursor(form.cleaned_data['cursor'])
  if form.cleaned_data['closed'] is not None:
    q.filter('closed = ', form.cleaned_data['closed'])
  if form.cleaned_data['owner']:
    q.filter('owner = ', form.cleaned_data['owner'])
  if form.cleaned_data['reviewer']:
    q.filter('reviewers = ', form.cleaned_data['reviewer'])
  if form.cleaned_data['private'] is not None:
    q.filter('private = ', form.cleaned_data['private'])
  if form.cleaned_data['repo_guid']:
    q.filter('repo_guid = ', form.cleaned_data['repo_guid'])
  if form.cleaned_data['base']:
    q.filter('base = ', form.cleaned_data['base'])
  # Default sort by ascending key to save on indexes.
  # The datastore requires inequality filters and the first sort order to
  # be on the same property, hence sorted_by follows the range filters.
  sorted_by = '__key__'
  if form.cleaned_data['modified_before']:
    q.filter('modified < ', form.cleaned_data['modified_before'])
    sorted_by = 'modified'
  if form.cleaned_data['modified_after']:
    q.filter('modified >= ', form.cleaned_data['modified_after'])
    sorted_by = 'modified'
  if form.cleaned_data['created_before']:
    q.filter('created < ', form.cleaned_data['created_before'])
    sorted_by = 'created'
  if form.cleaned_data['created_after']:
    q.filter('created >= ', form.cleaned_data['created_after'])
    sorted_by = 'created'
  q.order(sorted_by)
  # Update the cursor value in the result.
  if format == 'html':
    nav_params = dict(
        (k, v) for k, v in form.cleaned_data.iteritems() if v is not None)
    return _paginate_issues_with_cursor(
        reverse(search),
        request,
        q,
        limit,
        'search_results.html',
        extra_nav_parameters=nav_params)
  results = q.fetch(limit)
  form.cleaned_data['cursor'] = q.cursor()
  if keys_only:
    # There's not enough information to filter. The only thing that is leaked
    # is the issue's key.
    filtered_results = results
  else:
    filtered_results = [i for i in results if _can_view_issue(request.user, i)]
  data = {
    'cursor': form.cleaned_data['cursor'],
  }
  if keys_only:
    data['results'] = [i.id() for i in filtered_results]
  else:
    data['results'] = [_issue_as_dict(i, with_messages, request)
                       for i in filtered_results]
  return data
### Repositories and Branches ###
def repos(request):
  """/repos - Show the list of known Subversion repositories.

  Renders all Branch entities, each annotated with its Repository,
  sorted case-insensitively by (repository name, category, branch name).
  """
  # Clean up garbage created by buggy edits
  bad_branches = models.Branch.gql('WHERE owner = :1', None).fetch(100)
  if bad_branches:
    db.delete(bad_branches)
  # Preload all repositories keyed by entity key so branches can be
  # annotated without one fetch per branch.
  repo_map = {}
  for repo in models.Repository.all().fetch(1000, batch_size=100):
    repo_map[str(repo.key())] = repo
  branches = []
  for branch in models.Branch.all().fetch(2000, batch_size=100):
    # Using ._repo instead of .repo returns the db.Key of the referenced entity.
    # Access to a protected member FOO of a client class
    # pylint: disable=W0212
    branch.repository = repo_map[str(branch._repo)]
    branches.append(branch)
  branches.sort(key=lambda b: map(
    unicode.lower, (b.repository.name, b.category, b.name)))
  return respond(request, 'repos.html', {'branches': branches})
@login_required
@xsrf_required
def repo_new(request):
  """/repo_new - Create a new Subversion repository record.

  GET renders an empty RepoForm; POST validates it, stores the new
  Repository and auto-creates a default '*trunk*' Branch pointing at
  <repo url>/trunk/.
  """
  if request.method != 'POST':
    form = RepoForm()
    return respond(request, 'repo_new.html', {'form': form})
  form = RepoForm(request.POST)
  errors = form.errors
  if not errors:
    try:
      # Model validation (db.BadValueError) is surfaced as a form error.
      repo = models.Repository(
        name=form.cleaned_data.get('name'),
        url=form.cleaned_data.get('url'),
        guid=form.cleaned_data.get('guid'),
        )
    except (db.BadValueError, ValueError), err:
      errors['__all__'] = unicode(err)
  if errors:
    return respond(request, 'repo_new.html', {'form': form})
  repo.put()
  branch_url = repo.url
  if not branch_url.endswith('/'):
    branch_url += '/'
  branch_url += 'trunk/'
  branch = models.Branch(repo=repo, repo_name=repo.name,
                         category='*trunk*', name='Trunk',
                         url=branch_url)
  branch.put()
  return HttpResponseRedirect(reverse(repos))
# Root URL of the Python Subversion repository seeded by repo_init() below.
SVN_ROOT = 'http://svn.python.org/view/*checkout*/python/'
# Branches created under SVN_ROOT by repo_init().
BRANCHES = [
    # category, name, url suffix
    ('*trunk*', 'Trunk', 'trunk/'),
    ('branch', '2.5', 'branches/release25-maint/'),
    ('branch', 'py3k', 'branches/py3k/'),
    ]
# TODO: Make this a POST request to avoid XSRF attacks.
@admin_required
def repo_init(_request):
  """/repo_init - Initialze the list of known Subversion repositories.

  Idempotent: creates the 'Python' Repository and the BRANCHES entries
  only if they don't exist yet, then redirects to /repos.
  """
  python = models.Repository.gql("WHERE name = 'Python'").get()
  if python is None:
    python = models.Repository(name='Python', url=SVN_ROOT)
    python.put()
    pybranches = []
  else:
    pybranches = list(models.Branch.gql('WHERE repo = :1', python))
  for category, name, url in BRANCHES:
    url = python.url + url
    # for/else: only create the branch when no existing one matches.
    for br in pybranches:
      if (br.category, br.name, br.url) == (category, name, url):
        break
    else:
      br = models.Branch(repo=python, repo_name='Python',
                         category=category, name=name, url=url)
      br.put()
  return HttpResponseRedirect(reverse(repos))
@login_required
@xsrf_required
def branch_new(request, repo_id):
  """/branch_new/<repo> - Add a new Branch to a Repository record.

  GET renders a BranchForm pre-filled with the repository URL; POST
  validates it and stores the Branch.
  """
  # NOTE(review): get_by_id may return None for an unknown repo_id, which
  # would raise AttributeError below instead of a clean 404 -- confirm
  # whether URL routing guarantees a valid id.
  repo = models.Repository.get_by_id(int(repo_id))
  if request.method != 'POST':
    form = BranchForm(initial={'url': repo.url,
                               'category': 'branch',
                               })
    return respond(request, 'branch_new.html', {'form': form, 'repo': repo})
  form = BranchForm(request.POST)
  errors = form.errors
  if not errors:
    try:
      # Model validation (db.BadValueError) is surfaced as a form error.
      branch = models.Branch(
        repo=repo,
        category=form.cleaned_data.get('category'),
        name=form.cleaned_data.get('name'),
        url=form.cleaned_data.get('url'),
        )
    except (db.BadValueError, ValueError), err:
      errors['__all__'] = unicode(err)
  if errors:
    return respond(request, 'branch_new.html', {'form': form, 'repo': repo})
  branch.repo_name = repo.name
  branch.put()
  return HttpResponseRedirect(reverse(repos))
@login_required
@xsrf_required
def branch_edit(request, branch_id):
  """/branch_edit/<branch> - Edit a Branch record.

  Only the branch owner may edit; others get a 403. GET renders the
  pre-filled BranchForm; POST validates and saves the changes.
  """
  branch = models.Branch.get_by_id(int(branch_id))
  if branch.owner != request.user:
    return HttpTextResponse('You do not own this branch', status=403)
  if request.method != 'POST':
    form = BranchForm(initial={'category': branch.category,
                               'name': branch.name,
                               'url': branch.url,
                               })
    return respond(request, 'branch_edit.html',
                   {'branch': branch, 'form': form})
  form = BranchForm(request.POST)
  errors = form.errors
  if not errors:
    try:
      # Property setters validate; db.BadValueError becomes a form error.
      branch.category = form.cleaned_data.get('category')
      branch.name = form.cleaned_data.get('name')
      branch.url = form.cleaned_data.get('url')
    except (db.BadValueError, ValueError), err:
      errors['__all__'] = unicode(err)
  if errors:
    return respond(request, 'branch_edit.html',
                   {'branch': branch, 'form': form})
  branch.put()
  return HttpResponseRedirect(reverse(repos))
@post_required
@login_required
@xsrf_required
def branch_delete(request, branch_id):
  """/branch_delete/<branch> - Delete a Branch record.

  Only the branch owner may delete. If the owning repository ends up with
  no branches at all it is deleted as well.
  """
  branch = models.Branch.get_by_id(int(branch_id))
  if branch.owner != request.user:
    return HttpTextResponse('You do not own this branch', status=403)
  repo = branch.repo
  branch.delete()
  num_branches = models.Branch.gql('WHERE repo = :1', repo).count()
  if not num_branches:
    # Even if we don't own the repository?  Yes, I think so!  Empty
    # repositories have no representation on screen.
    repo.delete()
  return HttpResponseRedirect(reverse(repos))
### User Profiles ###
@login_required
@xsrf_required
def settings(request):
  """/settings - View or update the current account's preferences.

  GET renders the SettingsForm pre-filled from the Account (plus the
  user's current XMPP presence when chat notification is enabled).
  POST saves the form; enabling chat notification for the first time
  triggers an XMPP invite. Redirects to /mine on success.
  """
  account = models.Account.current_user_account
  if request.method != 'POST':
    nickname = account.nickname
    default_context = account.default_context
    default_column_width = account.default_column_width
    form = SettingsForm(initial={'nickname': nickname,
                                 'context': default_context,
                                 'column_width': default_column_width,
                                 'notify_by_email': account.notify_by_email,
                                 'notify_by_chat': account.notify_by_chat,
                                 })
    chat_status = None
    if account.notify_by_chat:
      try:
        presence = xmpp.get_presence(account.email)
      except Exception, err:
        logging.error('Exception getting XMPP presence: %s', err)
        chat_status = 'Error (%s)' % err
      else:
        if presence:
          chat_status = 'online'
        else:
          chat_status = 'offline'
    return respond(request, 'settings.html', {'form': form,
                                              'chat_status': chat_status})
  form = SettingsForm(request.POST)
  if form.is_valid():
    account.nickname = form.cleaned_data.get('nickname')
    account.default_context = form.cleaned_data.get('context')
    account.default_column_width = form.cleaned_data.get('column_width')
    account.notify_by_email = form.cleaned_data.get('notify_by_email')
    notify_by_chat = form.cleaned_data.get('notify_by_chat')
    # Invite only on the off -> on transition.
    must_invite = notify_by_chat and not account.notify_by_chat
    account.notify_by_chat = notify_by_chat
    account.fresh = False
    account.put()
    if must_invite:
      logging.info('Sending XMPP invite to %s', account.email)
      try:
        xmpp.send_invite(account.email)
      except Exception, err:
        # XXX How to tell user it failed?
        logging.error('XMPP invite to %s failed', account.email)
  else:
    return respond(request, 'settings.html', {'form': form})
  return HttpResponseRedirect(reverse(mine))
@post_required
@login_required
@xsrf_required
def account_delete(_request):
  """Delete the current user's Account entity and log them out."""
  account = models.Account.current_user_account
  account.delete()
  return HttpResponseRedirect(users.create_logout_url(reverse(index)))
@login_required
@xsrf_required
def migrate_entities(request):
  """/migrate_entities - Move a user's entities between their accounts.

  POST validates MigrateEntitiesForm and enqueues one background task per
  entity kind; the actual datastore writes happen in
  task_migrate_entities. GET just renders the empty form.
  """
  msg = None
  if request.method == 'POST':
    form = MigrateEntitiesForm(request.POST)
    form.set_user(request.user)
    if form.is_valid():
      # verify that the account belongs to the user
      old_account = form.cleaned_data['account']
      old_account_key = str(old_account.key())
      new_account_key = str(models.Account.current_user_account.key())
      for kind in ('Issue', 'Repository', 'Branch'):
        taskqueue.add(url=reverse(task_migrate_entities),
                      params={'kind': kind,
                              'old': old_account_key,
                              'new': new_account_key})
      msg = (u'Migration job started. The issues, repositories and branches'
             u' created with your old account (%s) will be moved to your'
             u' current account (%s) in a background task and should'
             u' be visible for your current account shortly.'
             % (old_account.user.email(), request.user.email()))
  else:
    form = MigrateEntitiesForm()
  return respond(request, 'migrate_entities.html', {'form': form, 'msg': msg})
@post_required
def task_migrate_entities(request):
  """/tasks/migrate_entities - Migrates entities from one account to another.

  Taskqueue worker: reassigns ownership of `kind` entities from the `old`
  account's user to the `new` account's user in batches of 20, re-enqueuing
  itself with a `key` cursor until a batch comes back empty. All invalid
  inputs return an empty 200 so the task is not retried.
  """
  kind = request.POST.get('kind')
  old = request.POST.get('old')
  new = request.POST.get('new')
  batch_size = 20
  if kind is None or old is None or new is None:
    logging.warning('Missing parameters')
    return HttpResponse()
  if kind not in ('Issue', 'Repository', 'Branch'):
    logging.warning('Invalid kind: %s' % kind)
    return HttpResponse()
  old_account = models.Account.get(db.Key(old))
  new_account = models.Account.get(db.Key(new))
  if old_account is None or new_account is None:
    logging.warning('Invalid accounts')
    return HttpResponse()
  # make sure that accounts match
  if old_account.user.user_id() != new_account.user.user_id():
    logging.warning('Accounts don\'t match')
    return HttpResponse()
  model = getattr(models, kind)
  key = request.POST.get('key')
  query = model.all().filter('owner =', old_account.user)
  if key:
    # Resume after the last key processed by the previous batch.
    query = query.filter('__key__ >', db.Key(key))
  query = query.order('__key__')
  tbd = []
  for entity in query.fetch(batch_size):
    entity.owner = new_account.user
    tbd.append(entity)
  if tbd:
    db.put(tbd)
    # More entities may remain; chain the next batch.
    taskqueue.add(url=reverse(task_migrate_entities),
                  params={'kind': kind, 'old': old, 'new': new,
                          'key': str(tbd[-1].key())})
  return HttpResponse()
@user_key_required
def user_popup(request):
  """/user_popup - Pop up to show the user info.

  Thin wrapper that converts any exception into a 200 HTML error snippet.
  """
  try:
    return _user_popup(request)
  except Exception, err:
    logging.exception('Exception in user_popup processing:')
    # Return HttpResponse because the JS part expects a 200 status code.
    return HttpHtmlResponse(
      '<font color="red">Error: %s; please report!</font>' %
      err.__class__.__name__)
def _user_popup(request):
  """Render (and memcache for 60s) the user-info popup HTML.

  Counts the open issues the user owns and reviews; the rendered snippet
  is cached per email address since those counts drift over time.
  """
  user = request.user_to_show
  popup_html = memcache.get('user_popup:' + user.email())
  if popup_html is None:
    num_issues_created = db.GqlQuery(
      'SELECT * FROM Issue '
      'WHERE closed = FALSE AND owner = :1',
      user).count()
    num_issues_reviewed = db.GqlQuery(
      'SELECT * FROM Issue '
      'WHERE closed = FALSE AND reviewers = :1',
      user.email()).count()
    user.nickname = models.Account.get_nickname_for_email(user.email())
    popup_html = render_to_response('user_popup.html',
                            {'user': user,
                             'num_issues_created': num_issues_created,
                             'num_issues_reviewed': num_issues_reviewed,
                             },
                             context_instance=RequestContext(request))
    # Use time expired cache because the number of issues will change over time
    memcache.add('user_popup:' + user.email(), popup_html, 60)
  return popup_html
@post_required
def incoming_chat(request):
  """/_ah/xmpp/message/chat/

  This handles incoming XMPP (chat) messages.

  Just reply saying we ignored the chat. Always returns an empty 200 so
  App Engine does not retry delivery.
  """
  try:
    msg = xmpp.Message(request.POST)
  except xmpp.InvalidMessageError, err:
    logging.warn('Incoming invalid chat message: %s' % err)
    return HttpTextResponse('')
  sts = msg.reply('Sorry, Rietveld does not support chat input')
  logging.debug('XMPP status %r', sts)
  return HttpTextResponse('')
@post_required
def incoming_mail(request, recipients):
  """/_ah/mail/(.*)

  Handle incoming mail messages.

  The issue is not modified. No reviewers or CC's will be added or removed.
  Rejected mails are only logged; an empty 200 is always returned so App
  Engine does not retry delivery.
  """
  try:
    _process_incoming_mail(request.raw_post_data, recipients)
  except InvalidIncomingEmailError, err:
    logging.debug(str(err))
  return HttpTextResponse('')
def _process_incoming_mail(raw_message, recipients):
  """Process an incoming email message.

  Parses the mail, locates the Issue from the '(issue NNN)' subject
  suffix, stores the body as a published Message, and adds the sender to
  the issue's reviewers when not already involved.

  Args:
    raw_message: The raw RFC822 message text.
    recipients: The envelope recipient header string.

  Raises:
    InvalidIncomingEmailError: mail sent by App Engine itself, no issue id
      in the subject, unknown issue id, or an empty body.
  """
  recipients = [x[1] for x in email.utils.getaddresses([recipients])]
  incoming_msg = mail.InboundEmailMessage(raw_message)
  # Ignore our own outgoing notification mails bounced back at us.
  if 'X-Google-Appengine-App-Id' in incoming_msg.original:
    raise InvalidIncomingEmailError('Mail sent by App Engine')
  subject = incoming_msg.subject or ''
  match = re.search(r'\(issue *(?P<id>\d+)\)$', subject)
  if match is None:
    # Bug fix: the subject used to be passed as a second exception
    # argument instead of being %-formatted into the message.
    raise InvalidIncomingEmailError('No issue id found: %s' % subject)
  issue_id = int(match.groupdict()['id'])
  issue = models.Issue.get_by_id(issue_id)
  if issue is None:
    raise InvalidIncomingEmailError('Unknown issue ID: %d' % issue_id)
  sender = email.utils.parseaddr(incoming_msg.sender)[1]
  body = None
  for _, payload in incoming_msg.bodies('text/plain'):
    # FIXME(andi): Remove this when issue 2383 is fixed.
    # 8bit encoding results in UnknownEncodingError, see
    # http://code.google.com/p/googleappengine/issues/detail?id=2383
    # As a workaround we try to decode the payload ourselves.
    if payload.encoding == '8bit' and payload.charset:
      body = payload.payload.decode(payload.charset)
    else:
      body = payload.decode()
    break
  if body is None or not body.strip():
    raise InvalidIncomingEmailError('Ignoring empty message.')
  elif len(body) > django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE:
    # see issue325, truncate huge bodies
    trunc_msg = '... (message truncated)'
    end = django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE - len(trunc_msg)
    body = body[:end]
    body += trunc_msg
  # If the subject is long, this might come wrapped into more than one line.
  subject = ' '.join([x.strip() for x in subject.splitlines()])
  msg = models.Message(issue=issue, parent=issue,
                       subject=subject,
                       sender=db.Email(sender),
                       recipients=[db.Email(x) for x in recipients],
                       date=datetime.datetime.now(),
                       text=db.Text(body),
                       draft=False)
  msg.put()
  # Add sender to reviewers if needed.
  all_emails = [str(x).lower()
                for x in [issue.owner.email()]+issue.reviewers+issue.cc]
  if sender.lower() not in all_emails:
    query = models.Account.all().filter('lower_email =', sender.lower())
    account = query.get()
    if account is not None:
      issue.reviewers.append(account.email)  # e.g. account.email is CamelCase
    else:
      issue.reviewers.append(db.Email(sender))
    issue.put()
@login_required
def xsrf_token(request):
  """/xsrf_token - Return the user's XSRF token.

  This is used by tools like git-cl that need to be able to interact with the
  site on the user's behalf. A custom header named X-Requesting-XSRF-Token must
  be included in the HTTP request; an error is returned otherwise.
  """
  # `in` replaces the deprecated dict.has_key() (removed in Python 3).
  if 'HTTP_X_REQUESTING_XSRF_TOKEN' not in request.META:
    return HttpTextResponse(
        'Please include a header named X-Requesting-XSRF-Token '
        '(its content doesn\'t matter).',
        status=400)
  return HttpTextResponse(models.Account.current_user_account.get_xsrf_token())
def customized_upload_py(request):
  """/static/upload.py - Return patched upload.py with appropiate auth type and
  default review server setting.

  This is used to let the user download a customized upload.py script
  for hosted Rietveld instances.
  """
  # try/finally guarantees the file handle is closed even if read() raises
  # (the old code leaked the handle in that case).
  f = open(django_settings.UPLOAD_PY_SOURCE)
  try:
    source = f.read()
  finally:
    f.close()
  # When served from a Google Apps instance, the account namespace needs to be
  # switched to "Google Apps only".
  if ('AUTH_DOMAIN' in request.META
      and request.META['AUTH_DOMAIN'] != 'gmail.com'):
    source = source.replace('AUTH_ACCOUNT_TYPE = "GOOGLE"',
                            'AUTH_ACCOUNT_TYPE = "HOSTED"')
  # On a non-standard instance, the default review server is changed to the
  # current hostname. This might give weird results when using versioned appspot
  # URLs (eg. 1.latest.codereview.appspot.com), but this should only affect
  # testing.
  if request.META['HTTP_HOST'] != 'codereview.appspot.com':
    review_server = request.META['HTTP_HOST']
    if request.is_secure():
      review_server = 'https://' + review_server
    source = source.replace('DEFAULT_REVIEW_SERVER = "codereview.appspot.com"',
                            'DEFAULT_REVIEW_SERVER = "%s"' % review_server)
  return HttpResponse(source, content_type='text/x-python; charset=utf-8')
@post_required
def calculate_delta(request):
  """/calculate_delta - Calculate deltas for a patchset.

  This URL is called by taskqueue to calculate deltas behind the
  scenes. Returning a HttpResponse with any 2xx status means that the
  task was finished successfully. Raising an exception means that the
  taskqueue will retry to run the task.

  This code is similar to the code in _get_patchset_info() which is
  run when a patchset should be displayed in the UI.
  """
  key = request.POST.get('key')
  # All invalid-input paths return an empty 200 so the task is not retried.
  if not key:
    logging.debug('No key given.')
    return HttpResponse()
  try:
    patchset = models.PatchSet.get(key)
  except (db.KindError, db.BadKeyError), err:
    logging.debug('Invalid PatchSet key %r: %s' % (key, err))
    return HttpResponse()
  if patchset is None:  # e.g. PatchSet was deleted inbetween
    return HttpResponse()
  patchset_id = patchset.key().id()
  patchsets = None
  # Only patches whose delta hasn't been computed yet are processed, so
  # re-running this task is safe.
  for patch in patchset.patch_set.filter('delta_calculated =', False):
    if patchsets is None:
      # patchsets is retrieved on first iteration because patchsets
      # isn't needed outside the loop at all.
      patchsets = list(patchset.issue.patchset_set.order('created'))
    patch.delta = _calculate_delta(patch, patchset_id, patchsets)
    patch.delta_calculated = True
    patch.put()
  return HttpResponse()
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Diff rendering in HTML for Rietveld."""
import cgi
import difflib
import re
from google.appengine.api import users
from django.conf import settings
from django.template import loader, RequestContext
from codereview import intra_region_diff
from codereview import models
from codereview import patching
from codereview import utils
# NOTE: The SplitPatch function is duplicated in upload.py, keep them in sync.
def SplitPatch(data):
  """Split a combined svn diff into per-file pieces.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuples (filename, text) where text is the portion of the
    svn diff output pertaining to filename.
  """
  pieces = []
  current_name = None
  current_diff = []
  for line in data.splitlines(True):
    starts_new_file = None
    if line.startswith('Index:'):
      starts_new_file = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # Subversion separates directories with '/' in modification headers
      # but with '\' in property-change headers on Windows.  Normalize so
      # the same file is not reported twice.
      prop_name = line.split(':', 1)[1].strip().replace('\\', '/')
      if prop_name != current_name:
        # Property changes without a content diff: start a new piece.
        starts_new_file = prop_name
    if starts_new_file:
      if current_name and current_diff:
        pieces.append((current_name, ''.join(current_diff)))
      current_name = starts_new_file
      current_diff = [line]
      continue
    if current_diff is not None:
      current_diff.append(line)
  if current_name and current_diff:
    pieces.append((current_name, ''.join(current_diff)))
  return pieces
def ParsePatchSet(patchset):
  """Parse a patch set into individual patches.

  Args:
    patchset: a models.PatchSet instance.

  Returns:
    A list of models.Patch instances, one per file in the patch set.
  """
  return [models.Patch(patchset=patchset,
                       text=utils.to_dbtext(text),
                       filename=filename,
                       parent=patchset)
          for filename, text in SplitPatch(patchset.data)]
def RenderDiffTableRows(request, old_lines, chunks, patch,
                        colwidth=settings.DEFAULT_COLUMN_WIDTH, debug=False,
                        context=settings.DEFAULT_CONTEXT):
  """Render the HTML table rows for a side-by-side diff for a patch.

  Args:
    request: Django Request object.
    old_lines: List of lines representing the original file.
    chunks: List of chunks as returned by patching.ParsePatchToChunks().
    patch: A models.Patch instance.
    colwidth: Optional column width (default 80).
    debug: Optional debugging flag (default False).
    context: Maximum number of rows surrounding a change (default CONTEXT).

  Yields:
    Strings; each one renders a complete pair of lines of the
    side-by-side diff, possibly including comments, and may contain
    several <tr> elements.
  """
  raw_rows = _RenderDiffTableRows(request, old_lines, chunks, patch,
                                  colwidth, debug)
  return _CleanupTableRowsGenerator(raw_rows, context)
def RenderDiff2TableRows(request, old_lines, old_patch, new_lines, new_patch,
                         colwidth=settings.DEFAULT_COLUMN_WIDTH, debug=False,
                         context=settings.DEFAULT_CONTEXT):
  """Render the HTML table rows for a side-by-side diff between two patches.

  Args:
    request: Django Request object.
    old_lines: List of lines representing the patched file on the left.
    old_patch: The models.Patch instance corresponding to old_lines.
    new_lines: List of lines representing the patched file on the right.
    new_patch: The models.Patch instance corresponding to new_lines.
    colwidth: Optional column width (default 80).
    debug: Optional debugging flag (default False).
    context: Maximum number of visible context lines (default
      settings.DEFAULT_CONTEXT).

  Yields:
    Strings; each one renders a complete pair of lines of the
    side-by-side diff, possibly including comments, and may contain
    several <tr> elements.
  """
  raw_rows = _RenderDiff2TableRows(request, old_lines, old_patch,
                                   new_lines, new_patch, colwidth, debug)
  return _CleanupTableRowsGenerator(raw_rows, context)
def _CleanupTableRowsGenerator(rows, context):
  """Post-process rows from _TableRowGenerator for final output.

  Args:
    rows: Iterable of (tag, text) tuples.
    context: Maximum number of visible context lines.

  Yields:
    Row strings; runs of rows tagged 'equal' may be contracted via
    _ShortenBuffer().  On a row tagged 'error', yields None and stops.
  """
  pending_equal = []
  for tag, text in rows:
    if tag == 'equal':
      pending_equal.append(text)
      continue
    # A non-equal row ends the current run of equal rows: flush it,
    # possibly contracted, before emitting this row.
    for shortened in _ShortenBuffer(pending_equal, context):
      yield shortened
    pending_equal = []
    yield text
    if tag == 'error':
      yield None
      break
  if pending_equal:
    for shortened in _ShortenBuffer(pending_equal, context):
      yield shortened
def _ShortenBuffer(buffer, context):
"""Render a possibly contracted series of HTML table rows.
Args:
buffer: a list of strings representing HTML table rows.
context: Maximum number of visible context lines. If None all lines are
returned.
Yields:
If the buffer has fewer than 3 times context items, yield all
the items. Otherwise, yield the first context items, a single
table row representing the contraction, and the last context
items.
"""
if context is None or len(buffer) < 3*context:
for t in buffer:
yield t
else:
last_id = None
for t in buffer[:context]:
m = re.match('^<tr( name="hook")? id="pair-(?P<rowcount>\d+)">', t)
if m:
last_id = int(m.groupdict().get("rowcount"))
yield t
skip = len(buffer) - 2*context
expand_link = []
if skip > 3*context:
expand_link.append(('<a href="javascript:M_expandSkipped(%(before)d, '
'%(after)d, \'t\', %(skip)d)">'
'Expand %(context)d before'
'</a> | '))
expand_link.append(('<a href="javascript:M_expandSkipped(%(before)d, '
'%(after)d, \'a\', %(skip)d)">Expand all</a>'))
if skip > 3*context:
expand_link.append((' | '
'<a href="javascript:M_expandSkipped(%(before)d, '
'%(after)d, \'b\', %(skip)d)">'
'Expand %(context)d after'
'</a>'))
expand_link = ''.join(expand_link) % {'before': last_id+1,
'after': last_id+skip,
'skip': last_id,
'context': max(context, None)}
yield ('<tr id="skip-%d"><td colspan="2" align="center" '
'style="background:lightblue">'
'(...skipping <span id="skipcount-%d">%d</span> matching lines...) '
'<span id="skiplinks-%d">%s</span> '
'<span id="skiploading-%d" style="visibility:hidden;">Loading...'
'</span>'
'</td></tr>\n' % (last_id, last_id, skip,
last_id, expand_link, last_id))
for t in buffer[-context:]:
yield t
def _RenderDiff2TableRows(request, old_lines, old_patch, new_lines, new_patch,
                          colwidth=settings.DEFAULT_COLUMN_WIDTH, debug=False):
  """Internal version of RenderDiff2TableRows().

  Args:
    The same as for RenderDiff2TableRows.

  Yields:
    Tuples (tag, row) where tag is an indication of the row type.
  """
  old_dict = {}
  new_dict = {}
  # Collect visible comments for each side, keyed by line number.
  for patch, dct in [(old_patch, old_dict), (new_patch, new_dict)]:
    # XXX GQL doesn't support OR yet... Otherwise we'd be using that.
    # NOTE(review): only right-side (left = FALSE) comments are fetched for
    # both patches — presumably because in a patch-vs-patch diff each column
    # shows the patched ("new") side of its own patch; confirm.
    for comment in models.Comment.gql(
        'WHERE patch = :1 AND left = FALSE ORDER BY date', patch):
      if comment.draft and comment.author != request.user:
        continue  # Only show your own drafts
      comment.complete()
      lst = dct.setdefault(comment.lineno, [])
      lst.append(comment)
  # Both snapshot tags are 'new' for the same reason as the query above.
  # NOTE(review): the len(...)+1 appears to account for 1-based line
  # numbering — confirm against _TableRowGenerator's use of old_max/new_max.
  return _TableRowGenerator(old_patch, old_dict, len(old_lines)+1, 'new',
                            new_patch, new_dict, len(new_lines)+1, 'new',
                            _GenerateTriples(old_lines, new_lines),
                            colwidth, debug, request)
def _GenerateTriples(old_lines, new_lines):
"""Helper for _RenderDiff2TableRows yielding input for _TableRowGenerator.
Args:
old_lines: List of lines representing the patched file on the left.
new_lines: List of lines representing the patched file on the right.
Yields:
Tuples (tag, old_slice, new_slice) where tag is a tag as returned by
difflib.SequenceMatchser.get_opcodes(), and old_slice and new_slice
are lists of lines taken from old_lines and new_lines.
"""
sm = difflib.SequenceMatcher(None, old_lines, new_lines)
for tag, i1, i2, j1, j2 in sm.get_opcodes():
yield tag, old_lines[i1:i2], new_lines[j1:j2]
def _GetComments(request):
  """Collect the comments attached to request.patch, split by side.

  Args:
    request: Django Request object; request.patch and request.user are read.

  Returns:
    A 2-tuple (old, new) of dictionaries mapping line number to a list of
    Comment entities, for the left and right side respectively.
  """
  old_dict = {}
  new_dict = {}
  # XXX GQL doesn't support OR yet... Otherwise we'd be using
  # .gql('WHERE patch = :1 AND (draft = FALSE OR author = :2) ORDER BY data',
  #      patch, request.user)
  for comment in models.Comment.gql('WHERE patch = :1 ORDER BY date',
                                    request.patch):
    if comment.draft and comment.author != request.user:
      # Draft comments are private to their author.
      continue
    comment.complete()
    target = old_dict if comment.left else new_dict
    target.setdefault(comment.lineno, []).append(comment)
  return old_dict, new_dict
def _RenderDiffTableRows(request, old_lines, chunks, patch,
                         colwidth=settings.DEFAULT_COLUMN_WIDTH, debug=False):
  """Internal version of RenderDiffTableRows().

  Args:
    The same as for RenderDiffTableRows.

  Yields:
    Tuples (tag, row) where tag is an indication of the row type.
  """
  if patch:
    old_dict, new_dict = _GetComments(request)
  else:
    old_dict, new_dict = {}, {}
  old_max, new_max = _ComputeLineCounts(old_lines, chunks)
  return _TableRowGenerator(patch, old_dict, old_max, 'old',
                            patch, new_dict, new_max, 'new',
                            patching.PatchChunks(old_lines, chunks),
                            colwidth, debug, request)
def _TableRowGenerator(old_patch, old_dict, old_max, old_snapshot,
                       new_patch, new_dict, new_max, new_snapshot,
                       triple_iterator, colwidth=settings.DEFAULT_COLUMN_WIDTH,
                       debug=False, request=None):
  """Helper function to render side-by-side table rows.

  Args:
    old_patch: First models.Patch instance.
    old_dict: Dictionary with line numbers as keys and comments as values (left)
    old_max: Line count of the patch on the left.
    old_snapshot: A tag used in the comments form.
    new_patch: Second models.Patch instance.
    new_dict: Same as old_dict, but for the right side.
    new_max: Line count of the patch on the right.
    new_snapshot: A tag used in the comments form.
    triple_iterator: Iterator that yields (tag, old, new) triples.
    colwidth: Optional column width (default 80).
    debug: Optional debugging flag (default False).
    request: Optional Django Request object, passed through to rendering.

  Yields:
    Tuples (tag, row) where tag is an indication of the row type and
    row is an HTML fragment representing one or more <td> elements.
  """
  diff_params = intra_region_diff.GetDiffParams(dbg=debug)
  # Width for line numbers: one more digit than the larger side needs.
  ndigits = 1 + max(len(str(old_max)), len(str(new_max)))
  indent = 1 + ndigits
  old_offset = new_offset = 0
  row_count = 0
  # Render a row with a message if a side is empty or both sides are equal.
  if old_patch == new_patch and (old_max == 0 or new_max == 0):
    if old_max == 0:
      msg_old = '(Empty)'
    else:
      msg_old = ''
    if new_max == 0:
      msg_new = '(Empty)'
    else:
      msg_new = ''
    yield '', ('<tr><td class="info">%s</td>'
               '<td class="info">%s</td></tr>' % (msg_old, msg_new))
  elif old_patch is None or new_patch is None:
    msg_old = msg_new = ''
    if old_patch is None:
      msg_old = '(no file at all)'
    if new_patch is None:
      msg_new = '(no file at all)'
    yield '', ('<tr><td class="info">%s</td>'
               '<td class="info">%s</td></tr>' % (msg_old, msg_new))
  elif old_patch != new_patch and old_patch.lines == new_patch.lines:
    yield '', ('<tr><td class="info" colspan="2">'
               '(Both sides are equal)</td></tr>')
  for tag, old, new in triple_iterator:
    if tag.startswith('error'):
      # An error triple aborts rendering; _CleanupTableRowsGenerator stops
      # on rows tagged 'error'.
      yield 'error', '<tr><td><h3>%s</h3></td></tr>\n' % cgi.escape(tag)
      return
    # Running offsets track absolute (1-based) line numbers per side.
    old1 = old_offset
    old_offset = old2 = old1 + len(old)
    new1 = new_offset
    new_offset = new2 = new1 + len(new)
    old_buff = []
    new_buff = []
    frag_list = []
    do_ir_diff = tag == 'replace' and intra_region_diff.CanDoIRDiff(old, new)
    for i in xrange(max(len(old), len(new))):
      row_count += 1
      old_lineno = old1 + i + 1
      new_lineno = new1 + i + 1
      # A side is "valid" while it still has a line at this row; the
      # shorter side of an unbalanced chunk renders blank cells.
      old_valid = old1+i < old2
      new_valid = new1+i < new2
      # Start rendering the first row
      frags = []
      if i == 0 and tag != 'equal':
        # Mark the first row of each non-equal chunk as a 'hook'.
        frags.append('<tr name="hook"')
      else:
        frags.append('<tr')
      frags.append(' id="pair-%d">' % row_count)
      old_intra_diff = ''
      new_intra_diff = ''
      if old_valid:
        old_intra_diff = old[i]
      if new_valid:
        new_intra_diff = new[i]
      frag_list.append(frags)
      if do_ir_diff:
        # Don't render yet. Keep saving state necessary to render the whole
        # region until we have encountered all the lines in the region.
        old_buff.append([old_valid, old_lineno, old_intra_diff])
        new_buff.append([new_valid, new_lineno, new_intra_diff])
      else:
        # We render line by line as usual if do_ir_diff is false
        old_intra_diff = intra_region_diff.Break(
          old_intra_diff, 0, colwidth, "\n" + " "*indent)
        new_intra_diff = intra_region_diff.Break(
          new_intra_diff, 0, colwidth, "\n" + " "*indent)
        old_buff_out = [[old_valid, old_lineno,
                         (old_intra_diff, True, None)]]
        new_buff_out = [[new_valid, new_lineno,
                         (new_intra_diff, True, None)]]
        for tg, frag in _RenderDiffInternal(old_buff_out, new_buff_out,
                                            ndigits, tag, frag_list,
                                            do_ir_diff,
                                            old_dict, new_dict,
                                            old_patch, new_patch,
                                            old_snapshot, new_snapshot,
                                            debug, request):
          yield tg, frag
        frag_list = []
    if do_ir_diff:
      # So this was a replace block which means that the whole region still
      # needs to be rendered.
      old_lines = [b[2] for b in old_buff]
      new_lines = [b[2] for b in new_buff]
      ret = intra_region_diff.IntraRegionDiff(old_lines, new_lines,
                                              diff_params)
      old_chunks, new_chunks, ratio = ret
      old_tag = 'old'
      new_tag = 'new'
      old_diff_out = intra_region_diff.RenderIntraRegionDiff(
        old_lines, old_chunks, old_tag, ratio,
        limit=colwidth, indent=indent, mark_tabs=True,
        dbg=debug)
      new_diff_out = intra_region_diff.RenderIntraRegionDiff(
        new_lines, new_chunks, new_tag, ratio,
        limit=colwidth, indent=indent, mark_tabs=True,
        dbg=debug)
      # Substitute the rendered output back into the buffered rows.
      for (i, b) in enumerate(old_buff):
        b[2] = old_diff_out[i]
      for (i, b) in enumerate(new_buff):
        b[2] = new_diff_out[i]
      for tg, frag in _RenderDiffInternal(old_buff, new_buff,
                                          ndigits, tag, frag_list,
                                          do_ir_diff,
                                          old_dict, new_dict,
                                          old_patch, new_patch,
                                          old_snapshot, new_snapshot,
                                          debug, request):
        yield tg, frag
      old_buff = []
      new_buff = []
def _RenderDiffInternal(old_buff, new_buff, ndigits, tag, frag_list,
                        do_ir_diff, old_dict, new_dict,
                        old_patch, new_patch,
                        old_snapshot, new_snapshot,
                        debug, request):
  """Helper for _TableRowGenerator().

  Renders the buffered row state (one entry per display row, parallel in
  old_buff/new_buff/frag_list) into complete HTML rows, yielding
  (tag, html) tuples.  Each entry in old_buff/new_buff is
  [valid, lineno, (intra_diff_html, has_newline, debug_info)].
  """
  obegin = (intra_region_diff.BEGIN_TAG %
            intra_region_diff.COLOR_SCHEME['old']['match'])
  nbegin = (intra_region_diff.BEGIN_TAG %
            intra_region_diff.COLOR_SCHEME['new']['match'])
  oend = intra_region_diff.END_TAG
  nend = oend
  user = users.get_current_user()
  for i in xrange(len(old_buff)):
    tg = tag
    old_valid, old_lineno, old_out = old_buff[i]
    new_valid, new_lineno, new_out = new_buff[i]
    old_intra_diff, old_has_newline, old_debug_info = old_out
    new_intra_diff, new_has_newline, new_debug_info = new_out
    # frag_list[i] already holds the opening '<tr ...>' fragments.
    frags = frag_list[i]
    # Render left text column
    frags.append(_RenderDiffColumn(old_valid, tag, ndigits,
                                   old_lineno, obegin, oend, old_intra_diff,
                                   do_ir_diff, old_has_newline, 'old'))
    # Render right text column
    frags.append(_RenderDiffColumn(new_valid, tag, ndigits,
                                   new_lineno, nbegin, nend, new_intra_diff,
                                   do_ir_diff, new_has_newline, 'new'))
    # End rendering the first row
    frags.append('</tr>\n')
    if debug:
      # Emit an extra row with per-side diff debugging info.
      frags.append('<tr>')
      if old_debug_info:
        frags.append('<td class="debug-info">%s</td>' %
                     old_debug_info.replace('\n', '<br>'))
      else:
        frags.append('<td></td>')
      if new_debug_info:
        frags.append('<td class="debug-info">%s</td>' %
                     new_debug_info.replace('\n', '<br>'))
      else:
        frags.append('<td></td>')
      frags.append('</tr>\n')
    if old_patch or new_patch:
      # Start rendering the second row
      if ((old_valid and old_lineno in old_dict) or
          (new_valid and new_lineno in new_dict)):
        # Rows that carry comments get a distinct tag and a 'hook' marker.
        tg += '_comment'
        frags.append('<tr class="inline-comments" name="hook">')
      else:
        frags.append('<tr class="inline-comments">')
      # Render left inline comments
      frags.append(_RenderInlineComments(old_valid, old_lineno, old_dict,
                                         user, old_patch, old_snapshot, 'old',
                                         request))
      # Render right inline comments
      frags.append(_RenderInlineComments(new_valid, new_lineno, new_dict,
                                         user, new_patch, new_snapshot, 'new',
                                         request))
      # End rendering the second row
      frags.append('</tr>\n')
    # Yield the combined fragments
    yield tg, ''.join(frags)
def _RenderDiffColumn(line_valid, tag, ndigits, lineno, begin, end,
intra_diff, do_ir_diff, has_newline, prefix):
"""Helper function for _RenderDiffInternal().
Returns:
A rendered column.
"""
if line_valid:
cls_attr = '%s%s' % (prefix, tag)
if tag == 'equal':
lno = '%*d' % (ndigits, lineno)
else:
lno = _MarkupNumber(ndigits, lineno, 'u')
if tag == 'replace':
col_content = ('%s%s %s%s' % (begin, lno, end, intra_diff))
# If IR diff has been turned off or there is no matching new line at
# the end then switch to dark background CSS style.
if not do_ir_diff or not has_newline:
cls_attr = cls_attr + '1'
else:
col_content = '%s %s' % (lno, intra_diff)
return '<td class="%s" id="%scode%d">%s</td>' % (cls_attr, prefix,
lineno, col_content)
else:
return '<td class="%sblank"></td>' % prefix
def _RenderInlineComments(line_valid, lineno, data, user,
patch, snapshot, prefix, request):
"""Helper function for _RenderDiffInternal().
Returns:
Rendered comments.
"""
comments = []
if line_valid:
comments.append('<td id="%s-line-%s">' % (prefix, lineno))
if lineno in data:
comments.append(
_ExpandTemplate('inline_comment.html',
request,
user=user,
patch=patch,
patchset=patch.patchset,
issue=patch.patchset.issue,
snapshot=snapshot,
side='a' if prefix == 'old' else 'b',
comments=data[lineno],
lineno=lineno,
))
comments.append('</td>')
else:
comments.append('<td></td>')
return ''.join(comments)
def RenderUnifiedTableRows(request, parsed_lines):
  """Render the HTML table rows for a unified diff for a patch.

  Args:
    request: Django Request object.
    parsed_lines: List of (old_line_no, new_line_no, line_text) tuples,
      where a line number is falsy if absent on that side.

  Returns:
    A list of HTML table rows.
  """
  old_dict, new_dict = _GetComments(request)
  rows = []
  for old_line_no, new_line_no, line_text in parsed_lines:
    row1_id = row2_id = ''
    # When a line is unchanged (both line numbers present) prefer the old
    # column's line numbers for attaching comments.
    if old_line_no:
      row1_id = 'id="oldcode%d"' % old_line_no
      row2_id = 'id="old-line-%d"' % old_line_no
    elif new_line_no:
      row1_id = 'id="newcode%d"' % new_line_no
      row2_id = 'id="new-line-%d"' % new_line_no
    first_char = line_text[0]
    if first_char == '+':
      style = 'udiffadd'
    elif first_char == '-':
      style = 'udiffremove'
    else:
      style = ''
    rows.append('<tr><td class="udiff %s" %s>%s</td></tr>' %
                (style, row1_id, cgi.escape(line_text)))
    frags = []
    if old_line_no in old_dict or new_line_no in new_dict:
      frags.append('<tr class="inline-comments" name="hook">')
      if old_line_no in old_dict:
        dct, line_no, snapshot = old_dict, old_line_no, 'old'
      else:
        dct, line_no, snapshot = new_dict, new_line_no, 'new'
      frags.append(_RenderInlineComments(True, line_no, dct, request.user,
                                         request.patch, snapshot, snapshot,
                                         request))
    else:
      frags.append('<tr class="inline-comments">')
      frags.append('<td ' + row2_id + '></td>')
    frags.append('</tr>')
    rows.append(''.join(frags))
  return rows
def _ComputeLineCounts(old_lines, chunks):
"""Compute the length of the old and new sides of a diff.
Args:
old_lines: List of lines representing the original file.
chunks: List of chunks as returned by patching.ParsePatchToChunks().
Returns:
A tuple (old_len, new_len) representing len(old_lines) and
len(new_lines), where new_lines is the list representing the
result of applying the patch chunks to old_lines, however, without
actually computing new_lines.
"""
old_len = len(old_lines)
new_len = old_len
if chunks:
(_, old_b), (_, new_b), old_lines, _ = chunks[-1]
new_len += new_b - old_b
return old_len, new_len
def _MarkupNumber(ndigits, number, tag):
"""Format a number in HTML in a given width with extra markup.
Args:
ndigits: the total width available for formatting
number: the number to be formatted
tag: HTML tag name, e.g. 'u'
Returns:
An HTML string that displays as ndigits wide, with the
number right-aligned and surrounded by an HTML tag; for example,
_MarkupNumber(42, 4, 'u') returns ' <u>42</u>'.
"""
formatted_number = str(number)
space_prefix = ' ' * (ndigits - len(formatted_number))
return '%s<%s>%s</%s>' % (space_prefix, tag, formatted_number, tag)
def _ExpandTemplate(name, request, **params):
  """Wrapper around django.template.loader.render_to_string().

  For convenience, this takes keyword arguments instead of a dict, and
  returns the rendered template as a UTF-8 encoded byte string.
  """
  rendered = loader.render_to_string(name, params,
                                     context_instance=RequestContext(request))
  return rendered.encode('utf-8')
| Python |
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options] [path...]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
Perforce
CVS
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import ConfigParser
import cookielib
import errno
import fnmatch
import getpass
import logging
import marshal
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1

# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"

# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.appspot.com"

# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024

# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_PERFORCE = "Perforce"
VCS_CVS = "CVS"
VCS_UNKNOWN = "Unknown"

# Maps lowercase names and common command aliases to the canonical VCS
# names above.
VCS_ABBREVIATIONS = {
    VCS_MERCURIAL.lower(): VCS_MERCURIAL,
    "hg": VCS_MERCURIAL,
    VCS_SUBVERSION.lower(): VCS_SUBVERSION,
    "svn": VCS_SUBVERSION,
    VCS_PERFORCE.lower(): VCS_PERFORCE,
    "p4": VCS_PERFORCE,
    VCS_GIT.lower(): VCS_GIT,
    VCS_CVS.lower(): VCS_CVS,
}

# The result of parsing Subversion's [auto-props] setting.
# None until first populated on demand.
svn_auto_props_map = None
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      last_email = last_email_file.readline().strip("\n")
      last_email_file.close()
      prompt += " [%s]" % last_email
    except IOError:
      # Best effort only: an unreadable cache file just means no suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      last_email_file.write(email)
      last_email_file.close()
    except IOError:
      # Best effort only: failing to save the address is not fatal.
      pass
  else:
    email = last_email
  return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1."""
  sys.stderr.write('%s\n' % msg)
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # NOTE(review): this overwrites BaseException.args (normally a tuple)
    # with the ClientLogin response dict; callers use .reason/.info below.
    self.args = args
    # ClientLogin reports the failure kind under the "Error" key,
    # e.g. "BadAuthentication" or "CaptchaRequired" (see _Authenticate).
    self.reason = args["Error"]
    self.info = args.get("Info", None)
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False, account_type=AUTH_ACCOUNT_TYPE):
    """Creates a new AbstractRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
      account_type: Account type used for authentication. Defaults to
        AUTH_ACCOUNT_TYPE.
    """
    # NOTE(review): extra_headers has a mutable default; it is only read
    # (iterated in _CreateRequest), never mutated, so this is benign here.
    self.host = host
    if (not self.host.startswith("http://") and
        not self.host.startswith("https://")):
      self.host = "http://" + self.host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    self.account_type = account_type
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data, headers={"Accept": "text/plain"})
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email:    The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = self.account_type
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # NOTE(review): splitting on every '=' would corrupt values that
      # contain '='; the "Auth" token value apparently never does — confirm.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # ClientLogin reports authentication failures as 403 with a
        # key=value body; surface them as ClientLoginError.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener ignores redirects, so a successful login surfaces here
      # as an HTTPError carrying the 302 response.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response (or a 302) and
    directs us to authenticate ourselves with ClientLogin.
    """
    # Up to three attempts at collecting working credentials.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        print >>sys.stderr, ''
        if e.reason == "BadAuthentication":
          if e.info == "InvalidSecondFactor":
            print >>sys.stderr, (
                "Use an application-specific password instead "
                "of your regular account password.\n"
                "See http://www.google.com/"
                "support/accounts/bin/answer.py?answer=185833")
          else:
            print >>sys.stderr, "Invalid username or password."
        elif e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.\n"
              "If you are using a Google Apps account the URL is:\n"
              "https://www.google.com/a/yourdomain.com/UnlockCaptcha")
        elif e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
        elif e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
        elif e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
        elif e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          # A disabled account cannot succeed on retry; give up.
          break
        elif e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
        elif e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
        else:
          # Unknown error.
          raise
        print >>sys.stderr, ''
        # Retry with fresh credentials.
        continue
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           extra_headers=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      extra_headers: Dict containing additional HTTP headers that should be
        included in the request (string header names mapped to their values),
        or None to not include any additional headers.
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        if extra_headers:
          for header, value in extra_headers.items():
            req.add_header(header, value)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401 or e.code == 302:
            # Auth expired or missing: re-authenticate and retry.
            self._Authenticate()
          elif e.code == 301:
            # Handle permanent redirect manually.
            url = e.info()["location"]
            url_loc = urlparse.urlparse(url)
            self.host = '%s://%s' % (url_loc[0], url_loc[1])
          elif e.code >= 500:
            ErrorExit(e.read())
          else:
            raise
    finally:
      # Restore the process-wide socket timeout no matter what happened.
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Redirects are "ignored" because no HTTPRedirectHandler is installed;
    _GetAuthCookie relies on seeing the raw 302 response.

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
class CondensedHelpFormatter(optparse.IndentedHelpFormatter):
  """Help formatter that reclaims horizontal space.

  Group headings lose their indentation, and the short/long forms of an
  option are collapsed, e.g. '-o ARG, --opt=ARG' becomes '-o --opt ARG'.
  """

  def format_heading(self, heading):
    # Headings start in column 0 regardless of the current indent level.
    return "%s:\n" % heading

  def format_option(self, option):
    # Temporarily back out one indent level so the option text starts
    # further to the left, then restore the level for subsequent options.
    self.dedent()
    formatted = optparse.HelpFormatter.format_option(self, option)
    self.indent()
    return formatted

  def format_option_strings(self, option):
    # Use a space instead of '=' between a long option and its metavar.
    self.set_long_opt_delimiter(" ")
    joined = optparse.HelpFormatter.format_option_strings(self, option)
    variants = joined.split(", ")
    if len(variants) > 1:
      if option.takes_value():
        # Keep the METAVAR only on the last variant.
        variants = [v.split()[0] for v in variants[:-1]] + variants[-1:]
      joined = " ".join(variants)
    return joined
# Command-line interface definition.  Options are grouped by theme; the
# parsed values are consumed by RealMain()/GetRpcServer() further down.
parser = optparse.OptionParser(
    usage="%prog [options] [-- diff_options] [path...]",
    add_help_option=False,
    formatter=CondensedHelpFormatter()
)
parser.add_option("-h", "--help", action="store_true",
                  help="Show this help message and exit.")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging verbosity options (all write the same "verbose" dest).
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs.")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
group.add_option("--print_diffs", dest="print_diffs", action="store_true",
                 help="Print full diffs.")
# Review server connection/authentication options.
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default=DEFAULT_REVIEW_SERVER,
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
group.add_option("--account_type", action="store", dest="account_type",
                 metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
                 choices=["GOOGLE", "HOSTED"],
                 help=("Override the default account type "
                       "(defaults to '%default', "
                       "valid choices are 'GOOGLE' and 'HOSTED')."))
# Options describing the issue being created or updated.
group = parser.add_option_group("Issue options")
group.add_option("-t", "--title", action="store", dest="title",
                 help="New issue subject or new patch set title")
group.add_option("-m", "--message", action="store", dest="message",
                 default=None,
                 help="New issue description or new patch set message")
group.add_option("-F", "--file", action="store", dest="file",
                 default=None, help="Read the message above from file.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
                 default=False,
                 help="Make the issue restricted to reviewers and those CCed")
# Options controlling how the patch itself is produced and uploaded.
group = parser.add_option_group("Patch options")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
                 help="Base URL path for files (listed as \"Base URL\" when "
                      "viewing issue). If omitted, will be guessed automatically "
                      "for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Base revision/branch/tree to diff against. Use "
                      "rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
group.add_option("-p", "--send_patch", action="store_true",
                 dest="send_patch", default=False,
                 help="Same as --send_mail, but include diff as an "
                      "attachment, and prepend email subject with 'PATCH:'.")
group.add_option("--vcs", action="store", dest="vcs",
                 metavar="VCS", default=None,
                 help=("Version control system (optional, usually upload.py "
                       "already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
                 dest="emulate_svn_auto_props", default=False,
                 help=("Emulate Subversion's auto properties feature."))
# Perforce-specific connection options (override P4 environment variables).
group = parser.add_option_group("Perforce-specific options "
                                "(overrides P4 environment variables)")
group.add_option("--p4_port", action="store", dest="p4_port",
                 metavar="P4_PORT", default=None,
                 help=("Perforce server and port (optional)"))
group.add_option("--p4_changelist", action="store", dest="p4_changelist",
                 metavar="P4_CHANGELIST", default=None,
                 help=("Perforce changelist id"))
group.add_option("--p4_client", action="store", dest="p4_client",
                 metavar="P4_CLIENT", default=None,
                 help=("Perforce client/workspace"))
group.add_option("--p4_user", action="store", dest="p4_user",
                 metavar="P4_USER", default=None,
                 help=("Perforce user"))
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
                 account_type=AUTH_ACCOUNT_TYPE):
  """Returns an instance of an AbstractRpcServer.

  Args:
    server: String containing the review server URL.
    email: String containing user's email address.
    host_override: If not None, string containing an alternate hostname to use
      in the host header.
    save_cookies: Whether authentication cookies should be saved to disk.
    account_type: Account type for authentication, either 'GOOGLE'
      or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer

  # If this is the dev_appserver, use fake authentication.
  host = (host_override or server).lower()
  if re.match(r'(http://)?localhost([:/]|$)', host):
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s. Override with --email" % email)
    server = rpc_server_class(
        server,
        # Credentials callback: the dev_appserver accepts any password.
        lambda: (email, "password"),
        host_override=host_override,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=save_cookies,
        account_type=account_type)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server

  def GetUserCredentials():
    """Prompts the user for a username and password.

    Closes over `email`, `server` and `host` from GetRpcServer; called
    lazily by the RPC server the first time authentication is needed.
    """
    # Create a local alias to the email variable to avoid Python's crazy
    # scoping rules.
    global keyring
    local_email = email
    if local_email is None:
      local_email = GetEmail("Email (login for uploading to %s)" % server)
    password = None
    if keyring:
      try:
        password = keyring.get_password(host, local_email)
      except:
        # Sadly, we have to trap all errors here as
        # gnomekeyring.IOError inherits from object. :/
        print "Failed to get password from keyring"
        # Disable keyring for the rest of this run.
        keyring = None
    if password is not None:
      print "Using password from system keyring."
    else:
      password = getpass.getpass("Password for %s: " % local_email)
      if keyring:
        answer = raw_input("Store password in system keyring?(y/N) ").strip()
        if answer == "y":
          keyring.set_password(host, local_email, password)
    return (local_email, password)

  # Production server: authenticate interactively on first use.
  return rpc_server_class(server,
                          GetUserCredentials,
                          host_override=host_override,
                          save_cookies=save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.

  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []

  def append_payload(value):
    # Body parts must be byte strings; encode unicode payloads as UTF-8.
    if isinstance(value, unicode):
      value = value.encode('utf-8')
    parts.append(value)

  for (field_name, field_value) in fields:
    parts.append('--' + BOUNDARY)
    parts.append('Content-Disposition: form-data; name="%s"' % field_name)
    parts.append('')
    append_payload(field_value)
  for (field_name, filename, file_value) in files:
    parts.append('--' + BOUNDARY)
    parts.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
                 (field_name, filename))
    parts.append('Content-Type: %s' % GetContentType(filename))
    parts.append('')
    append_payload(file_value)
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, CRLF.join(parts)
def GetContentType(filename):
  """Helper to guess the content-type from the filename."""
  guessed_type, _ = mimetypes.guess_type(filename)
  if guessed_type:
    return guessed_type
  # Fall back to the generic binary type when the extension is unknown.
  return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (On POSIX, subprocess searches PATH itself when shell=False.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCodeAndStderr(command, print_output=False,
                                    universal_newlines=True,
                                    env=os.environ):
  """Executes a command and returns the output from stdout, stderr and the return code.

  Args:
    command: Command to execute, as a list of arguments.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).
    env: Environment for the child process (default: os.environ); a copy
      is made, the caller's mapping is never mutated.

  Returns:
    Tuple (stdout, stderr, return code)
  """
  logging.info("Running %s", command)
  env = env.copy()
  # Force the C locale so that tool output parsed by callers (svn, git,
  # cvs messages) is not localized.
  env['LC_MESSAGES'] = 'C'
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines,
                       env=env)
  if print_output:
    # Echo stdout line by line while also capturing it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # NOTE(review): stderr is only drained after wait(); a child that fills
  # the stderr pipe buffer could deadlock here -- confirm expected output
  # sizes before reusing this for chatty commands.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, errout, p.returncode
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True,
                           env=os.environ):
  """Executes a command and returns the output from stdout and the return code."""
  # Delegate to the stderr-aware variant and drop the stderr component.
  stdout_text, _stderr_text, return_code = RunShellWithReturnCodeAndStderr(
      command, print_output, universal_newlines, env)
  return stdout_text, return_code
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
  """Runs command, exiting via ErrorExit on failure; returns its stdout.

  Unless silent_ok is true, empty output is also treated as an error.
  """
  output, exit_code = RunShellWithReturnCode(command, print_output,
                                             universal_newlines, env)
  if exit_code:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not output and not silent_ok:
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""

  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options

  def GetGUID(self):
    """Return string to distinguish the repository from others, for example to
    query all opened review issues for it"""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def PostProcessDiff(self, diff):
    """Return the diff with any special post processing this VCS needs, e.g.
    to include an svn-style "Index:"."""
    return diff

  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")

  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty.  For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files

  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""

    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads one base/current file to the server via rpc_server.Send."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      # Oversized files are only flagged on the server, not uploaded.
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate("  --> %s" % response_body)
        sys.exit(1)

    # Invert patch_list (file_id_str, filename) into filename -> file_id_str.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      if file_id_str.find("nobase") != -1:
        # The server marked this patch as not needing a base file.
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)

  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")

  def IsBinaryData(self, data):
    """Returns true if data contains a null byte."""
    # Derived from how Mercurial's heuristic, see
    # http://selenic.com/hg/file/848a6658069e/mercurial/util.py#l229
    return bool(data and "\0" in data)
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "REV" or "REV1:REV2"; rev_end stays None for a single rev.
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (ouput for start rev and end rev).
    self.svnls_cache = {}
    # Base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GetGUID(self):
    # The repository UUID uniquely identifies an SVN repository.
    return self._GetInfo("Repository UUID")

  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    return self.svn_base

  def _GuessBase(self, required):
    """Returns base URL for current diff.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    url = self._GetInfo("URL")
    if url:
      scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
      guess = ""
      # TODO(anatoli) - repository specific hacks should be handled by server
      if netloc == "svn.python.org" and scheme == "svn+ssh":
        path = "projects" + path
        scheme = "http"
        guess = "Python "
      elif netloc.endswith(".googlecode.com"):
        scheme = "http"
        guess = "Google Code "
      path = path + "/"
      base = urlparse.urlunparse((scheme, netloc, path, params,
                                  query, fragment))
      logging.info("Guessed %sbase = %s", guess, base)
      return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def _GetInfo(self, key):
    """Parses 'svn info' for current dir. Returns value for key or None"""
    for line in RunShell(["svn", "info"]).splitlines():
      if line.startswith(key + ": "):
        return line.split(":", 1)[1].strip()

  def _EscapeFilename(self, filename):
    """Escapes filename for SVN commands."""
    # A trailing "@" stops svn from interpreting an embedded "@" as a
    # peg-revision separator.
    if "@" in filename and not filename.endswith("@"):
      filename = "%s@" % filename
    return filename

  def GenerateDiff(self, args):
    """Runs "svn diff", aborting unless the output contains patches."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date':                ['Date', 'LastChangedDate'],
      'Revision':            ['Revision', 'LastChangedRevision', 'Rev'],
      'Author':              ['Author', 'LastChangedBy'],
      'HeadURL':             ['HeadURL', 'URL'],
      'Id':                  ['Id'],

      # Aliases
      'LastChangedDate':     ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy':       ['LastChangedBy', 'Author'],
      'URL':                 ['URL', 'HeadURL'],
    }

    def repl(m):
      # Expanded keyword ($Kw: value $) collapses to $Kw::  ...$ preserving
      # width; unexpanded stays $Kw$.
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)

    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)

  def GetUnknownFiles(self):
    """Returns files reported as '?' (unversioned) by "svn status"."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals",
                         self._EscapeFilename(filename)])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start,
               self._EscapeFilename(dirname) or "."]
        out, err, returncode = RunShellWithReturnCodeAndStderr(cmd)
        if returncode:
          # Directory might not yet exist at start revison
          # svn: Unable to find repository location for 'abc' in revision nnn
          if re.match('^svn: Unable to find repository location for .+ in revision \d+', err):
            old_files = ()
          else:
            ErrorExit("Failed to get status for %s:\n%s" % (filename, err))
        else:
          old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [self._EscapeFilename(dirname) or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      # Deleted: present only before; modified: present in both; else added.
      old_files, new_files = self.svnls_cache[dirname]
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status

  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile; status is an svn status prefix."""
    status = self.GetStatus(filename)
    base_content = None
    new_content = None

    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type",
                           self._EscapeFilename(filename)], silent_ok=True)
      base_content = ""
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if is_binary:
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        # filename must not be escaped. We already add an ampersand here.
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      else:
        mimetype = mimetype.strip()
      get_base = False
      # this test for binary is exactly the test prescribed by the
      # official SVN docs at
      # http://subversion.apache.org/faq.html#binary-files
      is_binary = (bool(mimetype) and
        not mimetype.startswith("text/") and
        mimetype not in ("image/x-xbitmap", "image/x-xpixmap"))
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        get_base = True
        if status[0] == "M":
          if not self.rev_end:
            new_content = self.ReadFile(filename)
          else:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
            new_content = RunShell(["svn", "cat", url],
                                   universal_newlines=True, silent_ok=True)
      else:
        get_base = True
      if get_base:
        # Binary content must not have newline translation applied.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content, ret_code = RunShellWithReturnCode(
            ["svn", "cat", self._EscapeFilename(filename)],
            universal_newlines=universal_newlines)
          if ret_code and status[0] == "R":
            # It's a replaced file without local history (see issue208).
            # The base file needs to be fetched from the server.
            url = "%s/%s" % (self.svn_base, filename)
            base_content = RunShell(["svn", "cat", url],
                                    universal_newlines=universal_newlines,
                                    silent_ok=True)
          elif ret_code:
            ErrorExit("Got error status from 'svn cat %s'" % filename)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            # Collapse expanded SVN keywords so diffs apply cleanly.
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> (hash before, hash after) of base file.
    # Hashes for "no such file" are represented as None.
    self.hashes = {}
    # Map of new filename -> old filename for renames.
    self.renames = {}

  def GetGUID(self):
    """Returns the hash of a root (parentless) commit to identify the repo."""
    revlist = RunShell("git rev-list --parents HEAD".split()).splitlines()
    # M-A: Return the 1st root hash, there could be multiple when a
    # subtree is merged. In that case, more analysis would need to
    # be done to figure out which HEAD is the 'most representative'.
    for r in revlist:
      # Root commits have no parents, so their rev-list line has no space.
      if ' ' not in r:
        return r

  def PostProcessDiff(self, gitdiff):
    """Converts the diff output to include an svn-style "Index:" line as well
    as record the hashes of the files, so we can upload them along with our
    diff."""
    # Special used by git to indicate "no such content".
    NULL_HASH = "0"*40

    def IsFileNew(filename):
      # A file is new if we recorded a None "before" hash for it.
      return filename in self.hashes and self.hashes[filename][0] is None

    def AddSubversionPropertyChange(filename):
      """Add svn's property change information into the patch if given file is
      new file.

      We use Subversion's auto-props setting to retrieve its property.
      See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
      Subversion's [auto-props] setting.
      """
      if self.options.emulate_svn_auto_props and IsFileNew(filename):
        svnprops = GetSubversionPropertyChanges(filename)
        if svnprops:
          svndiff.append("\n" + svnprops + "\n")

    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/(.*)$", line)
      if match:
        # Add auto property here for previously seen file.
        if filename is not None:
          AddSubversionPropertyChange(filename)
        filecount += 1
        # Intentionally use the "after" filename so we can show renames.
        filename = match.group(2)
        svndiff.append("Index: %s\n" % filename)
        if match.group(1) != match.group(2):
          self.renames[match.group(2)] = match.group(1)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.(\w+)", line)
        if match:
          before, after = (match.group(1), match.group(2))
          if before == NULL_HASH:
            before = None
          if after == NULL_HASH:
            after = None
          self.hashes[filename] = (before, after)
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    # Add auto property for the last seen file.
    assert filename is not None
    AddSubversionPropertyChange(filename)
    return "".join(svndiff)

  def GenerateDiff(self, extra_args):
    """Returns a git diff for the requested revision(s) plus extra_args."""
    extra_args = extra_args[:]
    if self.options.revision:
      if ":" in self.options.revision:
        extra_args = self.options.revision.split(":", 1) + extra_args
      else:
        extra_args = [self.options.revision] + extra_args

    # --no-ext-diff is broken in some versions of Git, so try to work around
    # this by overriding the environment (but there is still a problem if the
    # git config key "diff.external" is used).
    env = os.environ.copy()
    if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
    # -M/-C will not print the diff for the deleted file when a file is renamed.
    # This is confusing because the original file will not be shown on the
    # review when a file is renamed. So first get the diff of all deleted files,
    # then the diff of everything except deleted files with rename and copy
    # support enabled.
    cmd = [
        "git", "diff", "--no-color", "--no-ext-diff", "--full-index",
        "--ignore-submodules",
    ]
    diff = RunShell(
        cmd + ["--diff-filter=D"] + extra_args, env=env, silent_ok=True)
    diff += RunShell(
        cmd + ["--find-copies-harder", "--diff-filter=ACMRT"] + extra_args,
        env=env, silent_ok=True)
    # The CL could be only file deletion or not. So accept silent diff for both
    # commands then check for an empty diff manually.
    if not diff:
      ErrorExit("No output from %s" % (cmd + extra_args))
    return diff

  def GetUnknownFiles(self):
    """Returns files git does not track (and does not ignore)."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetFileContent(self, file_hash, is_binary):
    """Returns the content of a file identified by its git hash."""
    data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
                                           universal_newlines=not is_binary)
    if retcode:
      ErrorExit("Got error status from 'git show %s'" % file_hash)
    return data

  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile."""
    hash_before, hash_after = self.hashes.get(filename, (None,None))
    base_content = None
    new_content = None
    status = None

    if filename in self.renames:
      status = "A +"  # Match svn attribute name for renames.
      if filename not in self.hashes:
        # If a rename doesn't change the content, we never get a hash.
        base_content = RunShell(
            ["git", "show", "HEAD:" + filename], silent_ok=True)
    elif not hash_before:
      status = "A"
      base_content = ""
    elif not hash_after:
      status = "D"
    else:
      status = "M"

    # NOTE(review): base_content may still be None here (plain "M"/"D");
    # IsBinaryData treats None as non-binary and the real content is fetched
    # below from hash_before.
    is_binary = self.IsBinaryData(base_content)
    is_image = self.IsImage(filename)

    # Grab the before/after content if we need it.
    # Grab the base content if we don't have it already.
    if base_content is None and hash_before:
      base_content = self.GetFileContent(hash_before, is_binary)
    # Only include the "after" file if it's an image; otherwise it
    # it is reconstructed from the diff.
    if is_image and hash_after:
      new_content = self.GetFileContent(hash_after, is_binary)

    return (base_content, new_content, is_binary, status)
class CVSVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for CVS."""

  def __init__(self, options):
    super(CVSVCS, self).__init__(options)

  def GetGUID(self):
    """For now we don't know how to get repository ID for CVS"""
    return

  def GetOriginalContent_(self, filename):
    """Fetches the pristine repository copy of filename, LF-normalized."""
    RunShell(["cvs", "up", filename], silent_ok=True)
    # TODO need detect file content encoding
    content = open(filename).read()
    return content.replace("\r\n", "\n")

  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile.

    Parses "cvs status" output to classify the file as modified, added or
    needing checkout (deleted locally).
    """
    base_content = None
    new_content = None
    status = "A"
    output, retcode = RunShellWithReturnCode(["cvs", "status", filename])
    if retcode:
      ErrorExit("Got error status from 'cvs status %s'" % filename)

    # BUG FIX: str.find() returns -1 (which is truthy) when the substring is
    # absent, so the original bare "if output.find(...)" tests on the elif
    # branches were effectively always true.  All three tests must compare
    # against -1 explicitly.
    if output.find("Status: Locally Modified") != -1:
      status = "M"
      # Move the modified file aside, fetch the pristine base content, then
      # restore the local modifications.
      temp_filename = "%s.tmp123" % filename
      os.rename(filename, temp_filename)
      base_content = self.GetOriginalContent_(filename)
      os.rename(temp_filename, filename)
    elif output.find("Status: Locally Added") != -1:
      status = "A"
      base_content = ""
    elif output.find("Status: Needs Checkout") != -1:
      status = "D"
      base_content = self.GetOriginalContent_(filename)

    return (base_content, new_content, self.IsBinaryData(base_content), status)

  def GenerateDiff(self, extra_args):
    """Runs "cvs diff -u -N", aborting unless the output contains patches."""
    cmd = ["cvs", "diff", "-u", "-N"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]

    cmd.extend(extra_args)
    data, retcode = RunShellWithReturnCode(cmd)
    count = 0
    # cvs diff exits 1 when differences were found, which is not an error.
    if retcode in [0, 1]:
      for line in data.splitlines():
        if line.startswith("Index:"):
          count += 1
          logging.info(line)

    if not count:
      ErrorExit("No valid patches found in output from cvs diff")

    return data

  def GetUnknownFiles(self):
    """Returns files reported as '?' (unknown) by "cvs diff"."""
    data, retcode = RunShellWithReturnCode(["cvs", "diff"])
    if retcode not in [0, 1]:
      ErrorExit("Got error status from 'cvs diff':\n%s" % (data,))
    unknown_files = []
    for line in data.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default to the working copy's parent revision id.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def GetGUID(self):
    """Returns the repository's unique id: the hash of revision 0."""
    # See chapter "Uniquely identifying a repository"
    # http://hgbook.red-bean.com/read/customizing-the-output-of-mercurial.html
    info = RunShell("hg log -r0 --template {node}".split())
    return info.strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    absname = os.path.join(self.repo_dir, filename)
    return os.path.relpath(absname)

  def GenerateDiff(self, extra_args):
    """Returns an svn-style diff of changes since base_rev."""
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for |filename|."""
    # "hg status" and "hg cat" both take a path relative to the current subdir,
    # but "hg diff" has given us the path relative to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    status, _ = out[0].split(' ', 1)
    if len(out) > 1 and status == "A":
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    if ":" in self.base_rev:
      # base_rev may be a "rev1:rev2" range; use the start of the range.
      base_rev = self.base_rev.split(":", 1)[0]
    else:
      base_rev = self.base_rev
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = self.IsBinaryData(base_content)
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or self.IsBinaryData(new_content)
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    if not is_binary:
      # The server reconstructs text "after" content from the diff.
      new_content = None
    return base_content, new_content, is_binary, status
class PerforceVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Perforce."""

  def __init__(self, options):
    def ConfirmLogin():
      # Make sure we have a valid perforce session
      while True:
        data, retcode = self.RunPerforceCommandWithReturnCode(
            ["login", "-s"], marshal_output=True)
        if not data:
          ErrorExit("Error checking perforce login")
        if not retcode and (not "code" in data or data["code"] != "error"):
          break
        # Session is invalid or expired: prompt for a fresh login.
        print "Enter perforce password: "
        self.RunPerforceCommandWithReturnCode(["login"])

    super(PerforceVCS, self).__init__(options)

    self.p4_changelist = options.p4_changelist
    if not self.p4_changelist:
      ErrorExit("A changelist id is required")
    if (options.revision):
      ErrorExit("--rev is not supported for perforce")

    self.p4_port = options.p4_port
    self.p4_client = options.p4_client
    self.p4_user = options.p4_user

    ConfirmLogin()

    if not options.title:
      # Default the issue title to the changelist description's first line.
      description = self.RunPerforceCommand(["describe", self.p4_changelist],
                                            marshal_output=True)
      if description and "desc" in description:
        # Rietveld doesn't support multi-line descriptions
        raw_title = description["desc"].strip()
        lines = raw_title.splitlines()
        if len(lines):
          options.title = lines[0]

  def GetGUID(self):
    """For now we don't know how to get repository ID for Perforce"""
    return

  def RunPerforceCommandWithReturnCode(self, extra_args, marshal_output=False,
                                       universal_newlines=True):
    """Runs "p4 <extra_args>" and returns (data, retcode).

    With marshal_output the data is unmarshalled from p4's -G python
    object stream; otherwise it is the raw command output.
    """
    args = ["p4"]
    if marshal_output:
      # -G makes perforce format its output as marshalled python objects
      args.extend(["-G"])
    if self.p4_port:
      args.extend(["-p", self.p4_port])
    if self.p4_client:
      args.extend(["-c", self.p4_client])
    if self.p4_user:
      args.extend(["-u", self.p4_user])
    args.extend(extra_args)

    data, retcode = RunShellWithReturnCode(
        args, print_output=False, universal_newlines=universal_newlines)
    if marshal_output and data:
      data = marshal.loads(data)
    return data, retcode

  def RunPerforceCommand(self, extra_args, marshal_output=False,
                         universal_newlines=True):
    """Like RunPerforceCommandWithReturnCode, but exits on non-zero status."""
    # This might be a good place to cache call results, since things like
    # describe or fstat might get called repeatedly.
    data, retcode = self.RunPerforceCommandWithReturnCode(
        extra_args, marshal_output, universal_newlines)
    if retcode:
      ErrorExit("Got error status from %s:\n%s" % (extra_args, data))
    return data

  def GetFileProperties(self, property_key_prefix = "", command = "describe"):
    """Maps depot filename -> property value for each file in the change.

    NOTE(review): the |command| parameter is currently unused — "describe"
    is always run regardless of its value.
    """
    description = self.RunPerforceCommand(["describe", self.p4_changelist],
                                          marshal_output=True)

    changed_files = {}
    file_index = 0
    # Try depotFile0, depotFile1, ... until we don't find a match
    while True:
      file_key = "depotFile%d" % file_index
      if file_key in description:
        filename = description[file_key]
        change_type = description[property_key_prefix + str(file_index)]
        changed_files[filename] = change_type
        file_index += 1
      else:
        break
    return changed_files

  def GetChangedFiles(self):
    """Maps depot filename -> perforce action ("edit", "add", ...)."""
    return self.GetFileProperties("action")

  def GetUnknownFiles(self):
    # Perforce doesn't detect new files, they have to be explicitly added
    return []

  def IsBaseBinary(self, filename):
    """True if the base (pre-change) revision of |filename| is binary."""
    base_filename = self.GetBaseFilename(filename)
    return self.IsBinaryHelper(base_filename, "files")

  def IsPendingBinary(self, filename):
    """True if the pending (post-change) revision of |filename| is binary."""
    return self.IsBinaryHelper(filename, "describe")

  def IsBinaryHelper(self, filename, command):
    """True unless the perforce file type for |filename| ends in "text"."""
    file_types = self.GetFileProperties("type", command)
    if not filename in file_types:
      ErrorExit("Trying to check binary status of unknown file %s." % filename)
    # This treats symlinks, macintosh resource files, temporary objects, and
    # unicode as binary. See the Perforce docs for more details:
    # http://www.perforce.com/perforce/doc.current/manuals/cmdref/o.ftypes.html
    return not file_types[filename].endswith("text")

  def GetFileContent(self, filename, revision, is_binary):
    """Returns the content of |filename| at |revision| via "p4 print"."""
    file_arg = filename
    if revision:
      file_arg += "#" + revision
    # -q suppresses the initial line that displays the filename and revision
    return self.RunPerforceCommand(["print", "-q", file_arg],
                                   universal_newlines=not is_binary)

  def GetBaseFilename(self, filename):
    """Returns the depot file this change was integrated/moved from, if any."""
    actionsWithDifferentBases = [
        "move/add", # p4 move
        "branch", # p4 integrate (to a new file), similar to hg "add"
        "add", # p4 integrate (to a new file), after modifying the new file
    ]

    # We only see a different base for "add" if this is a downgraded branch
    # after a file was branched (integrated), then edited.
    if self.GetAction(filename) in actionsWithDifferentBases:
      # -Or shows information about pending integrations/moves
      fstat_result = self.RunPerforceCommand(["fstat", "-Or", filename],
                                             marshal_output=True)

      baseFileKey = "resolveFromFile0" # I think it's safe to use only file0
      if baseFileKey in fstat_result:
        return fstat_result[baseFileKey]

    return filename

  def GetBaseRevision(self, filename):
    """Returns the revision the client currently has of |filename|, or None."""
    base_filename = self.GetBaseFilename(filename)

    have_result = self.RunPerforceCommand(["have", base_filename],
                                          marshal_output=True)
    if "haveRev" in have_result:
      return have_result["haveRev"]

  def GetLocalFilename(self, filename):
    """Maps a depot path to its client workspace path, or None."""
    where = self.RunPerforceCommand(["where", filename], marshal_output=True)
    if "path" in where:
      return where["path"]

  def GenerateDiff(self, args):
    """Builds an svn-style unified diff covering the whole changelist."""
    class DiffData:
      # Per-file scratchpad collected while assembling the diff.
      def __init__(self, perforceVCS, filename, action):
        self.perforceVCS = perforceVCS
        self.filename = filename
        self.action = action
        self.base_filename = perforceVCS.GetBaseFilename(filename)

        self.file_body = None
        self.base_rev = None
        self.prefix = None
        self.working_copy = True
        self.change_summary = None

    def GenerateDiffHeader(diffData):
      # Emits the svn-style Index/---/+++ header for one file.
      header = []
      header.append("Index: %s" % diffData.filename)
      header.append("=" * 67)

      if diffData.base_filename != diffData.filename:
        if diffData.action.startswith("move"):
          verb = "rename"
        else:
          verb = "copy"
        header.append("%s from %s" % (verb, diffData.base_filename))
        header.append("%s to %s" % (verb, diffData.filename))

      suffix = "\t(revision %s)" % diffData.base_rev
      header.append("--- " + diffData.base_filename + suffix)
      if diffData.working_copy:
        suffix = "\t(working copy)"
      header.append("+++ " + diffData.filename + suffix)
      if diffData.change_summary:
        header.append(diffData.change_summary)
      return header

    def GenerateMergeDiff(diffData, args):
      # -du generates a unified diff, which is nearly svn format
      diffData.file_body = self.RunPerforceCommand(
          ["diff", "-du", diffData.filename] + args)
      diffData.base_rev = self.GetBaseRevision(diffData.filename)
      diffData.prefix = ""

      # We have to replace p4's file status output (the lines starting
      # with +++ or ---) to match svn's diff format
      lines = diffData.file_body.splitlines()
      first_good_line = 0
      while (first_good_line < len(lines) and
          not lines[first_good_line].startswith("@@")):
        first_good_line += 1
      diffData.file_body = "\n".join(lines[first_good_line:])
      return diffData

    def GenerateAddDiff(diffData):
      # For added files the whole local file content becomes "+" lines.
      fstat = self.RunPerforceCommand(["fstat", diffData.filename],
                                      marshal_output=True)
      if "headRev" in fstat:
        diffData.base_rev = fstat["headRev"] # Re-adding a deleted file
      else:
        diffData.base_rev = "0" # Brand new file
      diffData.working_copy = False
      rel_path = self.GetLocalFilename(diffData.filename)
      diffData.file_body = open(rel_path, 'r').read()
      # Replicate svn's list of changed lines
      line_count = len(diffData.file_body.splitlines())
      diffData.change_summary = "@@ -0,0 +1"
      if line_count > 1:
        diffData.change_summary += ",%d" % line_count
      diffData.change_summary += " @@"
      diffData.prefix = "+"
      return diffData

    def GenerateDeleteDiff(diffData):
      # For deleted files the whole base content becomes "-" lines.
      diffData.base_rev = self.GetBaseRevision(diffData.filename)
      is_base_binary = self.IsBaseBinary(diffData.filename)

      # For deletes, base_filename == filename
      diffData.file_body = self.GetFileContent(diffData.base_filename,
          None,
          is_base_binary)
      # Replicate svn's list of changed lines
      line_count = len(diffData.file_body.splitlines())
      diffData.change_summary = "@@ -1"
      if line_count > 1:
        diffData.change_summary += ",%d" % line_count
      diffData.change_summary += " +0,0 @@"
      diffData.prefix = "-"
      return diffData

    changed_files = self.GetChangedFiles()

    svndiff = []
    filecount = 0
    for (filename, action) in changed_files.items():
      svn_status = self.PerforceActionToSvnStatus(action)
      if svn_status == "SKIP":
        continue

      diffData = DiffData(self, filename, action)
      # Is it possible to diff a branched file? Stackoverflow says no:
      # http://stackoverflow.com/questions/1771314/in-perforce-command-line-how-to-diff-a-file-reopened-for-add
      if svn_status == "M":
        diffData = GenerateMergeDiff(diffData, args)
      elif svn_status == "A":
        diffData = GenerateAddDiff(diffData)
      elif svn_status == "D":
        diffData = GenerateDeleteDiff(diffData)
      else:
        ErrorExit("Unknown file action %s (svn action %s)." % \
                  (action, svn_status))

      svndiff += GenerateDiffHeader(diffData)

      for line in diffData.file_body.splitlines():
        svndiff.append(diffData.prefix + line)
      filecount += 1
    if not filecount:
      ErrorExit("No valid patches found in output from p4 diff")
    return "\n".join(svndiff) + "\n"

  def PerforceActionToSvnStatus(self, status):
    """Translates a p4 action into the svn status letter Rietveld expects."""
    # Mirroring the list at http://permalink.gmane.org/gmane.comp.version-control.mercurial.devel/28717
    # Is there something more official?
    return {
        "add" : "A",
        "branch" : "A",
        "delete" : "D",
        "edit" : "M", # Also includes changing file types.
        "integrate" : "M",
        "move/add" : "M",
        "move/delete": "SKIP",
        "purge" : "D", # How does a file's status become "purge"?
    }[status]

  def GetAction(self, filename):
    """Returns the perforce action for |filename|; exits if unknown."""
    changed_files = self.GetChangedFiles()
    if not filename in changed_files:
      ErrorExit("Trying to get base version of unknown file %s." % filename)

    return changed_files[filename]

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for |filename|."""
    base_filename = self.GetBaseFilename(filename)
    base_content = ""
    new_content = None

    status = self.PerforceActionToSvnStatus(self.GetAction(filename))

    if status != "A":
      revision = self.GetBaseRevision(base_filename)
      if not revision:
        ErrorExit("Couldn't find base revision for file %s" % filename)
      is_base_binary = self.IsBaseBinary(base_filename)
      base_content = self.GetFileContent(base_filename,
                                         revision,
                                         is_base_binary)

    is_binary = self.IsPendingBinary(filename)
    if status != "D" and status != "SKIP":
      relpath = self.GetLocalFilename(filename)
      if is_binary:
        # Only binary "after" content is uploaded; text is rebuilt
        # from the diff on the server.
        new_content = open(relpath, "rb").read()

    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  patches = []
  current_name = None
  current_chunk = []
  for line in data.splitlines(True):
    header_name = None
    if line.startswith('Index:'):
      header_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the
      # same, otherwise the file shows up twice.
      prop_name = line.split(':', 1)[1].strip().replace('\\', '/')
      if prop_name != current_name:
        # File has property changes but no modifications: start a new piece.
        header_name = prop_name
    if header_name:
      if current_name and current_chunk:
        patches.append((current_name, ''.join(current_chunk)))
      current_name = header_name
      current_chunk = [line]
    else:
      current_chunk.append(line)
  if current_name and current_chunk:
    patches.append((current_name, ''.join(current_chunk)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Used when the combined diff exceeds MAX_UPLOAD_SIZE; individual patches
  that are still too large are skipped with a warning.

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    lines = response_body.splitlines()
    # The server replies "OK\n<patch_key>" on success.
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCSName(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an string indicating which VCS is detected.

  Returns:
    A pair (vcs, output). vcs is a string indicating which VCS was detected
    and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, VCS_PERFORCE,
    VCS_CVS, or VCS_UNKNOWN.
    Since local perforce repositories can't be easily detected, this method
    will only guess VCS_PERFORCE if any perforce options have been specified.
    output is a string containing any interesting output from the vcs
    detection routine, or None if there is nothing interesting.
  """
  # Any explicitly-set p4 option forces Perforce (it can't be auto-detected).
  for attribute, value in options.__dict__.iteritems():
    if attribute.startswith("p4") and value != None:
      return (VCS_PERFORCE, None)

  def RunDetectCommand(vcs_type, command):
    """Helper to detect VCS by executing command.

    Returns:
      A pair (vcs, output) or None. Throws exception on error.
    """
    try:
      out, returncode = RunShellWithReturnCode(command)
      if returncode == 0:
        return (vcs_type, out.strip())
    except OSError, (errcode, message):
      if errcode != errno.ENOENT: # command not found code
        raise

  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  res = RunDetectCommand(VCS_MERCURIAL, ["hg", "root"])
  if res != None:
    return res

  # Subversion from 1.7 has a single centralized .svn folder
  # ( see http://subversion.apache.org/docs/release-notes/1.7.html#wc-ng )
  # That's why we use 'svn info' instead of checking for .svn dir
  res = RunDetectCommand(VCS_SUBVERSION, ["svn", "info"])
  if res != None:
    return res

  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  res = RunDetectCommand(VCS_GIT, ["git", "rev-parse",
                                   "--is-inside-work-tree"])
  if res != None:
    return res

  # detect CVS repos use `cvs status && $? == 0` rules
  res = RunDetectCommand(VCS_CVS, ["cvs", "status"])
  if res != None:
    return res

  return (VCS_UNKNOWN, None)
def GuessVCS(options):
  """Helper to guess the version control system.

  This verifies any user-specified VersionControlSystem (by command line
  or environment variable). If the user didn't specify one, this examines
  the current directory, guesses which VersionControlSystem we're using,
  and returns an instance of the appropriate class. Exit with an error
  if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  vcs = options.vcs
  if not vcs:
    # Fall back to the CODEREVIEW_VCS environment variable.
    vcs = os.environ.get("CODEREVIEW_VCS")
  if vcs:
    v = VCS_ABBREVIATIONS.get(vcs.lower())
    if v is None:
      ErrorExit("Unknown version control system %r specified." % vcs)
    (vcs, extra_output) = (v, None)
  else:
    (vcs, extra_output) = GuessVCSName(options)

  if vcs == VCS_MERCURIAL:
    if extra_output is None:
      # MercurialVCS needs the repository root directory.
      extra_output = RunShell(["hg", "root"]).strip()
    return MercurialVCS(options, extra_output)
  elif vcs == VCS_SUBVERSION:
    return SubversionVCS(options)
  elif vcs == VCS_PERFORCE:
    return PerforceVCS(options)
  elif vcs == VCS_GIT:
    return GitVCS(options)
  elif vcs == VCS_CVS:
    return CVSVCS(options)

  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def CheckReviewer(reviewer):
  """Validate a reviewer, which is either a nickname or an email address.

  Args:
    reviewer: A nickname or an email address.

  Calls ErrorExit() if it is an invalid email address; returns None for a
  nickname (no '@') or a syntactically plausible email.
  """
  if "@" not in reviewer:
    return  # Assume nickname
  pieces = reviewer.split("@")
  # Reject multiple '@' signs or a domain part without a dot.
  if len(pieces) > 2 or "." not in pieces[-1]:
    ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
  """Returns the content of [auto-props] section of Subversion's config file as
  a dictionary.

  Returns:
    A dictionary whose key-value pair corresponds the [auto-props] section's
    key-value pair.
    In following cases, returns empty dictionary:
    - config file doesn't exist, or
    - 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
  """
  if os.name == 'nt':
    # NOTE(review): os.environ.get("APPDATA") can be None if APPDATA is
    # unset, which would raise TypeError on the concatenation — confirm
    # whether that environment is possible for callers.
    subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
  else:
    subversion_config = os.path.expanduser("~/.subversion/config")
  if not os.path.exists(subversion_config):
    return {}
  config = ConfigParser.ConfigParser()
  config.read(subversion_config)
  if (config.has_section("miscellany") and
      config.has_option("miscellany", "enable-auto-props") and
      config.getboolean("miscellany", "enable-auto-props") and
      config.has_section("auto-props")):
    props = {}
    for file_pattern in config.options("auto-props"):
      props[file_pattern] = ParseSubversionPropertyValues(
          config.get("auto-props", file_pattern))
    return props
  else:
    return {}
def ParseSubversionPropertyValues(props):
  """Parse the given property value which comes from [auto-props] section and
  returns a list whose element is a (svn_prop_key, svn_prop_value) pair.

  See the following doctest for example.

  >>> ParseSubversionPropertyValues('svn:eol-style=LF')
  [('svn:eol-style', 'LF')]
  >>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
  [('svn:mime-type', 'image/jpeg')]
  >>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
  [('svn:eol-style', 'LF'), ('svn:executable', '*')]
  """
  key_value_pairs = []
  for prop in props.split(";"):
    # Split on the first '=' only: property values may themselves contain
    # '=' (the previous assert-based parsing crashed on such values).
    key, sep, value = prop.partition("=")
    if not sep:
      # If value is not given, use '*' as a Subversion's convention.
      key_value_pairs.append((key, "*"))
    else:
      key_value_pairs.append((key, value))
  return key_value_pairs
def GetSubversionPropertyChanges(filename):
  """Return a Subversion's 'Property changes on ...' string, which is used in
  the patch file.

  Args:
    filename: filename whose property might be set by [auto-props] config.

  Returns:
    A string like 'Property changes on |filename| ...' if given |filename|
    matches any entries in [auto-props] section. None, otherwise.
  """
  global svn_auto_props_map
  if svn_auto_props_map is None:
    # Lazily load and cache the parsed [auto-props] config on first use.
    svn_auto_props_map = LoadSubversionAutoProperties()

  all_props = []
  for file_pattern, props in svn_auto_props_map.items():
    if fnmatch.fnmatch(filename, file_pattern):
      all_props.extend(props)
  if all_props:
    return FormatSubversionPropertyChanges(filename, all_props)
  return None
def FormatSubversionPropertyChanges(filename, props):
  """Build Subversion's 'Property changes on ...' block for a patch file.

  Args:
    filename: the file the properties apply to.
    props: A list whose element is a (svn_prop_key, svn_prop_value) pair.

  Returns:
    The formatted block (header line, separator, one 'Added:'/value pair
    per property), terminated by a newline.
  """
  lines = ["Property changes on: %s" % filename,
           "_" * 67]
  for prop_key, prop_value in props:
    lines.append("Added: " + prop_key)
    lines.append("   + " + prop_value)
  return "\n".join(lines) + "\n"
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  options, args = parser.parse_args(argv[1:])
  if options.help:
    if options.verbose < 2:
      # hide Perforce options
      parser.epilog = "Use '--help -v' to show additional Perforce options."
      parser.option_groups.remove(parser.get_option_group('--p4_port'))
    parser.print_help()
    sys.exit(0)

  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)

  vcs = GuessVCS(options)

  base = options.base_url
  if isinstance(vcs, SubversionVCS):
    # Guessing the base field is only supported for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    guessed_base = vcs.GuessBase(options.download_base)
    if base:
      if guessed_base and base != guessed_base:
        print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
            (base, guessed_base)
    else:
      base = guessed_base

  # NOTE(review): this assignment is a no-op (the flag is already true in
  # this branch); kept as-is.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  data = vcs.PostProcessDiff(data)
  if options.print_diffs:
    print "Rietveld diff start:*****"
    print data
    print "Rietveld diff end:*****"
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  rpc_server = GetRpcServer(options.server,
                            options.email,
                            options.host,
                            options.save_cookies,
                            options.account_type)
  # Assemble the multipart form fields for the /upload request.
  form_fields = []

  repo_guid = vcs.GetGUID()
  if repo_guid:
    form_fields.append(("repo_guid", repo_guid))
  if base:
    # Strip any user:password@ prefix before sending the base URL.
    b = urlparse.urlparse(base)
    username, netloc = urllib.splituser(b.netloc)
    if username:
      logging.info("Removed username from base URL")
      base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
                                  b.query, b.fragment))
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      CheckReviewer(reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      CheckReviewer(cc)
    form_fields.append(("cc", options.cc))

  # Process --message, --title and --file.
  message = options.message or ""
  title = options.title or ""
  if options.file:
    if options.message:
      ErrorExit("Can't specify both message and message file options")
    # NOTE(review): 'file' shadows the builtin here; kept as-is.
    file = open(options.file, 'r')
    message = file.read()
    file.close()
  if options.issue:
    prompt = "Title describing this patch set: "
  else:
    prompt = "New issue subject: "
  # Fall back to the message's first line, then prompt interactively.
  title = (
      title or message.split('\n', 1)[0].strip() or raw_input(prompt).strip())
  if not title and not options.issue:
    ErrorExit("A non-empty title is required for a new issue")
  # For existing issues, it's fine to give a patchset an empty name. Rietveld
  # doesn't accept that so use a whitespace.
  title = title or " "
  if len(title) > 100:
    title = title[:99] + '…'
  if title and not options.issue:
    message = message or title

  form_fields.append(("subject", title))
  # If it's a new issue send message as description. Otherwise a new
  # message is created below on upload_complete.
  if message and not options.issue:
    form_fields.append(("description", message))

  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  if options.private:
    if options.issue:
      print "Warning: Private flag ignored when updating an existing issue."
    else:
      form_fields.append(("private", "1"))
  if options.send_patch:
    options.send_mail = True
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    # Too big for one request; upload each file's patch separately below.
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Response format: message line, patchset id line, then one
    # "<patch_key> <filename>" line per patch.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the trailing path component of the issue URL.
  issue = msg[msg.rfind("/")+1:]

  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result

  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)

  payload = {}  # payload for final request
  if options.send_mail:
    payload["send_mail"] = "yes"
    if options.send_patch:
      payload["attach_patch"] = "yes"
  if options.issue and message:
    payload["message"] = message
  payload = urllib.urlencode(payload)
  rpc_server.Send("/" + issue + "/upload_complete/" + (patchset or ""),
                  payload=payload)
  return issue, patchset
def main():
  """Entry point: configure logging, force a stable locale, run RealMain."""
  try:
    logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                                "%(lineno)s %(message)s "))
    # 'C' locale keeps VCS command output in a predictable, parseable form.
    os.environ['LC_ALL'] = 'C'
    RealMain(sys.argv)
  except KeyboardInterrupt:
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)


if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
  """Return the count of strings in words whose length is 2 or more and
  whose first and last characters are the same.

  Args:
    words: a list of strings.

  Returns:
    The number of matching strings (int).
  """
  # Previous version shadowed the builtin 'str' with its loop variable;
  # a generator expression with a clean name avoids that.
  return sum(1 for word in words if len(word) > 1 and word[0] == word[-1])
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
  """Return words sorted, except strings beginning with 'x' come first.

  e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
  ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'].

  Args:
    words: a list of strings.

  Returns:
    A new sorted list; the input list is not modified.
  """
  # Fixes: removed a leftover debug print that polluted stdout, and
  # renamed the loop variable that shadowed the 'string' module name.
  # startswith also tolerates empty strings (indexing [0] would crash).
  x_words = [w for w in words if w.startswith('x')]
  other_words = [w for w in words if not w.startswith('x')]
  return sorted(x_words) + sorted(other_words)
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def lastElem(seq):
  """Return the last element of *seq*; used as a sort key.

  Parameter renamed from 'tuple', which shadowed the builtin type.
  """
  return seq[-1]

def sort_last(tuples):
  """Return *tuples* sorted in increasing order by each tuple's last element."""
  return sorted(tuples, key=lastElem)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Exercise match_ends, front_x and sort_last against known answers."""
  print 'match_ends'
  test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
  test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
  test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
  print
  print 'front_x'
  test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
       ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
  test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
       ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
  test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
       ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
  print
  print 'sort_last'
  test(sort_last([(1, 3), (3, 2), (2, 1)]),
       [(2, 1), (3, 2), (1, 3)])
  test(sort_last([(2, 3), (1, 2), (3, 1)]),
       [(3, 1), (1, 2), (2, 3)])
  test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
       [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
# Standard boilerplate: run main() only when executed as a script.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
import re
def mimic_dict(filename):
  """Return a dict mapping each word in *filename* to the list of words
  that immediately follow it in the text.

  The empty string '' maps to the first word, so callers can prime the
  mimic walk with ''. Words are lower-cased and most punctuation is
  stripped first. Returns -1 when the file yields no words (kept for
  backward compatibility; raising or returning {} would be cleaner).
  """
  f = open(filename, 'r')
  try:
    text = f.read()
  finally:
    f.close()  # the original never closed the handle
  # Keep the original punctuation-stripping pattern byte-for-byte.
  text = re.sub(r"[!_?/\|<>{}():;,.@#$%^&*--+='~`]", "", text)
  words = text.lower().split()
  if not words:
    # Also covers a whitespace/punctuation-only file, which previously
    # crashed with IndexError on words[0].
    return -1
  mimic = {"": [words[0]]}
  for prev, nxt in zip(words[:-1], words[1:]):
    mimic.setdefault(prev, []).append(nxt)
  return mimic
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
temp = ''
print word,
for i in range(200):
if mimic_dict.get(word) == None:
word = ''
temp = random.choice(mimic_dict[word])
print temp,
word = temp
return
# Provided main(), calls mimic_dict() and mimic()
def main():
if len(sys.argv) != 2:
print 'usage: ./mimic.py file-to-read'
sys.exit(1)
dict = mimic_dict(sys.argv[1])
print_mimic(dict, '')
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  """Return 'Number of donuts: <count>', using 'many' once count reaches 10."""
  quantity = 'many' if count > 9 else str(count)
  return 'Number of donuts: ' + quantity
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  """Return the first 2 + last 2 chars of s, or '' when len(s) < 2."""
  if len(s) >= 2:
    return s[:2] + s[-2:]
  return ''
# C. fix_start
# Given a string s, return a string
# where all occurrences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
  """Return s with every later occurrence of its first char replaced by '*'."""
  head = s[0]
  tail = s[1:].replace(head, '*')
  return head + tail
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
  """Return '<a> <b>' with the first two chars of each string swapped."""
  swapped_a = b[:2] + a[2:]
  swapped_b = a[:2] + b[2:]
  return swapped_a + ' ' + swapped_b
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
  """Run donuts, both_ends, fix_start and mix_up against expected values."""
  print 'donuts'
  # Each line calls donuts, compares its result to the expected for that call.
  test(donuts(4), 'Number of donuts: 4')
  test(donuts(9), 'Number of donuts: 9')
  test(donuts(10), 'Number of donuts: many')
  test(donuts(99), 'Number of donuts: many')
  print
  print 'both_ends'
  test(both_ends('spring'), 'spng')
  test(both_ends('Hello'), 'Helo')
  test(both_ends('a'), '')
  test(both_ends('xyz'), 'xyyz')
  print
  print 'fix_start'
  test(fix_start('babble'), 'ba**le')
  test(fix_start('aardvark'), 'a*rdv*rk')
  test(fix_start('google'), 'goo*le')
  test(fix_start('donut'), 'donut')
  print
  print 'mix_up'
  test(mix_up('mix', 'pod'), 'pox mid')
  test(mix_up('dog', 'dinner'), 'dig donner')
  test(mix_up('gnash', 'sport'), 'spash gnort')
  test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  """Return 'Number of donuts: <count>', using 'many' once count reaches 10."""
  quantity = 'many' if count > 9 else str(count)
  return 'Number of donuts: ' + quantity
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  """Return the first 2 + last 2 chars of s, or '' when len(s) < 2."""
  if len(s) >= 2:
    return s[:2] + s[-2:]
  return ''
# C. fix_start
# Given a string s, return a string
# where all occurrences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
  """Return s with every later occurrence of its first char replaced by '*'."""
  head = s[0]
  tail = s[1:].replace(head, '*')
  return head + tail
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
  """Return '<a> <b>' with the first two chars of each string swapped."""
  swapped_a = b[:2] + a[2:]
  swapped_b = a[:2] + b[2:]
  return swapped_a + ' ' + swapped_b
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
  """Run donuts, both_ends, fix_start and mix_up against expected values."""
  print 'donuts'
  # Each line calls donuts, compares its result to the expected for that call.
  test(donuts(4), 'Number of donuts: 4')
  test(donuts(9), 'Number of donuts: 9')
  test(donuts(10), 'Number of donuts: many')
  test(donuts(99), 'Number of donuts: many')
  print
  print 'both_ends'
  test(both_ends('spring'), 'spng')
  test(both_ends('Hello'), 'Helo')
  test(both_ends('a'), '')
  test(both_ends('xyz'), 'xyyz')
  print
  print 'fix_start'
  test(fix_start('babble'), 'ba**le')
  test(fix_start('aardvark'), 'a*rdv*rk')
  test(fix_start('google'), 'goo*le')
  test(fix_start('donut'), 'donut')
  print
  print 'mix_up'
  test(mix_up('mix', 'pod'), 'pox mid')
  test(mix_up('dog', 'dinner'), 'dig donner')
  test(mix_up('gnash', 'sport'), 'spash gnort')
  test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
  """Return how many strings in *words* have length >= 2 and the same
  first and last character."""
  count = 0
  for word in words:  # renamed from 'str', which shadowed the builtin
    if len(word) > 1 and word[0] == word[-1]:
      count += 1
  return count
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
  """Return *words* sorted, except strings starting with 'x' come first.

  Each group ('x' words, everything else) is sorted internally, e.g.
  ['mix', 'xyz', 'apple'] -> ['xyz', 'apple', 'mix'].
  """
  x_words = []
  other_words = []
  for word in words:  # renamed from 'string', which shadowed the stdlib module name
    if word[0] == 'x':
      x_words.append(word)
    else:
      other_words.append(word)
  # The original printed both partial lists here -- a debug leftover that
  # polluted stdout from a pure helper; removed.
  return sorted(x_words) + sorted(other_words)
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element from each tuple.
def lastElem(seq):
  """Return the last element of *seq*; used as a sort key.

  Parameter renamed from 'tuple', which shadowed the builtin type.
  """
  return seq[-1]

def sort_last(tuples):
  """Return *tuples* sorted in increasing order by each tuple's last element."""
  return sorted(tuples, key=lastElem)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Exercise match_ends, front_x and sort_last against known answers."""
  print 'match_ends'
  test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
  test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
  test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
  print
  print 'front_x'
  test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
       ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
  test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
       ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
  test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
       ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
  print
  print 'sort_last'
  test(sort_last([(1, 3), (3, 2), (2, 1)]),
       [(2, 1), (3, 2), (1, 3)])
  test(sort_last([(2, 3), (1, 2), (3, 1)]),
       [(3, 1), (1, 2), (2, 3)])
  test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
       [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
# Standard boilerplate: run main() only when executed as a script.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
  """Return *nums* with runs of adjacent equal elements collapsed to one.

  e.g. [1, 2, 2, 3] -> [1, 2, 3].
  """
  result = []
  for i in range(len(nums)):
    # Bug fix: at i == 0 the original compared nums[0] with nums[-1]
    # (Python negative indexing wraps to the LAST element), so a list
    # whose first and last values matched lost its first element --
    # remove_adjacent([1, 2, 1]) returned [2, 1] and [3] returned [].
    if i == 0 or nums[i] != nums[i - 1]:
      result.append(nums[i])
  return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
  """Merge two sorted lists into one sorted list in a single linear pass.

  On ties the element from list2 is taken first, matching the original
  strict '<' comparison.
  """
  merged = []
  i = j = 0
  # Take the smaller head element until one input is exhausted.
  while i < len(list1) and j < len(list2):
    if list1[i] < list2[j]:
      merged.append(list1[i])
      i += 1
    else:
      merged.append(list2[j])
      j += 1
  # Append whatever remains of the unfinished input (one slice is empty).
  merged.extend(list1[i:])
  merged.extend(list2[j:])
  return merged
# Note: the solution above is kind of cute, but unfortunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Exercise remove_adjacent and linear_merge against known answers."""
  print 'remove_adjacent'
  test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
  test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
  test(remove_adjacent([]), [])
  print
  print 'linear_merge'
  test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
       ['aa', 'aa', 'aa', 'bb', 'bb'])
# Standard boilerplate: run main() only when executed as a script.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
def read_file(filename):
  """Read *filename* and return a dict mapping each lower-cased,
  whitespace-separated word to its number of occurrences.

  Fixes two bugs in the original: 'f.close' was referenced without
  parentheses, so the handle was never closed; and the char-by-char
  scanner only flushed its buffer on ' ' or '\\n', silently dropping the
  final word when the file did not end with whitespace.
  """
  f = open(filename, 'r')
  try:
    # str.split() with no arguments handles all whitespace at once,
    # replacing the manual character loop.
    words = f.read().lower().split()
  finally:
    f.close()
  counts = {}  # renamed from 'dict', which shadowed the builtin
  for word in words:
    counts[word] = counts.get(word, 0) + 1
  return counts
def print_words(filename):
f = read_file(filename)
for key in sorted(f.keys(), reverse = True, key=f.get):
print key,' ', f.get(key)
def print_top(filename):
dict = read_file(filename)
count=0
for i in sorted(dict.keys(), reverse = True, key=dict.get):
if count < len(dict) and count < 20:
print i, dict.get(i)
count+=1
else:
break
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
  """Parse '--count file' or '--topcount file' and run the matching printer."""
  if len(sys.argv) != 3:
    print 'usage: ./wordcount.py {--count | --topcount} file'
    sys.exit(1)
  option = sys.argv[1]
  filename = sys.argv[2]
  if option == '--count':
    print_words(filename)
  elif option == '--topcount':
    print_top(filename)
  else:
    # Unknown flag: mirror the usage error exit code.
    print 'unknown option: ' + option
    sys.exit(1)
if __name__ == '__main__':
  main()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.