code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
from zope.interface import Interface
class IConversationView(Interface):
    """Browser-view interface for conversation-centric views."""

    def comments():
        """Return all comments in the conversation.
        """

    def conversation():
        """Return active conversation.
        """

    def root_comments():
        """Return all of the root comments for a conversation.
        """

    def children(comment):
        """Return all of the children comments for a parent comment.
        """
class ICommentView(Interface):
    """Browser-view interface for single-comment views."""

    def comment():
        """Return active comment.
        """

    def author():
        """Return the name of the author of this comment.

        If no full name is known the userid is returned.
        """

    def quotedBody():
        """Return the body of the comment, quoted for a reply.
        """
| Python |
from Products import Five
from Products.CMFCore.utils import getToolByName
from Products.Ploneboard.browser.utils import toPloneboardTime, getNumberOfConversations
from Products.Ploneboard.interfaces import IForum, IComment
class BoardView(Five.BrowserView):
    """View methods for board type
    """

    def __init__(self, context, request):
        Five.BrowserView.__init__(self, context, request)

    def getKeyedForums(self, sitewide=False):
        """Return all the forums in a board.

        Returns a mapping of category -> list of forum-info dicts.
        When ``sitewide`` is false the catalog search is restricted to
        forums below this board; otherwise all forums are found.
        """
        catalog = getToolByName(self.context, 'portal_catalog')
        query = {'object_provides':IForum.__identifier__}
        if not sitewide:
            query['path'] = '/'.join(self.context.getPhysicalPath())
        result = {}
        for f in catalog(query):
            # The full object is needed for the conversation count and
            # the category lookup below.
            obj = f._unrestrictedGetObject()
            data = dict(absolute_url=f.getURL(),
                        Title=f.Title,
                        Description=f.Description,
                        getNumberOfConversations=getNumberOfConversations(obj, catalog), # XXX THIS AND CATEGORY IS WHY WE NEED GETOBJECT, TRY CACHING
                        getLastCommentDate=None,
                        getLastCommentAuthor=None,
                        )
            # Newest comment anywhere below this forum.
            lastcomment = catalog(object_provides=IComment.__identifier__,
                                  sort_on='created',
                                  sort_order='reverse',
                                  sort_limit=1,
                                  path='/'.join(obj.getPhysicalPath()))
            if lastcomment:
                lastcomment = lastcomment[0]
                data['getLastCommentDate'] = self.toPloneboardTime(lastcomment.created)
                data['getLastCommentAuthor'] = lastcomment.Creator
            try:
                categories = obj.getCategory()
            except AttributeError:
                categories = None
            if not categories:
                # NOTE(review): uncategorized forums are grouped under
                # the dict key None -- templates must expect that key.
                categories = None
            if not isinstance(categories, (tuple,list)):
                categories = categories,
            for category in categories:
                try:
                    categoryforums = result.get(category, [])
                    categoryforums.append(data)
                    result[category] = categoryforums
                except TypeError: # category is list?!
                    # Unhashable category: fall back to a joined-string
                    # key.  NOTE(review): this branch stores the dict
                    # directly instead of appending to a list, unlike
                    # the normal branch above -- confirm intended.
                    result[', '.join(category)] = data
        return result

    def toPloneboardTime(self, time_=None):
        """Return time formatted for Ploneboard"""
        return toPloneboardTime(self.context, self.request, time_)
| Python |
#
| Python |
"""
$Id: comment.py 99041 2009-10-06 06:38:21Z sureshvv $
"""
from zope import interface
from Acquisition import aq_base
from DateTime.DateTime import DateTime
from Products import Five
from Products.CMFCore import utils as cmf_utils
from Products.CMFCore.utils import getToolByName
from Products.Ploneboard import permissions
from Products.Ploneboard.batch import Batch
from Products.Ploneboard.browser.interfaces import IConversationView
from Products.Ploneboard.browser.interfaces import ICommentView
from Products.Ploneboard.browser.utils import toPloneboardTime
from Products.Ploneboard.utils import PloneboardMessageFactory as _
class CommentViewableView(Five.BrowserView):
    """Base class for any view that wants to interact with comments.

    Looks up the commonly used portal tools once at construction time
    and caches them as attributes for the subclasses.
    """

    def __init__(self, context, request):
        Five.BrowserView.__init__(self, context, request)
        getTool = cmf_utils.getToolByName
        self.portal_actions = getTool(self.context, 'portal_actions')
        self.plone_utils = getTool(self.context, 'plone_utils')
        self.portal_membership = getTool(self.context, 'portal_membership')
        self.portal_workflow = getTool(self.context, 'portal_workflow')

    def _buildDict(self, comment):
        """Return a dict of all the important properties of a comment."""
        checkPermission = self.portal_membership.checkPermission
        actions = self.portal_actions.listFilteredActionsFor(comment)
        info = {}
        info['Title'] = comment.title_or_id()
        info['Creator'] = comment.Creator()
        info['creation_date'] = self.toPloneboardTime(comment.CreationDate())
        info['getId'] = comment.getId()
        info['getText'] = comment.getText()
        info['absolute_url'] = comment.absolute_url()
        info['getAttachments'] = comment.getAttachments()
        info['canEdit'] = checkPermission(permissions.EditComment, comment)
        info['canDelete'] = checkPermission(permissions.DeleteComment, comment)
        info['canReply'] = checkPermission(permissions.AddComment, comment)
        info['getObject'] = comment
        info['workflowActions'] = actions['workflow']
        info['review_state'] = self.portal_workflow.getInfoFor(comment, 'review_state')
        info['reviewStateTitle'] = self.plone_utils.getReviewStateTitleFor(comment)
        info['UID'] = comment.UID()
        return info

    def toPloneboardTime(self, time_=None):
        """Return time formatted for Ploneboard."""
        return toPloneboardTime(self.context, self.request, time_)
class CommentView(CommentViewableView):
    """A view for getting information about one specific comment.
    """

    interface.implements(ICommentView)

    def comment(self):
        """Return a dict describing the current comment."""
        return self._buildDict(self.context)

    def author(self):
        """Return the name of the author of this comment.

        Falls back to the creator userid when no member info (or no
        fullname/username) is available.
        """
        creator = self.context.Creator()
        info = self.portal_membership.getMemberInfo(creator)
        if info is None:
            return creator
        return info.get('fullname') or info.get('username') or creator

    def quotedBody(self):
        """Return the body of the comment, quoted for a reply.

        Returns an empty string when the comment has no body text.
        """
        text = self.context.getText()
        if not text:
            return ''
        # Reuse the already-transformed body instead of calling
        # getText() a second time -- each call re-runs the portal
        # comment transform.
        # NOTE(review): the trailing "</br>" below is malformed HTML
        # but is kept byte-identical to preserve output.
        return _("label_quote", u"Previously ${author} wrote: ${quote}",
                 {"author": unicode(self.author(), 'utf-8'),
                  "quote": unicode("<blockquote>%s</blockquote></br>" % text, 'utf-8')})
class ConversationView(CommentView):
    """A view component for querying conversations.
    """

    interface.implements(IConversationView)

    def conversation(self):
        """Return attachment-policy info for the current conversation."""
        checkPermission = self.portal_membership.checkPermission
        conv = self.context
        forum = conv.getForum()
        return {
            'maximumAttachments' : forum.getMaxAttachments(),
            'maximumAttachmentSize' : forum.getMaxAttachmentSize(),
            'canAttach': forum.getMaxAttachments()>0 and \
                         checkPermission(permissions.AddAttachment,conv),
            }

    def comments(self):
        """Return a Batch of comment dicts for the current batch page."""
        conv = self.context
        forum = conv.getForum()
        batchSize = forum.getConversationBatchSize()
        batchStart = self.request.get('b_start', 0)
        # b_start may arrive as a string, possibly with a '#fragment'
        # appended by the batching macros.  Use isinstance instead of
        # comparing type objects (also accepts str subclasses).
        if isinstance(batchStart, str):
            batchStart = int(batchStart.split('#')[0])
        numComments = conv.getNumberOfComments()
        return Batch(self._getComments, numComments, batchSize, batchStart, orphan=1)

    def root_comments(self):
        """Yield dicts for the conversation's root (top-level) comments."""
        for ob in self.context.getRootComments():
            yield self._buildDict(ob)

    def children(self, comment):
        """Yield dicts for the direct replies to ``comment``.

        ``comment`` may be a comment object or a dict produced by
        ``_buildDict`` (in which case the real object is extracted).
        """
        if isinstance(comment, dict):
            comment = comment['getObject']
        for ob in comment.getReplies():
            yield self._buildDict(ob)

    def _getComments(self, limit, offset):
        """Dictify comments before returning them to the batch
        """
        return [self._buildDict(ob)
                for ob in self.context.getComments(limit=limit, offset=offset)]
class RecentConversationsView(CommentViewableView):
    """Find recent conversations
    """

    def __init__(self, context, request):
        Five.BrowserView.__init__(self, context, request)
        self.portal_workflow = cmf_utils.getToolByName(self.context, 'portal_workflow')
        self.plone_utils = cmf_utils.getToolByName(self.context, 'plone_utils')
        self.portal_catalog = cmf_utils.getToolByName(self.context, 'portal_catalog')
        self.portal_membership = cmf_utils.getToolByName(self.context, 'portal_membership')

    def num_conversations(self):
        """Return the total number of conversations below the context."""
        catalog = self.portal_catalog
        results = catalog(object_provides='Products.Ploneboard.interfaces.IConversation',
                          path='/'.join(self.context.getPhysicalPath()),)
        return len(results)

    def results(self, limit=20, offset=0):
        """Return display dicts for a slice of conversations, most
        recently modified first.

        Conversations without any comment are dropped (``_buildDict``
        returns None for them).
        """
        catalog = self.portal_catalog
        results = catalog(object_provides='Products.Ploneboard.interfaces.IConversation',
                          sort_on='modified',
                          sort_order='reverse',
                          sort_limit=(offset+limit),
                          path='/'.join(self.context.getPhysicalPath()))[offset:offset+limit]
        return filter(None, [self._buildDict(r.getObject()) for r in results])

    def _buildDict(self, ob):
        """Build the display dict for one conversation, or None when the
        conversation has no last comment."""
        forum = ob.getForum()
        wfstate = self.portal_workflow.getInfoFor(ob, 'review_state')
        wfstate = self.plone_utils.normalizeString(wfstate)
        # Prefer the member's full name over the raw creator id.
        creator = ob.Creator()
        creatorInfo = self.portal_membership.getMemberInfo(creator)
        if creatorInfo is not None and creatorInfo.get('fullname', "") != "":
            creator = creatorInfo['fullname']
        lastComment = ob.getLastComment()
        if lastComment is None:
            return None
        canAccessLastComment = self.portal_membership.checkPermission('View', lastComment)
        lastCommentCreator = lastComment.Creator()
        creatorInfo = self.portal_membership.getMemberInfo(lastCommentCreator)
        if creatorInfo is not None and creatorInfo.get('fullname', '') != "":
            lastCommentCreator = creatorInfo['fullname']
        return { 'Title': ob.title_or_id(),
                 'Description' : ob.Description(),
                 'absolute_url': ob.absolute_url(),
                 'forum_title' : forum.title_or_id(),
                 'forum_url' : forum.absolute_url(),
                 'review_state_normalized' : wfstate,
                 'num_comments' : ob.getNumberOfComments(),
                 'creator' : creator,
                 'last_comment_id' : lastComment.getId(),
                 'last_comment_creator' : lastCommentCreator,
                 'last_comment_date' : lastComment.created(),
                 'can_access_last_comment' : canAccessLastComment,
                 'is_new' : self._is_new(ob.modified()),
                 }

    def _is_new(self, modified):
        """Return whether ``modified`` is newer than the member's last
        login time.

        The last login time is cached on the view; ``[]`` is used as the
        "not yet looked up" sentinel, None means anonymous.
        """
        llt = getattr(aq_base(self), '_last_login_time', [])
        if llt == []:
            m = self.portal_membership.getAuthenticatedMember()
            if m.has_role('Anonymous'):
                llt = self._last_login_time = None
            else:
                llt = self._last_login_time = m.getProperty('last_login_time', 0)
        if llt is None: # not logged in
            return False
        elif llt == 0: # never logged in before
            return True
        else:
            return (modified >= DateTime(llt))
class UnansweredConversationsView(RecentConversationsView):
    """Find conversations that have exactly one comment (no replies)."""

    def num_conversations(self):
        """Return the number of unanswered conversations below the context."""
        query = dict(
            object_provides='Products.Ploneboard.interfaces.IConversation',
            num_comments=1,
            path='/'.join(self.context.getPhysicalPath()),
        )
        return len(self.portal_catalog(**query))

    def results(self, limit=20, offset=0):
        """Return display dicts for a slice of unanswered conversations,
        most recently modified first."""
        query = dict(
            object_provides='Products.Ploneboard.interfaces.IConversation',
            num_comments=1,
            sort_on='modified',
            sort_order='reverse',
            sort_limit=(offset+limit),
            path='/'.join(self.context.getPhysicalPath()),
        )
        brains = self.portal_catalog(**query)[offset:offset+limit]
        return [self._buildDict(brain.getObject()) for brain in brains]

    def _buildDict(self, ob):
        """Build the display dict for one conversation object."""
        forum = ob.getForum()
        state = self.plone_utils.normalizeString(
            self.portal_workflow.getInfoFor(ob, 'review_state'))
        # Prefer the member's full name over the raw creator id.
        creator = ob.Creator()
        info = self.portal_membership.getMemberInfo(creator)
        if info is not None and info.get('fullname', "") != "":
            creator = info['fullname']
        data = {}
        data['Title'] = ob.title_or_id()
        data['Description'] = ob.Description()
        data['created'] = ob.created()
        data['absolute_url'] = ob.absolute_url()
        data['forum_title'] = forum.title_or_id()
        data['forum_url'] = forum.absolute_url()
        data['review_state_normalized'] = state
        data['creator'] = creator
        data['is_new'] = self._is_new(ob.modified())
        return data
class DeleteCommentView(Five.BrowserView):
    """Delete the current comment. If the comment is the root comment
    of a conversation, delete the entire conversation instead.
    """

    def __call__(self):
        comment = self.context
        conversation = comment.getConversation()
        plone_utils = cmf_utils.getToolByName(comment, 'plone_utils')
        # A conversation with a single comment disappears entirely.
        if len(conversation.getComments()) == 1:
            forum = conversation.getForum()
            conversation.delete()
            plone_utils.addPortalMessage(_(u'Conversation deleted'))
            target = forum.absolute_url()
        else:
            comment.delete()
            plone_utils.addPortalMessage(_(u'Comment deleted'))
            target = conversation.absolute_url()
        self.request.response.redirect(target)
| Python |
from zope.interface import implements
from zope import schema
from zope.component import getUtility
from zope.component import getMultiAdapter
from zope.formlib.form import Fields
from plone.memoize.view import memoize
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.i18n.normalizer.interfaces import IIDNormalizer
from plone.app.portlets.portlets import base
from plone.portlets.interfaces import IPortletDataProvider
from Products.Ploneboard.utils import PloneboardMessageFactory as _
class IRecentConversationsPortlet(IPortletDataProvider):
    """A portlet which shows recent Ploneboard conversations.
    """

    # User-visible portlet heading.
    title = schema.TextLine(title=_(u"title_title",
                                    default=u"Portlet title"),
                            required=True,
                            default=u"Recent messages")

    # Maximum number of conversations listed in the portlet.
    count = schema.Int(title=_(u"title_count",
                               default=u"Number of items to display"),
                       description=_(u"help_count",
                                     default=u"How many items to list."),
                       required=True,
                       default=5)
class Assignment(base.Assignment):
    """Persistent portlet assignment storing the configuration."""

    implements(IRecentConversationsPortlet)

    # Class-level defaults, used when nothing is supplied at creation.
    title = u"Recent messages"
    count = 5

    def __init__(self, title=None, count=None):
        # Only override the class defaults for values actually given.
        for name, value in (('title', title), ('count', count)):
            if value is not None:
                setattr(self, name, value)
class Renderer(base.Renderer):
    """Renderer for the recent-conversations portlet."""

    def __init__(self, context, request, view, manager, data):
        base.Renderer.__init__(self, context, request, view, manager, data)

    @memoize
    def results(self):
        """Return display dicts for the most recently modified
        conversations (at most ``data.count``)."""
        ct=getToolByName(self.context, "portal_catalog")
        normalize=getUtility(IIDNormalizer).normalize
        icons=getMultiAdapter((self.context, self.request),
                              name="plone").icons_visible()
        if icons:
            # Only needed to build icon URLs; when icons are hidden,
            # morph() below short-circuits before touching ``portal``.
            portal=getMultiAdapter((self.context, self.request),
                                   name="plone_portal_state").portal_url()+"/"
        brains=ct(
            object_provides="Products.Ploneboard.interfaces.IConversation",
            sort_on="modified",
            sort_order="reverse",
            sort_limit=self.data.count)[:self.data.count]

        def morph(brain):
            # Convert a catalog brain into the dict the template needs.
            obj=brain.getObject()
            forum=obj.getForum()
            return dict(
                title = brain.Title,
                description = brain.Description,
                url = brain.getURL()+"/view",
                icon = icons and portal+brain.getIcon or None,
                forum_url = forum.absolute_url(),
                forum_title = forum.title_or_id(),
                review_state = normalize(brain.review_state),
                portal_type = normalize(brain.portal_type),
                date = brain.modified)
        return [morph(brain) for brain in brains]

    @property
    def available(self):
        # The portlet is hidden when there is nothing to show.
        return len(self.results())>0

    def update(self):
        self.conversations=self.results()

    @property
    def title(self):
        return self.data.title

    @property
    def next_url(self):
        # Link target for the portlet footer.
        state=getMultiAdapter((self.context, self.request),
                              name="plone_portal_state")
        return state.portal_url()+"/ploneboard_recent"

    render = ViewPageTemplateFile("recent.pt")
class AddForm(base.AddForm):
    """Add form for the recent-conversations portlet."""

    form_fields = Fields(IRecentConversationsPortlet)
    label = _(u"label_add_portlet",
              default=u"Add recent conversations portlet.")
    description = _(u"help_add_portlet",
                    default=u"This portlet shows conversations with recent comments.")

    def create(self, data):
        """Create a portlet assignment from the validated form data."""
        return Assignment(title=data.get("title"), count=data.get("count"))
class EditForm(base.EditForm):
    """Edit form for the recent-conversations portlet."""

    # NOTE(review): label/description reuse the "add" message ids and
    # defaults -- looks like a copy/paste from the add form; confirm
    # that the "Add ..." wording on the edit form is intended.
    form_fields = Fields(IRecentConversationsPortlet)
    label = _(u"label_add_portlet",
              default=u"Add recent conversations portlet.")
    description = _(u"help_add_portlet",
                    default=u"This portlet shows conversations with recent comments.")
| Python |
# Poof
| Python |
from zope.interface import implements
from AccessControl import ClassSecurityInfo
from Acquisition import aq_inner, aq_chain
from DateTime import DateTime
from OFS.Image import File
from Products.CMFCore.utils import getToolByName
from Products.Archetypes.public import BaseBTreeFolderSchema, Schema, TextField, ReferenceField
from Products.Archetypes.public import BaseBTreeFolder, registerType
from Products.Archetypes.public import RichWidget, ReferenceWidget
from Products.Archetypes.utils import shasattr
from Products.Ploneboard.config import PROJECTNAME, REPLY_RELATIONSHIP
from Products.CMFPlone.utils import _createObjectByType
from Products.Ploneboard.permissions import ViewBoard, AddComment, \
EditComment, AddAttachment, ManageComment, DeleteComment
from Products.Ploneboard.interfaces import IConversation, IComment
from Products.Ploneboard import utils
from Products.CMFPlone.interfaces.NonStructuralFolder \
import INonStructuralFolder as ZopeTwoINonStructuralFolder
from Products.CMFPlone.interfaces.structure import INonStructuralFolder
from Products.Archetypes.event import ObjectInitializedEvent
from zope import event
# Clone the stock folder schema so the title field can be guarded by
# Ploneboard's own view/edit permissions.
PBCommentBaseBTreeFolderSchema = BaseBTreeFolderSchema.copy()
PBCommentBaseBTreeFolderSchema['title'].read_permission = ViewBoard
PBCommentBaseBTreeFolderSchema['title'].write_permission = EditComment

schema = PBCommentBaseBTreeFolderSchema + Schema((
    # Main comment body; stored as HTML, rendered through the safe-HTML
    # output transform.
    TextField('text',
              searchable = 1,
              default_content_type = 'text/html',
              default_output_type = 'text/x-html-safe',
              allowable_content_types=('text/html',
                                       'text/plain'),
              accessor='getText',
              read_permission = ViewBoard,
              write_permission = EditComment,
              widget = RichWidget(description = "Enter comment body.",
                                  description_msgid = "help_text",
                                  label = "Text",
                                  label_msgid = "label_text",
                                  rows = 5,
                                  helper_css = ('ploneboard.css',)
                                  )),
    # Archetypes reference to the comment this one replies to.
    ReferenceField(
        name='reply_to',
        accessor='inReplyTo', # Suboptimal accessor naming here...
        edit_accessor='inReplyToUID',
        mutator='setInReplyTo',
        relationship=REPLY_RELATIONSHIP,
        widget=ReferenceWidget(visible=False),
        ),
    ))
utils.finalizeSchema(schema)
class PloneboardComment(BaseBTreeFolder):
    """A comment contains regular text body and metadata."""
    # Use RichDocument pattern for attachments
    # Don't inherit from btreefolder...

    implements(IComment, INonStructuralFolder)
    __implements__ = (BaseBTreeFolder.__implements__, ZopeTwoINonStructuralFolder)

    meta_type = 'PloneboardComment'
    schema = schema

    _replies = None       # OIBTree: { id -> 1 }
    _reply_count = None   # A BTrees.Length
    _in_reply_to = None   # Id to comment this is a reply to

    security = ClassSecurityInfo()

    def __init__(self, oid, **kwargs):
        BaseBTreeFolder.__init__(self, oid, **kwargs)
        # Record when the comment was created.
        self.creation_date = DateTime()

    security.declareProtected(EditComment, 'edit')
    def edit(self, **kwargs):
        """Alias for update()
        """
        self.update(**kwargs)

    security.declareProtected(ViewBoard, 'getConversation')
    def getConversation(self):
        """Returns containing conversation."""
        # Try containment: walk up the acquisition chain until an
        # IConversation is found; None if the comment is orphaned.
        stoptypes = ['Plone Site']
        for obj in aq_chain(aq_inner(self)):
            if hasattr(obj, 'portal_type') and obj.portal_type not in stoptypes:
                if IConversation.providedBy(obj):
                    return obj
        return None
    security.declareProtected(AddComment, 'addReply')
    def addReply(self,
                 title,
                 text,
                 creator=None,
                 files=None ):
        """Add a reply to this comment.

        Creates a new comment in the containing conversation, links it
        to this one via the reply reference, and returns it.
        """
        conv = self.getConversation()
        id = conv.generateId(prefix='')
        if not title:
            # Fall back to the conversation title, prefixed with 'Re:'.
            title = conv.Title()
            if not title.lower().startswith('re:'):
                title = 'Re: ' + title
        m = _createObjectByType(self.portal_type, conv, id)
        event.notify(ObjectInitializedEvent(m))
        # XXX: There is some permission problem with AT write_permission
        # and using **kwargs in the _createObjectByType statement.
        m.setTitle(title)
        m.setText(text)
        m.setInReplyTo(self.UID())
        if creator is not None:
            m.setCreators([creator])
        # Create files in message
        if files:
            for file in files:
                # Get raw filedata, not persistent object with reference to tempstorage
                # file.data might in fact be OFS.Image.Pdata - str will piece it all together
                attachment = File(file.getId(), file.title_or_id(), str(file.data), file.getContentType())
                m.addAttachment(attachment)
        # If this comment is being added by anonymous, make sure that the true
        # owner in zope is the owner of the forum, not the parent comment or
        # conversation. Otherwise, that owner may be able to view or delete
        # the comment.
        membership = getToolByName(self, 'portal_membership')
        if membership.isAnonymousUser():
            forum = self.getConversation().getForum()
            utils.changeOwnershipOf(m, forum.owner_info()['id'], False)
        m.reindexObject()
        conv.reindexObject() # Sets modified
        return m
    security.declareProtected(AddComment, 'deleteReply')
    def deleteReply(self, comment):
        """ Removes comment from the replies index """
        comment.deleteReference(self, REPLY_RELATIONSHIP)

    security.declareProtected(ViewBoard, 'getReplies')
    def getReplies(self):
        """Returns the comments that were replies to this one."""
        # Return backreferences
        return self.getBRefs(REPLY_RELATIONSHIP)

    security.declareProtected(ViewBoard, 'getTitle')
    def getTitle(self):
        """Returns the subject of the comment."""
        return self.Title()

    def childIds(self, level=0):
        """
        Returns list of ids of all child comments, excluding this comment.
        """
        # The top-level call (level 0) omits this comment's own id;
        # recursive calls include the id of their subtree root.
        if level == 0:
            result = []
        else:
            result = [self.getId()]
        replies = self.getReplies()
        if replies:
            for msg_object in replies:
                result = result + msg_object.childIds(level+1)
        return result
    security.declareProtected(ManageComment, 'makeBranch')
    def makeBranch(self):
        """Split this comment and its descendants off into a brand new
        conversation in the same forum, and return that conversation."""
        # Contains mappings - old_msg_id -> new_msg_id
        ids = {}
        parent = self.getConversation()
        forum = parent.getForum()
        conv = forum.addConversation(self.getTitle(), self.getText())
        # here we get id of the first Comment in newly created Conversation
        first_msg_id = conv.objectIds()[0]
        ids.update({self.getId() : first_msg_id})
        # Re-create every descendant under its (already copied) parent.
        objects = map(parent.getComment, self.childIds())
        for obj in objects:
            replyId = obj.inReplyTo().getId()
            comment = conv.getComment(ids.get(replyId))
            msg = comment.addReply(obj.getTitle(), obj.getText())
            ids.update({obj.getId() : msg.getId()})
            # Here we need to set some fields from old objects
            # What else should we update?
            msg.creation_date = obj.creation_date
            msg.setEffectiveDate(obj.EffectiveDate())
            msg.setExpirationDate(obj.ExpirationDate())
            msg.creator = obj.Creator()
        # manually delete all replies
        for msgid in self.childIds():
            parent._delObject(msgid)
        parent._delObject(self.getId()) # delete ourselves and all our descendants
        # if conversation after branching is empty, remove it
        if parent.getNumberOfComments() == 0:
            forum._delObject(parent.getId())
        # we need to reindex stuff in newly created Conversation
        #for o in conv.objectValues():
        #    o.reindexObject()
        return conv

    ###########################
    #   Attachment support    #
    ###########################

    def attachmentFilter(self):
        """Content-type filter used to locate attachments inside this
        comment."""
        return { 'portal_type' : [
                     'File', 'Image',
                     'ImageAttachment', 'FileAttachment'
                     ],
                 }
    security.declareProtected(ViewBoard, 'hasAttachment')
    def hasAttachment(self):
        """Return 0 or 1 if this comment has attachments."""
        # Double negation coerces the id list to a boolean.
        return not not self.objectIds(filter=self.attachmentFilter())

    security.declareProtected(AddAttachment, 'validateAddAttachment')
    def validateAddAttachment(self, file):
        """Return True when ``file`` may be attached: the attachment
        count limit is not yet reached and the file fits the forum's
        size limit (in kB)."""
        def FileSize(file):
            # Best-effort size probe, in kilobytes: use the ``size``
            # attribute, seek to the end of a file-like object, or fall
            # back to len(); 0 when nothing works.
            if hasattr(file, 'size'):
                size=file.size
            elif hasattr(file, 'tell'):
                file.seek(0, 2)
                size=file.tell()
                file.seek(0)
            else:
                try:
                    size=len(file)
                except TypeError:
                    size=0
            return size/1024

        if self.getNumberOfAttachments()>=self.getNumberOfAllowedAttachments():
            return False
        maxsize=self.getConversation().getMaxAttachmentSize()
        if maxsize!=-1:
            # -1 means "no size limit".
            if FileSize(file)>maxsize:
                return False
        return True
    security.declareProtected(AddAttachment, 'addAttachment')
    def addAttachment(self, file, title=None):
        """Attach ``file`` to this comment.

        Images become ImageAttachment objects, everything else a
        FileAttachment.  Raises ValueError when validation fails.
        """
        if not self.validateAddAttachment(file):
            raise ValueError, "Attachment could not be added"
        content_type = file.getContentType()
        if content_type.startswith('image/'):
            type_name = 'ImageAttachment'
            mutator = 'setImage'
        else:
            type_name = 'FileAttachment'
            mutator = 'setFile'
        attachment = _createObjectByType(type_name, self, file.getId(),
                                         title=file.title)
        event.notify(ObjectInitializedEvent(attachment))
        getattr(attachment, mutator)(file)
        if title is not None:
            attachment.setTitle(title)
        attachment.unmarkCreationFlag()
        if shasattr(attachment, 'at_post_create_script'):
            attachment.at_post_create_script()

    security.declareProtected(AddAttachment, 'removeAttachment')
    def removeAttachment(self, id):
        """Delete the attachment with the given id."""
        self._delObject(id)

    security.declareProtected(ViewBoard, 'getAttachment')
    def getAttachment(self, id):
        """Return the attachment with the given id."""
        return getattr(self, id)

    security.declareProtected(ViewBoard, 'getAttachments')
    def getAttachments(self):
        """Return all attachment objects of this comment."""
        return self.contentValues(filter=self.attachmentFilter())

    security.declareProtected(ViewBoard, 'getNumberOfAttachments')
    def getNumberOfAttachments(self):
        # Count ids only; cheaper than loading the objects.
        return len(self.contentIds(filter=self.attachmentFilter()))

    security.declareProtected(AddAttachment, 'getNumberOfAllowedAttachments')
    def getNumberOfAllowedAttachments(self):
        """
        Returns number of allowed attachments
        """
        parent = self.getConversation()
        forum = parent.getForum()
        return forum.getMaxAttachments()
    ############################################

    security.declareProtected(ViewBoard, 'getText')
    def getText(self, mimetype=None, **kwargs):
        """Return the comment body run through the Ploneboard comment
        transform."""
        # Maybe we need to set caching for transform?
        unit=self.Schema()["text"].getBaseUnit(self)
        raw = unit.getRaw()
        content_type = unit.getContentType()
        pb_tool = getToolByName(self, 'portal_ploneboard')
        return pb_tool.performCommentTransform(raw, context=self,
                                               content_type=content_type)

    security.declareProtected(ViewBoard, 'Description')
    def Description(self, **kwargs):
        """We have to override Description here to handle arbitrary
        arguments since PortalFolder defines it."""
        if kwargs.get('mimetype', None) is None:
            kwargs['mimetype'] = 'text/plain'
        return self.getField('text').get(self, **kwargs)

    security.declareProtected(DeleteComment, "delete")
    def delete(self):
        """Delete this comment and make sure all comment replies to this
        comment are also cleaned up.
        """
        # Reparent replies to this comment's parent so the thread
        # structure survives the deletion.
        parent_comment = self.inReplyTo()
        for reply in self.getReplies():
            reply.setInReplyTo(parent_comment)
            reply.reindexObject()
        conversation = self.getConversation()
        conversation._delObject(self.getId())
        conversation.reindexObject()

    def __nonzero__(self):
        # Always truthy, even when the (folderish) comment is empty.
        return 1

    def __str__(self):
        return "<PloneboardComment: title=%r;>" % self.Title()
    __repr__ = __str__

    security.declareProtected(DeleteComment, "object_delete")
    def object_delete(self):
        """Delete the comment the 'proper' way.
        """
        return self.restrictedTraverse('@@delete_view')()
# Register the content type with Archetypes.
registerType(PloneboardComment, PROJECTNAME)
| Python |
from zope.interface import implements
from AccessControl import ClassSecurityInfo
from Acquisition import aq_inner, aq_chain
from OFS.CopySupport import _cb_decode, _cb_encode, CopyContainer, CopyError
from OFS.Image import File
from OFS.Moniker import Moniker
from Products.CMFCore.utils import getToolByName
from Products.Archetypes.public import BaseBTreeFolderSchema, Schema, TextField
from Products.Archetypes.public import BaseBTreeFolder, registerType
from Products.Archetypes.public import TextAreaWidget
from Products.Ploneboard.config import PROJECTNAME
from Products.CMFPlone.utils import _createObjectByType
from Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin
from Products.Ploneboard.permissions import (
ViewBoard, AddComment, ManageConversation, EditComment, MergeConversation)
from Products.Ploneboard.interfaces import IForum, IConversation, IComment
from Products.Ploneboard import utils
from Products.CMFPlone.interfaces.NonStructuralFolder \
import INonStructuralFolder as ZopeTwoINonStructuralFolder
from Products.CMFPlone.interfaces.structure import INonStructuralFolder
from Products.Archetypes.event import ObjectInitializedEvent
from zope import event
# Clone the stock folder schema so the title field can be guarded by
# Ploneboard's own view/edit permissions.
PBConversationBaseBTreeFolderSchema = BaseBTreeFolderSchema.copy()
PBConversationBaseBTreeFolderSchema['title'].read_permission = ViewBoard
PBConversationBaseBTreeFolderSchema['title'].write_permission = EditComment

schema = PBConversationBaseBTreeFolderSchema + Schema((
    # Plain-text summary shown in conversation listings.
    TextField('description',
              searchable = 1,
              read_permission = ViewBoard,
              write_permission = EditComment,
              default_content_type = 'text/plain',
              default_output_type = 'text/plain',
              widget = TextAreaWidget(description = "Enter a brief description of the conversation.",
                                      description_msgid = "help_description_conversation",
                                      label = "Description",
                                      label_msgid = "label_description_conversation",
                                      i18n_domain = "ploneboard",
                                      rows = 5)),
    ))
utils.finalizeSchema(schema)
class PloneboardConversation(BrowserDefaultMixin, BaseBTreeFolder):
    """Conversation contains comments."""

    implements(IConversation, INonStructuralFolder)
    __implements__ = (BaseBTreeFolder.__implements__, BrowserDefaultMixin.__implements__, ZopeTwoINonStructuralFolder)

    meta_type = 'PloneboardConversation'
    schema = schema

    _at_rename_after_creation = True

    security = ClassSecurityInfo()

    def getCatalog(self):
        # Convenience accessor used by the catalog-backed queries below.
        return getToolByName(self, 'portal_catalog')

    security.declareProtected(ManageConversation, 'edit')
    def edit(self, **kwargs):
        """Alias for update()
        """
        self.update(**kwargs)

    security.declareProtected(ViewBoard, 'getTitle')
    def getTitle(self):
        """Get the title of this conversation"""
        return self.Title()

    security.declareProtected(ViewBoard, 'getForum')
    def getForum(self):
        """Returns containing forum."""
        # Try containment: walk up the acquisition chain until an
        # IForum is found; None if the conversation is orphaned.
        stoptypes = ['Plone Site']
        for obj in aq_chain(aq_inner(self)):
            if hasattr(obj, 'portal_type') and obj.portal_type not in stoptypes:
                if IForum.providedBy(obj):
                    return obj
        return None

    security.declareProtected(ManageConversation, 'removeComment')
    def removeComment( self, comment):
        """Delete ``comment`` from this conversation."""
        self.manage_delObjects([comment.getId()])
        # XXX reparent replies to this comment ?
    security.declareProtected(AddComment, 'addComment')
    def addComment( self, title, text, creator=None, files=None):
        """Adds a new comment with subject and body."""
        id = self.generateId(prefix='')
        if not title:
            # Fall back to the conversation title.
            title = self.Title()
        m = _createObjectByType('PloneboardComment', self, id)
        event.notify(ObjectInitializedEvent(m))
        # XXX: There is some permission problem with AT write_permission
        # and using **kwargs in the _createObjectByType statement.
        m.setTitle(title)
        m.setText(text)
        if creator is not None:
            m.setCreators([creator])
        # Create files in message
        if files:
            for file in files:
                # Get raw filedata, not persistent object with reference to tempstorage
                attachment = File(file.getId(), file.title_or_id(), str(file.data), file.getContentType())
                m.addAttachment(attachment)
        # If this comment is being added by anonymous, make sure that the true
        # owner in zope is the owner of the forum, not the parent comment or
        # conversation. Otherwise, that owner may be able to view or delete
        # the comment.
        membership = getToolByName(self, 'portal_membership')
        if membership.isAnonymousUser():
            forum = self.getForum()
            utils.changeOwnershipOf(m, forum.owner_info()['id'], False)
        m.indexObject()
        self.reindexObject() # Sets modified
        return m
    security.declareProtected(ViewBoard, 'getComment')
    def getComment(self, comment_id, default=None):
        """Returns the comment with the specified id."""
        #return self._getOb(comment_id, default)
        # NOTE(review): ``default`` is accepted but never used -- a
        # catalog miss always returns None; confirm intended.
        comments = self.getCatalog()(
            object_provides='Products.Ploneboard.interfaces.IComment',
            getId=comment_id)
        if comments:
            return comments[0].getObject()
        else:
            return None

    security.declareProtected(ViewBoard, 'getComments')
    def getComments(self, limit=30, offset=0, **kw):
        """
        Retrieves the specified number of comments with offset 'offset'.
        In addition there are kw args for sorting and retrieval options.
        """
        query = {'object_provides' : 'Products.Ploneboard.interfaces.IComment',
                 'sort_on' : 'created',
                 'sort_limit' : (offset+limit),
                 'path' : '/'.join(self.getPhysicalPath()),}
        # Caller-supplied criteria override the defaults above.
        query.update(kw)
        catalog=self.getCatalog()
        return [f.getObject() for f in catalog(**query)[offset:offset+limit]]

    security.declareProtected(ViewBoard, 'getNumberOfComments')
    def getNumberOfComments(self):
        """
        Returns the number of comments in this conversation.
        """
        return len(self.getCatalog()(
            object_provides='Products.Ploneboard.interfaces.IComment',
            path='/'.join(self.getPhysicalPath())))
security.declareProtected(ViewBoard, 'getLastCommentDate')
def getLastCommentDate(self):
"""
Returns a DateTime corresponding to the timestamp of the last comment
for the conversation.
"""
comment = self.getLastComment()
if comment:
return comment.created()
return None
security.declareProtected(ViewBoard, 'getLastCommentAuthor')
def getLastCommentAuthor(self):
"""
Returns the name of the author of the last comment.
"""
comment = self.getLastComment()
if comment:
return comment.Creator()
return None
security.declareProtected(ViewBoard, 'getLastComment')
def getLastComment(self):
"""
Returns the last comment as full object..
Returns None if there is no comment
"""
res = self.getCatalog()(
object_provides='Products.Ploneboard.interfaces.IComment', \
sort_on='created', sort_order='reverse', sort_limit=1,
path='/'.join(self.getPhysicalPath()))
if res:
return res[0].getObject()
return None
security.declareProtected(ViewBoard, 'getRootComments')
def getRootComments(self):
    """
    Return a list of all comments rooted to the conversation, i.e.
    comments which are not replies to other comments.

    Bug fix: the previous implementation relied on getComments()'s
    default limit of 30 and therefore silently dropped root comments
    in long conversations; fetch the full comment set instead.
    """
    all_comments = self.getComments(limit=self.getNumberOfComments())
    return [comment for comment in all_comments
            if comment.inReplyToUID() is None]
security.declareProtected(ViewBoard, 'getFirstComment')
def getFirstComment(self):
    """Return the oldest (root) comment of this conversation, or None
    when no comment exists.  See IConversation.getFirstComment.
    """
    brains = self.getCatalog()(
        object_provides='Products.Ploneboard.interfaces.IComment',
        path='/'.join(self.getPhysicalPath()),
        sort_on='created',
        sort_limit=1,
    )
    if not brains:
        return None
    return brains[0].getObject()
security.declareProtected(ManageConversation, 'moveToForum')
def moveToForum(self, forum_id):
    """Move this conversation into the forum named `forum_id` on the
    same board.  Unknown forum ids are silently ignored.
    """
    target = self.getForum().getBoard().getForum(forum_id)
    if target:
        source = self.getForum()
        clipboard = source.manage_cutObjects((self.getId(),))
        target.manage_pasteObjects(clipboard)
security.declareProtected(ManageConversation, 'delete')
def delete(self):
    """Remove this conversation from its containing forum."""
    self.getForum()._delObject(self.getId())
security.declareProtected(ViewBoard, 'Creator')
def Creator(self):
    """Return the creator userid.

    Prefers the legacy `_creator` attribute written by old Ploneboard
    versions; falls back to the standard BaseBTreeFolder accessor.
    """
    legacy = getattr(self, '_creator', None)
    if legacy:
        return legacy
    return BaseBTreeFolder.Creator(self)
def __nonzero__(self):
    # Folderish content can otherwise evaluate false when empty; always
    # report truth so `if conversation:` only tests for existence.
    return 1
# No setting of default page - makes no sense
def canSetDefaultPage(self):
    """Conversations never allow selecting a default page."""
    return False
security.declareProtected(MergeConversation, 'manage_pasteObjects')
def manage_pasteObjects(self, cp):
    """ merge another conversation """
    # `cp` is an encoded Zope clipboard cookie decoding to (op, [monikers]),
    # where op 0 == copy and op 1 == cut/move.
    try:
        op, mdatas = _cb_decode(cp)
    except:
        raise CopyError, "Invalid content"
    if op == 0:
        raise ValueError('Not allowed to copy content into conversation')
    if op != 1:
        raise ValueError, "Invalid operation of content"
    obj = self.unrestrictedTraverse(mdatas[0])
    if IConversation.providedBy(obj):
        # Merging a whole sibling conversation: both conversations must
        # live in the same forum.
        if obj.getParentNode() != self.getParentNode():
            raise ValueError, "Invalid parent of content"
        forum = obj.getForum()
        obj_id = obj.getId()
        # Re-encode the source conversation's comments as a fresh
        # cut-clipboard, paste them into this conversation, then remove
        # the now-empty source conversation from its forum.
        o_list = obj.objectValues()
        oblist = [Moniker(o1).dump() for o1 in o_list]
        cp = (1, oblist)
        cp = _cb_encode(cp)
        CopyContainer.manage_pasteObjects(self, cp)
        forum.manage_delObjects([obj_id])
    elif IComment.providedBy(obj):
        # Individual comments being moved in: defer to the stock paste.
        return CopyContainer.manage_pasteObjects(self, cp)
    else:
        raise ValueError('Invalid type of content')
registerType(PloneboardConversation, PROJECTNAME)
| Python |
from zope.interface import implements
from AccessControl import ClassSecurityInfo
from Products.CMFCore.utils import getToolByName
from Products.Archetypes.public import BaseBTreeFolderSchema, Schema, TextField, LinesField
from Products.Archetypes.public import BaseBTreeFolder, registerType
from Products.Archetypes.public import TextAreaWidget, LinesWidget
from Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin
from Products.Ploneboard.config import PROJECTNAME
from Products.Ploneboard.permissions import ViewBoard, SearchBoard, \
AddForum, ManageBoard
from Products.Ploneboard.content.PloneboardForum import PloneboardForum
from Products.Ploneboard.interfaces import IPloneboard
from Products.Ploneboard import utils
# Board schema: BTree folder base plus a plain-text description and the
# list of categories (one per line) offered to contained forums.
schema = BaseBTreeFolderSchema + Schema((
    TextField('description',
              searchable = 1,
              default_content_type = 'text/html',
              default_output_type = 'text/plain',
              widget = TextAreaWidget(description = "Enter a brief description of the board.",
                                      description_msgid = "help_description_board",
                                      i18n_domain = "ploneboard",
                                      label = "Description",
                                      label_msgid = "label_description_board",
                                      rows = 5)),
    LinesField('categories',
               widget = LinesWidget(
                   description = "Enter the categories you want to have available for forums, one category on each line.",
                   description_msgid = "help_categories_board",
                   label = "Categories",
                   label_msgid = "label_categories_board",
                   i18n_domain = "ploneboard")),
    ))

# Ploneboard-wide schema fixups (see Products.Ploneboard.utils.finalizeSchema).
utils.finalizeSchema(schema)
class Ploneboard(BrowserDefaultMixin, BaseBTreeFolder):
    """Ploneboard is the outmost board object, what shows up in your site."""

    implements(IPloneboard)
    __implements__ = (BrowserDefaultMixin.__implements__, BaseBTreeFolder.__implements__,)

    meta_type = 'Ploneboard'
    schema = schema

    _at_rename_after_creation = True

    security = ClassSecurityInfo()

    def getCatalog(self):
        # All forum lookups below go through the portal catalog.
        return getToolByName(self, 'portal_catalog')

    security.declareProtected(ManageBoard, 'edit')
    def edit(self, **kwargs):
        """Alias for update()
        """
        self.update(**kwargs)

    security.declareProtected(AddForum, 'addForum')
    def addForum(self, id, title, description):
        """Add a forum to the board and return it.

        XXX: Should be possible to parameterise the exact type that is being
        added.
        """
        kwargs = {'title' : title, 'description' : description}
        forum = PloneboardForum(id)
        self._setObject(id, forum)
        # Re-fetch so the forum is acquisition-wrapped before AT init.
        forum = getattr(self, id)
        forum.initializeArchetype(**kwargs)
        forum._setPortalTypeName('PloneboardForum')
        forum.notifyWorkflowCreated()
        # Enable topic syndication by default
        syn_tool = getToolByName(self, 'portal_syndication', None)
        if syn_tool is not None:
            if (syn_tool.isSiteSyndicationAllowed() and not syn_tool.isSyndicationAllowed(forum)):
                syn_tool.enableSyndication(forum)
        #forum.setDescription(description)
        return forum

    security.declareProtected(ViewBoard, 'getForums')
    def getForums(self, sitewide=False):
        """Return all the forums in this board (site-wide when `sitewide`
        is true)."""
        query = {'object_provides':'Products.Ploneboard.interfaces.IForum'}
        if not sitewide:
            query['path'] = '/'.join(self.getPhysicalPath())
        return [f.getObject() for f in self.getCatalog()(query)]

    security.declareProtected(ViewBoard, 'getForumIds')
    def getForumIds(self):
        """Return the ids of all forums site-wide."""
        # NOTE: `getId` here is a brain metadata column (a string), not
        # the getId() method of the forum object.
        return [f.getId for f in self.getCatalog()(
            object_provides='Products.Ploneboard.interfaces.IForum')]

    security.declareProtected(ManageBoard, 'removeForum')
    def removeForum(self, forum_id):
        """Remove a forum from this board."""
        self._delObject(forum_id)

    security.declareProtected(SearchBoard, 'searchComments')
    def searchComments(self, query):
        """This method searches through all forums, conversations and
        comments; `query` is a catalog query mapping."""
        return self.getCatalog()(**query)

    security.declarePublic('getForum')
    def getForum(self, forum_id):
        """Returns forum with specified forum id, or None."""
        #return getattr(self, forum_id, None)
        forums = self.getCatalog()(
            object_provides='Products.Ploneboard.interfaces.IForum',
            getId=forum_id)
        if forums:
            return forums[0].getObject()
        else:
            return None

    def __nonzero__(self):
        # Empty BTree folders evaluate false; boards must stay truthy.
        return 1
registerType(Ploneboard, PROJECTNAME)
| Python |
from zope.interface import implements
from AccessControl import ClassSecurityInfo
from Acquisition import aq_chain, aq_inner
from OFS.CopySupport import CopyContainer
from OFS.Image import File
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import _createObjectByType, log_deprecated
from Products.Archetypes.public import BaseBTreeFolderSchema, Schema
from Products.Archetypes.public import TextField, LinesField, IntegerField
from Products.Archetypes.public import BaseBTreeFolder, registerType
from Products.Archetypes.public import TextAreaWidget, MultiSelectionWidget, IntegerWidget, SelectionWidget
from Products.Archetypes.public import DisplayList
from Products.Ploneboard.config import PROJECTNAME, HAS_SIMPLEATTACHMENT
from Products.Ploneboard.permissions import ViewBoard, ManageForum, AddConversation, MoveConversation
from Products.Ploneboard.interfaces import IPloneboard, IForum
from Products.Ploneboard.interfaces import IConversation, IComment
from Products.Ploneboard import utils
from Products.CMFPlone.interfaces.NonStructuralFolder \
import INonStructuralFolder as ZopeTwoINonStructuralFolder
from Products.CMFPlone.interfaces.structure import INonStructuralFolder
from Products.Archetypes.event import ObjectInitializedEvent
from zope import event
# Forum schema: folder base plus description, board-category selection and
# attachment limits (count and size).
schema = BaseBTreeFolderSchema + Schema((
    TextField('description',
              searchable = 1,
              default_content_type = 'text/html',
              default_output_type = 'text/plain',
              widget = TextAreaWidget(
                  description = "Brief description of the forum topic.",
                  description_msgid = "help_description_forum",
                  label = "Description",
                  label_msgid = "label_description_forum",
                  i18n_domain = "ploneboard",
                  rows = 5)),
    LinesField('category',
               write_permission = ManageForum,
               vocabulary = 'getCategories',
               widget = MultiSelectionWidget(
                   description = "Select which category the forum should be listed under. A forum can exist in multiple categories, although using only one category is recommended.",
                   description_msgid = "help_category",
                   condition="object/getCategories",
                   label = "Category",
                   label_msgid = "label_category",
                   i18n_domain = "ploneboard",
                   )),
    IntegerField('maxAttachments',
                 write_permission = ManageForum,
                 default = 1,
                 widget = IntegerWidget(
                     description = "Select the maximum number of attachments per comment.",
                     description_msgid = "help_maxattachments",
                     label = "Maximum number of attachments",
                     label_msgid = "label_maxattachments",
                     i18n_domain = "ploneboard",
                     )),
    IntegerField('maxAttachmentSize',
                 write_permission = ManageForum,
                 vocabulary = 'getAttachmentSizes',
                 default = 100,
                 widget = SelectionWidget(
                     description = "Select the maximum size for attachments.",
                     description_msgid = "help_maxattachmentsize",
                     label = "Maximum attachment size",
                     label_msgid = "label_maxattachmentsize",
                     i18n_domain = "ploneboard",
                     )),
    ))

# Ploneboard-wide schema fixups (see Products.Ploneboard.utils.finalizeSchema).
utils.finalizeSchema(schema)
# Without SimpleAttachment installed, lock the attachment count to 0 and
# hide both attachment fields from edit and view forms.
if not HAS_SIMPLEATTACHMENT:
    schema['maxAttachments'].mode="r"
    schema['maxAttachments'].default=0
    schema['maxAttachments'].widget.visible={'edit' : 'invisible', 'view' : 'invisible' }
    schema['maxAttachmentSize'].widget.visible={'edit' : 'invisible', 'view' : 'invisible' }
class PloneboardForum(BaseBTreeFolder):
    """A Forum contains conversations."""

    implements(IForum, INonStructuralFolder)
    __implements__ = (BaseBTreeFolder.__implements__, ZopeTwoINonStructuralFolder)

    meta_type = 'PloneboardForum'
    schema = schema

    _at_rename_after_creation = True

    security = ClassSecurityInfo()

    def getCatalog(self):
        # All conversation/comment lookups below are catalog based.
        return getToolByName(self, 'portal_catalog')

    security.declareProtected(ManageForum, 'edit')
    def edit(self, **kwargs):
        """Alias for update()
        """
        self.update(**kwargs)

    security.declarePublic('synContentValues')
    def synContentValues(self):
        """Items to include in this forum's syndication feed."""
        return (self.getConversations())

    security.declareProtected(ViewBoard, 'getBoard')
    def getBoard(self):
        """Returns containing or nearest board, or None."""
        # Try containment
        stoptypes = ['Plone Site']
        for obj in aq_chain(aq_inner(self)):
            if hasattr(obj, 'portal_type') and obj.portal_type not in stoptypes:
                if IPloneboard.providedBy(obj):
                    return obj
        return None

    security.declareProtected(AddConversation, 'addConversation')
    def addConversation(self, title, text=None, creator=None, files=None, **kwargs):
        """Adds a new conversation to the forum.

        When `text` or `files` is given, a first comment carrying them is
        created inside the new conversation as well.

        XXX should be possible to parameterise the exact type that is being
        added.
        """
        id = self.generateId(prefix='')
        conv = _createObjectByType('PloneboardConversation', self, id)
        event.notify(ObjectInitializedEvent(conv))
        # XXX: There is some permission problem with AT write_permission
        # and using **kwargs in the _createObjectByType statement.
        conv.setTitle(title)
        if creator is not None:
            conv.setCreators([creator])
        if text is not None or files:
            m = _createObjectByType('PloneboardComment', conv, conv.generateId(prefix=''))
            event.notify(ObjectInitializedEvent(m))
            # XXX: There is some permission problem with AT write_permission
            # and using **kwargs in the _createObjectByType statement.
            m.setTitle(title)
            if text is not None:
                m.setText(text)
            if creator is not None:
                m.setCreators([creator])
            # Create files in message
            if files:
                for file in files:
                    # Get raw filedata, not persistent object with reference to tempstorage
                    attachment = File(file.getId(), file.title_or_id(), str(file.data), file.getContentType())
                    m.addAttachment(attachment)
            m.reindexObject()
        conv.reindexObject()
        return conv

    security.declareProtected(ViewBoard, 'getConversation')
    def getConversation(self, conversation_id, default=None):
        """Returns the conversation with the given conversation id."""
        # NOTE(review): `default` is no longer honoured by this
        # catalog-based lookup; misses always return None.
        #return self._getOb(conversation_id, default)
        catalog = self.getCatalog()
        conversations = catalog(
            object_provides=IConversation.__identifier__,
            getId=conversation_id,
            path='/'.join(self.getPhysicalPath()))
        if conversations:
            return conversations[0].getObject()
        else:
            return None

    security.declareProtected(ManageForum, 'removeConversation')
    def removeConversation(self, conversation_id):
        """Removes a conversation with the given conversation id from the forum."""
        self._delObject(conversation_id)

    security.declareProtected(ViewBoard, 'getConversations')
    def getConversations(self, limit=20, offset=0):
        """Returns conversations, most recently modified first."""
        log_deprecated("Products.Ploneboard.content.PloneboardForum.PloneboardForum.getConversations is deprecated in favor of Products.Ploneboard.browser.forum.ForumView.getConversations")
        catalog = self.getCatalog()
        return [f.getObject() for f in \
                catalog(object_provides=IConversation.__identifier__,
                        sort_on='modified',
                        sort_order='reverse',
                        sort_limit=(offset+limit),
                        path='/'.join(self.getPhysicalPath()))[offset:offset+limit]]

    security.declareProtected(ViewBoard, 'getNumberOfConversations')
    def getNumberOfConversations(self):
        """Returns the number of conversations in this forum."""
        log_deprecated("Products.Ploneboard.content.PloneboardForum.PloneboardForum.getNumberOfConversations is deprecated in favor of Products.Ploneboard.browser.forum.ForumView.getNumberOfConversations")
        return len(self.getCatalog()(
            object_provides=IConversation.__identifier__,
            path='/'.join(self.getPhysicalPath())))

    security.declareProtected(ViewBoard, 'getNumberOfComments')
    def getNumberOfComments(self):
        """Returns the number of comments to this forum."""
        log_deprecated("Products.Ploneboard.content.PloneboardForum.PloneboardForum.getNumberOfComments is deprecated in favor of Products.Ploneboard.browser.forum.ForumView.getNumberOfComments")
        return len(self.getCatalog()(
            object_provides='Products.Ploneboard.interfaces.IComment',
            path='/'.join(self.getPhysicalPath())))

    security.declareProtected(ViewBoard, 'getLastConversation')
    def getLastConversation(self):
        """
        Returns the most recently created conversation, or None.
        """
        # XXX Is Created or Modified the most interesting part?
        res = self.getCatalog()(
            object_provides=IConversation.__identifier__,
            sort_on='created', sort_order='reverse', sort_limit=1,
            path='/'.join(self.getPhysicalPath()))
        if res:
            return res[0].getObject()
        else:
            return None

    security.declareProtected(ViewBoard, 'getLastCommentDate')
    def getLastCommentDate(self):
        """
        Returns a DateTime corresponding to the timestamp of the last comment
        for the forum, or None when there are no comments.
        """
        res = self.getCatalog()(
            object_provides='Products.Ploneboard.interfaces.IComment',
            sort_on='created', sort_order='reverse', sort_limit=1,
            path='/'.join(self.getPhysicalPath()))
        if res:
            # Brain metadata attribute (no object wake-up needed).
            return res[0].created
        else:
            return None

    security.declareProtected(ViewBoard, 'getLastCommentAuthor')
    def getLastCommentAuthor(self):
        """
        Returns the name of the author of the last comment, or None.
        """
        res = self.getCatalog()(
            object_provides='Products.Ploneboard.interfaces.IComment',
            sort_on='created', sort_order='reverse', sort_limit=1,
            path='/'.join(self.getPhysicalPath()))
        if res:
            # Brain metadata attribute (no object wake-up needed).
            return res[0].Creator
        else:
            return None

    # Vocabularies
    security.declareProtected(ViewBoard, 'getCategories')
    def getCategories(self):
        """Vocabulary for the 'category' field: the categories configured
        on the containing board, sorted alphabetically."""
        value = []
        board = self.getBoard()
        if board is not None and hasattr(board, 'getCategories'):
            categories = board.getCategories()
            if categories is not None:
                value = [(c,c) for c in categories]
                value.sort()
        return DisplayList(value)

    security.declareProtected(ViewBoard, 'getAttachmentSizes')
    def getAttachmentSizes(self):
        """Vocabulary for the maxAttachmentSize field (values are in
        kilobytes; -1 means unlimited)."""
        voc = DisplayList()
        voc.add(10, '10 kilobyte')
        voc.add(100, '100 kilobyte')
        voc.add(1000, '1 megabyte')
        voc.add(10000, '10 megabyte')
        voc.add(-1, 'unlimited')
        return voc

    security.declarePublic('getConversationBatchSize')
    def getConversationBatchSize(self):
        """Batch size for conversation listings.

        A per-forum property '<forum id>_ConversationBatchSize' on the
        ploneboard tool wins over the global 'ConversationBatchSize';
        defaults to 30.
        """
        pb_tool = getToolByName(self, 'portal_ploneboard')
        prop_name = "%s_ConversationBatchSize" % self.getId()
        if pb_tool.hasProperty(prop_name):
            return pb_tool.getProperty(prop_name)
        prop_name = "ConversationBatchSize"
        if pb_tool.hasProperty(prop_name):
            return pb_tool.getProperty(prop_name)
        return 30

    ############################################################################
    # Folder methods, indexes and such

    security.declareProtected(MoveConversation, 'manage_pasteObjects')
    def manage_pasteObjects(self, cp):
        """ move another conversation """
        # NOTE(review): the base implementation's result is discarded, so
        # callers expecting the standard paste result receive None.
        CopyContainer.manage_pasteObjects(self, cp)

    def __nonzero__(self):
        # Empty BTree folders evaluate false; forums must stay truthy.
        return 1
registerType(PloneboardForum, PROJECTNAME)
| Python |
import Ploneboard
import PloneboardForum
import PloneboardConversation
import PloneboardComment
| Python |
try:
    from lipsum import markupgenerator
except ImportError:
    # Fallback stub so the content-generation code below keeps working
    # without the optional lorem-ipsum-generator package installed.
    class markupgenerator(object):
        def __init__(self, sample, dictionary):
            pass

        def generate_sentence(self):
            return 'subject'

        def generate_paragraph(self):
            return 'Please install lorem-ipsum-generator.'
import transaction
from time import time
from random import betavariate
from Products.CMFCore.utils import getToolByName
from Products.Ploneboard.config import EMOTICON_TRANSFORM_MODULE
from Products.Ploneboard.config import URL_TRANSFORM_MODULE
from Products.Ploneboard.config import SAFE_HTML_TRANSFORM_MODULE
def setupVarious(context):
    """GenericSetup import step: install transforms, comment local-role
    acquisition and the placeful workflow policy."""
    if not context.readDataFile('ploneboard_various.txt'):
        # Flag file absent: this is not our profile, do nothing.
        return
    portal = context.getSite()
    addTransforms(portal)
    setupCommentLocalRoles(portal)
    addPlacefulPolicy(portal)
def addTransforms(site):
    """Register Ploneboard's comment text transforms with the tool."""
    tool = getToolByName(site, 'portal_ploneboard')
    for name, module, friendly in (
            ('text_to_emoticons', EMOTICON_TRANSFORM_MODULE, 'Graphical smilies'),
            ('url_to_hyperlink', URL_TRANSFORM_MODULE, 'Clickable links'),
            ('safe_html', SAFE_HTML_TRANSFORM_MODULE, 'Remove dangerous HTML')):
        tool.registerTransform(name, module, friendly)
def lotsofposts(context):
debug = True
if not context.readDataFile('ploneboard_lotsofposts.txt'):
return
sample = context.readDataFile('rabbit.txt')
dictionary = context.readDataFile('vocab.txt')
mg = markupgenerator(sample=sample, dictionary=dictionary)
# XXX CREATE 1000 REAL USERS WITH AVATARS FOR POSTING
# For every forum, create random content for a total of a configurable number
totalgoal = 100000
site=context.getSite()
board = site.ploneboard # From the basicboard dependency
forums = board.getForums()
for forum in forums:
count = int(totalgoal * betavariate(1, len(forums)-1))
i = 0
while i < count:
start = time()
conv = forum.addConversation(mg.generate_sentence(), mg.generate_paragraph())
i+=1
if debug:
print "Creating conversation %s of %s in %s in %.5fs" % (i, count, forum.getId(), time()-start)
if i % 1000 == 0:
transaction.get().savepoint(optimistic=True)
if debug:
print "\nSAVEPOINT\n"
# XXX add arbitrary number of comments, which all count towards count
for j in range(0,int(betavariate(1, 5) * max(300,(count/10)))):
if i < count:
start = time()
conv.addComment(mg.generate_sentence(), mg.generate_paragraph())
i+=1
if debug:
print "Creating comment %s of %s in %s in %.5fs" % (i, count, forum.getId(), time()-start)
if i % 1000 == 0:
transaction.get().savepoint(optimistic=True)
if debug:
print "\nSAVEPOINT\n"
else:
continue
def setupCommentLocalRoles(self):
    """Migration helper: make every existing comment acquire local roles."""
    catalog = getToolByName(self, 'portal_catalog')
    putils = getToolByName(self, 'plone_utils')
    brains = catalog(object_provides='Products.Ploneboard.interfaces.IComment')
    comments = [b.getObject() for b in brains if b.getObject()]
    touched = 0
    for comment in comments:
        # Do not update needlessly. Screws up modified
        if not putils.isLocalRoleAcquired(comment):
            putils.acquireLocalRoles(comment, 0)
            touched += 1
    self.plone_log('setupCommentLocalRoles',
                   'Updated %d of total %d comments' % (touched, len(comments)))
def addPlacefulPolicy(self):
    """Create the 'EditableComment' placeful workflow policy if missing."""
    ppw = getToolByName(self, 'portal_placeful_workflow')
    policy_id = 'EditableComment'
    if policy_id in ppw.objectIds():
        return
    ppw.manage_addWorkflowPolicy(policy_id)
    policy = ppw[policy_id]
    policy.setChain('PloneboardComment', 'ploneboard_editable_comment_workflow')
| Python |
"""
$Id: interfaces.py 55766 2007-12-18 11:08:52Z wichert $
"""
# Dependency on Zope 2.8.x (or greater) or Five
from zope.interface import Interface, Attribute
class IPloneboard(Interface):
    """
    Ploneboard is the outmost board object, what shows up in your site.
    The board contains forums. Board is folderish. The number of items
    contained in Board should be limited and steady.

    This is an optional type.
    """

    def addForum(id, title, description):
        """
        Create a forum with the given id, title and description inside
        the board and return it.

        Should this go away and rather just use the regular Plone content
        creation? That would make it easier to switch content types.
        """

    def removeForum(forum_id):
        """
        Remove the forum with the specified id from this board.
        """

    def getForum(forum_id):
        """
        Return the forum for forum_id, or None.
        """

    def getForumIds():
        """
        Return the ids of the forums.

        If this is the only board in a site, it should return forum ids for
        the entire site, not just inside the board.
        """

    def getForums():
        """
        Return the forums.

        If this is the only board in a site, it should return forums for the
        entire site, not just inside the board.
        """

    def searchComments(query):
        """
        Search through all forums, conversations and comments using the
        given catalog query.
        """
class IForum(Interface):
    """
    A Forum contains conversations. Forum is folderish. The number of items
    contained in Forum is high and increases, so it is probably a good idea
    to use BTrees for indexing.
    """

    def getBoard():
        """
        Gets the containing board.

        Returns None if there are no boards in the site.
        """

    def addConversation(subject, body, **kw):
        """
        Adds a new conversation to the forum and returns it.

        Should this go away and rather just use the regular Plone content
        creation? That would make it easier to switch content types.
        """

    def getConversation(conversation_id):
        """
        Returns the conversation with the given conversation id.
        """

    def removeConversation(conversation_id):
        """
        Removes a conversation with the given conversation id from the forum.
        """

    def getConversations(limit=20, offset=0):
        """
        Returns a maximum of 'limit' conversations, the last updated
        conversations first, starting from 'offset'.
        """

    def getNumberOfConversations():
        """
        Returns the number of conversations in this forum.
        """

    def getNumberOfComments():
        """
        Returns the number of comments made in this forum.
        """
class IConversation(Interface):
    """
    Conversation contains comments. The number of comments contained in
    Conversation is high and increases. It is recommended to use BTree for
    indexing and to autogenerate ids for contained comments.
    """

    def getForum():
        """
        Returns the containing forum.
        """

    def addComment(comment_subject, comment_body):
        """
        Adds a new comment with subject and body.
        """

    def getComment(comment_id):
        """
        Returns the comment with the specified id.
        """

    def getComments(limit=30, offset=0, **kw):
        """
        Retrieves the specified number of comments with offset 'offset'.

        In addition there are kw args for sorting and retrieval options.
        """

    def getNumberOfComments():
        """
        Returns the number of comments in this conversation.
        """

    def getLastCommentDate():
        """
        Returns a DateTime corresponding to the timestamp of the last comment
        for the conversation.
        """

    def getLastCommentAuthor():
        """
        Returns the author of the last comment for the conversation.
        """

    def getLastComment():
        """
        Returns the last comment as full object (no brain).

        If there is no comment, None is returned.
        """

    def getRootComments():
        """
        Return a list of all comments rooted to the conversation, i.e.
        comments which are not replies to other comments.
        """

    def getFirstComment():
        """
        Returns the first (aka root) comment in this IConversation.
        """
class IComment(Interface):
    """
    A comment contains regular text body and metadata.
    """

    def getConversation():
        """
        Returns the containing conversation.
        """

    def addReply(comment_subject, comment_body):
        """
        Add a response to this comment of same type as object itself.
        """

    def inReplyTo():
        """
        Returns the comment object this comment is a reply to. If it is the
        topmost comment (ie: first comment in a conversation), it returns
        None.
        """

    def getReplies():
        """
        Returns the comments that were replies to this one.
        """

    def getTitle():
        """
        Returns the title of the comment.
        """

    def getText():
        """
        Returns the text of the comment.
        """

    def delete():
        """
        Delete this comment. Will ensure to clean up any comments
        that were replies to this comment.
        """
class IAttachmentSupport(Interface):
    """
    Attachment support, typically for comments.
    """

    def addAttachment(file, title=None):
        """
        Add a file attachment.
        """

    def hasAttachment():
        """
        Return 1 if this comment has attachments, 0 otherwise.
        """

    def getNumberOfAllowedAttachments():
        """
        Return the number of allowed attachments.
        """

    def getNumberOfAttachments():
        """
        Return the number of attachments.
        """

    def getAttachments():
        """
        Return all attachments.
        """
class IPloneboardTool(Interface):
    """Services for Ploneboard: handles text transformation plugins and
    attached files.
    """

    id = Attribute('id', 'Must be set to "portal_ploneboard"')

    def registerTransform(name, module, friendlyName=None):
        """Adds a text transformation module to portal_transforms.

        Used from the configuration panel.
        """

    def unregisterTransform(name):
        """Removes the transformation module from portal_transforms.

        Used from the configuration panel.
        """

    def enableTransform(name, enabled=True):
        """Globally enables a transform (site wide).
        """

    def unregisterAllTransforms():
        """Removes from portal_transforms all transform modules added with
        Ploneboard.
        """

    def getTransforms():
        """Returns list of transform names.
        """

    def getTransformFriendlyName(name):
        """Returns a friendly name for the given transform.
        """

    def getEnabledTransforms():
        """Returns list of names for enabled transforms.
        """

    def performCommentTransform(orig, **kwargs):
        """This performs the comment transform - also used for preview.
        """

    def getUploadedFiles():
        """Stores files from request in session and returns these files.
        """

    def clearUploadedFiles():
        """Removes uploaded files from session machinery.
        """
| Python |
"""
$Id: PloneboardTool.py 58018 2008-02-01 16:30:48Z wichert $
"""
import Globals
from AccessControl import ClassSecurityInfo
from OFS.Image import File
from OFS.Folder import Folder
from ZPublisher.HTTPRequest import FileUpload
from ZODB.PersistentMapping import PersistentMapping
from zope.interface import implements
from Products.CMFCore.utils import UniqueObject
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.utils import registerToolInterface
from Products.CMFCore.permissions import View
from Products.CMFCore.permissions import ManagePortal
from Products.CMFCore.ActionProviderBase import ActionProviderBase
from Products.Ploneboard.config import PLONEBOARD_TOOL
from Products.Ploneboard.interfaces import IPloneboardTool
class PloneboardTool(UniqueObject, Folder, ActionProviderBase):
    """Site-wide Ploneboard services.

    Manages the set of text transforms applied to comment bodies and keeps
    track of file uploads stashed in the session between form submissions.
    """

    implements(IPloneboardTool)

    id = PLONEBOARD_TOOL
    meta_type = 'Ploneboard Tool'

    security = ClassSecurityInfo()

    def __init__(self):
        # name -> {'enabled': bool, 'friendlyName': str, 'wasAdded': bool}
        self.transforms = PersistentMapping()

    security.declarePrivate('registerTransform')
    def registerTransform(self, name, module, friendlyName=None):
        """Add a transform to portal_transforms and start tracking it.

        `wasAdded` records whether we created the transform ourselves, so
        unregistering later only deletes transforms Ploneboard installed.
        """
        tr_tool = getToolByName(self, 'portal_transforms')
        if name not in tr_tool.objectIds():
            tr_tool.manage_addTransform(name, module)
            wasAdded = True
        else:
            wasAdded = False

        if not friendlyName:
            friendlyName = name

        if name not in self.transforms:
            self.transforms[name] = {'enabled' : True,
                                     'friendlyName' : friendlyName,
                                     'wasAdded' : wasAdded
                                     }

    security.declarePrivate('unregisterTransform')
    def unregisterTransform(self, name):
        """Forget a transform; delete it from portal_transforms only when
        we added it ourselves.  Raises KeyError for unknown names.
        """
        tr_tool = getToolByName(self, 'portal_transforms')
        if self.transforms[name]['wasAdded']:
            try:
                tr_tool._delObject(name)
            except AttributeError:
                # Already gone from portal_transforms; still drop our record.
                pass
        del self.transforms[name]

    security.declareProtected(ManagePortal, 'enableTransform')
    def enableTransform(self, name, enabled=True):
        """Change the activity status for a transform."""
        self.transforms[name]['enabled'] = enabled

    security.declarePrivate('unregisterAllTransforms')
    def unregisterAllTransforms(self):
        """Remove every tracked transform, deleting from portal_transforms
        those Ploneboard itself added.
        """
        tr_tool = getToolByName(self, 'portal_transforms')
        for transform_name in self.getTransforms():
            if not self.transforms[transform_name] or \
               self.transforms[transform_name].get('wasAdded', False):
                try:
                    tr_tool._delObject(transform_name)
                except AttributeError:
                    pass
        self.transforms.clear()

    security.declareProtected(ManagePortal, 'getTransforms')
    def getTransforms(self):
        """Returns list of transform names."""
        return self.transforms.keys()

    security.declareProtected(ManagePortal, 'getTransformFriendlyName')
    def getTransformFriendlyName(self, name):
        """Returns a friendly name for the given transform."""
        return self.transforms[name]['friendlyName']

    security.declareProtected(View, 'getEnabledTransforms')
    def getEnabledTransforms(self):
        """Returns list of names for enabled transforms."""
        return [name for name in self.transforms.keys()
                if self.transforms[name]['enabled']]

    security.declareProtected(View, 'performCommentTransform')
    def performCommentTransform(self, orig, **kwargs):
        """Run `orig` through every enabled transform, chaining each
        transform's output into the next - also used for preview.
        """
        transform_tool = getToolByName(self, 'portal_transforms')
        content_type = kwargs.get("content_type", "text/plain")
        # This one is very important, because transform object has no
        # acquisition context inside it, so we need to pass it our one
        context = kwargs.get('context', self)
        data = transform_tool._wrap(content_type)
        for transform in self.getEnabledTransforms():
            data = transform_tool.convert(transform, orig, data, context)
            orig = data.getData()
        return orig

    # File upload - should be in a View once we get formcontroller support in Views
    security.declareProtected(View, 'getUploadedFiles')
    def getUploadedFiles(self):
        """Collect uploads from the request.

        Fresh FileUpload objects are stored in the session keyed by their
        normalized filename; plain strings are looked up as keys of earlier
        uploads in the session.  Session entries no longer referenced by
        the request are discarded.  Returns the list of File objects.
        """
        request = self.REQUEST
        result = []
        files = request.get('files', [])
        if not files:
            return []
        sdm = getToolByName(self, 'session_data_manager', None)
        if sdm is not None:
            pt = getToolByName(self, 'plone_utils')
            hassession = sdm.hasSessionData()
            for file in files:
                if isinstance(file, basestring) and hassession:
                    # Look it up from session
                    oldfile = request.SESSION.get(file, None)
                    if oldfile is not None:
                        result.append(oldfile)
                if isinstance(file, FileUpload):
                    if file:
                        filename = file.filename.split('\\')[-1]
                        id = pt.normalizeString(filename)
                        ct = file.headers.getheader('content-type')
                        if ct is None:
                            ct = ''
                        newfile = File(id, id, file, ct)
                        request.SESSION[id] = newfile
                        result.append(newfile)
            # delete files from session if not referenced
            new_filelist = [x.getId() for x in result]
            old_filelist = hassession and request.SESSION.get('ploneboard_uploads', []) or []
            for removed in [f for f in old_filelist if f not in new_filelist]:
                # Bug fix: previously deleted request.SESSION[f], where `f`
                # was the leaked comprehension variable (always the last
                # filtered name), removing the wrong key and raising
                # KeyError when more than one file was dropped.
                del request.SESSION[removed]
            if hassession or new_filelist:
                request.SESSION['ploneboard_uploads'] = new_filelist
        return result

    security.declareProtected(View, 'clearUploadedFiles')
    def clearUploadedFiles(self):
        """Drop all session-stashed uploads and the bookkeeping list."""
        # Get previously uploaded files with a reference in request
        # + files uploaded in this request
        # XXX Add variable to keep track of filenames?
        request = self.REQUEST
        sdm = getToolByName(self, 'session_data_manager', None)
        if sdm is not None:
            if sdm.hasSessionData():
                old_filelist = request.SESSION.get('ploneboard_uploads', None)
                if old_filelist is not None:
                    for file in old_filelist:
                        if request.SESSION.has_key(file):
                            del request.SESSION[file]
                    del request.SESSION['ploneboard_uploads']
# Apply the security declarations and register the tool's interface under
# its fixed id.
Globals.InitializeClass(PloneboardTool)
registerToolInterface(PLONEBOARD_TOOL, IPloneboardTool)
| Python |
from Products.Ploneboard.interfaces import IConversation
from plone.indexer.decorator import indexer
@indexer(IConversation)
def num_comments(obj):
    """Catalog indexer: the number of comments in a conversation."""
    return obj.getNumberOfComments()
| Python |
from Testing import ZopeTestCase
# Make the boring stuff load quietly
ZopeTestCase.installProduct('SimpleAttachment')
ZopeTestCase.installProduct('CMFPlacefulWorkflow')
ZopeTestCase.installProduct('Ploneboard')

from Products.PloneTestCase import PloneTestCase

# Create the shared Plone test site once, with the products above installed.
PloneTestCase.setupPloneSite(products=('SimpleAttachment', 'CMFPlacefulWorkflow', 'Ploneboard'))
class PloneboardTestCase(PloneTestCase.PloneTestCase):

    class Session(dict):
        # Minimal stand-in for a Zope session object: a dict plus `set`.
        def set(self, key, value):
            self[key] = value

    def _setup(self):
        # Install the fake SESSION so code touching request.SESSION works
        # without a real session data manager.
        PloneTestCase.PloneTestCase._setup(self)
        self.app.REQUEST['SESSION'] = self.Session()
class PloneboardFunctionalTestCase(PloneTestCase.FunctionalTestCase):

    class Session(dict):
        # Minimal stand-in for a Zope session object: a dict plus `set`.
        def set(self, key, value):
            self[key] = value

    def _setup(self):
        # Install the fake SESSION so code touching request.SESSION works
        # without a real session data manager.
        PloneTestCase.FunctionalTestCase._setup(self)
        self.app.REQUEST['SESSION'] = self.Session()
| Python |
import Products.Five
import Products.ATContentTypes
from Products.CMFPlacefulWorkflow.WorkflowPolicyConfig import manage_addWorkflowPolicyConfig
from DateTime import DateTime
def addMember(self, username, fullname="", email="", roles=('Member',), last_login_time=None):
    """Create a portal member (password 'secret') and set its basic properties."""
    membership = self.portal.portal_membership
    membership.addMember(username, 'secret', roles, [])
    new_member = membership.getMemberById(username)
    properties = {
        'fullname': fullname,
        'email': email,
        'last_login_time': DateTime(last_login_time),
    }
    new_member.setMemberProperties(properties)
def setUpDefaultMembersBoardAndForum(self):
    """Create two members, a manager and a reviewer, plus board1/forum1."""
    addMember(self, 'member1', 'Member one', roles=('Member',))
    addMember(self, 'member2', 'Member two', roles=('Member',))
    addMember(self, 'manager1', 'Manager one', roles=('Manager',))
    # NOTE(review): fullname 'Manager one' for reviewer1 looks like a
    # copy-paste slip ('Reviewer one'?) -- left as-is since doctests may
    # match on the displayed name.
    addMember(self, 'reviewer1', 'Manager one', roles=('Reviewer',))
    self.workflow = self.portal.portal_workflow
    # Board creation requires Manager; drop back to Member afterwards.
    self.setRoles(('Manager',))
    self.portal.invokeFactory('Ploneboard', 'board1')
    self.board = self.portal.board1
    self.forum = self.board.addForum('forum1', 'Forum 1', 'Forum one')
    self.setRoles(('Member',))
def disableScriptValidators(portal):
    """Disable CMFFormController validators on Ploneboard's form scripts.

    Customizes each known ploneboard script into the 'custom' skin layer
    and registers an empty validator chain for it, so functional tests can
    post through the forms without triggering validation.  Best-effort:
    failures (e.g. missing skin layer) are ignored rather than aborting
    test setup.
    """
    from Products.CMFFormController.FormController import ANY_CONTEXT, ANY_BUTTON
    scripts = ['add_comment_script', 'add_conversation_script', 'add_forum_script']
    try:
        for v in portal.portal_skins.ploneboard_scripts.objectValues():
            if v.id in scripts:
                v.manage_doCustomize('custom')
                portal.portal_form_controller.addFormValidators(v.id, ANY_CONTEXT, ANY_BUTTON, [])
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behaviour but only for
        # ordinary exceptions.
        pass
def logoutThenLoginAs(self, browser, userid):
    """Log the testbrowser out, then back in as `userid` (password 'secret')."""
    portal_url = self.portal.absolute_url()
    browser.open('%s/logout' % portal_url)
    browser.open('%s/login_form' % portal_url)
    browser.getControl(name='came_from').value = portal_url
    browser.getControl(name='__ac_name').value = userid
    browser.getControl(name='__ac_password').value = 'secret'
    browser.getControl('Log in').click()
    return
def setupEditableForum(self, forum):
    """Attach a placeful workflow policy making comments in `forum` editable.

    Applies the 'EditableComment' policy both in and below the forum and
    updates security, temporarily assuming the Manager role.
    """
    self.setRoles(('Manager',))
    manage_addWorkflowPolicyConfig(forum)
    pw_tool = self.portal.portal_placeful_workflow
    config = pw_tool.getWorkflowPolicyConfig(forum)
    config.setPolicyIn(policy='EditableComment')
    config.setPolicyBelow(policy='EditableComment', update_security=True)
    self.setRoles(('Member',))
def lockBoard(self, state):
    """Fire workflow transition `state` on self.board while acting as Manager."""
    self.setRoles(('Manager',))
    self.workflow.doActionFor(self.board, state)
    self.setRoles(('Member',))
| Python |
#
# Conversation tests
#
import transaction
import unittest
from zope.interface.verify import verifyClass, verifyObject
import PloneboardTestCase
from Products.CMFPlone.utils import _createObjectByType
from Products.Ploneboard.interfaces import IConversation
from Products.Ploneboard.content.PloneboardConversation import PloneboardConversation
from Products.Ploneboard.tests.utils import addMember
class TestPloneboardConversation(PloneboardTestCase.PloneboardTestCase):
    """Unit tests for PloneboardConversation: comment access, threading,
    merging/moving, and anonymous visibility."""

    def afterSetUp(self):
        # Fixture: board -> forum -> one conversation; addConversation
        # creates the initial root comment, which we keep a handle on.
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')
        self.forum = _createObjectByType('PloneboardForum', self.board, 'forum')
        self.conv = self.forum.addConversation('subject', 'body')
        self.comment = self.conv.getComments()[0]

    def testInterfaceVerification(self):
        self.failUnless(verifyClass(IConversation, PloneboardConversation))

    def testInterfaceConformance(self):
        self.failUnless(IConversation.providedBy(self.conv))
        self.failUnless(verifyObject(IConversation, self.conv))

    def testGetForum(self):
        self.failUnlessEqual(self.forum, self.conv.getForum())

    def testAddComment(self):
        msg = self.conv.addComment('msg_title', 'msg_text')
        self.assertEqual(msg.getTitle(), 'msg_title')
        self.assertEqual(msg.getText(), 'msg_text')

    def testGetLastComment(self):
        msg = self.conv.addComment('last_comment_title', 'last_comment_text')
        self.assertEqual(self.conv.getLastComment().getTitle(), msg.getTitle())
        msg2 = self.conv.addComment('last_comment_title2', 'last_comment_text')
        self.assertEqual(self.conv.getLastComment().getTitle(), msg2.getTitle())

    def testGetComment(self):
        conv = self.conv
        self.failUnlessEqual(self.comment, conv.getComment(self.comment.getId()))

    def testGetComments(self):
        conv = self.conv
        comment2 = conv.addComment('subject2', 'body2')
        self.failUnlessEqual(conv.getComments(), [self.comment, comment2])

    def testGetCommentsSlicing(self):
        # limit/offset paginate in creation order.
        conv = self.conv
        comment2 = conv.addComment('subject2', 'body2')
        self.failUnlessEqual(conv.getComments(limit=1, offset=0), [self.comment])
        self.failUnlessEqual(conv.getComments(limit=1, offset=1), [comment2])

    def testGetNumberOfComments(self):
        conv = self.conv
        self.failUnlessEqual(conv.getNumberOfComments(), 1)
        conv.addComment('followup', 'text')
        self.failUnlessEqual(conv.getNumberOfComments(), 2)
        # Check to make sure it doesn't count comments elsewhere
        conv2 = self.forum.addConversation('subject', 'body')
        self.failUnlessEqual(conv.getNumberOfComments(), 2)

    def testGetLastCommentDate(self):
        conv = self.conv
        self.failUnlessEqual(self.comment.created(), conv.getLastCommentDate())
        comment = conv.addComment('followup', 'text')
        self.failUnlessEqual(comment.created(), conv.getLastCommentDate())

    def testGetRootComments(self):
        # Replies are not root comments; only top-level comments count.
        conv = self.conv
        threaded = conv.getRootComments()
        self.failUnlessEqual(len(threaded), 1)
        conv.addComment("base2", "base two")
        threaded = conv.getRootComments()
        self.failUnlessEqual(len(threaded), 2)
        reply = self.comment.addReply('anotherfollowup', 'moretext')
        threaded = conv.getRootComments()
        self.failUnlessEqual(len(threaded), 2)
        self.failUnlessEqual(len(self.comment.getReplies()), 1)

    def testGetFirstComment(self):
        conv = self.conv
        first = conv.getFirstComment()
        self.failUnless(first)
        self.failUnlessEqual(first, conv.objectValues()[0])
        conv.addComment('followup', 'text')
        self.failUnlessEqual(first, conv.getFirstComment())

    def testModificationDate(self):
        # Adding comments (and replies) must bump the conversation's
        # modification date.
        conv = self.conv
        modified1 = conv.modified()
        from time import sleep
        sleep(0.1)  # To make sure modified is different
        conv.addComment('followup', 'text')
        modified2 = conv.modified()
        self.failIfEqual(modified1, modified2)
        conv.objectValues()[0].addReply('followup', 'text')
        sleep(0.1)  # To make sure modified is different
        modified3 = conv.modified()
        self.failIfEqual(modified1, modified3)
        self.failIfEqual(modified2, modified3)

    def XXXtest_delObject(self):
        # Disabled (XXX prefix): exercises recursive/non-recursive comment
        # deletion and uncataloging; relies on self.catalog which the
        # fixture does not set up.
        forum = self.forum
        conv = forum.addConversation('subject', 'body')
        msg = conv.addComment('msg_title', 'msg_text')
        #msg = conv.objectValues()[0]
        r = msg.addReply('reply_subject', 'reply_body')
        r1 = msg.addReply('reply_subject1', 'reply_body1')
        r2 = msg.addReply('reply_subject2', 'reply_body2')
        r2.addReply('rs', 'rb').addReply('rs1', 'rb1').addReply('rs2', 'rb2')
        self.assertEqual(conv.getNumberOfComments(), 7)
        conv._delObject(r2.getId(), recursive=1)
        self.assertEqual(conv.getNumberOfComments(), 3)
        # check if we delete conversation so we delete root comment
        self.assertEqual(forum.getNumberOfConversations(), 1)
        conv._delObject(msg.getId(), recursive=1)
        self.assertEqual(forum.getNumberOfConversations(), 0)
        # check if Comment was uncataloged
        self.failIf(r.getId() in [v.id for v in self.catalog(meta_type='PloneboardComment', id=r.getId())])
        self.failIf(msg.getId() in [v.id for v in self.catalog(meta_type='PloneboardComment', id=msg.getId())])
        # Checking non recursive delete
        conv = forum.addConversation('subject', 'body')
        msg = conv.objectValues()[0]
        r = msg.addReply('reply_subject', 'reply_body')
        self.assertEqual(conv.getNumberOfComments(), 2)
        self.failUnless(r.getId() in [v.getId for v in self.catalog(meta_type='PloneboardComment', id=r.getId())])
        conv._delObject(msg.getId())
        self.assertEqual(conv.getNumberOfComments(), 1)
        self.failUnless(r.getId() in [v.getId for v in self.catalog(meta_type='PloneboardComment', id=r.getId())])

    def testNewConversationIsVisibleToAnonymous(self):
        conv = self.forum.addConversation('subject2', 'body2')
        conv.addComment("comment", "comment")
        id = conv.getId()
        self.logout()
        convs = self.forum.getConversations()
        # self.failUnless(id in [x.getId() for x in convs])
        comments = conv.getComments()
        self.assertEqual(len(comments), 2)

    def testAddCommentAsAnonymousTakesOwnerOfForumAndCreatorAnonymous(self):
        # Anonymous comments are owned by the forum owner but credited to
        # 'Anonymous'.
        conv = self.conv
        self.logout()
        reply = conv.addComment('reply1', 'body1', creator='Anonymous')
        self.assertEqual(conv.getForum().owner_info()['id'], reply.owner_info()['id'])
        self.assertEqual(reply.Creator(), 'Anonymous')

    def testAddCommentAsNotAnonymousLeavesOwnershipAlone(self):
        conv = self.conv
        addMember(self, 'member2')
        self.login('member2')
        self.assertNotEqual(conv.getForum().owner_info()['id'], 'member2')
        reply = conv.addComment('reply1', 'body1')
        self.assertEqual(reply.owner_info()['id'], 'member2')

    def testDuplicateConversations(self):
        # Pasting a copied conversation into another conversation must fail.
        conv2 = self.forum.addConversation('subject2', 'body2')
        comment = conv2.addComment('subject2', 'body2')
        transaction.savepoint(optimistic=True)
        cp = self.forum.manage_copyObjects(conv2.getId())
        self.failUnlessRaises(ValueError, self.conv.manage_pasteObjects, cp)

    def testMergeConversations(self):
        # Cut/pasting a whole conversation into another merges its comments.
        conv2 = self.forum.addConversation('subject2', 'body2')
        comment = conv2.getComments()[0]
        transaction.savepoint(optimistic=True)
        self.conv.manage_pasteObjects(self.forum.manage_cutObjects(conv2.getId()))
        self.failUnless(comment.getId() in self.conv.objectIds())
        self.failUnless(len(self.conv.getComments()) == 2)
        self.failIf(conv2.getId() in self.forum.objectIds())

    def testMoveCommentToConversation(self):
        conv2 = self.forum.addConversation('subject2', 'body2')
        comment = conv2.addComment('subject2', 'body2')
        transaction.savepoint(optimistic=True)
        self.conv.manage_pasteObjects(conv2.manage_cutObjects(comment.getId()))
        self.failUnless(comment.getId() in self.conv.objectIds())
        self.failUnless(len(self.conv.getComments()) == 2)
        self.failUnless(conv2.getId() in self.forum.objectIds())  # We only moved the comment
def test_suite():
    """Collect the conversation tests into a suite."""
    return unittest.TestSuite([
        unittest.makeSuite(TestPloneboardConversation),
    ])
| Python |
#
# Ploneboard tests
#
from Products.Ploneboard.tests import PloneboardTestCase
# Catch errors in Install
from Products.Ploneboard.Extensions import Install
from Products.CMFCore.utils import getToolByName
class TestSetup(PloneboardTestCase.PloneboardTestCase):
    """Verify the Ploneboard install: skins, types, tools, transforms,
    catalog indexes and portal_factory registration."""

    def testSkins(self):
        portal_skins = self.portal.portal_skins.objectIds()
        skins = (
            'ploneboard_images',
            'ploneboard_scripts',
            'ploneboard_templates',
        )
        for skin in skins:
            # Bug fix: the original asserted `skin in skins` -- a tautology
            # that could never fail.  Check the portal's skin layers instead.
            self.failUnless(skin in portal_skins)

    def testPortalTypes(self):
        portal_types = self.portal.portal_types.objectIds()
        content_types = (
            'Ploneboard',
            'PloneboardForum',
            'PloneboardConversation',
            'PloneboardComment',
        )
        for content_type in content_types:
            self.failUnless(content_type in portal_types)

    def testTools(self):
        from Products.Ploneboard.config import PLONEBOARD_TOOL
        tool_names = (
            PLONEBOARD_TOOL,
        )
        for tool_name in tool_names:
            self.failUnless(tool_name in self.portal.objectIds())

    def testTransforms(self):
        from Products.Ploneboard.config import PLONEBOARD_TOOL
        tool = getToolByName(self.portal, PLONEBOARD_TOOL)
        # The list comprehension was a pointless copy; membership tests work
        # on the returned sequence directly.
        transforms = tool.getEnabledTransforms()
        self.failUnless('safe_html' in transforms)
        self.failUnless('text_to_emoticons' in transforms)
        self.failUnless('url_to_hyperlink' in transforms)

    def testCatalogIndex(self):
        ct = getToolByName(self.portal, 'portal_catalog')
        self.failUnless('object_provides' in ct.indexes())
        self.failUnless('num_comments' in ct.indexes())
        self.failUnless('num_comments' in ct.schema())

    def testPortalFactorySetup(self):
        portal_factory = getToolByName(self.portal, 'portal_factory')
        factoryTypes = portal_factory.getFactoryTypes().keys()
        for t in ['Ploneboard', 'PloneboardComment', 'PloneboardConversation', 'PloneboardForum']:
            self.failUnless(t in factoryTypes)
def test_suite():
    """Collect the setup tests into a suite."""
    from unittest import TestSuite, makeSuite
    suite = TestSuite()
    suite.addTest(makeSuite(TestSetup))
    return suite
| Python |
"""Ploneboard functional doctests. This module collects all *.txt
files in the tests directory and runs them. (stolen from Plone)
"""
import os, sys
import glob
import doctest
import unittest
from Globals import package_home
from Testing.ZopeTestCase import FunctionalDocFileSuite as Suite
from Products.Ploneboard.config import GLOBALS
# Load products
from Products.Ploneboard.tests.PloneboardTestCase import PloneboardFunctionalTestCase
# Doctests that drive the site through zope.testbrowser; they are skipped
# when Five's testbrowser support cannot be imported (see test_suite).
REQUIRE_TESTBROWSER = ['MemberPostingForum.txt', 'MemberOnlyForum.txt',
                       'MemberEditsComment.txt', 'AdminLocksBoard.txt',
                       'FreeForAllForum.txt', 'ModeratedForum.txt']
# Doctest option flags shared by every functional doctest suite.
OPTIONFLAGS = (doctest.REPORT_ONLY_FIRST_FAILURE |
               doctest.ELLIPSIS |
               doctest.NORMALIZE_WHITESPACE)
def list_doctests():
    """Return the absolute paths of all *.txt doctests in the tests dir."""
    home = package_home(GLOBALS)
    # glob.glob already returns a list -- the old comprehension merely
    # copied it.  os.path.join replaces the hand-rolled sep.join.
    return glob.glob(os.path.join(home, 'tests', '*.txt'))
def list_nontestbrowser_tests():
    """Return doctest paths excluding those that need zope.testbrowser."""
    result = []
    for filename in list_doctests():
        if os.path.basename(filename) not in REQUIRE_TESTBROWSER:
            result.append(filename)
    return result
def test_suite():
    """Build a suite of functional doctest suites.

    Falls back to the subset of doctests that do not need testbrowser
    when Five's testbrowser support is unavailable.
    """
    # BBB: We can obviously remove this when testbrowser is Plone
    # mainstream, read: with Five 1.4.
    try:
        import Products.Five.testbrowser
    except ImportError:
        # NOTE(review): the concatenated warning string lacks a space
        # between "probably" and "need" -- runtime text, left untouched.
        print >> sys.stderr, ("WARNING: testbrowser not found - you probably"
                              "need to add Five 1.4 to the Products folder. "
                              "testbrowser tests skipped")
        filenames = list_nontestbrowser_tests()
    else:
        filenames = list_doctests()
    # One FunctionalDocFileSuite per doctest file, all sharing the same
    # option flags and test case class.
    return unittest.TestSuite(
        [Suite(os.path.basename(filename),
               optionflags=OPTIONFLAGS,
               package='Products.Ploneboard.tests',
               test_class=PloneboardFunctionalTestCase)
         for filename in filenames]
    )
| Python |
#
# Comment tests
#
import unittest
from zope.interface.verify import verifyClass
import PloneboardTestCase
from Products.Ploneboard.interfaces import IComment
from Products.Ploneboard.content.PloneboardComment import PloneboardComment
from Products.Ploneboard.config import HAS_SIMPLEATTACHMENT
from OFS.Image import File
from Products.CMFPlone.utils import _createObjectByType
from Products.Ploneboard.tests.utils import addMember
class TestPloneboardComment(PloneboardTestCase.PloneboardTestCase):
    """Unit tests for PloneboardComment: replies, titles, threading,
    transforms and the singlecomment view's author lookup."""

    def afterSetUp(self):
        # Fixture: board -> forum -> conversation with one extra comment.
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')
        self.forum = _createObjectByType('PloneboardForum', self.board, 'forum')
        self.conv = self.forum.addConversation('conv1', 'conv1 body')
        self.comment = self.conv.addComment("comment1", "comment1 body")

    def testInterfaceVerification(self):
        self.failUnless(verifyClass(IComment, PloneboardComment))

    def testGetConversation(self):
        self.failUnlessEqual(self.comment.getConversation(), self.conv)

    def testAddReply(self):
        conv = self.conv
        reply = self.comment.addReply('reply1', 'body1')
        self.failUnless(reply in conv.objectValues())

    def testAddReplyAsAnonymousTakesOwnerOfForumAndCreatorAnonymous(self):
        # Anonymous replies are owned by the forum owner but credited to
        # 'Anonymous'.
        conv = self.conv
        self.logout()
        reply = self.comment.addReply('reply1', 'body1', creator='Anonymous')
        self.assertEqual(conv.getForum().owner_info()['id'], reply.owner_info()['id'])
        self.assertEqual(reply.Creator(), 'Anonymous')

    def testAddReplyAsNotAnonymousLeavesOwnershipAlone(self):
        conv = self.conv
        addMember(self, 'member2')
        self.login('member2')
        self.assertNotEqual(conv.getForum().owner_info()['id'], 'member2')
        reply = self.comment.addReply('reply1', 'body1')
        self.assertEqual(reply.owner_info()['id'], 'member2')

    def testAddReplyAddsRe(self):
        # An untitled reply gets 'Re: <conversation title>'.
        conv = self.conv
        reply = self.comment.addReply('', 'body1')
        self.assertEqual(reply.Title(), 'Re: ' + conv.Title())

    def testAddReplyAddsReOnlyOnce(self):
        conv = self.conv
        reply = self.comment.addReply('', 'body1')
        reply2 = reply.addReply('', 'body2')
        self.assertEqual(reply2.Title(), 'Re: ' + conv.Title())

    def testAddReplyOnlyAddsReIfNotSet(self):
        conv = self.conv
        reply = self.comment.addReply('reply1', 'body1')
        self.assertEqual(reply.Title(), 'reply1')

    def testInReplyTo(self):
        reply = self.comment.addReply('reply1', 'body1')
        self.failUnlessEqual(self.comment, reply.inReplyTo())

    def testGetReplies(self):
        reply = self.comment.addReply('reply1', 'body1')
        reply2 = self.comment.addReply('reply2', 'body2')
        self.failUnlessEqual(len(self.comment.getReplies()), 2)
        self.failUnless(reply in self.comment.getReplies())
        self.failUnless(reply2 in self.comment.getReplies())

    def testGetTitle(self):
        self.failUnlessEqual(self.comment.getTitle(), 'comment1')

    def testGetText(self):
        self.failUnlessEqual(self.comment.getText(), 'comment1 body')

    def testSetInReplyTo(self):
        forum = self.forum
        conv = forum.addConversation('subject', 'body')
        msg = conv.addComment('msg_subject', 'msg_body')
        msg1 = conv.addComment('msg_subject1', 'msg_body1')
        msg1.setInReplyTo(msg)
        self.assertEqual(msg.getId(), msg1.inReplyTo().getId())

    def testDeleteReply(self):
        conv = self.conv
        m = conv.objectValues()[0]
        self.assertEqual(conv.getNumberOfComments(), 2)
        r = m.addReply('reply1', 'body1')
        self.assertEqual(conv.getNumberOfComments(), 3)
        m.deleteReply(r)
        self.assertEqual(len(m.getReplies()), 0)

    def testMakeBranch(self):
        # Branching moves a reply subtree into a brand new conversation.
        forum = self.forum
        conv = self.conv
        comment = conv.objectValues()[0]
        reply = comment.addReply('reply1', 'body1')
        reply1 = reply.addReply('reply2', 'body2')
        self.assertEqual(conv.getNumberOfComments(), 4)
        self.assertEqual(forum.getNumberOfConversations(), 1)
        branch = reply.makeBranch()
        self.assertEqual(conv.getNumberOfComments(), 2)
        self.assertEqual(forum.getNumberOfConversations(), 2)
        self.failIfEqual(branch, conv)
        self.assertEqual(branch.getNumberOfComments(), 2)

    def testChildIds(self):
        conv = self.conv
        r = self.comment.addReply('reply_subject', 'reply_body')
        r1 = self.comment.addReply('reply_subject1', 'reply_body1')
        r2 = self.comment.addReply('reply_subject2', 'reply_body2')
        r2.addReply('rs', 'rb').addReply('rs1', 'rb1').addReply('rs2', 'rb2')
        # 3 direct replies + 3 nested under r2 = 6 descendants.
        self.assertEqual(len(self.comment.childIds()), 6)

    def testTransforms(self):
        conv = self.conv
        text = 'Smiley :)'
        self.comment.setText(text)
        self.failUnless(self.comment.getText())
        # The configured transforms (e.g. emoticons) must alter the raw text.
        self.failIfEqual(self.comment.getText(), text)
        self.failUnlessEqual(self.portal.portal_ploneboard.performCommentTransform(text), self.comment.getText())

    def XXXtestDeleting(self):
        # Disabled placeholder (the XXX prefix keeps it out of the run).
        pass

    def testNewCommentIsVisibleToAnonymous(self):
        comment = self.conv.addComment('subject2', 'body2')
        id = comment.getId()
        self.logout()
        comments = self.conv.getComments()
        self.failUnless(id in [x.getId() for x in comments])

    def testMemberWithNoFullname(self):
        addMember(self, 'membernofullname', fullname='')
        self.login('membernofullname')
        comment = self.conv.addComment('subject3', 'body3')
        commentview = comment.restrictedTraverse('@@singlecomment_view')
        # The view falls back to the userid when no fullname is set.
        self.assertEqual(commentview.author(), 'membernofullname')

    def testMemberWithFullname(self):
        addMember(self, 'memberwithfullname', fullname='MemberName')
        self.login('memberwithfullname')
        comment = self.conv.addComment('subject4', 'body4')
        commentview = comment.restrictedTraverse('@@singlecomment_view')
        self.assertEqual(commentview.author(), 'MemberName')
class TestPloneboardCommentAttachmentSupport(PloneboardTestCase.PloneboardTestCase):
    """Attachment handling on comments: add/remove, size and count limits,
    plus reply-deletion semantics.  Only run when SimpleAttachment is
    available (see test_suite)."""

    def afterSetUp(self):
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')
        self.forum = _createObjectByType('PloneboardForum', self.board, 'forum')
        self.conv = self.forum.addConversation('conv1', 'conv1 body')
        self.comment = self.conv.addComment("comment1", "comment1 body")

    def testAddAttachment(self):
        conv = self.conv
        self.assertEqual(self.comment.getNumberOfAttachments(), 0)
        file = File('testfile', 'testtitle', 'asdf')
        self.comment.addAttachment(file=file, title='comment')
        self.assertEqual(self.comment.getNumberOfAttachments(), 1)
        self.failUnless(self.comment.getAttachment('testfile'))

    def testHasAttachment(self):
        # TODO: not yet implemented.
        pass

    def testRemoveAttachment(self):
        conv = self.conv
        file = File('testfile', 'testtitle', 'asdf')
        self.comment.addAttachment(file=file, title='comment')
        self.assertEqual(self.comment.getNumberOfAttachments(), 1)
        self.comment.removeAttachment('testfile')
        self.assertEqual(self.comment.getNumberOfAttachments(), 0)

    def testAttachmentRestrictionChanging(self):
        # The forum's max-attachments setting is reflected on the comment.
        conv = self.conv
        self.forum.setMaxAttachments(10)
        self.failUnlessEqual(self.comment.getNumberOfAllowedAttachments(), 10)
        self.forum.setMaxAttachments(1)
        self.failUnlessEqual(self.comment.getNumberOfAllowedAttachments(), 1)

    def tryAttachmentSizeRestrictions(self, msg):
        # Helper: a 1 KB limit accepts a tiny file, rejects a 2 KB one,
        # and a 2 KB limit accepts it again.
        self.forum.setMaxAttachments(10)
        self.forum.setMaxAttachmentSize(1)
        file = File('testfile', 'testtitle', 'X')
        msg.addAttachment(file=file, title='comment')
        msg.removeAttachment('testfile')
        file = File('testfile', 'testtitle', 'X'*2048)
        try:
            msg.addAttachment(file=file, title='comment')
        except ValueError:
            pass
        else:
            self.fail("Can add too many attachments")
        self.forum.setMaxAttachmentSize(2)
        msg.addAttachment(file=file, title='comment')

    def testAttachmentSizeRestriction(self):
        conv = self.conv
        self.tryAttachmentSizeRestrictions(self.comment)

    def testAttachmentSizeRestrictionsOnChild(self):
        conv = self.conv
        reply = self.comment.addReply('reply1', 'body1')
        self.tryAttachmentSizeRestrictions(reply)

    def testAttachmentNumberRestriction(self):
        conv = self.conv
        self.forum.setMaxAttachments(1)
        file = File('testfile', 'testtitle', 'asdf')
        self.comment.addAttachment(file=file, title='comment')
        file = File('testfile2', 'testtitle2', 'asdf')
        try:
            self.comment.addAttachment(file=file, title='another comment')
        except ValueError:
            pass
        else:
            self.fail("Can add too many attachments")

    def testGetAttachments(self):
        conv = self.conv
        self.forum.setMaxAttachments(5)
        file = File('testfile', 'testtitle', 'asdf')
        self.comment.addAttachment(file=file, title='comment')
        file1 = File('testfile1', 'testtitle1', 'asdf')
        self.comment.addAttachment(file=file1, title='comment1')
        self.assertEqual(len(self.comment.getAttachments()), 2)
        self.failUnless('comment' in [v.Title() for v in self.comment.getAttachments()])
        self.failUnless('comment1' in [v.Title() for v in self.comment.getAttachments()])

    def testDeleteing(self):
        """Test deleting a comment.
        """
        # Was going to use doctests for this until I realized that
        # PloneTestCase has no doctest capability :(
        # - Rocky
        # Actually - it does!
        # see http://plone.org/documentation/tutorial/testing :)
        # - Martin
        # Now lets start adding some comments:
        first_comment = self.conv.getFirstComment()
        c1 = first_comment.addReply('foo1', 'bar1')
        c2 = first_comment.addReply('foo2', 'bar2')
        c21 = c2.addReply('foo3', 'bar3')
        # Make sure the first comment has exactly two replies:
        self.assert_(first_comment.getReplies(), [c1, c2])
        # Now lets try deleting the first reply to the main comment:
        c1.delete()
        self.assert_(first_comment.getReplies(), [c2])
        # Ok, so lets try deleting a comment that has replies to it:
        c2.delete()
        # Now even though we deleted the last remaining reply to
        # first_comment, we should still have another reply because
        # deleting a reply that had a child will make that child seem
        # as though it is a reply to its parent's parent.
        self.assert_(first_comment.getReplies(), [c21])
        # lets add a comment to c21
        c211 = c21.addReply('foo4', 'bar4')
        # Once the only root comment is deleted that means the conversation's
        # sole root comment should become c211
        c21.delete()
        self.assert_(self.conv.getRootComments(), [c211])
def test_suite():
    """Collect comment tests; attachment tests only when SimpleAttachment
    is installed."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestPloneboardComment))
    if HAS_SIMPLEATTACHMENT:
        suite.addTest(unittest.makeSuite(TestPloneboardCommentAttachmentSupport))
    return suite
| Python |
#
# Comment tests
#
import unittest
from Products.Ploneboard.tests import PloneboardTestCase
from Products.CMFPlone.utils import _createObjectByType
class TestITextContentAdapter(PloneboardTestCase.PloneboardTestCase):
    """Exercise the ITextContent adapter around PloneboardComment."""

    def afterSetUp(self):
        from Products.ATContentTypes.interface import ITextContent
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')
        self.forum = _createObjectByType('PloneboardForum', self.board, 'forum')
        self.conv = self.forum.addConversation('conv1', 'conv1 body')
        self.comment = self.conv.addComment("c1 title", "c1 body")
        self.textContent = ITextContent(self.comment)

    def testGetText(self):
        self.assertEqual(self.comment.getText(),
                         self.textContent.getText())

    def testSetText(self):
        # Setting through the adapter is visible via both interfaces.
        s = 'blah'
        self.textContent.setText('blah')
        self.assertEqual(self.comment.getText(), s)
        self.assertEqual(self.textContent.getText(), s)

    def testCookedBody(self):
        self.assertEqual(self.textContent.CookedBody(),
                         self.comment.getText())

    def testEditableBody(self):
        # Bug fix: this test previously called CookedBody() again, so the
        # EditableBody accessor (which should expose the raw text) was
        # never exercised.
        self.assertEqual(self.textContent.EditableBody(),
                         self.comment.getRawText())
def test_suite():
    """Collect the ITextContent adapter tests into a suite."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestITextContentAdapter))
    return suite
| Python |
#
# Ploneboard tests
#
import unittest
from Products.Ploneboard.tests import PloneboardTestCase
from Products.Ploneboard.batch import Batch
from Products.CMFPlone.utils import _createObjectByType
class TestBatch(PloneboardTestCase.PloneboardTestCase):
    """Tests for the Ploneboard Batch helper (paging with orphan support)."""

    def afterSetUp(self):
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')
        self.forum = _createObjectByType('PloneboardForum', self.board, 'forum')

    def batch(self, size=5, start=0, orphan=1):
        """Build a Batch over the forum's conversations."""
        return Batch(self.forum.getConversations,
                     self.forum.getNumberOfConversations(),
                     size, start, orphan=orphan)

    def sorted(self, b):
        """Return the batch's items sorted by title."""
        items = [x for x in b]
        # Modernized: key= sort instead of the Python-2-only cmp-style
        # comparator; ordering is identical.
        items.sort(key=lambda x: x.Title())
        return items

    def sbatch(self, size=5, start=0, orphan=1):
        """Return (batch, title-sorted items) for the given page settings."""
        b = self.batch(size, start, orphan)
        return b, self.sorted(b)

    def testEmptyBatch(self):
        b = self.batch()
        self.assertEqual(len(b), 0)
        self.assertEqual(b.next, None)
        self.assertEqual(b.previous, None)

    def testLessThanOnePage(self):
        for i in range(3):
            self.forum.addConversation('Title %02s' % i)
        b, items = self.sbatch()
        self.assertEqual(len(b), 3)
        for i in range(3):
            self.assertEqual(items[i].Title(), 'Title %02s' % i)
        self.assertEqual(b.next, None)
        self.assertEqual(b.previous, None)

    def testExactlyOnePage(self):
        for i in range(5):
            self.forum.addConversation('Title %02s' % i)
        b, items = self.sbatch()
        self.assertEqual(len(b), 5)
        for i in range(5):
            self.assertEqual(items[i].Title(), 'Title %02s' % i)
        self.assertEqual(b.next, None)
        self.assertEqual(b.previous, None)

    def testOnePagePlusOrphan(self):
        # 6 items with orphan=1 still fit on one page.
        for i in range(6):
            self.forum.addConversation('Title %02s' % i)
        b, items = self.sbatch()
        self.assertEqual(len(b), 6)
        for i in range(6):
            self.assertEqual(items[i].Title(), 'Title %02s' % i)
        self.assertEqual(b.next, None)
        self.assertEqual(b.previous, None)

    # note: when testing more than one page, we don't test for the
    # exact objects returned, because getConversations returns things
    # sorted by modified date, but when we create objects in a loop like
    # below, the resolution of the timestamp isn't good enough, and thus order
    # is sometimes unpredictable, leading to non-deterministic tests.
    def testOnePagePlusOneMoreThanOrphan(self):
        for i in range(7):
            self.forum.addConversation('Title %02s' % i)
        b = self.batch()
        self.assertEqual(b.previous, None)
        self.assertNotEqual(b.next, None)
        self.assertEqual(len(b.next), 2)

    def testGetLastPage(self):
        for i in range(8):
            self.forum.addConversation('Title %02s' % i)
        b = self.batch(start=5)
        self.assertEqual(len(b), 3)
        self.assertEqual(b.next, None)
        self.assertNotEqual(b.previous, None)
        self.assertEqual(len(b.previous), 5)

    def testGetMiddlePage(self):
        for i in range(12):
            self.forum.addConversation('Title %02s' % i)
        b = self.batch(start=5)
        self.assertEqual(len(b), 5)
        self.assertNotEqual(b.next, None)
        self.assertNotEqual(b.previous, None)
        self.assertEqual(len(b.next), 2)
        self.assertEqual(len(b.previous), 5)
def test_suite():
    """Collect the batch tests into a suite."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestBatch))
    return suite
| Python |
#
# Tests the default workflow
#
from AccessControl.Permission import Permission
from Products.CMFPlone.tests import PloneTestCase
from Products.CMFCore.utils import _checkPermission as checkPerm
from Products.CMFPlone.utils import _createObjectByType
from Products.Ploneboard.Extensions import WorkflowScripts # Catch errors
from Products.Ploneboard.tests import PloneboardTestCase
from Products.Ploneboard import permissions
# Convenience alias for the default PloneTestCase user id.
default_user = PloneTestCase.default_user
class TestCommentWorkflow(PloneboardTestCase.PloneboardTestCase):
    """Default comment workflow: auto-publish in member-posting forums and
    edit-permission checks."""

    def afterSetUp(self):
        self.workflow = self.portal.portal_workflow
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')
        self.forum = _createObjectByType('PloneboardForum', self.board, 'forum')
        self.conv = self.forum.addConversation('conv1', 'conv1 body')
        self.comment = self.conv.addComment("title", "body")
        # One user per role we want to exercise.
        self.portal.acl_users._doAddUser('member', 'secret', ['Member'], [])
        self.portal.acl_users._doAddUser('member2', 'secret', ['Member'], [])
        self.portal.acl_users._doAddUser('reviewer', 'secret', ['Reviewer'], [])
        self.portal.acl_users._doAddUser('manager', 'secret', ['Manager'], [])

    # Check allowed transitions
    def testAutoPublishMemberposting(self):
        # In the default 'memberposting' forum state a plain member may
        # approve, and new content is published immediately.
        self.login('member')
        self.failUnless(checkPerm(permissions.ApproveComment, self.forum))
        self.failUnless(checkPerm(permissions.ApproveComment, self.conv))
        self.failUnless(checkPerm(permissions.ApproveComment, self.comment))
        self.assertEqual(self.workflow.getInfoFor(self.forum, 'review_state'), 'memberposting')
        self.assertEqual(self.workflow.getInfoFor(self.conv, 'review_state'), 'active')
        self.assertEqual(self.workflow.getInfoFor(self.comment, 'review_state'), 'published')

    # make_moderated disabled until moderation is fixed in general
    # def testAutoSubmitModerated(self):
    #     self.workflow.doActionFor(self.forum, 'make_moderated')
    #
    #     self.login('member')
    #
    #     conv = self.forum.addConversation('conv2', 'conv2 body')
    #     comment = conv.objectValues()[0]
    #
    #     self.failIf(checkPerm(permissions.ApproveComment, self.forum))
    #     self.failIf(checkPerm(permissions.ApproveComment, self.conv))
    #     self.failIf(checkPerm(permissions.ApproveComment, comment))
    #
    #     self.assertEqual(self.workflow.getInfoFor(self.forum, 'review_state'), 'moderated')
    #     self.assertEqual(self.workflow.getInfoFor(conv, 'review_state'), 'pending')
    #     self.assertEqual(self.workflow.getInfoFor(comment, 'review_state'), 'pending')

    def testCommentEditing(self):
        # Managers may edit another user's comment; plain members may not.
        self.login('manager')
        conv = self.forum.addConversation('conv2', 'conv2 body')
        self.failUnless(checkPerm(permissions.EditComment, self.comment))
        self.logout()
        self.login('member2')
        self.failIf(checkPerm(permissions.EditComment, self.comment))
class TestWorkflowsCreation(PloneboardTestCase.PloneboardTestCase):
    """Verify that the Ploneboard workflows and permissions are installed."""

    def afterSetUp(self):
        self.workflow = self.portal.portal_workflow

    def testWorkflowsCreated(self):
        workflows = ['ploneboard_workflow', 'ploneboard_forum_workflow',
                     'ploneboard_conversation_workflow', 'ploneboard_comment_workflow']
        for workflow in workflows:
            self.failUnless(workflow in self.workflow.objectIds(), "%s missing" % workflow)

    def XXXtestPreserveChainsOnReinstall(self):
        # Disable this test: GenericSetup profiles will always overwrite the
        # workflow chains for the types
        boardtypes = ('Ploneboard',
                      'PloneboardForum',
                      'PloneboardConversation',
                      'PloneboardComment')
        self.workflow.setChainForPortalTypes(boardtypes, 'plone_workflow')
        self.workflow.getChainForPortalType('Ploneboard')
        for boardtype in boardtypes:
            self.failUnless('plone_workflow' in self.workflow.getChainForPortalType(boardtype),
                            'Workflow chain for %s not set' % boardtype)
        self.portal.portal_quickinstaller.reinstallProducts(['Ploneboard'])
        for boardtype in boardtypes:
            chain = self.workflow.getChainForPortalType(boardtype)
            self.failUnless('plone_workflow' in chain,
                            'Overwritten workflow chain for %s: %s' % (boardtype, ', '.join(chain)))

    def testPermissionsOnPortal(self):
        # Members must be allowed to add comment attachments at the root.
        p=Permission('Ploneboard: Add Comment Attachment', (), self.portal)
        roles=p.getRoles()
        self.failUnless('Member' in roles)
def test_suite():
    """Collect the workflow tests into a suite."""
    from unittest import TestSuite, makeSuite
    suite = TestSuite()
    suite.addTest(makeSuite(TestCommentWorkflow))
    suite.addTest(makeSuite(TestWorkflowsCreation))
    return suite
| Python |
#
# Event notification tests
#
import unittest
import zope.component
from Products.Ploneboard.tests import PloneboardTestCase
from Products.Archetypes.event import ObjectInitializedEvent
from Products.CMFPlone.utils import _createObjectByType
# Module-level recorder: collects every object for which an
# ObjectInitializedEvent fired while the handler below is registered.
notified = []

@zope.component.adapter(ObjectInitializedEvent)
def dummyEventHandler(event):
    """Record the event's subject so tests can assert on it."""
    notified.append(event.object)
class TestPloneboardEventNotifications(PloneboardTestCase.PloneboardTestCase):
    """Test the events that should be fired when conversations or comments are added"""

    def afterSetUp(self):
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')
        self.forum = _createObjectByType('PloneboardForum', self.board, 'forum')
        # Robustness fix: reset the module-level recorder so objects from
        # earlier tests or repeated runs cannot leak into this test's
        # assertions (the list was previously never cleared).
        del notified[:]
        zope.component.provideHandler(dummyEventHandler)

    def testPloneboardEventNotifications(self):
        # Both conversation and comment creation must fire
        # ObjectInitializedEvent for the new object.
        conv = self.forum.addConversation('subject', 'body')
        self.failUnless(conv in notified)
        comment = conv.addComment('subject', 'body')
        self.failUnless(comment in notified)
def test_suite():
    """Assemble the event-notification test suite."""
    tests = unittest.makeSuite(TestPloneboardEventNotifications)
    suite = unittest.TestSuite()
    suite.addTest(tests)
    return suite
| Python |
#
# Ploneboard transform tests
#
import unittest
from Products.Ploneboard.tests import PloneboardTestCase
from Products.CMFCore.utils import getToolByName
from Products.Ploneboard.config import PLONEBOARD_TOOL
from Products.CMFPlone.utils import _createObjectByType
class TestTransformRegistration(PloneboardTestCase.PloneboardTestCase):
    """Test transform registration """

    def afterSetUp(self):
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')

    def testDefaultRegistrations(self):
        """Check if the default registrations are present."""
        # Ploneboard registers three transforms and enables all of them
        # by default (url_to_hyperlink, text_to_emoticons, safe_html --
        # see the unregister tests below for the names).
        tool = getToolByName(self.portal, PLONEBOARD_TOOL)
        self.failUnlessEqual(len(tool.getTransforms()), 3)
        self.failUnlessEqual(len(tool.getEnabledTransforms()), 3)

    def testDisabling(self):
        """Try registering and unregistering a transform"""
        tool = getToolByName(self.portal, PLONEBOARD_TOOL)
        tool.enableTransform('safe_html', enabled=False)
        self.failIf('safe_html' in tool.getEnabledTransforms())
        tool.enableTransform('safe_html')
        self.failUnless('safe_html' in tool.getEnabledTransforms())

    def testUnregisteringAllRemovesOnlyThoseAdded(self):
        # safe_html already exists in portal_transforms before Ploneboard
        # is installed, so a blanket unregister must leave it in place.
        tool = getToolByName(self.portal, PLONEBOARD_TOOL)
        tool.unregisterAllTransforms()
        transforms = getToolByName(self.portal, 'portal_transforms')
        self.failIf('url_to_hyperlink' in transforms.objectIds())
        self.failIf('text_to_emoticons' in transforms.objectIds())
        self.failUnless('safe_html' in transforms.objectIds())

    def testUnregisteringIndividualRemovesOnlyThoseAdded(self):
        # Same invariant, exercised one transform at a time.
        tool = getToolByName(self.portal, PLONEBOARD_TOOL)
        transforms = getToolByName(self.portal, 'portal_transforms')
        tool.unregisterTransform('url_to_hyperlink')
        self.failIf('url_to_hyperlink' in transforms.objectIds())
        tool.unregisterTransform('safe_html')
        self.failUnless('safe_html' in transforms.objectIds())
def test_suite():
    """Assemble the transform-registration test suite."""
    suite = unittest.TestSuite()
    for case in (TestTransformRegistration,):
        suite.addTest(unittest.makeSuite(case))
    return suite
| Python |
"""Ploneboard tests package
"""
| Python |
#
# Forum tests
#
import unittest
from zExceptions import Unauthorized
from zope.interface.verify import verifyClass, verifyObject
import PloneboardTestCase
from Products.Ploneboard.interfaces import IForum
from Products.Ploneboard.content.PloneboardForum import PloneboardForum
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import _createObjectByType
class TestPloneboardForum(PloneboardTestCase.PloneboardTestCase):
    """Unit tests for the PloneboardForum content type."""

    def afterSetUp(self):
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')
        self.forum = _createObjectByType('PloneboardForum', self.board, 'forum')

    def testInterfaceVerification(self):
        self.failUnless(verifyClass(IForum, PloneboardForum))

    def testInterfaceConformance(self):
        self.failUnless(IForum.providedBy(self.forum))
        self.failUnless(verifyObject(IForum, self.forum))

    def testForumFields(self):
        """
        Check the fields on Forum, especially the Description field mimetypes.
        """
        forum = self.forum
        self.assertEqual(forum.getId(), 'forum')
        forum.setTitle('title')
        self.assertEqual(forum.Title(), 'title')
        forum.setDescription('description')
        self.assertEqual(forum.Description(), 'description')
        # The description must also render as HTML on request.
        self.assertEqual(forum.Description(mimetype='text/html'), '<p>description</p>')

    def testForumCategory(self):
        # Categories defined on the board become available on the forum.
        self.failUnlessEqual(len(self.forum.getCategories()), 0)
        self.board.setCategories(['Category'])
        self.failUnlessEqual(len(self.forum.getCategories()), 1)

    # Interface tests
    def testGetBoard(self):
        self.failUnlessEqual(self.board, self.forum.getBoard())

    def testGetBoardOutsideStrictContainment(self):
        # A forum created outside any board has no board to report.
        forum = _createObjectByType('PloneboardForum', self.folder, 'forum')
        self.failUnlessEqual(None, forum.getBoard())

    def testAddConversation(self):
        """
        Add a conversation to the forum and check its basic properties.
        """
        forum = self.forum
        conv = forum.addConversation('subject', 'body')
        conv_id = conv.getId()
        self.failUnless(conv_id in forum.objectIds())
        self.assertEqual(conv.Title(), 'subject')

    def testGetConversation(self):
        forum = self.forum
        conv = forum.addConversation('subject', 'body')
        self.failUnlessEqual(conv, forum.getConversation(conv.getId()))

    def testGetConversationOutsideStrictContainment(self):
        # Make a folder inside the forum, then a conversation in the folder
        pass

    def testRemoveConversation(self):
        """
        Remove a conversation and verify it is gone from the forum.
        """
        forum = self.forum
        conv = forum.addConversation('subject', 'body')
        conv_id = conv.getId()
        forum.removeConversation(conv_id)
        self.assertEqual(len(forum.objectIds()), 0)
        self.failIf(conv_id in forum.objectIds())

    def testGetConversations(self):
        forum = self.forum
        conv = forum.addConversation('subject', 'body')
        conv2 = forum.addConversation('subject2', 'body2')
        # Notice reverse ordering, last always first
        self.failUnlessEqual(forum.getConversations(), [conv2, conv])
        # Check to make sure it doesn't get comments elsewhere
        forum2 = _createObjectByType('PloneboardForum', self.board, 'forum2')
        forum2.addConversation('subject', 'body')
        self.failUnlessEqual(forum.getConversations(), [conv2, conv])

    def testGetConversationsWithSlicing(self):
        forum = self.forum
        conv = forum.addConversation('subject', 'body')
        conv2 = forum.addConversation('subject2', 'body2')
        # limit/offset slice into the reverse-ordered (newest-first) listing.
        self.failUnlessEqual(forum.getConversations(limit=1, offset=0), [conv2])
        self.failUnlessEqual(forum.getConversations(limit=1, offset=1), [conv])

    def testGetConversationsOutsideStrictContainment(self):
        # Make a folder inside the forum, then a conversation in the folder
        pass

    def testGetNumberOfConversations(self):
        forum = self.forum
        self.failUnlessEqual(forum.getNumberOfConversations(), 0)
        conv = forum.addConversation('subject', 'body')
        self.failUnlessEqual(forum.getNumberOfConversations(), 1)
        conv2 = forum.addConversation('subject2', 'body2')
        self.failUnlessEqual(forum.getNumberOfConversations(), 2)
        forum.removeConversation(conv.getId())
        self.failUnlessEqual(forum.getNumberOfConversations(), 1)
        # Check to make sure it doesn't count conversations elsewhere
        forum2 = _createObjectByType('PloneboardForum', self.board, 'forum2')
        conv = forum2.addConversation('subject', 'body')
        self.failUnlessEqual(forum.getNumberOfConversations(), 1)

    def testGetNumberOfComments(self):
        # A conversation's opening post counts as its first comment.
        forum = self.forum
        self.failUnlessEqual(forum.getNumberOfComments(), 0)
        conv = forum.addConversation('subject', 'body')
        self.failUnlessEqual(forum.getNumberOfComments(), 1)
        conv2 = forum.addConversation('subject2', 'body2')
        self.failUnlessEqual(forum.getNumberOfComments(), 2)
        forum.removeConversation(conv.getId())
        self.failUnlessEqual(forum.getNumberOfComments(), 1)
        conv2.addComment('followup', 'text')
        self.failUnlessEqual(forum.getNumberOfComments(), 2)
        # Check to make sure it doesn't count comments elsewhere
        forum2 = _createObjectByType('PloneboardForum', self.board, 'forum2')
        conv = forum2.addConversation('subject', 'body')
        conv.addComment("another", "another")
        self.failUnlessEqual(forum.getNumberOfComments(), 2)
class TestPloneboardForumRSSFeed(PloneboardTestCase.PloneboardTestCase):
    """Tests for the @@RSS view of a single forum."""

    def afterSetUp(self):
        self.board = _createObjectByType('Ploneboard', self.folder, 'board',
                                         title='Test Board')
        self.forum=self.board.addForum('forum1', 'Title one', 'Description one')
        self.syn_tool = getToolByName(self.portal, 'portal_syndication')
        self.view = self.forum.restrictedTraverse("@@RSS")

    def testDisblingSyndication(self):
        # NOTE(review): method name has a typo ('Disbling'); left as-is,
        # renaming is cosmetic and would change the reported test id.
        # Forums allow syndication by default; it must be switchable off.
        self.assertEqual(self.syn_tool.isSyndicationAllowed(self.forum), True)
        self.syn_tool.disableSyndication(self.forum)
        self.assertEqual(self.syn_tool.isSyndicationAllowed(self.forum), False)

    def testViewNotAllowedWithSyndicationDisabled(self):
        self.syn_tool.disableSyndication(self.forum)
        self.assertRaises(Unauthorized, self.view.__call__)

    def testViewUrl(self):
        self.assertEqual(self.view.url(), self.forum.absolute_url())

    def testViewDate(self):
        self.assertEqual(self.view.date(), self.forum.modified().HTML4())

    def testViewTitle(self):
        self.assertEqual(self.view.title(), 'Title one')

    def testHumbleBeginnings(self):
        # An empty forum yields an empty feed.
        self.view.update()
        self.assertEqual(self.view.comments, [])

    def testFirstComment(self):
        # Two feed entries: the conversation's opening text plus the reply.
        conv=self.forum.addConversation('Conversation one', 'Text one')
        conv.addComment("comment title", "comment body")
        self.view.update()
        self.assertEqual(len(self.view.comments), 2)

    def testCommentInfo(self):
        # Newest comment first; check the dict the view builds per entry.
        conv=self.forum.addConversation('Conversation one', 'Text one')
        conv.addComment("comment title", "comment body")
        self.view.update()
        comment=self.view.comments[0]
        self.assertEqual(comment['title'], 'comment title')
        self.assertEqual(comment['description'], 'comment body')
        self.assertEqual(comment['author'], 'test_user_1_')
        self.failUnless('date' in comment)
        self.failUnless('url' in comment)
def test_suite():
    """Assemble the forum test suite (content type + RSS view)."""
    suite = unittest.TestSuite()
    for case in (TestPloneboardForum, TestPloneboardForumRSSFeed):
        suite.addTest(unittest.makeSuite(case))
    return suite
| Python |
#
# Comment tests
#
import unittest
import PloneboardTestCase
from Products.Ploneboard.transforms.url_to_hyperlink import URLToHyperlink
class MockTransformData:
    """Minimal stand-in for a PortalTransforms data object.

    It simply remembers the last value handed to ``setData`` in the
    ``data`` attribute so tests can inspect the transform output.
    """

    def setData(self, value):
        # Keep the converted payload for later assertions.
        self.data = value
class TestUrlTransform(PloneboardTestCase.PloneboardTestCase):
    """Tests for the url_to_hyperlink transform."""

    def runTest(self, testdata):
        # Helper: run each (input, expected) pair through the transform.
        # NOTE(review): this shadows unittest.TestCase.runTest with a
        # different signature; harmless here because makeSuite collects
        # only test* methods, but confusing -- consider renaming.
        transform = URLToHyperlink()
        data=MockTransformData()
        for (input, answer) in testdata:
            transform.convert(input, data)
            self.failUnlessEqual(data.data, answer)

    def testPlainText(self):
        # Text without a valid URI scheme passes through untouched.
        testdata = [
            ("just a simple string", "just a simple string"),
            ("htXtp:bla invalid scheme", "htXtp:bla invalid scheme"),
        ]
        self.runTest(testdata)

    def testPlainUrls(self):
        # Bare URLs become anchors; the original text is kept as link text.
        testdata = [
            ("http://simple.url/", '<a href="http://simple.url/">http://simple.url/</a>'),
            # XXX are URI schemes really case insensitive?
            ("HTtp://simple.url/", '<a href="HTtp://simple.url/">HTtp://simple.url/</a>'),
            ("https://simple.url/", '<a href="https://simple.url/">https://simple.url/</a>'),
            ("telnet://simple.url/", '<a href="telnet://simple.url/">telnet://simple.url/</a>'),
        ]
        self.runTest(testdata)

    def testUrlElements(self):
        # URLs already wrapped in angle brackets must not be re-linked.
        testdata = [
            ("<telnet://simple.url/>", '<telnet://simple.url/>'),
            ("< telnet://simple.url/>", '< telnet://simple.url/>'),
            ("<telnet://simple.url />", '<telnet://simple.url />'),
            ("http://change.this/ <telnet://simple.url/>", '<a href="http://change.this/">http://change.this/</a> <telnet://simple.url/>'),
        ]
        self.runTest(testdata)

    def testEmail(self):
        # Bare addresses become mailto: links; bracketed ones are left alone.
        testdata = [
            ("test@example.com", '<a href="mailto:test@example.com">test@example.com</a>'),
            ("<test@example.com>", "<test@example.com>"),
        ]
        self.runTest(testdata)
def test_suite():
    """Assemble the URL transform test suite."""
    suite = unittest.TestSuite()
    for case in (TestUrlTransform,):
        suite.addTest(unittest.makeSuite(case))
    return suite
| Python |
#
# Ploneboard tests
#
import unittest
from zExceptions import Unauthorized
from zope.interface.verify import verifyClass, verifyObject
from Products.Ploneboard.tests import PloneboardTestCase
from Products.Ploneboard.interfaces import IPloneboard, IForum
from Products.Ploneboard.content.Ploneboard import Ploneboard
from Products.CMFCore.utils import getToolByName
# Catch errors in Install
from Products.Ploneboard.Extensions import Install
from Products.CMFPlone.utils import _createObjectByType
class TestPloneboardBasics(PloneboardTestCase.PloneboardTestCase):
    """Test the basics, like creation"""

    def testPloneboardCreation(self):
        """Try creating and deleting a board"""
        board_id = 'board'
        board = _createObjectByType('Ploneboard', self.portal, board_id)
        self.failIfEqual(board, None)
        self.failUnless(board_id in self.portal.objectIds())
        # Deleting must remove the board from the portal again.
        self.portal._delObject(board_id)
        self.failIf(board_id in self.portal.objectIds())
class TestPloneboardInterface(PloneboardTestCase.PloneboardTestCase):
    """Test if it fulfills the interface"""

    def afterSetUp(self):
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')

    def testInterfaceVerification(self):
        self.failUnless(verifyClass(IPloneboard, Ploneboard))

    def testInterfaceConformance(self):
        self.failUnless(IPloneboard.providedBy(self.board))
        self.failUnless(verifyObject(IPloneboard, self.board))

    def testAddForum(self):
        """Create new folder in home directory & check its basic
        properties and behaviour"""
        board = self.board
        forum_id = 'forum'
        board.addForum(forum_id, 'title', 'description')
        self.failUnless(forum_id in board.objectIds())
        forum = getattr(board, forum_id)
        self.failUnless(IForum.providedBy(forum))

    def testRemoveForum(self):
        board = self.board
        forum_id = 'forum'
        board.addForum(forum_id, 'title', 'description')
        board.removeForum(forum_id)
        self.failIf(forum_id in board.objectIds())

    def testGetForum(self):
        board = self.board
        forum_id = 'forum'
        board.addForum(forum_id, 'title', 'description')
        forum = board.getForum(forum_id)
        self.failUnless(IForum.providedBy(forum))

    def testGetForumIds(self):
        # Ids come back in creation order.
        board = self.board
        forum_ids = ['forum1', 'forum2']
        for forum_id in forum_ids:
            board.addForum(forum_id, 'title', 'description')
        self.failUnlessEqual(forum_ids, board.getForumIds())

    def testGetForums(self):
        # Order of getForums() is not asserted, only the membership.
        board = self.board
        forum_ids = ['forum1', 'forum2']
        for forum_id in forum_ids:
            board.addForum(forum_id, 'title', 'description')
        self.failUnlessEqual(
            set([board.getForum(forum_id) for forum_id in forum_ids]),
            set(board.getForums()))

    def testSearchComments(self):
        pass
class TestPloneboardWithoutContainment(PloneboardTestCase.PloneboardTestCase):
    """Test a single board used more as a topic, with forums in folders"""

    def afterSetUp(self):
        self.board = _createObjectByType('Ploneboard', self.folder, 'board')

    def testGetForumsOutsideBoard(self):
        # With sitewide=True the board must also pick up forums that live
        # outside its own containment hierarchy.
        forum1 = _createObjectByType('PloneboardForum', self.folder, 'forum1')
        forum2 = _createObjectByType('PloneboardForum', self.folder, 'forum2')
        forums = self.board.getForums(sitewide=True)
        self.failUnless(forum1 in forums)
        self.failUnless(forum2 in forums)
class TestPloneboardRSSFeed(PloneboardTestCase.PloneboardTestCase):
    """Tests for the @@RSS view of a whole board."""

    def afterSetUp(self):
        self.board = _createObjectByType('Ploneboard', self.folder, 'board',
                                         title='Test Board')
        self.syn_tool = getToolByName(self.portal, 'portal_syndication')
        self.view = self.board.restrictedTraverse("@@RSS")

    def testEnablingSyndication(self):
        # Unlike forums (see the forum tests), boards start with
        # syndication disabled and must be switchable on.
        self.assertEqual(self.syn_tool.isSyndicationAllowed(self.board), False)
        self.syn_tool.enableSyndication(self.board)
        self.assertEqual(self.syn_tool.isSyndicationAllowed(self.board), True)

    def testViewNotAllowedWithSyndicationDisabled(self):
        self.assertRaises(Unauthorized, self.view.__call__)

    def testViewUrl(self):
        self.assertEqual(self.view.url(), self.board.absolute_url())

    def testViewDate(self):
        self.assertEqual(self.view.date(), self.board.modified().HTML4())

    def testViewTitle(self):
        self.assertEqual(self.view.title(), 'Test Board')

    def testHumbleBeginnings(self):
        # An empty board yields an empty feed.
        self.view.update()
        self.assertEqual(self.view.comments, [])

    def testFirstComment(self):
        forum=self.board.addForum('forum1', 'Title one', 'Description one')
        conv=forum.addConversation('Conversation one', 'Text one')
        conv.addComment("comment title", "comment body")
        self.view.update()
        self.assertEqual(len(self.view.comments), 2) # original text is first comment

    def testCommentInfo(self):
        # Newest comment first; check the dict the view builds per entry.
        forum=self.board.addForum('forum1', 'Title one', 'Description one')
        conv=forum.addConversation('Conversation one', 'Text one')
        conv.addComment("comment title", "comment body")
        self.view.update()
        comment=self.view.comments[0]
        self.assertEqual(comment['title'], 'comment title')
        self.assertEqual(comment['description'], 'comment body')
        self.assertEqual(comment['author'], 'test_user_1_')
        self.failUnless('date' in comment)
        self.failUnless('url' in comment)
def test_suite():
    """Assemble the board test suite."""
    cases = (TestPloneboardBasics,
             TestPloneboardInterface,
             TestPloneboardWithoutContainment,
             TestPloneboardRSSFeed)
    suite = unittest.TestSuite()
    for case in cases:
        suite.addTest(unittest.makeSuite(case))
    return suite
| Python |
from Products.CMFCore import permissions
from Products.CMFCore.permissions import setDefaultRoles
# Add permissions differ for each type, and are imported by __init__.initialize
# so don't change their names!
ViewBoard = permissions.View
SearchBoard = 'Ploneboard: Search'
AddBoard = AddPloneboard = 'Ploneboard: Add Ploneboard'
# NOTE(review): ManageBoard reuses the *Add* Ploneboard permission string
# (same for ManageForum below).  Looks like a copy/paste, but workflows
# and ZCML reference these strings -- confirm before changing.
ManageBoard = 'Ploneboard: Add Ploneboard'
RequestReview = permissions.RequestReview
AddForum = AddPloneboardForum = 'Ploneboard: Add Forum'
ManageForum = 'Ploneboard: Add Forum'
AddConversation = AddPloneboardConversation = 'Ploneboard: Add Conversation'
ManageConversation = 'Ploneboard: Manage Conversation'
MoveConversation = 'Ploneboard: Move Conversation'
MergeConversation = 'Ploneboard: Merge Conversation'
AddComment = AddPloneboardComment = 'Ploneboard: Add Comment'
EditComment = permissions.ModifyPortalContent
AddAttachment = AddPloneboardAttachment = 'Ploneboard: Add Comment Attachment'
ManageComment = 'Ploneboard: Manage Comment'
ApproveComment = 'Ploneboard: Approve Comment' # Used for moderation
RetractComment = 'Ploneboard: Retract Comment'
ModerateForum = 'Ploneboard: Moderate Forum'
# Note: if this changes, you must also change configure.zcml!
DeleteComment = permissions.DeleteObjects
# Set up default roles for permissions
# (permissions without an explicit setDefaultRoles call below -- e.g.
# AddForum -- keep Zope's defaults at registration time)
setDefaultRoles(ViewBoard,
                ('Anonymous', 'Member', 'Manager'))
setDefaultRoles(SearchBoard,
                ('Anonymous', 'Member', 'Manager'))
setDefaultRoles(AddBoard,
                ('Manager', 'Owner'))
setDefaultRoles(ManageBoard,
                ('Manager', 'Owner'))
setDefaultRoles(AddConversation,
                ('Authenticated', 'Manager'))
setDefaultRoles(AddComment,
                ('Authenticated', 'Manager'))
setDefaultRoles(AddAttachment,
                ('Manager',))
setDefaultRoles(ManageConversation,
                ('Manager',))
setDefaultRoles(MoveConversation,
                ('Manager',))
setDefaultRoles(MergeConversation,
                ('Manager',))
setDefaultRoles(ManageComment,
                ('Manager',))
setDefaultRoles(ApproveComment,
                ('Manager',))
setDefaultRoles(RetractComment,
                ('Manager',))
setDefaultRoles(ModerateForum,
                ('Manager', 'Reviewer',))
| Python |
from zope.interface import implements
from Products.ATContentTypes.interface import ITextContent
class CommentTextContent(object):
    """Adapter presenting a Ploneboard comment as ITextContent.

    Delegates the text accessors to the adapted comment (``context``)
    and maps the classic CMF body methods onto them.
    """
    implements(ITextContent)

    def __init__(self, context):
        self.context = context

    def getText(self, **kwargs):
        """Return the (cooked) comment text."""
        return self.context.getText()

    def setText(self, value, **kwargs):
        """Store new comment text on the adapted comment."""
        self.context.setText(value, **kwargs)

    def CookedBody(self, stx_level='ignored'):
        """CMF-style alias for the rendered text; stx_level is ignored."""
        return self.getText()

    def EditableBody(self):
        """Return the raw (un-rendered) comment text.

        Bug fix: the raw text lives on the adapted comment, not on this
        adapter -- ``self.getRawText`` did not exist and always raised
        AttributeError.
        """
        return self.context.getRawText()
| Python |
"""Migrate from 0.1b1 to 1.0b.
"""
# Zope imports
from ZODB.PersistentMapping import PersistentMapping
from Acquisition import aq_base
# CMF imports
from Products.CMFCore.utils import getToolByName
# Product imports
class Migration(object):
    """Migrate from 0.1b1 to 1.0b.
    """
    def __init__(self, site, out):
        # ``site`` is the Plone site to migrate; ``out`` is a writable
        # stream that receives progress messages.
        self.site = site
        self.out = out
        self.catalog = getToolByName(self.site, 'portal_catalog')
    def migrate(self):
        """Run migration on site object passed to __init__.
        """
        print >> self.out, u"Migrating Ploneboard 0.1b1 -> 1.0b"
        self.findAndCatalogAndClean()
    def findAndCatalogAndClean(self):
        """Manually find all the ploneboard types, recatalog them, and remove
        their old index objects that are just ZODB turds now.
        """
        # Ploneboard instances themselves should still be cataloged...
        pb_brains = self.catalog(portal_type='Ploneboard')
        forum_count = 0
        conv_count = 0
        comm_count = 0
        for pb in [brain.getObject() for brain in pb_brains]:
            # 0.1b1 kept a per-board catalog; it is obsolete -- drop it.
            if pb.hasObject('ploneboard_catalog'):
                pb._delObject('ploneboard_catalog')
                msg = u"Removed stale 'ploneboard_catalog' from Ploneboard at %s."
                print >> self.out, msg % '/'.join(pb.getPhysicalPath())
            else:
                msg = u"Checked for stale 'ploneboard_catalog' object on %s, but not present."
                print >> self.out, msg % '/'.join(pb.getPhysicalPath())
            # Directly access the stored forums. The Ploneboard API won't work here
            for forum in pb.objectValues('PloneboardForum'):
                forum.reindexObject()
                self._cleanIndex(forum)
                forum_count += 1
                for conv in forum.objectValues('PloneboardConversation'):
                    conv.reindexObject()
                    self._cleanIndex(conv)
                    conv_count += 1
                    for comm in conv.objectValues('PloneboardComment'):
                        comm.reindexObject()
                        comm_count += 1
        msg = "Indexed and cleaned %s forums, %s conversations, and %s comments."
        print >> self.out, msg % (forum_count, conv_count, comm_count)
    def _cleanIndex(self, ob):
        # Remove the stale '_index' attribute left over from 0.1b1, if any.
        if getattr(aq_base(ob), '_index', None) is not None:
            delattr(ob, '_index')
            msg = u"Removed stale '_index' from %s at %s."
            print >> self.out, msg % (ob.meta_type, '/'.join(ob.getPhysicalPath()))
        else:
            msg = u"Checked for stale '_index' attribute on '%s' at %s, but not present."
            print >> self.out, msg % (ob.meta_type, '/'.join(ob.getPhysicalPath()))
| Python |
"""
$Id: __init__.py 53403 2007-11-08 09:54:35Z wichert $
"""
from Products.Archetypes.public import process_types, listTypes
from Products.CMFCore.DirectoryView import registerDirectory
from Products.Ploneboard.PloneboardTool import PloneboardTool
import sys
from Products.Ploneboard.config import SKINS_DIR, GLOBALS, PROJECTNAME
import Products.Ploneboard.catalog
registerDirectory(SKINS_DIR, GLOBALS)
this_module = sys.modules[ __name__ ]
def initialize(context):
    """Zope 2 product initializer.

    Registers the Ploneboard tool and each Archetypes content type with
    its own type-specific add permission.
    """
    ##Import Types here to register them
    import Products.Ploneboard.content
    # If we put this import line to the top of module then
    # utils will magically point to Ploneboard.utils
    from Products.CMFCore import utils
    utils.ToolInit('Ploneboard Tool',
                   tools=(PloneboardTool,),
                   icon='tool.gif'
                   ).initialize(context)
    content_types, constructors, ftis = process_types(
        listTypes(PROJECTNAME),
        PROJECTNAME)
    # Assign an own permission to all content types
    # Heavily based on Bricolite's code from Ben Saller
    import permissions as perms
    allTypes = zip(content_types, constructors)
    for atype, constructor in allTypes:
        kind = "%s: %s" % (PROJECTNAME, atype.archetype_name)
        utils.ContentInit(
            kind,
            content_types = (atype,),
            # Add permissions look like perms.Add{meta_type}
            permission = getattr(perms, 'Add%s' % atype.meta_type),
            extra_constructors = (constructor,),
            fti = ftis,
            ).initialize(context)
# Expose the Batch helper to restricted (through-the-web) code and make it
# importable from this package.
from AccessControl import allow_class
from batch import Batch
allow_class(Batch)
this_module.Batch = Batch
# Avoid breaking old Ploneboard instances when moving content types modules
# from Ploneboard/types/ to Ploneboard/content/
import content
sys.modules['Products.Ploneboard.types'] = content
| Python |
from zope.interface import implements
from zope.app.schema.vocabulary import IVocabularyFactory
from Products.CMFCore.utils import getToolByName
from zope.schema.vocabulary import SimpleVocabulary
class AvailableTransformsVocabulary(object):
    """Vocabulary for available Ploneboard transforms.
    """
    implements(IVocabularyFactory)

    def __call__(self, context):
        # Views pass themselves as context; unwrap to the content object.
        context=getattr(context, "context", context)
        tool=getToolByName(context, "portal_ploneboard")
        # Transform names double as both token and title.
        items=[(t,t) for t in tool.getTransforms()]
        items.sort()
        return SimpleVocabulary.fromItems(items)

# Shared factory instance -- presumably referenced from ZCML as a named
# vocabulary utility; confirm in configure.zcml.
AvailableTransformsVocabularyFactory=AvailableTransformsVocabulary()
| Python |
"""
$Id: config.py 55779 2007-12-18 14:26:49Z fschulze $
"""
PROJECTNAME = "Ploneboard"
SKINS_DIR = 'skins'
I18N_DOMAIN = PROJECTNAME.lower()
# Transform config
EMOTICON_TRANSFORM_ID = 'text_to_emoticons'
EMOTICON_TRANSFORM_MODULE = 'Products.Ploneboard.transforms.text_to_emoticons'
URL_TRANSFORM_MODULE = 'Products.Ploneboard.transforms.url_to_hyperlink'
SAFE_HTML_TRANSFORM_MODULE = 'Products.PortalTransforms.transforms.safe_html'
PLONEBOARD_TRANSFORMSCHAIN_ID = 'ploneboard_chain'
PLONEBOARD_TOOL = 'portal_ploneboard'
REPLY_RELATIONSHIP = 'ploneboard_reply_to'
# This should be configurable ttw
NUMBER_OF_ATTACHMENTS = 5
GLOBALS = globals()
# Feature detection: attachments are only offered when the optional
# SimpleAttachment product is importable.
try:
    from Products import SimpleAttachment as SA
    HAS_SIMPLEATTACHMENT = True
    del SA
except ImportError:
    HAS_SIMPLEATTACHMENT = False
| Python |
from setuptools import setup, find_packages

version = '2.1b2'

setup(name='Products.Ploneboard',
      version=version,
      description="A discussion board for Plone.",
      # Long description is assembled from the readme plus the install
      # and history docs so PyPI shows all three.
      long_description=open("README.txt").read() + \
                       open("docs/INSTALL.txt").read() + \
                       open("docs/HISTORY.txt").read(),
      classifiers=[
          "Framework :: Plone",
          "Framework :: Zope2",
          "Programming Language :: Python",
      ],
      keywords='Zope CMF Plone board forum',
      author='Jarn, Wichert Akkerman, Martin Aspeli',
      author_email='plone-developers@lists.sourceforge.net',
      url='http://svn.plone.org/svn/collective/Ploneboard/trunk',
      license='GPL',
      packages=find_packages(exclude=['ez_setup']),
      namespace_packages=['Products'],
      include_package_data=True,
      zip_safe=False,
      download_url='http://plone.org/products/ploneboard',
      install_requires=[
          'setuptools',
          'Products.SimpleAttachment',
          'plone.app.controlpanel',
          'plone.app.portlets',
          'plone.portlets',
          'plone.memoize',
          'plone.i18n',
          'python-dateutil',
          'Plone >= 3.3',
      ],
      )
| Python |
##############################################################################
#
# Copyright (c) 2006 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id: bootstrap.py 75593 2007-05-06 21:11:27Z jim $
"""
import os, shutil, sys, tempfile, urllib2

# Eggs are installed into a throw-away directory that is removed at the end.
tmpeggs = tempfile.mkdtemp()

# Make sure setuptools/pkg_resources is available, fetching ez_setup.py
# from the network if necessary (Python 2 only).
try:
    import pkg_resources
except ImportError:
    ez = {}
    exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
                         ).read() in ez
    ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
    import pkg_resources

# Install zc.buildout into the temporary egg directory via easy_install
# run in a child process.
cmd = 'from setuptools.command.easy_install import main; main()'
if sys.platform == 'win32':
    cmd = '"%s"' % cmd # work around spawn lamosity on windows

ws = pkg_resources.working_set
assert os.spawnle(
    os.P_WAIT, sys.executable, sys.executable,
    '-c', cmd, '-mqNxd', tmpeggs, 'zc.buildout',
    dict(os.environ,
         PYTHONPATH=
         ws.find(pkg_resources.Requirement.parse('setuptools')).location
         ),
    ) == 0

# Run "buildout bootstrap" with whatever options the user supplied.
ws.add_entry(tmpeggs)
ws.require('zc.buildout')
import zc.buildout.buildout
zc.buildout.buildout.main(sys.argv[1:] + ['bootstrap'])
shutil.rmtree(tmpeggs)
| Python |
#coding=utf8
__author__ = 'alex'
import sys
import os
import uuid
import logging
from flask import Flask, g, session
import settings
from admin import admin
from index import index
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from flask.ext.script import Manager
import biz
# Make the working directory importable so sibling modules (settings,
# admin, index, biz, models) resolve when run as a script.
dir_path = os.getcwd()
if dir_path not in sys.path:
    sys.path.append(dir_path)
app = Flask("yun")
app.config.from_object(settings)
app.register_blueprint(admin)
app.register_blueprint(index)
manager = Manager(app)
# One shared engine; sessions are scoped per request (see before_request).
DB=create_engine(settings.DB_URI,encoding = "utf-8",pool_recycle=settings.TIMEOUT,echo=False)
Session = scoped_session(sessionmaker(bind=DB))
def init_db():
    # Create all model tables on the configured engine (no-op for
    # tables that already exist).
    from models import Base
    Base.metadata.create_all(bind=DB)
@app.before_request
def before_request():
    """
    Executed before each request: bind a DB session and load login state.
    """
    g.db = Session()
    # Load the logged-in user's name from the session, if any.
    g.user_name = session.get('user_name', None)
    g.logined =True if g.user_name else False
    # NOTE(review): original comment said "load admin login info", but the
    # code below only guarantees a per-visitor session id -- confirm intent.
    if not session.get("session_id", None):
        session["session_id"] = uuid.uuid4().hex
    g.session_id = session["session_id"]
@app.after_request
def after_request(response):
    """Flush and commit the request's DB session; roll back on failure."""
    try:
        g.db.flush()
        g.db.commit()
        g.db.close()
    except Exception, e:
        # NOTE(review): tear_down below commits again after this hook, and
        # ``e.message`` is deprecated -- worth revisiting, left as-is here.
        logging.error(e.message)
        g.db.rollback()
    return response
@app.teardown_request
def tear_down(exception=None):
    """
    Executed when the request ends: commit, or roll back if the request
    raised an exception.
    """
    try:
        if exception:
            g.db.rollback()
        else:
            # NOTE(review): after_request has usually already committed and
            # closed this session; this second commit is likely redundant.
            g.db.commit()
        g.db.close()
    except Exception, e:
        logging.error(e.message)
        g.db.rollback()
@manager.command
def init():
    # Flask-Script command: create the schema and seed the initial site
    # data (run once at deployment: ``python app.py init``).
    g.db = Session()
    init_db()
    g.db.commit()
    biz.init_site_data()
    print "done"
    g.db.close()
if __name__ == "__main__":
    # Delegate to Flask-Script (runserver, init, ...).
    manager.run()
| Python |
#coding=utf8
__author__ = 'alex'
import datetime
import uuid
from sqlalchemy import Column,Integer,String,DateTime,Boolean,Text,UniqueConstraint,Table, MetaData,ForeignKey, Numeric
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import relationship,backref
from decimal import Decimal
from utils import hash_passwd,read_random, check_passwd
from flask import g
# Default table options shared by all models: InnoDB for transactional
# storage and utf8 so non-ASCII content is stored correctly on MySQL.
TABLEARGS = {
    'mysql_engine': 'InnoDB',
    'mysql_charset':'utf8'
}
class DeclaredBase(object):
    """Mixin giving every model a lowercase table name, an integer
    primary key and create/modify timestamps."""
    @declared_attr
    def __tablename__(cls):
        # Table name defaults to the lowercased class name.
        return cls.__name__.lower()
    id = Column(Integer, primary_key=True, autoincrement=True)
    create_time = Column(DateTime, default=datetime.datetime.now, index=True)
    last_modify = Column(DateTime, default=datetime.datetime.now, index=True)

# Declarative base for all models below, carrying the shared columns.
Base = declarative_base(cls=DeclaredBase)
class Admin(Base):
    """Back-office administrator account with a hashed password."""
    name = Column(String(40))      # login name, unique (see __table_args__)
    password = Column(String(40))  # password hash, never plaintext
    nick = Column(String(40))      # display name
    status = Column(Integer)       # status flag; set to 0 on creation
    __table_args__ = (
        UniqueConstraint(name,),
        TABLEARGS
    )
    def __init__(self,name, password, nick):
        self.name = name
        # Hash before storing; verification goes through check_passwd.
        self.password = hash_passwd(password)
        self.nick = nick
        self.status = 0
    def verify(self, password):
        # True if the plaintext candidate matches the stored hash.
        return check_passwd(password,self.password)
class Catalogs(Base):
    """A node in the catalog tree (at most three levels deep, see
    ``all_childs``).

    ``forward_id`` is the parent catalog's id (0 for top-level nodes),
    ``level`` the nesting depth and ``idx`` the ordering position among
    siblings.
    """
    name = Column(String(20))
    forward_id = Column(Integer)  # parent catalog id; 0 = top level
    level = Column(Integer)
    idx = Column(Integer)
    __table_args__ = (
        UniqueConstraint(name,),
        TABLEARGS
    )

    def __init__(self, name, forward_id, level, idx):
        self.name = name
        self.forward_id = forward_id
        self.level = level
        self.idx = idx

    def make_child(self, name, idx=0):
        """Create and add a child catalog named ``name``.

        Bug fix: ``Catalogs.__init__`` requires four arguments, but the
        old call passed only three and always raised TypeError.  ``idx``
        defaults to 0 to stay backward compatible with existing callers.
        """
        return g.db.add(Catalogs(name, self.id, self.level + 1, idx))

    @property
    def has_document(self):
        """Number of documents filed directly under this catalog."""
        return g.db.query(Document).filter(Document.catalog_id == self.id).count()

    @property
    def nexts(self):
        """Child catalogs ordered by idx; for leaf catalogs, the documents
        filed here, newest first."""
        data = list(g.db.query(Catalogs).filter(Catalogs.forward_id == self.id).order_by(Catalogs.idx.asc()))
        if not data:
            data = list(g.db.query(Document).filter(Document.catalog_id == self.id).order_by(Document.id.desc()))
        return data

    def indexed_next(self):
        """Yield (position, child) pairs over ``nexts``."""
        data = self.nexts
        idx = 0
        for d in data:
            yield idx, d
            idx += 1

    @property
    def all_childs(self):
        """Flatten up to three levels of descendant catalogs (documents
        are excluded via ``doc_type``)."""
        data = []
        for child in self.nexts:
            data.append(child)
            if not child.doc_type:
                for child_0 in child.nexts:
                    if not child_0.doc_type:
                        data.append(child_0)
                        for child_1 in child_0.nexts:
                            if not child_1.doc_type:
                                data.append(child_1)
        return data

    @property
    def text(self):
        """Render descendant catalogs as a dash-indented outline, one
        catalog per line (depth encoded as leading dashes)."""
        lines = []
        for child in self.all_childs:
            if not child.doc_type:
                lines.append(u"".join(["-" * (child.level), child.name]))
        return u"\n".join(lines)

    @property
    def doc_type(self):
        # Catalogs are containers, not documents (Document.doc_type is True).
        return False
class Document(Base):
    """An article filed under a catalog, with optional attachments."""
    title = Column(String(200))   # unique (see __table_args__)
    content = Column(Text)
    catalog_id = Column(Integer)  # owning Catalogs id
    poster = Column(String(40))   # author name
    __table_args__ = (
        UniqueConstraint(title,),
        TABLEARGS
    )
    def __init__(self, title, content, catalog_id, poster):
        self.title = title
        self.content = content
        self.catalog_id = catalog_id
        self.poster = poster
    def append_attachment(self, name, attach_type, content):
        # Attach a new Attachment row to this document.
        return g.db.add(Attachment(self.id, name, attach_type, content))
    @property
    def attachments(self):
        # All attachments of this document, oldest first.
        return g.db.query(Attachment).filter(Attachment.document_id==self.id).order_by(Attachment.id.asc())
    @property
    def path_catalogs(self):
        # Walk forward_id links up to the root and return the catalog
        # chain root-first (breadcrumb order).
        catalog = g.db.query(Catalogs).get(self.catalog_id)
        datas = [catalog,]
        while True:
            try:
                catalog = g.db.query(Catalogs).get(catalog.forward_id)
                if not catalog:
                    break
                datas.append(catalog)
            except:
                # NOTE(review): bare except hides real DB errors; it only
                # needs to stop at a missing/0 parent -- consider narrowing.
                break
        datas.reverse()
        return datas
    @property
    def doc_type(self):
        # Documents are leaves (Catalogs.doc_type is False).
        return True
    @property
    def nexts(self):
        # Documents have no children; mirrors the Catalogs API.
        return []
class Attachment(Base):
    """A document attachment: either inline text or an external URL."""
    document_id = Column(Integer)
    name = Column(String(40))
    attachment_type =Column(Integer) # TODO: 0 = text, 1 = URL
    text_content = Column(Text)      # used when attachment_type == 0
    url_content = Column(String(100)) # used when attachment_type == 1
    __table_args__ = TABLEARGS
    def __init__(self, document_id, name, attachment_type, content):
        self.document_id = document_id
        self.name = name
        self.attachment_type = attachment_type
        # Route the payload to the column matching the type flag.
        if attachment_type:
            self.url_content = content
        else:
            self.text_content = content
    def __repr__(self):
        return self.url_content if self.attachment_type else self.text_content
    @property
    def content(self):
        # Whichever payload column is active for this attachment type.
        return str(self)
| Python |
#coding=utf8
__author__ = 'alex'
import logging
from flask import g
from models import *
class BizException(Exception):
    """Domain-level error whose message is safe to show to users.

    Fix: now derives from Exception instead of BaseException, so generic
    ``except Exception`` handlers and frameworks can intercept it.
    Callers that catch BizException explicitly are unaffected.
    """
    pass
def init_site_data():
    """Seed the default admin accounts and top-level catalogs.

    Each row is committed individually; a failed insert (typically a
    duplicate) is rolled back and logged, leaving existing data intact.
    """
    def _seed(obj):
        try:
            g.db.add(obj)
            g.db.flush()
            g.db.commit()
            logging.info("object added")
        except:
            g.db.rollback()
            logging.info("%s already exists"%obj)
    seeds = [
        Admin("admin","1qasw2","管理员"),
        Admin("alex","1qasw2","压力很大"),
        Catalogs("消费资讯",0,0,0),
        Catalogs("消费教育",0,0,1),
        Catalogs("消费指导",0,0,2),
        Catalogs("消费调解",0,0,3),
        Catalogs("消费调查",0,0,4),
    ]
    for obj in seeds:
        _seed(obj)
def manager_exists(name):
    """Return the number of Admin rows named *name* (truthy if present).

    Fix: the old ``g.db.query(Admin.name == name)`` selected the boolean
    expression itself for every row, so ``count()`` returned the total
    row count regardless of *name*; the query now filters Admin rows.
    """
    return g.db.query(Admin).filter(Admin.name == name).count()
def manager(name):
    """Fetch exactly one Admin by name (raises if absent or ambiguous)."""
    return g.db.query(Admin).filter_by(name=name).one()
def _process_line(line):
idx=1
for i in xrange(5):
if line[i]!="-":
return i,line[idx-1:]
idx+=1
def _process_text(text):
    """Parse outline text into a list of node dicts.

    Each non-empty line becomes ``{name, new_name, fid, level, idx}``:
    level comes from the leading dashes, a ``name|new_name`` pair marks
    a rename, and fid is filled in later by the sync step.
    """
    nodes = []
    for position, line in enumerate(l for l in text.split("\n") if l):
        level, payload = _process_line(line)
        parts = payload.split("|")
        if len(parts) > 1:
            raw_name, raw_new = parts
        else:
            raw_name, raw_new = parts[0], None
        nodes.append(dict(
            name=raw_name.strip(),
            new_name=raw_new.strip() if raw_new else None,
            fid=None,
            level=level,
            idx=position,
        ))
    return nodes
def _log_steps(pool,lv,f,l):
logging.error("%s -lv=%s -fahter=%s -last=%s"%([obj.id for obj in pool],lv,f,l))
def catalog_text():
    """Serialize the whole catalog tree to outline text: each root
    catalog name followed by its dash-indented descendants."""
    lines = []
    roots = g.db.query(Catalogs).filter(Catalogs.level==0).order_by(Catalogs.idx.asc())
    for root in roots:
        lines.append(root.name)
        body = root.text
        if body:
            lines.append(body)
    logging.error(u"%s"%lines)  # debug trace kept as-is
    return u"\n".join(lines)
def sync_catalogs(top_id, text):
    """Reconcile the Catalogs table with the posted outline *text*.

    Walks the parsed items while tracking the current depth, a stack of
    ancestor nodes (objpool) and the current parent id; creates missing
    nodes, renames/reorders/reparents existing ones, and finally deletes
    catalogs that no longer appear in the text.
    """
    # NOTE(review): presumably normalizes fullwidth "|" and "-" to their
    # ASCII forms -- the two arguments render identically here; confirm
    # against the original encoding.
    text = text.replace(u"|",u"|").replace(u"-",u"-")
    items = _process_text(text)
    if items[0]["level"]!=0:
        raise BizException, u"格式不正确"
    catalogs = g.db.query(Catalogs).order_by(Catalogs.idx.asc())
    # Existing rows by name; incoming items by name, for diffing.
    named_catalogs = dict([(cat.name,cat) for cat in catalogs])
    new_catalogs = dict([(item["name"],item) for item in items])
    current_level = 0
    objpool = []               # stack of ancestors above the current node
    current_father = top_id    # parent id for nodes at the current level
    last_obj = None            # most recently processed node
    for item in items:
        if item["name"] not in named_catalogs:
            # New node: attach at the right parent for its level.
            if current_level==item["level"]:
                node = Catalogs(item["name"], current_father, item["level"], item["idx"])
                g.db.add(node)
                g.db.flush()
            if current_level<item["level"]:
                # Descend: the previous node becomes the parent.
                node = Catalogs(item["name"], last_obj.id, item["level"], item["idx"])
                g.db.add(node)
                g.db.flush()
                objpool.append(last_obj)
                current_father = last_obj.id
            if current_level>item["level"]:
                # Ascend: pop one ancestor per level climbed.
                sub = current_level-item["level"]
                for i in range(sub):
                    current_father = objpool.pop().forward_id
                node = Catalogs(item["name"], current_father, item["level"], item["idx"])
                g.db.add(node)
                g.db.flush()
            last_obj = node
            current_level=item["level"]
        else:
            # Existing node: update order, optional rename, and reparent
            # if its depth changed.
            node = named_catalogs[item["name"]]
            node.idx = item["idx"]
            if item["new_name"]:
                node.name = item["new_name"]
            if current_level<item["level"]:
                objpool.append(last_obj)
                current_father = last_obj.id
                if node.level != item["level"]:
                    node.forward_id = last_obj.id
            if current_level>item["level"]:
                sub = current_level-item["level"]
                for i in range(sub):
                    current_father = objpool.pop().forward_id
                if node.level != item["level"]:
                    node.forward_id = current_father
            node.level = item["level"]
            last_obj = node
            current_level=item["level"]
            g.db.flush()
        _log_steps(objpool,current_level,current_father,last_obj.id)
    # Remove catalogs absent from the new outline.
    for name, value in named_catalogs.iteritems():
        if name not in new_catalogs:
            g.db.delete(value)
            g.db.flush()
    g.db.commit()
def top_level():
    """Root catalogs (level 0), ordered by display index."""
    return g.db.query(Catalogs).filter_by(level=0).order_by(Catalogs.idx.asc())
def _strip_end(txt):
txt = txt.strip()
if txt[-1:]=="\r":
return txt[:-1]
return txt
def _splite_content(content):
    """Split *content* into sections separated by dashes-only lines.

    A separator is a non-empty line consisting solely of "-" after
    stripping.  Returns the section texts; a trailing section without a
    closing separator is included.
    """
    sections = []
    buff = []
    for raw in content.split("\n"):
        cleaned = _strip_end(raw)
        if set(cleaned) == set("-"):
            sections.append("\n".join(buff))
            del buff[:]
        else:
            buff.append(cleaned)
    if buff:
        sections.append("\n".join(buff))
    return sections
def _process_type(content):
if content.startswith("http"):
return 0
line=content.strip().split("\n")[0]
if line[-3:].lower() in ['jpg','gif','png','bmp']:
return 1
else:
return 2
def _process_attachment(data):
    """Split an attachment section into (title, type_code, body): the
    first line is the title, the rest is classified by _process_type."""
    title, _, body = data.partition("\n")
    return title, _process_type(body), body
def process_content(content):
    """Split raw editor input into (body_text, attachment_tuples).

    The first dash-separated section is the document body; every later
    section becomes a (title, type, content) attachment tuple.
    """
    sections = _splite_content(content)
    body = sections[0]
    parsed = [_process_attachment(section) for section in sections[1:]]
    return body, parsed
def post_document(catalog_id, title, content, poster):
    """Create a Document plus its parsed attachments and commit."""
    body, attachments = process_content(content)
    doc = Document(title, body, catalog_id, poster)
    g.db.add(doc)
    g.db.flush()  # assigns doc.id before attachment rows reference it
    for attachment in attachments:
        doc.append_attachment(*attachment)
    g.db.flush()
    g.db.commit()
def txt_document(document_id):
    """Reconstruct the editable text form of a document: its body, a
    dashed separator, then each attachment as name plus content.

    Fix: the accumulator was named ``slice``, shadowing the builtin; it
    is now ``parts``.
    """
    document = g.db.query(Document).get(document_id)
    attachments = g.db.query(Attachment).filter(Attachment.document_id==document_id).order_by(Attachment.id.asc())
    parts = [document.content, "-------------------"]
    for attachment in attachments:
        parts.append(attachment.name)
        parts.append(str(attachment))
    return "\r\n".join(parts)
def update_document(document, title, content, catalog_id):
    """Overwrite a document's title, catalog, body and attachments, then
    commit.  Existing attachments are deleted and re-created from the
    parsed *content*."""
    body, attachments = process_content(content)
    document.title = title
    document.catalog_id = catalog_id
    document.content = body
    g.db.query(Attachment).filter(Attachment.document_id==document.id).delete()
    for attachment in attachments:
        document.append_attachment(*attachment)
    g.db.flush()
    g.db.commit()
| Python |
#coding=utf8
__author__ = 'alex'
# Flask debug mode; disable in production.
DEBUG = True
# Switch between local and deployed configuration below.
LOCAL = True
MEDIA_ROOT = "/static/"
if LOCAL:
    DB_URI = "mysql://root:123456@127.0.0.1:3306/315db?charset=utf8"
else:
    # NOTE(review): identical to the LOCAL branch -- the production URI
    # was apparently never filled in; confirm before deploying.
    DB_URI = "mysql://root:123456@127.0.0.1:3306/315db?charset=utf8"
# Session/cache lifetime in seconds.
TIMEOUT = 3600
# NOTE(review): secret key and DB credentials are hard-coded in source;
# consider loading them from the environment.
SECRET_KEY = "11556666433221changge!"
#coding=utf8
__author__ = 'alex'
from flask import g, jsonify, request, flash, url_for
from flask_mail import Message
import os
import settings
import time
import random
import json
import hmac
import redis
from pyDes import *
import sys, traceback
import urllib
from uuid import uuid4
def execute(query, page_idx, page_size):
    """Run a paged query: return (total_row_count, rows_for_the_page)."""
    start, end = db_idx(page_idx, page_size)
    total = query.count()
    return total, query[start:end]
def mail(to_email, title, message):
    """Send an HTML mail through the request-scoped mailer (g.mail),
    with the site brand prefixed to the subject.

    NOTE(review): settings.DEFAULT_MAIL_SENDER is not defined in the
    settings module shown here -- verify it exists at runtime.
    """
    msg = Message(u"【艾普云】%s"%title, sender=settings.DEFAULT_MAIL_SENDER, recipients=[to_email,])
    msg.html = message
    g.mail.send(msg)
def pad(s, BS):
    """PKCS#7-pad *s* up to a multiple of block size *BS*.

    Converted from an assigned lambda (PEP 8 E731); call sites are
    unchanged.  A string already on a block boundary gains a full extra
    block of padding, as PKCS#7 requires.
    """
    n = BS - len(s) % BS
    return s + n * chr(n)

def unpad(s):
    """Remove PKCS#7 padding previously applied by pad()."""
    return s[0:-ord(s[-1])]
def db_idx(page_idx, page_size):
    """Translate a 1-based page number into a (start, end) slice pair."""
    start = (page_idx - 1) * page_size
    return start, start + page_size
def tob(s, enc='utf8'):
    # Coerce to a byte string: encode unicode text, pass everything else
    # through bytes().  (Python 2 only: relies on the `unicode` builtin.)
    return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
    # Coerce to unicode text: decode byte strings, unicode() the rest.
    # (Python 2 only: relies on the `unicode` builtin.)
    return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
def format_time(t):
    """Format a datetime as a Chinese 'Y年M月D日H时M分S秒' string."""
    fields = (t.year, t.month, t.day, t.hour, t.minute, t.second)
    return u"%s年%s月%s日%s时%s分%s秒" % fields
def format_date(t):
    """Format a date/datetime as a Chinese 'Y年M月D日' string."""
    fields = (t.year, t.month, t.day)
    return u"%s年%s月%s日" % fields
def hash_passwd(raw_password):
    """Hash a password as "<salt>$<hmac-hexdigest>" with a 4-char salt.

    NOTE(review): hmac.new without a digestmod defaults to MD5 on
    Python 2 -- weak for password storage; consider
    hashlib.pbkdf2_hmac once compatibility is verified.
    """
    salt = "".join(random.sample('abcdefghijklmnopqrstuvwxyz1234567890ABSDXFJOHDXFH',4))
    return "$".join([salt,hmac.new(salt,tob(raw_password)).hexdigest()])
def check_passwd(raw_password,enc_password):
    """Return True when *raw_password* matches a hash_passwd() value.

    Returns None (falsy) on mismatch.  NOTE(review): plain ``==`` digest
    comparison leaks timing; hmac.compare_digest is preferable where
    available.
    """
    salt,hmac_password=tob(enc_password).split('$')
    if hmac.new(salt,tob(raw_password)).hexdigest() == hmac_password:
        return True
def serial_maker():
    """Build a unique-ish serial: server id + millisecond timestamp +
    four random uppercase letters.

    NOTE(review): settings.SERVER_ID is not defined in the settings
    module shown here -- verify it exists at runtime.
    """
    millis = str(int(time.time() * 1000))
    suffix = "".join(random.sample('QAZXSWEDCVFRTGBNHYUJMKILOP', 4))
    return "%s%s%s" % (settings.SERVER_ID, millis, suffix)
def read_random(length):
    """Pick *length* distinct characters from an unambiguous alphabet
    (no 0/O, 1/I, 5/S look-alikes).  Returns a list, not a string."""
    import random
    alphabet = "ABCDEFGHJKLMNPQRSTWXYZ2346789"
    return random.sample(alphabet, length)
def generate_code_image(size, length):
    """Render a CAPTCHA image: returns (PIL image, code_string).

    Requires a "code.ttf" font file in the current working directory.
    """
    from PIL import Image, ImageDraw, ImageFilter, ImageFont
    font_path = os.path.abspath(os.path.join(os.getcwd(),"code.ttf"))
    font = ImageFont.truetype(font_path, 22)
    words = "".join(read_random(length))
    w, h = size
    # NOTE(review): w/h and font_w/font_h are computed but unused;
    # font.getsize is also deprecated in newer Pillow releases.
    font_w, font_h = font.getsize(words)
    img = Image.new('RGB',size,(255,255,255))
    draw = ImageDraw.Draw(img)
    draw.text((5,5),words,font=font,fill=(40,40,40))
    img = img.filter(ImageFilter.EDGE_ENHANCE_MORE)
    return img, words
def encrypt_des(content, key):
    """DES-CBC encrypt *content* with *key* (zero IV, PKCS5 padding).

    Fix: removed the try/except that immediately re-raised the same
    exception -- it added nothing and (on Python 2) truncated the
    traceback.  NOTE(review): single DES with a static all-zero IV is
    cryptographically weak; flagging rather than changing the scheme.
    """
    encryptor = des(key, CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
    return encryptor.encrypt(content)
def decrypt_des(content, key):
    """DES-CBC decrypt *content* with *key* (zero IV, PKCS5 padding).

    Fix: removed the try/except that immediately re-raised the same
    exception, mirroring encrypt_des.
    """
    decryptor = des(key, CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
    return decryptor.decrypt(content)
def login_required(f):
    """View decorator: bounce anonymous users to the login page.

    XHR callers get a JSON failure payload instead of a redirect; the
    original URL is flashed under "goto" so login can return to it.
    """
    from functools import wraps
    from flask import g, redirect, url_for, request, flash
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if g.logined:
            return f(*args, **kwargs)
        if request.is_xhr:
            return json_response(False, u"您还没有登陆")
        flash(request.url, category="goto")
        return redirect(url_for(settings.LOGIN_MOD))
    return decorated_function
def json_response(result, data):
    """Uniform AJAX envelope: {"r": <bool>, "info": <payload>}."""
    return jsonify({"r": result, "info": data})
def woops(message, next="/"):
    """Flash *message* and redirect to the shared error page, which will
    offer a link back to *next*."""
    from flask import redirect, url_for, flash
    flash(message)
    return redirect(url_for("index.show_error", next=next))
class Frame(object):
    """Snapshot of one traceback frame: its local variables plus a
    formatted source location.  Used by print_debug."""
    def __init__(self, tb):
        self.tb = tb
        self.locals = dict(tb.tb_frame.f_locals)
    def print_path(self):
        """One formatted traceback line for this frame."""
        return touni(traceback.format_tb(self.tb, limit=1)[0])
    def print_local(self):
        """All captured locals rendered as "name=value" lines."""
        rendered = ["%s=%s" % (k, self.dump_value(self.locals[k]))
                    for k in self.locals]
        return u"\n".join(rendered)
    def dump_value(self, v):
        """str() a value defensively; unprintable objects get a marker."""
        try:
            return touni(str(v))
        except:
            return u"value can not serilizable"
def print_debug(ex):
    """Build a verbose error report for the active exception: each
    traceback frame with its local variables, separated by rules.

    Fix: removed the ``frames`` list the old code appended to but never
    read, and the unused exc_type/exc_value unpacking.
    NOTE(review): the walk starts at tb.tb_next, so the topmost frame is
    skipped -- confirm that is intentional.
    """
    tb = sys.exc_info()[2]
    detail = u"alex error -Exception:%s\n" % ex
    while tb.tb_next:
        tb = tb.tb_next
        fm = Frame(tb)
        detail += fm.print_path()
        detail += u"\nlocals variables:\n"
        detail += fm.print_local()
        detail += u"\n-------------------------------------------------------\n"
    return detail
def pages(item_count, page_size):
    """Build pagination data: a list of (page_number, url) tuples where
    page_number 0 marks an ellipsis gap in the widget.

    Python 2 note: `/` below is integer division.
    """
    from flask import request
    base_url = request.path
    page_id = int(request.args.get("p","1"))
    def make_url(base,pid):
        # pid 0 is the ellipsis placeholder: rendered without a link.
        base=tob(base)
        if not pid:
            return ""
        url_slice=base.split('?')
        if len(url_slice)<2:
            return base+"?p=%s"%pid
        else:
            # Re-encode existing query params with the new page number;
            # values that themselves contain "=" are re-joined.
            # NOTE(review): request.path never carries a query string,
            # so this branch looks unreachable for this caller -- confirm.
            params=dict([(lambda i:tuple(i) if len(i)<3 else (i[0],"=".join(i[1:])))(item.split("=")) for item in url_slice[1].split('&')])
            params["p"]=pid
            return "%s?%s"%(url_slice[0],urllib.urlencode(params))
    page_count=item_count/page_size+1 if item_count%page_size else item_count/page_size
    if page_count<10:
        return [(i+1,make_url(base_url,i+1)) for i in range(page_count)]
    else:
        # Windowed view: first page, gap, current +/- 2, gap, last page.
        if page_id<5:
            return [(p,make_url(base_url,p)) for p in [1,2,3,4,5,0,page_count]]
        if page_id>(page_count-4):
            return [(p,make_url(base_url,p)) for p in [1,0,page_count-4,page_count-3,page_count-2,page_count-1,page_count]]
        return [(p,make_url(base_url,p)) for p in [1,0,page_id-2,page_id-1,page_id,page_id+1,page_id+2,0,page_count]]
class Cache(object):
    """Thin Redis wrapper: a namespaced JSON object cache plus pub/sub."""
    def __init__(self,host,db):
        self.redis = redis.Redis(host=host,port=6379,db=db)
    def _gen_key(self,key,pub=False):
        # Namespace keys so entries don't clash with other apps on the
        # same Redis instance.  NOTE(review): nothing ever passes
        # pub=True; publish() below uses raw channel names -- confirm.
        return "aipuyun_publish_%s"%key if pub else "aipuyun_object_%s"%key
    def get(self, name, loads = json.loads):
        """Fetch and deserialize a cached value; None when absent."""
        data = self.redis.get(self._gen_key(name))
        if data:
            return loads(data)
    def set(self, name, value, ttl=3600, dumps = json.dumps):
        """Serialize and store a value; ttl=0 stores without expiry.

        NOTE(review): argument order matches redis-py < 3.0, i.e.
        setex(name, value, time); redis-py >= 3.0 swapped it to
        (name, time, value) -- verify the installed client version.
        """
        if ttl:
            return self.redis.setex(self._gen_key(name), dumps(value), ttl)
        return self.redis.set(self._gen_key(name), dumps(value))
    def __getitem__(self, item):
        return self.get(item)
    def __setitem__(self, key, value):
        # Dict-style assignment stores forever (ttl=0).
        return self.set(key,value,ttl=0)
    def delete(self, key):
        self.redis.delete(self._gen_key(key))
    def subscribe(self, channels):
        # Keeps the pubsub handle on the instance for listen().
        self.pubsub = self.redis.pubsub()
        self.pubsub.subscribe(channels)
    def publish(self, channel, message):
        self.redis.publish(channel, json.dumps(message))
    def listen(self, on_message):
        """Block on the subscription, dispatching decoded messages to
        on_message(channel, payload)."""
        for message in self.pubsub.listen():
            if message.get("type") == "message":
                on_message(message.get("channel"), json.loads(message.get("data")))
def validate_forms(form_class, form_data):
    """Instantiate and validate a form.

    On success returns (True, form).  On failure the submitted values
    are flashed back under "backinfo" and the first error per field
    under "error", then (False, None) is returned.
    """
    form = form_class(form_data)
    if form.validate():
        return True, form
    for field_name, field_value in request.form.iteritems():
        flash(dict(name=field_name, value=field_value), category="backinfo")
    for field, messages in form.errors.iteritems():
        flash(messages[0], category="error")
    return False, None
def add_error(message):
    """Flash an error plus the submitted form values for re-display."""
    flash(message, category="error")
    for field_name, field_value in request.form.iteritems():
        flash(dict(name=field_name, value=field_value), category="backinfo")
def add_success(message):
    """Flash a success notice."""
    flash(message,category="success")
def send_error(error):
    """Stash *error* in the cache for one hour; return its retrieval id."""
    error_id = uuid4().hex
    g.cache.set("error:%s" % error_id, error, ttl=3600)
    return error_id
def get_error(error_id):
    """Fetch a previously stashed error payload (None when expired)."""
    return g.cache.get("error:%s" % error_id)
def set_lock(target_mod, params, ttl=3600*24*7):
    """Create a one-week single-use link for *target_mod*.

    Caches *params* under a fresh lock id, then builds an absolute URL
    carrying the id as ``v``.
    NOTE(review): mutates the caller's *params* dict, and the cached
    copy is serialized *before* ``v`` is added -- confirm both are
    intended.
    """
    lock_id = uuid4().hex
    key = "lock:%s"%lock_id
    g.cache.set(key,params,ttl=ttl)
    params.update(v=lock_id)
    return "%s%s"%(settings.SERVER, url_for(target_mod,**params))
def verify_lock(lock_id):
    """Redeem a single-use lock: return its cached params and delete
    them, or None when missing/expired."""
    key = "lock:%s" % lock_id
    params = g.cache.get(key)
    if not params:
        return None
    g.cache.delete(key)
    return params
def test_lock(lock_id):
    """True when the lock id is still outstanding (non-destructive)."""
    if g.cache.get("lock:%s" % lock_id):
        return True
def choose(pay_type, obj):
    """Map a pay-type code to the matching price attribute of *obj*:
    0=monthly, 1=yearly, 2=quarter, 3=half-year.  Unknown codes yield
    None."""
    attr = {
        0: 'monthly_price',
        1: 'yearly_price',
        2: 'quarter_price',
        3: 'halfyear_price',
    }.get(pay_type)
    return getattr(obj, attr) if attr else None
def compute_exp(dt, pay_type, length):
    """Return the expiry datetime for a subscription starting at *dt*.

    A truthy *pay_type* means a fixed 365-day term; otherwise *length*
    whole months are added to *dt*.

    Fixes vs. the old code:
    - month arithmetic was off by one in the year-rollover branch
      (e.g. Nov + 1 month produced January of the *next* year, and some
      inputs yielded an invalid month 0 -> ValueError);
    - day-of-month is now clamped to the target month's length for every
      month, not just February;
    - the stray ``print`` debug line is gone.

    NOTE(review): pay_type codes 2/3 (quarter/half-year elsewhere) also
    take the 365-day branch here -- confirm that is intended.
    """
    import datetime
    import calendar
    if pay_type:
        return dt + datetime.timedelta(days=365)
    total = dt.month + length
    year = dt.year + (total - 1) // 12
    month = (total - 1) % 12 + 1
    # Clamp e.g. Jan 31 + 1 month to Feb 28/29, Mar 31 + 1 to Apr 30.
    day = min(dt.day, calendar.monthrange(year, month)[1])
    return datetime.datetime(year, month, day)
def send_sms(phone_number, text):
    """Send an SMS through the k8686 HTTP gateway (GBK-encoded body).

    Fix: the old success path called ``logging.INFO(r.text)`` --
    ``logging.INFO`` is the integer level constant, so the call raised
    TypeError; it is now ``logging.info``.
    NOTE(review): the gateway password is blank here -- presumably
    injected elsewhere or by deployment; confirm.
    """
    import requests
    import logging
    text = tob(text, enc="gbk")
    r = requests.post("http://www.k8686.com/Api/BayouSmsApi.aspx", data=dict(
        func="sendsms",
        username="8006980012",
        password="",
        smstype=1,
        timerflag=0,
        mobiles=phone_number,
        message=text
    ))
    if 200 <= r.status_code < 300:
        logging.info(r.text)
| Python |
#coding=utf8
__author__ = 'alex'
import logging
from flask import Blueprint, render_template, abort, g, request, redirect, url_for, session, flash
from models import *
from biz import *
from utils import *
# Admin blueprint: all management views are mounted under /admin.
admin = Blueprint('admin', __name__,template_folder='templates',url_prefix='/admin')
@admin.route("/login")
def login():
    """Render the admin login form."""
    return render_template("login.html",**locals())
@admin.route("/login", methods=["POST"])
def check_login():
    """Validate admin credentials: on success store the session user
    and redirect to the dashboard, otherwise return to the login form
    with an error flash."""
    name = request.form.get("name")
    password = request.form.get("password")
    try:
        if manager(name).verify(password):
            session["user_name"]=name
            add_success(u"登陆成功")
        else:
            # NOTE(review): "秘密" in the message below looks like a typo
            # for "密码" (password) -- confirm before changing user copy.
            add_error(u"用户名秘密不正确")
            return redirect(url_for("admin.login"))
    except Exception, e:
        # Unknown user (manager() raised) or any other failure: treat as
        # bad credentials but log the details.
        add_error(u"用户名秘密不正确")
        logging.error(print_debug(e))
        return redirect(url_for("admin.login"))
    return redirect(url_for("admin.index"))
@admin.app_template_filter(name="menu")
def menu(txt):
    """Template filter rendering the admin side menu from the root
    catalogs; the filtered value *txt* itself is unused."""
    catalogs = top_level()
    return render_template("include/menu.html",**locals())
@admin.app_template_filter(name="date")
def format_date(dt):
    """Template filter: datetime -> Chinese 'Y年M月D日'."""
    fields = (dt.year, dt.month, dt.day)
    return u"%s年%s月%s日" % fields
@admin.route("")
def index():
    """Admin dashboard landing page."""
    return render_template("admin_index.html",**locals())
@admin.route("/catalog")
def catalog_manage():
    """Show the catalog tree as editable outline text."""
    catalog = catalog_text()
    return render_template("manage_catalogs.html", **locals())
@admin.route("/catalog", methods = ["POST"])
def catalog_sync():
    """Apply the posted outline text to the catalog tree."""
    txt = request.form.get("txt")
    try:
        sync_catalogs(0,txt)
        add_success(u"更新成功")
    except BizException, e:
        # Validation failures carry a user-facing message.
        add_error(e.message)
    except Exception, e:
        add_error(u"未知异常")
        g.db.rollback()
        logging.error(print_debug(e))
    return redirect(url_for("admin.catalog_manage"))
@admin.route("/<catalog_id>/document")
def manage_document(catalog_id):
    """List the documents of a catalog and all of its descendants."""
    catalog = g.db.query(Catalogs).get(catalog_id)
    # NOTE(review): all_childs can contain Document objects (a leaf
    # catalog's nexts are its documents), so document ids may end up in
    # this catalog-id list -- confirm this is harmless.
    cats = [cat.id for cat in catalog.all_childs]
    cats.append(catalog.id)
    documents = g.db.query(Document).filter(Document.catalog_id.in_(cats)).order_by(Document.id.desc())
    return render_template("manage_document.html",**locals())
@admin.route("/<catalog_id>/document/new")
def new_document(catalog_id):
    """Show the new-document form, pre-selecting the last-used catalog
    remembered in the session."""
    catalog = g.db.query(Catalogs).get(catalog_id)
    selected_catalog = session["selected_catalog"] if "selected_catalog" in session else None
    return render_template("add_new_document.html",**locals())
@admin.route("/<catalog_id>/document/new", methods = ["POST"])
def add_document(catalog_id):
    """Create a document from the posted form, then return to the form."""
    title = request.form.get("title")
    content = request.form.get("content")
    select_catalog_id = request.form.get("catalog")
    try:
        # NOTE(review): poster is hard-coded to "alex" rather than the
        # logged-in admin -- confirm.
        post_document(select_catalog_id,title,content,"alex")
        add_success(u"%s 添加成功"%title)
        # Remember the chosen catalog for the next new-document form.
        session["selected_catalog"] = select_catalog_id
    except Exception, e:
        add_error(u"未知异常")
        logging.error(print_debug(e))
        g.db.rollback()
    return redirect(url_for("admin.new_document",catalog_id=catalog_id))
@admin.route("/<catalog_id>/document/<document_id>/edit")
def edit_document(catalog_id, document_id):
    """Show the edit form with the document re-serialized to text."""
    catalog = g.db.query(Catalogs).get(catalog_id)
    document = g.db.query(Document).get(document_id)
    selected_catalog = g.db.query(Catalogs).get(document.catalog_id).id
    txt = txt_document(document_id)
    return render_template("edit_document.html",**locals())
@admin.route("/<catalog_id>/document/<document_id>/edit", methods=["POST"])
def update_document_act(catalog_id, document_id):
    """Apply posted edits to an existing document, then return to the
    document list of the catalog the form was opened from."""
    document = g.db.query(Document).get(document_id)
    title = request.form.get("title")
    content = request.form.get("content")
    catalog_id_new = request.form.get("catalog")
    try:
        update_document(document, title, content, catalog_id_new)
        add_success(u"文档%s修改成功"%title)
    except Exception, e:
        add_error(u"未知异常")
        logging.error(print_debug(e))
        g.db.rollback()
    return redirect(url_for("admin.manage_document",catalog_id=catalog_id))
#coding=utf8
__author__ = 'alex'
from flask import Blueprint, render_template, abort, g, request, redirect, url_for, session, flash, send_file
from utils import *
from biz import *
from models import *
# Public site blueprint: mounted at the site root.
index = Blueprint('index', __name__,template_folder='templates',url_prefix='/')
@index.app_template_filter(name="content")
def show_content(document):
    """Template filter: render a document body; when the first line is
    an http URL it is split off and passed to the template separately.
    Falsy documents render as an empty string."""
    if document:
        lines=document.content.split('\n')
        if lines[0].startswith("http://"):
            url=lines[0]
            content = u"\n".join(lines[1:])
        else:
            url=None
            content = document.content
        return render_template("include/content.html",**locals())
    else:
        return ""
@index.app_template_filter(name="one")
def one(doc_list):
    """Template filter: first element of a sequence, or None if empty."""
    return doc_list[0] if doc_list else None
@index.app_template_filter(name="nav")
def menu(txt):
    """Template filter rendering the top navigation from the root
    catalogs; the filtered value *txt* itself is unused."""
    catalogs = top_level()
    return render_template("include/top_nav.html",**locals())
@index.route("")
def front():
    """Site front page."""
    return render_template("front.html",**locals())
@index.route("<page_name>.html")
def page(page_name):
    """Render a static template by name from the URL.

    NOTE(review): Flask's default string converter rejects "/", which
    limits traversal, but confirm template names cannot be abused.
    """
    return render_template("%s.html"%page_name,**locals())
@index.route("doc/<catalog_id>.html")
def catalog(catalog_id):
    """Public page for one catalog."""
    catalog = g.db.query(Catalogs).get(catalog_id)
    return render_template("document.html",**locals())
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handle playfoursquare.com requests, for testing.

    Serves canned capture files for known API paths; everything else
    gets a 404.
    """

    # API path -> canned capture file (replaces the old if/elif chain).
    _ROUTES = {
        '/v1/venue': '../captures/api/v1/venue.xml',
        '/v1/addvenue': '../captures/api/v1/venue.xml',
        '/v1/venues': '../captures/api/v1/venues.xml',
        '/v1/user': '../captures/api/v1/user.xml',
        '/v1/checkcity': '../captures/api/v1/checkcity.xml',
        '/v1/checkins': '../captures/api/v1/checkins.xml',
        '/v1/cities': '../captures/api/v1/cities.xml',
        '/v1/switchcity': '../captures/api/v1/switchcity.xml',
        '/v1/tips': '../captures/api/v1/tips.xml',
        '/v1/checkin': '../captures/api/v1/checkin.xml',
        '/history/12345.rss': '../captures/api/v1/feed.xml',
    }

    def do_GET(self):
        """Serve the canned capture for the requested path.

        Fixes: the capture file is now closed after copying (it leaked
        before), `!= None` became `is not None`, and the unused
        query-string parsing was dropped.
        """
        logging.warn('do_GET: %s, %s', self.command, self.path)
        url = urlparse.urlparse(self.path)
        logging.warn('do_GET: %s', url)
        response = self.handle_url(url)
        if response is not None:
            self.send_200()
            try:
                shutil.copyfileobj(response, self.wfile)
            finally:
                response.close()
            self.wfile.close()

    do_POST = do_GET

    def handle_url(self, url):
        """Return an open capture file for *url*, or None after a 404."""
        path = self._ROUTES.get(url.path)
        if path is None:
            self.send_error(404)
            return None
        logging.warn('Using: %s' % path)
        return open(path)

    def send_200(self):
        """Send a 200 response with an XML content type."""
        self.send_response(200)
        self.send_header('Content-type', 'text/xml')
        self.end_headers()
def main():
    """Serve canned captures on the port given as argv[1] (default 8080),
    listening on all interfaces, until interrupted."""
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
    else:
        port = 8080
    server_address = ('0.0.0.0', port)
    httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
import os
import subprocess
import sys
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'
# Capture files may be listed on the command line; default to every
# file in the captures directory.
captures = sys.argv[1:]
if not captures:
    captures = os.listdir(TYPESDIR)
for f in captures:
    # "venue_stats.xml" -> "VenueStats"
    basename = f.split('.')[0]
    javaname = ''.join([c.capitalize() for c in basename.split('_')])
    fullpath = os.path.join(TYPESDIR, f)
    typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
    parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
    # NOTE(review): shell=True with interpolated paths (needed for the
    # "> file" redirection); fine for a local dev script, unsafe if the
    # capture filenames were ever untrusted.
    cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
    cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
| Python |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
SERVER = 'api.foursquare.com:80'
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
    """Extract (token, secret) from an authexchange XML response."""
    token = re.search('<oauth_token>(.*)</oauth_token>',
                      auth_response).group(1)
    secret = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
                       auth_response).group(1)
    return (token, secret)
def create_signed_oauth_request(username, password, consumer):
    """Build an HMAC-SHA1-signed POST request for the authexchange
    endpoint, carrying the user's credentials as form parameters.
    Signed without an access token (None) since none exists yet."""
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
        parameters=dict(fs_username=username, fs_password=password))
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, None)
    return oauth_request
def main():
    """Fetch the oAuth-protected URL given as argv[1].

    Credentials come from ~/.oget: 4 lines means no cached token yet,
    6 lines means a token and secret were appended by a previous run.
    On first run an authexchange is performed and the token is persisted
    back to the file.
    """
    url = urlparse.urlparse(sys.argv[1])
    # Nevermind that the query can have repeated keys.
    parameters = dict(urlparse.parse_qsl(url.query))
    # NOTE(review): file handles in this function are never closed.
    password_file = open(os.path.join(user.home, '.oget'))
    lines = [line.strip() for line in password_file.readlines()]
    if len(lines) == 4:
        cons_key, cons_key_secret, username, password = lines
        access_token = None
    else:
        cons_key, cons_key_secret, username, password, token, secret = lines
        access_token = oauth.OAuthToken(token, secret)
    consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
    if not access_token:
        # First run: trade username/password for an access token.
        oauth_request = create_signed_oauth_request(username, password, consumer)
        connection = httplib.HTTPConnection(SERVER)
        headers = {'Content-Type' :'application/x-www-form-urlencoded'}
        connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
            body=oauth_request.to_postdata(), headers=headers)
        auth_response = connection.getresponse().read()
        token = parse_auth_response(auth_response)
        access_token = oauth.OAuthToken(*token)
        # Persist the token (and secret) for later runs.
        open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
            cons_key, cons_key_secret, username, password, token[0], token[1])))
    # Sign and send the actual request with the access token.
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
        access_token, http_method='POST', http_url=url.geturl(),
        parameters=parameters)
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
    connection = httplib.HTTPConnection(SERVER)
    connection.request(oauth_request.http_method, oauth_request.to_url(),
        body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
    print connection.getresponse().read()
    #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
    """Generate a parser class for the capture file named in argv[1]."""
    type_name, top_node_name, attributes = common.WalkNodesForAttributes(
        sys.argv[1])
    GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
    """Print the generated parser class to stdout.

    type_name: the type of object the parser returns
    top_node_name: the name of the object the parser returns,
        per common.WalkNodesForAttributes
    attributes: {tag_name: (type, [child])} mapping
    """
    stanzas = []
    for name in sorted(attributes):
        typ, children = attributes[name]
        replacements = Replacements(top_node_name, name, typ, children)
        if typ == common.BOOLEAN:
            stanzas.append(BOOLEAN_STANZA % replacements)
        elif typ == common.GROUP:
            stanzas.append(GROUP_STANZA % replacements)
        elif typ in common.COMPLEX:
            stanzas.append(COMPLEX_STANZA % replacements)
        else:
            stanzas.append(STANZA % replacements)
    if stanzas:
        # pop off the extraneous `} else` of the first conditional stanza.
        stanzas[0] = stanzas[0].replace('} else ', '', 1)
    # NOTE(review): reuses `name`/`typ` left over from the loop above,
    # and raises NameError when `attributes` is empty -- confirm inputs
    # always carry at least one attribute.
    replacements = Replacements(top_node_name, name, typ, [None])
    replacements['stanzas'] = '\n'.join(stanzas).strip()
    print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
    """Build the template-substitution dict for one attribute stanza."""
    # PascalCase class name derived from the top-level node name.
    type_name = ''.join(part.capitalize() for part in top_node_name.split('_'))
    # PascalCase accessor suffix for this attribute.
    camel_name = ''.join(part.capitalize() for part in name.split('_'))
    # NOTE(review): despite the original "camelCase" comment this yields
    # e.g. "Username", not "userName".
    attribute_name = camel_name.lower().capitalize()
    # mFieldName, per the Java member-naming convention.
    field_name = 'm' + camel_name
    if children[0]:
        sub_parser_camel_case = children[0] + 'Parser'
    else:
        # Singularize by dropping the last character: "Tips" -> "TipParser".
        sub_parser_camel_case = camel_name[:-1] + 'Parser'
    return {
        'type_name': type_name,
        'name': name,
        'top_node_name': top_node_name,
        'camel_name': camel_name,
        'parser_name': typ + 'Parser',
        'attribute_name': attribute_name,
        'field_name': field_name,
        'typ': typ,
        'timestamp': datetime.datetime.now(),
        'sub_parser_camel_case': sub_parser_camel_case,
        'sub_type': children[0]
    }
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# Type markers used by the generators.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"
# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']
# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}
DEFAULT_CLASS_IMPORTS = [
]
CLASS_IMPORTS = {
    # 'Checkin': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Venue': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Tip': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
}
# XML types generated as their own Java classes; anything else that is
# not boolean is emitted as String.
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]
TYPES = COMPLEX + ['boolean']
def WalkNodesForAttributes(path):
    """Parse the xml file getting all attributes.

    <venue>
      <attribute>value</attribute>
    </venue>

    Returns:
      type_name - The java-style name the top node will have. "Venue"
      top_node_name - unadultured name of the xml stanza, probably the type of
          java class we're creating. "venue"
      attributes - {tag_name: (type, [child])}
    """
    doc = pulldom.parse(path)
    type_name = None
    top_node_name = None
    attributes = {}
    # level > 0 means we are inside a complex subtree being skipped.
    level = 0
    for event, node in doc:
        # For skipping parts of a tree.
        if level > 0:
            if event == pulldom.END_ELEMENT:
                level-=1
                logging.warn('(%s) Skip end: %s' % (str(level), node))
                continue
            elif event == pulldom.START_ELEMENT:
                logging.warn('(%s) Skipping: %s' % (str(level), node))
                level+=1
                continue
        if event == pulldom.START_ELEMENT:
            logging.warn('Parsing: ' + node.tagName)
            # The first element seen is the document root: it fixes the
            # type/top-node names and is not recorded as an attribute.
            if type_name is None:
                type_name = ''.join([word.capitalize()
                                     for word in node.tagName.split('_')])
                top_node_name = node.tagName
                logging.warn('Found Top Node Name: ' + top_node_name)
                continue
            typ = node.getAttribute('type')
            child = node.getAttribute('child')
            # We don't want to walk complex types.
            if typ in COMPLEX:
                logging.warn('Found Complex: ' + node.tagName)
                level = 1
            elif typ not in TYPES:
                # Unknown types default to String.
                logging.warn('Found String: ' + typ)
                typ = STRING
            else:
                logging.warn('Found Type: ' + typ)
            logging.warn('Adding: ' + str((node, typ)))
            # setdefault: the first occurrence of a tag wins.
            attributes.setdefault(node.tagName, (typ, [child]))
    logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main program for Rietveld.
This is also a template for running a Django app under Google App
Engine, especially when using a newer version of Django than provided
in the App Engine standard library.
The site-specific code is all in other files: urls.py, models.py,
views.py, settings.py.
"""
# Standard Python imports.
import os
import sys
import logging
# Log a message each time this module get loaded.
logging.info('Loading %s, app version = %s',
__name__, os.getenv('CURRENT_VERSION_ID'))
import appengine_config
# AppEngine imports.
from google.appengine.ext.webapp import util
# Import webapp.template. This makes most Django setup issues go away.
from google.appengine.ext.webapp import template
# Helper to enter the debugger. This passes in __stdin__ and
# __stdout__, because stdin and stdout are connected to the request
# and response streams. You must import this from __main__ to use it.
# (I tried to make it universally available via __builtin__, but that
# doesn't seem to work for some reason.)
def BREAKPOINT():
  """Drop into an interactive pdb session wired to the real terminal.

  Under the request handler, sys.stdin/stdout are bound to the HTTP
  request and response streams, so a plain pdb.set_trace() would not be
  interactive; this uses __stdin__/__stdout__ instead.  Import it from
  __main__ to use it (see the comment block above).
  """
  import pdb
  p = pdb.Pdb(None, sys.__stdin__, sys.__stdout__)
  p.set_trace()
# Import various parts of Django.
import django.core.handlers.wsgi
import django.core.signals
import django.db
import django.dispatch.dispatcher
import django.forms
# Work-around to avoid warning about django.newforms in djangoforms.
django.newforms = django.forms
def log_exception(*args, **kwds):
  """Django signal handler: record the active exception in the error log."""
  exc_type, exc_value = sys.exc_info()[:2]
  logging.exception('Exception in request: %s: %s', exc_type.__name__,
                    exc_value)
# Log all exceptions detected by Django.
django.core.signals.got_request_exception.connect(log_exception)
# Unregister Django's default rollback event handler.
# NOTE(review): presumably safe because this app stores its data in the
# App Engine datastore rather than Django's ORM -- confirm.
django.core.signals.got_request_exception.disconnect(
    django.db._rollback_on_exception)
def real_main():
  """Main program: serve the Django application through App Engine."""
  # Create a Django application for WSGI.
  application = django.core.handlers.wsgi.WSGIHandler()
  # Run the WSGI CGI handler with that application.
  util.run_wsgi_app(application)
def profile_main():
  """Main program for profiling.

  Wraps real_main() in cProfile and appends an HTML-formatted profile
  dump, sorted by internal time, to the response output.
  """
  import cProfile
  import pstats
  import StringIO
  prof = cProfile.Profile()
  prof = prof.runctx('real_main()', globals(), locals())
  stream = StringIO.StringIO()
  stats = pstats.Stats(prof, stream=stream)
  # stats.strip_dirs()  # Don't; too many modules are named __init__.py.
  stats.sort_stats('time')  # 'time', 'cumulative' or 'calls'
  stats.print_stats()  # Optional arg: how many to print
  # The rest is optional.
  # stats.print_callees()
  # stats.print_callers()
  print '\n<hr>'
  print '<h1>Profile</h1>'
  print '<pre>'
  # Cap the dump at ~1MB so the response stays bounded.
  print stream.getvalue()[:1000000]
  print '</pre>'
# Set this to profile_main to enable profiling.
main = real_main
# App Engine invokes this script directly; dispatch to the selected main.
if __name__ == '__main__':
  main()
| Python |
# Removes duplicate nicknames (issue99).
#
# To run this script:
# - Make sure App Engine library (incl. yaml) is in PYTHONPATH.
# - Make sure that the remote API is included in app.yaml.
# - Run "tools/appengine_console.py APP_ID".
# - Import this module.
# - update_accounts.run() updates accounts.
# - Use the other two functions to fetch accounts or find duplicates
# without any changes to the datastore.
from google.appengine.ext import db
from codereview import models
def fetch_accounts():
  """Fetch all Account entities, grouped by lower-cased nickname.

  Pages through the datastore 100 entities at a time, using key-based
  filtering ('__key__ >') to advance between batches.

  Returns:
    A dict mapping lower_nickname -> list of Account entities.
  """
  query = models.Account.all()
  accounts = {}
  results = query.fetch(100)
  while results:
    last = None
    for account in results:
      if account.lower_nickname in accounts:
        accounts[account.lower_nickname].append(account)
      else:
        accounts[account.lower_nickname] = [account]
      last = account
    # last stays None only for an empty batch; guard against spinning.
    if last is None:
      break
    # Next batch: everything strictly after the last key seen.
    results = models.Account.all().filter('__key__ >',
                                          last.key()).fetch(100)
  return accounts
def find_duplicates(accounts):
  """Rename accounts whose lower-cased nickname collides with another.

  Within each colliding group the first account keeps its nickname;
  every later one gets a numeric suffix appended and is flagged fresh so
  the UI shows the "change nickname..." prompt.  The accounts dict is
  consumed (emptied) in the process.

  Args:
    accounts: dict mapping lower_nickname -> list of Account entities.

  Returns:
    The list of modified (not yet saved) Account entities.
  """
  modified = []
  while accounts:
    _, group = accounts.popitem()
    # The first entry of the group is the lucky one: it keeps its name.
    for suffix, account in enumerate(group[1:], 1):
      account.nickname = '%s%d' % (account.nickname, suffix)
      account.lower_nickname = account.nickname.lower()
      account.fresh = True  # triggers the "change nickname..." prompt
      modified.append(account)
  return modified
def run():
  """Fetch all accounts, rename duplicate nicknames, and save the changes."""
  accounts = fetch_accounts()
  print '%d accounts fetched' % len(accounts)
  tbd = find_duplicates(accounts)
  print 'Updating %d accounts' % len(tbd)
  # Single batched put: all renamed accounts are written together.
  db.put(tbd)
  print 'Updated accounts:'
  for account in tbd:
    print '  %s' % account.email
| Python |
"""Configuration."""
import logging
import os
import re
from google.appengine.ext.appstats import recording
logging.info('Loading %s from %s', __name__, __file__)
# Custom webapp middleware to add Appstats.
def webapp_add_wsgi_middleware(app):
  """App Engine hook: wrap every webapp WSGI app with Appstats recording."""
  app = recording.appstats_wsgi_middleware(app)
  return app
# Custom Appstats path normalization.
def appstats_normalize_path(path):
  """Collapse request paths into a small set of buckets for Appstats.

  User-specific suffixes and the feed name after /rss/ become a literal
  'X'; in every other path, runs of digits are replaced by 'X'.  This
  groups equivalent requests under one Appstats label.
  """
  for prefix in ('/user/', '/user_popup/'):
    if path.startswith(prefix):
      return prefix + 'X'
  if path.startswith('/rss/'):
    slash = path.find('/', 5)
    if slash > 0:
      return '%s/X' % path[:slash]
  return re.sub(r'\d+', 'X', path)
# Declare the Django version we need.
from google.appengine.dist import use_library
use_library('django', '1.2')
# Fail early if we can't import Django 1.x. Log identifying information.
import django
logging.info('django.__file__ = %r, django.VERSION = %r',
django.__file__, django.VERSION)
assert django.VERSION[0] >= 1, "This Django version is too old"
# Custom Django configuration.
# NOTE: All "main" scripts must import webapp.template before django.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.conf import settings
settings._target = None
| Python |
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utils."""
import os
from google.appengine.ext import testbed
from django.test import TestCase as _TestCase
class TestCase(_TestCase):
  """Customized Django TestCase.
  This class disables the setup of Django features that are not
  available on App Engine (e.g. fixture loading). And it initializes
  the Testbed class provided by the App Engine SDK.
  """
  # Fixture loading needs a SQL database, which App Engine does not
  # provide, so both fixture hooks are neutralized.
  def _fixture_setup(self): # defined in django.test.TestCase
    pass
  def _fixture_teardown(self): # defined in django.test.TestCase
    pass
  def setUp(self):
    # Activate a Testbed with datastore and user-service stubs for
    # every test; torn down again in tearDown.
    super(TestCase, self).setUp()
    self.testbed = testbed.Testbed()
    self.testbed.activate()
    self.testbed.init_datastore_v3_stub()
    self.testbed.init_user_stub()
  def tearDown(self):
    # Deactivate first so stub state never leaks into the next test.
    self.testbed.deactivate()
    super(TestCase, self).tearDown()
  def login(self, email):
    """Logs in a user identified by email."""
    os.environ['USER_EMAIL'] = email
  def logout(self):
    """Logs the user out."""
    os.environ['USER_EMAIL'] = ''
| Python |
#!/usr/bin/env python
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test case runner."""
import os
import re
import sys
import unittest
TESTS_DIR = os.path.dirname(__file__)
def collect_test_modules():
"""Collects and yields test modules."""
for fname in os.listdir(TESTS_DIR):
if not re.match(r'test_.*\.py$', fname):
continue
try:
yield __import__(fname[:-3])
except ImportError, err:
sys.stderr.write('Failed to import %s: %s\n' % (fname, err))
raise StopIteration
def setup_test_env(sdk_path):
  """Sets up App Engine/Django test environment.

  Args:
    sdk_path: path to the App Engine SDK checkout (must contain
      dev_appserver.py).
  """
  # Make the application package and the SDK importable.
  sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
  sys.path.insert(0, sdk_path)
  import dev_appserver
  dev_appserver.fix_sys_path()
  # google.appengine.ext.testbed.Testbed should set SERVER_SOFTWARE
  # and APPLICATION_ID environment variables, but we need them
  # earlier when Django import settings.py.
  os.environ['SERVER_SOFTWARE'] = 'DevTestrunner' # used in settings.py
  os.environ['APPLICATION_ID'] = 'test-codereview' # used in settings.py
  os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
  from google.appengine.dist import use_library
  use_library('django', '1.2')
def main():
  """Load the tests from every collected module and run them verbosely."""
  loader = unittest.TestLoader()
  module_suites = [loader.loadTestsFromModule(test_module)
                   for test_module in collect_test_modules()]
  runner = unittest.TextTestRunner(verbosity=2)
  runner.run(unittest.TestSuite(module_suites))
if __name__ == '__main__':
  # SDK location: explicit command-line argument, or the conventional
  # ../google_appengine sibling checkout as a fallback.
  if len(sys.argv) != 2:
    sdk_path = os.path.join('..', 'google_appengine')
    if not os.path.exists(os.path.join(sdk_path, 'dev_appserver.py')):
      sys.stderr.write('usage: %s SDK_PATH\n' % sys.argv[0])
      sys.exit(1)
  else:
    sdk_path = sys.argv[1]
  setup_test_env(sdk_path)
  main()
| Python |
# Re-puts entities of a given type, to set newly added properties.
#
# To run this script:
# - Make sure App Engine library (incl. yaml) is in PYTHONPATH.
# - Make sure that the remote API is included in app.yaml.
# - Run "tools/appengine_console.py APP_ID".
# - Import this module.
# - Import models from codereview.
# - update_entities.run(models.Issue) updates issues.
import logging
from google.appengine.ext import db
from codereview import models
import urllib2
def run(model_class, batch_size=100, last_key=None):
  """Re-put every entity of model_class so new properties get stored.

  Walks the entities in key order, batch_size at a time, halving the
  batch size on datastore timeouts and retrying each put until it
  succeeds.

  Args:
    model_class: the db.Model subclass whose entities are rewritten.
    batch_size: initial number of entities fetched per round trip.
    last_key: optional resume point; only entities with keys strictly
      greater than this are processed.
  """
  while True:
    q = model_class.all()
    if last_key:
      q.filter('__key__ >', last_key)
    q.order('__key__')
    this_batch_size = batch_size
    # Fetch one batch, shrinking the batch on repeated timeouts.
    while True:
      try:
        try:
          batch = q.fetch(this_batch_size)
        except urllib2.URLError, err:
          # The remote-api transport surfaces timeouts as URLError;
          # translate those into db.Timeout so one handler covers both.
          if 'timed out' in str(err):
            raise db.Timeout
          else:
            raise
        break
      except db.Timeout:
        logging.warn("Query timed out, retrying")
        if this_batch_size == 1:
          logging.critical("Unable to update entities, aborting")
          return
        this_batch_size //= 2
    if not batch:
      break
    # Retry the put until it sticks; keys holds the written keys.
    keys = None
    while not keys:
      try:
        keys = db.put(batch)
      except db.Timeout:
        logging.warn("Put timed out, retrying")
    last_key = keys[-1]
    print "Updated %d records" % (len(keys),)
| Python |
#!/usr/bin/env python
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import code
import getpass
import logging
import optparse
import os
import re
import sys
ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
LIB = os.path.join(ROOT, '..', 'google_appengine', 'lib')
sys.path.insert(0, os.path.join(ROOT, '..', 'google_appengine'))
sys.path.append(os.path.join(LIB, 'django_1_2'))
sys.path.append(os.path.join(LIB, 'fancy_urllib'))
sys.path.append(os.path.join(LIB, 'simplejson'))
sys.path.append(os.path.join(LIB, 'webob'))
sys.path.append(os.path.join(LIB, 'yaml', 'lib'))
sys.path.append(ROOT)
from google.appengine.ext.remote_api import remote_api_stub
import yaml
def default_auth_func():
  """Prompt for credentials, preferring EMAIL_ADDRESS from the environment.

  Returns:
    A (user, password) tuple for remote_api authentication.
  """
  user = os.environ.get('EMAIL_ADDRESS')
  if user:
    print('User: %s' % user)
  else:
    user = raw_input('Username:')
  return user, getpass.getpass('Password:')
def smart_auth_func():
  """Auth callback that tries cached credentials before prompting.

  Reads the email from $EMAIL_ADDRESS and the password from a .pwd file
  in the current directory; on any failure it falls back to the
  interactive default_auth_func().
  """
  try:
    email = os.environ['EMAIL_ADDRESS']
    with open('.pwd') as pwd_file:
      password = pwd_file.readline().strip()
    return email, password
  except (KeyError, IOError):
    return default_auth_func()
def default_app_id(directory):
  """Reads the application id from directory/app.yaml."""
  return yaml.load(open(os.path.join(directory, 'app.yaml')))['application']
def setup_env(app_id, host=None, auth_func=None):
  """Setup remote access to a GAE instance.

  Args:
    app_id: App Engine application id to connect to.
    host: hostname of the instance; defaults to <app_id>.appspot.com.
    auth_func: callable returning (user, password); defaults to
      smart_auth_func.

  Returns:
    A dict of symbols (modules, model classes, helpers) to expose in
    the interactive console.
  """
  auth_func = auth_func or smart_auth_func
  host = host or '%s.appspot.com' % app_id
  # pylint: disable=W0612
  from google.appengine.api import memcache
  from google.appengine.api.users import User
  from google.appengine.ext import db
  remote_api_stub.ConfigureRemoteDatastore(
      app_id, '/_ah/remote_api', auth_func, host)
  # Initialize environment.
  os.environ['SERVER_SOFTWARE'] = ''
  import appengine_config
  # Create shortcuts.
  import codereview
  from codereview import models, views
  # Symbols presented to the user.
  predefined_vars = locals().copy()
  del predefined_vars['appengine_config']
  del predefined_vars['auth_func']
  # Load all the models.
  # Any models attribute whose name starts like a CapWords class
  # (e.g. Issue, Patch) is exported by name.
  for i in dir(models):
    if re.match(r'[A-Z][a-z]', i[:2]):
      predefined_vars[i] = getattr(models, i)
  return predefined_vars
def main():
  """Parse command-line options and start the interactive console."""
  parser = optparse.OptionParser()
  parser.add_option('-v', '--verbose', action='count')
  options, args = parser.parse_args()
  # Positional args: [APP_ID [HOST]]; APP_ID defaults to app.yaml's value.
  if not args:
    app_id = default_app_id(ROOT)
  else:
    app_id = args[0]
  host = None
  if len(args) > 1:
    host = args[1]
  if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
  else:
    logging.basicConfig(level=logging.ERROR)
  predefined_vars = setup_env(app_id, host)
  prompt = (
      'App Engine interactive console for "%s".\n'
      'Available symbols:\n'
      '  %s\n') % (app_id, ', '.join(sorted(predefined_vars)))
  code.interact(prompt, None, predefined_vars)
if __name__ == '__main__':
sys.exit(main())
| Python |
#!/usr/bin/env python
import os
import sys
from optparse import HelpFormatter
sys.path.append(os.path.abspath('static/'))
import upload
class GCWikiHelpFormatter (HelpFormatter):
  """optparse help formatter that emits Google Code wiki markup.

  Headings are rendered as == wiki headings == followed by an HTML
  definition list, and every option becomes a <dt>/<dd> pair inside it.
  """
  def __init__(self,
               indent_increment=2,
               max_help_position=24,
               width=None,
               short_first=1):
    HelpFormatter.__init__(
      self, indent_increment, max_help_position, width, short_first)
    # True while a <dl> list is open and must be closed before the
    # next heading starts.
    self._dl_open = False
  def indent(self):
    self._pending = 'INDENT'
    HelpFormatter.indent(self)
  def dedent(self):
    self._pending = 'DEDENT'
    HelpFormatter.dedent(self)
  def format_usage(self, usage):
    return "*Usage summary:* `%s`\n" % usage
  def format_heading(self, heading):
    # Close the previous definition list, if any, then open a new one.
    pre = '\n</dl>\n' if self._dl_open else ''
    bars = '=' * (self.current_indent + 2)
    self._dl_open = True
    return "%s%s %s %s\n<dl>\n" % (pre, bars, heading, bars)
  def format_option(self, option):
    pieces = ['<dt>`%s`</dt>\n' % self.option_strings[option]]
    if option.help:
      pieces.append('<dd>%s</dd>\n' % self.expand_default(option))
    return ''.join(pieces)
def main():
  """Print the upload.py options wiki page: header plus formatted options."""
  upload.parser.formatter = GCWikiHelpFormatter()
  print HEADER
  print upload.parser.format_option_help()
  print '</dl>' # TODO: Formatter should do this
  print
HEADER = """#summary upload.py usage and options.
<wiki:comment>
THIS PAGE IS AUTOGENERATED. DO NOT EDIT.
To update this page run tools/uploadopts2wiki.py
</wiki:comment>
= upload.py Usage =
[http://codereview.appspot.com/static/upload.py upload.py] is a tool
for uploading diffs from a version control system to the codereview app.
*Usage summary:*
{{{upload.py [options] [-- diff_options]}}}
Diff options are passed to the diff command of the underlying system.
*Supported version control systems:*
* Git
* Mercurial
* Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
if __name__ == '__main__':
main()
| Python |
# Copyright 2008-2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Top-level URL mappings for Rietveld."""
# NOTE: Must import *, since Django looks for things here, e.g. handler500.
from django.conf.urls.defaults import *
# If you don't want to run Rietveld from the root level, add the
# subdirectory as shown in the following example:
#
# url(r'subpath/', include('codereview.urls')),
#
# Delegate every request to the codereview app's URL patterns.
urlpatterns = patterns(
    '',
    url(r'', include('codereview.urls')),
    )
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal Django settings."""
import os
from google.appengine.api import app_identity
APPEND_SLASH = False
DEBUG = os.environ['SERVER_SOFTWARE'].startswith('Dev')
INSTALLED_APPS = (
'codereview',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'codereview.middleware.AddUserToRequestMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
)
TEMPLATE_DEBUG = DEBUG
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
)
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
FILE_UPLOAD_MAX_MEMORY_SIZE = 1048576 # 1 MB
MEDIA_URL = '/static/'
appid = app_identity.get_application_id()
RIETVELD_INCOMING_MAIL_ADDRESS = ('reply@%s.appspotmail.com' % appid)
RIETVELD_INCOMING_MAIL_MAX_SIZE = 500 * 1024 # 500K
# Deployed revision string shown in the UI; '<unknown>' when no REVISION
# file exists (NOTE(review): presumably written at deploy time -- confirm).
RIETVELD_REVISION = '<unknown>'
try:
  RIETVELD_REVISION = open(
    os.path.join(os.path.dirname(__file__), 'REVISION')
  ).read()
except IOError:
  # A missing REVISION file is expected outside deployments.  The old
  # bare "except:" also swallowed KeyboardInterrupt/SystemExit; only
  # I/O errors are best-effort here.
  pass
UPLOAD_PY_SOURCE = os.path.join(os.path.dirname(__file__), 'upload.py')
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine data model (schema) definition for Rietveld."""
# Python imports
import logging
import md5
import os
import re
import time
# AppEngine imports
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import users
# Local imports
import engine
import patching
CONTEXT_CHOICES = (3, 10, 25, 50, 75, 100)
### GQL query cache ###
_query_cache = {}
def gql(cls, clause, *args, **kwds):
  """Build (or reuse) a GqlQuery for cls and bind the given arguments.

  Query objects are cached per query string in _query_cache, so repeated
  calls with the same model/clause pay the parse cost only once.

  Args:
    cls: a db.Model subclass.
    clause: a query clause, e.g. 'WHERE draft = TRUE'.
    *args, **kwds: positional and keyword arguments to be bound to the query.

  Returns:
    A db.GqlQuery instance with *args and **kwds bound.
  """
  query_string = 'SELECT * FROM %s %s' % (cls.kind(), clause)
  try:
    query = _query_cache[query_string]
  except KeyError:
    query = db.GqlQuery(query_string)
    _query_cache[query_string] = query
  query.bind(*args, **kwds)
  return query
### Issues, PatchSets, Patches, Contents, Comments, Messages ###
class Issue(db.Model):
  """The major top-level entity.
  It has one or more PatchSets as its descendants.
  """
  subject = db.StringProperty(required=True)
  description = db.TextProperty()
  # Base used to locate original file contents (Patch.get_content passes
  # issue.base to engine.FetchBase).
  base = db.StringProperty()
  # NOTE(review): presumably True when base files were uploaded by the
  # client instead of fetched from `base` -- confirm against views.py.
  local_base = db.BooleanProperty(default=False)
  owner = db.UserProperty(auto_current_user_add=True, required=True)
  created = db.DateTimeProperty(auto_now_add=True)
  modified = db.DateTimeProperty(auto_now=True)
  reviewers = db.ListProperty(db.Email)
  cc = db.ListProperty(db.Email)
  closed = db.BooleanProperty(default=False)
  private = db.BooleanProperty(default=False)
  # Persisted cache of the non-draft comment count; None for entities
  # created before this property existed (see num_comments).
  n_comments = db.IntegerProperty()
  # In-memory per-instance cache for the is_starred property.
  _is_starred = None
  @property
  def is_starred(self):
    """Whether the current user has this issue starred."""
    if self._is_starred is not None:
      return self._is_starred
    account = Account.current_user_account
    self._is_starred = account is not None and self.key().id() in account.stars
    return self._is_starred
  def user_can_edit(self, user):
    """Return true if the given user has permission to edit this issue."""
    # Only the issue owner may edit.
    return user == self.owner
  @property
  def edit_allowed(self):
    """Whether the current user can edit this issue."""
    account = Account.current_user_account
    if account is None:
      return False
    return self.user_can_edit(account.user)
  def update_comment_count(self, n):
    """Increment the n_comments property by n.
    If n_comments is None, compute the count through a query. (This
    is a transitional strategy while the database contains Issues
    created using a previous version of the schema.)
    """
    if self.n_comments is None:
      self.n_comments = self._get_num_comments()
    self.n_comments += n
  @property
  def num_comments(self):
    """The number of non-draft comments for this issue.
    This is almost an alias for self.n_comments, except that if
    n_comments is None, it is computed through a query, and stored,
    using n_comments as a cache.
    """
    if self.n_comments is None:
      self.n_comments = self._get_num_comments()
    return self.n_comments
  def _get_num_comments(self):
    """Helper to compute the number of comments through a query."""
    return gql(Comment,
               'WHERE ANCESTOR IS :1 AND draft = FALSE',
               self).count()
  # In-memory cache for num_drafts; draft counts are per-user and are
  # therefore never persisted on the entity.
  _num_drafts = None
  @property
  def num_drafts(self):
    """The number of draft comments on this issue for the current user.
    The value is expensive to compute, so it is cached.
    """
    if self._num_drafts is None:
      account = Account.current_user_account
      if account is None:
        self._num_drafts = 0
      else:
        query = gql(Comment,
                    'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE',
                    self, account.user)
        self._num_drafts = query.count()
    return self._num_drafts
class PatchSet(db.Model):
  """A set of patches uploaded together.
  This is a descendant of an Issue and has Patches as descendants.
  """
  issue = db.ReferenceProperty(Issue) # == parent
  message = db.StringProperty()
  # Raw uploaded diff data.
  data = db.BlobProperty()
  url = db.LinkProperty()
  created = db.DateTimeProperty(auto_now_add=True)
  modified = db.DateTimeProperty(auto_now=True)
  n_comments = db.IntegerProperty(default=0)
  def update_comment_count(self, n):
    """Increment the n_comments property by n."""
    self.n_comments = self.num_comments + n
  @property
  def num_comments(self):
    """The number of non-draft comments for this patchset.
    This is almost an alias for self.n_comments, except that if
    n_comments is None, 0 is returned.
    """
    # For older patchsets n_comments is None.
    return self.n_comments or 0
class Message(db.Model):
  """A copy of a message sent out in email.
  This is a descendant of an Issue.
  """
  issue = db.ReferenceProperty(Issue) # == parent
  subject = db.StringProperty()
  sender = db.EmailProperty()
  recipients = db.ListProperty(db.Email)
  date = db.DateTimeProperty(auto_now_add=True)
  text = db.TextProperty()
  draft = db.BooleanProperty(default=False)
  # In-memory per-instance cache for the approval property.
  _approval = None
  @property
  def approval(self):
    """Is True when the message represents an approval of the review."""
    if self._approval is None:
      # Must contain 'lgtm' in a line that doesn't start with '>'.
      self._approval = any(
          True for line in self.text.lower().splitlines()
          if not line.strip().startswith('>') and 'lgtm' in line)
      # Must not be issue owner: a self-"lgtm" never counts as approval.
      self._approval &= self.issue.owner.email() != self.sender
    return self._approval
class Content(db.Model):
  """The content of a text file.
  This is a descendant of a Patch.
  """
  # parent => Patch
  # Text content for text files; data for binary files.
  text = db.TextProperty()
  data = db.BlobProperty()
  # Checksum over text or data depending on the type of this content.
  checksum = db.TextProperty()
  # True when the content was uploaded by the client (as opposed to
  # fetched by the server); see Patch.get_content.
  is_uploaded = db.BooleanProperty(default=False)
  is_bad = db.BooleanProperty(default=False)
  file_too_large = db.BooleanProperty(default=False)
  @property
  def lines(self):
    """The text split into lines, retaining line endings."""
    if not self.text:
      return []
    return self.text.splitlines(True)
class Patch(db.Model):
  """A single patch, i.e. a set of changes to a single file.
  This is a descendant of a PatchSet.
  """
  patchset = db.ReferenceProperty(PatchSet) # == parent
  filename = db.StringProperty()
  status = db.StringProperty() # 'A', 'A +', 'M', 'D' etc
  # The unified-diff text of this patch.
  text = db.TextProperty()
  # Content of the file this patch is relative to (the "old" side).
  content = db.ReferenceProperty(Content)
  # Content of the file after applying this patch (the "new" side).
  patched_content = db.ReferenceProperty(Content, collection_name='patch2_set')
  is_binary = db.BooleanProperty(default=False)
  # Ids of patchsets that have a different version of this file.
  delta = db.ListProperty(int)
  delta_calculated = db.BooleanProperty(default=False)
  # In-memory per-instance cache for the lines property.
  _lines = None
  @property
  def lines(self):
    """The patch split into lines, retaining line endings.
    The value is cached.
    """
    if self._lines is not None:
      return self._lines
    if not self.text:
      lines = []
    else:
      lines = self.text.splitlines(True)
    self._lines = lines
    return lines
  _property_changes = None
  @property
  def property_changes(self):
    """The property changes split into lines.
    The value is cached.
    """
    # PEP 8: identity comparison with None (was "!= None").
    if self._property_changes is not None:
      return self._property_changes
    self._property_changes = []
    # svn appends property changes after a 67-underscore separator line.
    match = re.search('^Property changes on.*\n'+'_'*67+'$', self.text,
                      re.MULTILINE)
    if match:
      self._property_changes = self.text[match.end():].splitlines()
    return self._property_changes
  _num_added = None
  @property
  def num_added(self):
    """The number of line additions in this patch.
    The value is cached.
    """
    if self._num_added is None:
      # Subtract 1 to discount the '+++' file-header line of the diff.
      self._num_added = self.count_startswith('+') - 1
    return self._num_added
  _num_removed = None
  @property
  def num_removed(self):
    """The number of line removals in this patch.
    The value is cached.
    """
    if self._num_removed is None:
      # Subtract 1 to discount the '---' file-header line of the diff.
      self._num_removed = self.count_startswith('-') - 1
    return self._num_removed
  _num_chunks = None
  @property
  def num_chunks(self):
    """The number of 'chunks' in this patch.
    A chunk is a block of lines starting with '@@'.
    The value is cached.
    """
    if self._num_chunks is None:
      self._num_chunks = self.count_startswith('@@')
    return self._num_chunks
  _num_comments = None
  @property
  def num_comments(self):
    """The number of non-draft comments for this patch.
    The value is cached.
    """
    if self._num_comments is None:
      self._num_comments = gql(Comment,
                               'WHERE patch = :1 AND draft = FALSE',
                               self).count()
    return self._num_comments
  _num_drafts = None
  @property
  def num_drafts(self):
    """The number of draft comments on this patch for the current user.
    The value is expensive to compute, so it is cached.
    """
    if self._num_drafts is None:
      account = Account.current_user_account
      if account is None:
        self._num_drafts = 0
      else:
        query = gql(Comment,
                    'WHERE patch = :1 AND draft = TRUE AND author = :2',
                    self, account.user)
        self._num_drafts = query.count()
    return self._num_drafts
  def count_startswith(self, prefix):
    """Returns the number of lines with the specified prefix."""
    return len([l for l in self.lines if l.startswith(prefix)])
  def get_content(self):
    """Get self.content, or fetch it if necessary.
    This is the content of the file to which this patch is relative.
    Returns:
      a Content instance.
    Raises:
      engine.FetchError: If there was a problem fetching it.
    """
    try:
      if self.content is not None:
        if self.content.is_bad:
          msg = 'Bad content. Try to upload again.'
          logging.warn('Patch.get_content: %s', msg)
          raise engine.FetchError(msg)
        # PEP 8: identity comparison with None (was "== None").
        if self.content.is_uploaded and self.content.text is None:
          msg = 'Upload in progress.'
          logging.warn('Patch.get_content: %s', msg)
          raise engine.FetchError(msg)
        else:
          return self.content
    except db.Error:
      # This may happen when a Content entity was deleted behind our back.
      self.content = None
    # No usable cached content: fetch the base file and persist it.
    content = engine.FetchBase(self.patchset.issue.base, self)
    content.put()
    self.content = content
    self.put()
    return content
  def get_patched_content(self):
    """Get self.patched_content, computing it if necessary.
    This is the content of the file after applying this patch.
    Returns:
      a Content instance.
    Raises:
      engine.FetchError: If there was a problem fetching the old content.
    """
    try:
      if self.patched_content is not None:
        return self.patched_content
    except db.Error:
      # This may happen when a Content entity was deleted behind our back.
      self.patched_content = None
    old_lines = self.get_content().text.splitlines(True)
    logging.info('Creating patched_content for %s', self.filename)
    chunks = patching.ParsePatchToChunks(self.lines, self.filename)
    new_lines = []
    # Only the "new" side of each chunk is needed to build the result.
    for tag, old, new in patching.PatchChunks(old_lines, chunks):
      new_lines.extend(new)
    text = db.Text(''.join(new_lines))
    patched_content = Content(text=text, parent=self)
    patched_content.put()
    self.patched_content = patched_content
    self.put()
    return patched_content
  @property
  def no_base_file(self):
    """Returns True iff the base file is not available."""
    return self.content and self.content.file_too_large
class Comment(db.Model):
  """A Comment for a specific line of a specific file.
  This is a descendant of a Patch.
  """
  patch = db.ReferenceProperty(Patch) # == parent
  message_id = db.StringProperty() # == key_name
  author = db.UserProperty(auto_current_user_add=True)
  # auto_now: updated on every put, so this is "last modified", not "created".
  date = db.DateTimeProperty(auto_now=True)
  lineno = db.IntegerProperty()
  text = db.TextProperty()
  # True when the comment is on the left (old) side of a diff view.
  left = db.BooleanProperty()
  draft = db.BooleanProperty(required=True, default=True)
  def complete(self, patch):
    """Set the shorttext and buckets attributes.

    NOTE(review): the `patch` argument is not used in this method.
    """
    # TODO(guido): Turn these into caching properties instead.
    # The strategy for buckets is that we want groups of lines that
    # start with > to be quoted (and not displayed by
    # default). Whitespace-only lines are not considered either quoted
    # or not quoted. Same goes for lines that go like "On ... user
    # wrote:".
    cur_bucket = []
    quoted = None
    self.buckets = []
    # _Append reads cur_bucket/quoted from the enclosing scope at call
    # time, so rebinding cur_bucket below starts a fresh bucket.
    def _Append():
      if cur_bucket:
        self.buckets.append(Bucket(text="\n".join(cur_bucket),
                                   quoted=bool(quoted)))
    lines = self.text.splitlines()
    for line in lines:
      if line.startswith("On ") and line.endswith(":"):
        # Attribution lines ("On ... wrote:") don't change quoted state.
        pass
      elif line.startswith(">"):
        if quoted is False:
          _Append()
          cur_bucket = []
        quoted = True
      elif line.strip():
        if quoted is True:
          _Append()
          cur_bucket = []
        quoted = False
      cur_bucket.append(line)
    # Flush the final bucket.
    _Append()
    self.shorttext = self.text.lstrip()[:50].rstrip()
    # Grab the first 50 chars from the first non-quoted bucket
    for bucket in self.buckets:
      if not bucket.quoted:
        self.shorttext = bucket.text.lstrip()[:50].rstrip()
        break
class Bucket(db.Model):
  """A 'Bucket' of text.
  A comment may consist of multiple text buckets, some of which may be
  collapsed by default (when they represent quoted text).
  NOTE: This entity is never written to the database. See Comment.complete().
  """
  # TODO(guido): Flesh this out.
  text = db.TextProperty()
  # True when this bucket holds quoted (">"-prefixed) text.
  quoted = db.BooleanProperty()
### Repositories and Branches ###
class Repository(db.Model):
  """A specific Subversion repository."""
  name = db.StringProperty(required=True)
  url = db.LinkProperty(required=True)
  owner = db.UserProperty(auto_current_user_add=True)
  def __str__(self):
    # Display repositories by name (used e.g. in form choice labels).
    return self.name
class Branch(db.Model):
  """A trunk, branch, or a tag in a specific Subversion repository."""
  repo = db.ReferenceProperty(Repository, required=True)
  # Cache repo.name as repo_name, to speed up set_branch_choices()
  # in views.IssueBaseForm.
  repo_name = db.StringProperty()
  # '*trunk*' sorts before 'branch' and 'tag' in the choice list.
  category = db.StringProperty(required=True,
                               choices=('*trunk*', 'branch', 'tag'))
  name = db.StringProperty(required=True)
  url = db.LinkProperty(required=True)
  owner = db.UserProperty(auto_current_user_add=True)
### Accounts ###

class Account(db.Model):
    """Maps a user or email address to a user-selected nickname, and more.

    Nicknames do not have to be unique.

    The default nickname is generated from the email address by
    stripping the first '@' sign and everything after it. The email
    should not be empty nor should it start with '@' (AssertionError
    error is raised if either of these happens).

    This also holds a list of ids of starred issues. The expectation
    that you won't have more than a dozen or so starred issues (a few
    hundred in extreme cases) and the memory used up by a list of
    integers of that size is very modest, so this is an efficient
    solution. (If someone found a use case for having thousands of
    starred issues we'd have to think of a different approach.)
    """

    user = db.UserProperty(auto_current_user_add=True, required=True)
    email = db.EmailProperty(required=True)  # key == <email>
    nickname = db.StringProperty(required=True)
    default_context = db.IntegerProperty(default=engine.DEFAULT_CONTEXT,
                                         choices=CONTEXT_CHOICES)
    default_column_width = db.IntegerProperty(default=engine.DEFAULT_COLUMN_WIDTH)
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now=True)
    stars = db.ListProperty(int)  # Issue ids of all starred issues
    fresh = db.BooleanProperty()
    uploadpy_hint = db.BooleanProperty(default=True)
    notify_by_email = db.BooleanProperty(default=True)
    notify_by_chat = db.BooleanProperty(default=False)

    # Current user's Account. Updated by middleware.AddUserToRequestMiddleware.
    current_user_account = None

    # Denormalized lower-case copies of email and nickname, kept in sync by
    # put() so that queries can be case-insensitive.
    lower_email = db.StringProperty()
    lower_nickname = db.StringProperty()
    xsrf_secret = db.BlobProperty()

    # Note that this doesn't get called when doing multi-entity puts.
    def put(self):
        """Store the entity, refreshing the lower-case denormalized fields."""
        self.lower_email = str(self.email).lower()
        self.lower_nickname = self.nickname.lower()
        super(Account, self).put()

    @classmethod
    def get_account_for_user(cls, user):
        """Get the Account for a user, creating a default one if needed."""
        email = user.email()
        assert email
        key = '<%s>' % email
        # Since usually the account already exists, first try getting it
        # without the transaction implied by get_or_insert().
        account = cls.get_by_key_name(key)
        if account is not None:
            return account
        nickname = cls.create_nickname_for_user(user)
        return cls.get_or_insert(key, user=user, email=email, nickname=nickname,
                                 fresh=True)

    @classmethod
    def create_nickname_for_user(cls, user):
        """Returns a unique nickname for a user."""
        name = nickname = user.email().split('@', 1)[0]
        # Fetch all existing nicknames that start with the same first
        # character, so uniqueness can be checked locally below without
        # issuing one query per candidate suffix.
        next_char = chr(ord(nickname[0].lower())+1)
        existing_nicks = [account.lower_nickname
                          for account in cls.gql(('WHERE lower_nickname >= :1 AND '
                                                  'lower_nickname < :2'),
                                                 nickname.lower(), next_char)]
        suffix = 0
        while nickname.lower() in existing_nicks:
            suffix += 1
            nickname = '%s%d' % (name, suffix)
        return nickname

    @classmethod
    def get_nickname_for_user(cls, user):
        """Get the nickname for a user."""
        return cls.get_account_for_user(user).nickname

    @classmethod
    def get_account_for_email(cls, email):
        """Get the Account for an email address, or return None."""
        assert email
        key = '<%s>' % email
        return cls.get_by_key_name(key)

    @classmethod
    def get_accounts_for_emails(cls, emails):
        """Get the Accounts for each of a list of email addresses."""
        return cls.get_by_key_name(['<%s>' % email for email in emails])

    @classmethod
    def get_by_key_name(cls, key, **kwds):
        """Override db.Model.get_by_key_name() to use cached value if possible."""
        if not kwds and cls.current_user_account is not None:
            if key == cls.current_user_account.key().name():
                return cls.current_user_account
        return super(Account, cls).get_by_key_name(key, **kwds)

    @classmethod
    def get_multiple_accounts_by_email(cls, emails):
        """Get multiple accounts. Returns a dict by email."""
        results = {}
        keys = []
        for email in emails:
            if cls.current_user_account and email == cls.current_user_account.email:
                results[email] = cls.current_user_account
            else:
                keys.append('<%s>' % email)
        if keys:
            accounts = cls.get_by_key_name(keys)
            for account in accounts:
                if account is not None:
                    results[account.email] = account
        return results

    @classmethod
    def get_nickname_for_email(cls, email, default=None):
        """Get the nickname for an email address, possibly a default.

        If default is None a generic nickname is computed from the email
        address.

        Args:
          email: email address.
          default: If given and no account is found, returned as the default value.
        Returns:
          Nickname for given email.
        """
        account = cls.get_account_for_email(email)
        if account is not None and account.nickname:
            return account.nickname
        if default is not None:
            return default
        return email.replace('@', '_')

    @classmethod
    def get_account_for_nickname(cls, nickname):
        """Get the list of Accounts that have this nickname."""
        assert nickname
        assert '@' not in nickname
        return cls.all().filter('lower_nickname =', nickname.lower()).get()

    @classmethod
    def get_email_for_nickname(cls, nickname):
        """Turn a nickname into an email address.

        If the nickname is not unique or does not exist, this returns None.
        """
        account = cls.get_account_for_nickname(nickname)
        if account is None:
            return None
        return account.email

    def user_has_selected_nickname(self):
        """Return True if the user picked the nickname.

        Normally this returns 'not self.fresh', but if that property is
        None, we assume that if the created and modified timestamp are
        within 2 seconds, the account is fresh (i.e. the user hasn't
        selected a nickname yet). We then also update self.fresh, so it
        is used as a cache and may even be written back if we're lucky.
        """
        if self.fresh is None:
            delta = self.created - self.modified
            # Simulate delta = abs(delta)
            if delta.days < 0:
                delta = -delta
            self.fresh = (delta.days == 0 and delta.seconds < 2)
        return not self.fresh

    # Cached list of issue ids that have drafts by this user; lazily
    # initialized by the drafts property below.
    _drafts = None

    @property
    def drafts(self):
        """A list of issue ids that have drafts by this user.

        This is cached in memcache.
        """
        if self._drafts is None:
            if self._initialize_drafts():
                self._save_drafts()
        return self._drafts

    def update_drafts(self, issue, have_drafts=None):
        """Update the user's draft status for this issue.

        Args:
          issue: an Issue instance.
          have_drafts: optional bool forcing the draft status. By default,
            issue.num_drafts is inspected (which may query the datastore).

        The Account is written to the datastore if necessary.
        """
        dirty = False
        if self._drafts is None:
            dirty = self._initialize_drafts()
        id = issue.key().id()
        if have_drafts is None:
            have_drafts = bool(issue.num_drafts)  # Beware, this may do a query.
        if have_drafts:
            if id not in self._drafts:
                self._drafts.append(id)
                dirty = True
        else:
            if id in self._drafts:
                self._drafts.remove(id)
                dirty = True
        if dirty:
            self._save_drafts()

    def _initialize_drafts(self):
        """Initialize self._drafts from scratch.

        This mostly exists as a schema conversion utility.

        Returns:
          True if the user should call self._save_drafts(), False if not.
        """
        drafts = memcache.get('user_drafts:' + self.email)
        if drafts is not None:
            self._drafts = drafts
            ##logging.info('HIT: %s -> %s', self.email, self._drafts)
            return False
        # We're looking for the Issue key id. The ancestry of comments goes:
        # Issue -> PatchSet -> Patch -> Comment.
        issue_ids = set(comment.key().parent().parent().parent().id()
                        for comment in gql(Comment,
                                           'WHERE author = :1 AND draft = TRUE',
                                           self.user))
        self._drafts = list(issue_ids)
        ##logging.info('INITIALIZED: %s -> %s', self.email, self._drafts)
        return True

    def _save_drafts(self):
        """Save self._drafts to memcache."""
        ##logging.info('SAVING: %s -> %s', self.email, self._drafts)
        memcache.set('user_drafts:' + self.email, self._drafts, 3600)

    def get_xsrf_token(self, offset=0):
        """Return an XSRF token for the current user."""
        # This code assumes that
        # self.user.email() == users.get_current_user().email()
        current_user = users.get_current_user()
        if self.user.user_id() != current_user.user_id():
            # Mainly for Google Account plus conversion.
            logging.info('Updating user_id for %s from %s to %s' % (
                self.user.email(), self.user.user_id(), current_user.user_id()))
            self.user = current_user
            self.put()
        if not self.xsrf_secret:
            self.xsrf_secret = os.urandom(8)
            self.put()
        m = md5.new(self.xsrf_secret)
        email_str = self.lower_email
        if isinstance(email_str, unicode):
            email_str = email_str.encode('utf-8')
        # BUG FIX: hash the UTF-8-encoded email. The original computed
        # email_str but then hashed self.lower_email, which leaves the
        # encoding dead code and raises UnicodeEncodeError for non-ASCII
        # unicode emails.
        m.update(email_str)
        when = int(time.time()) // 3600 + offset
        m.update(str(when))
        return m.hexdigest()
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import md5
from django.contrib.syndication.feeds import Feed
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.utils.feedgenerator import Atom1Feed
import library
import models
class BaseFeed(Feed):
    """Shared configuration and item rendering for all Rietveld feeds."""

    title = 'Code Review'
    description = 'Rietveld: Code Review Tool hosted on Google App Engine'
    feed_type = Atom1Feed

    def link(self):
        # Every feed links back to the site index.
        return reverse('codereview.views.index')

    def author_name(self):
        return 'rietveld'

    def item_guid(self, item):
        # Stable GUID derived from the entity's datastore key.
        digest = md5.new(str(item.key())).hexdigest()
        return 'urn:md5:%s' % (digest)

    def item_link(self, item):
        if isinstance(item, models.PatchSet):
            if item.data is None:
                # Patch set is too large, only the splitted diffs are available.
                return reverse('codereview.views.show',
                               args=[item.parent_key().id()])
            return reverse('codereview.views.download',
                           args=[item.issue.key().id(), item.key().id()])
        if isinstance(item, models.Message):
            issue_url = reverse('codereview.views.show',
                                args=[item.issue.key().id()])
            return '%s#msg-%s' % (issue_url, item.key())
        return reverse('codereview.views.show', args=[item.key().id()])

    def item_title(self, item):
        return 'the title'

    def item_author_name(self, item):
        if isinstance(item, models.Issue):
            return library.get_nickname(item.owner, True)
        if isinstance(item, models.PatchSet):
            return library.get_nickname(item.issue.owner, True)
        if isinstance(item, models.Message):
            return library.get_nickname(item.sender, True)
        return 'Rietveld'

    def item_pubdate(self, item):
        if isinstance(item, models.Issue):
            return item.modified
        if isinstance(item, models.PatchSet):
            # Use created, not modified, so that commenting on
            # a patch set does not bump its place in the RSS feed.
            return item.created
        if isinstance(item, models.Message):
            return item.date
        return None
class BaseUserFeed(BaseFeed):
    """Base class for feeds that are scoped to one user's account."""

    def get_object(self, bits):
        """Returns the account for the requested user feed.

        bits is a list of URL path elements. The first element of this list
        should be the user's nickname. A 404 is raised if the list is empty or
        has more than one element or if the a user with that nickname
        doesn't exist.
        """
        if len(bits) != 1:
            raise ObjectDoesNotExist
        nickname = '%s' % bits[0]
        account = models.Account.get_account_for_nickname(nickname)
        if account is None:
            raise ObjectDoesNotExist
        return account
class ReviewsFeed(BaseUserFeed):
    """Feed of open issues on which the named user is a reviewer."""
    title = 'Code Review - All issues I have to review'

    def items(self, obj):
        # The reviewers list stores raw email addresses, so bind the email.
        return _rss_helper(obj.email, 'closed = FALSE AND reviewers = :1',
                           use_email=True)
class ClosedFeed(BaseUserFeed):
    """Feed of the named user's issues that have been closed."""
    title = "Code Review - Reviews closed by me"

    def items(self, obj):
        return _rss_helper(obj.email, 'closed = TRUE AND owner = :1')
class MineFeed(BaseUserFeed):
    """Feed of the named user's open issues."""
    title = 'Code Review - My issues'

    def items(self, obj):
        return _rss_helper(obj.email, 'closed = FALSE AND owner = :1')
class AllFeed(BaseFeed):
    """Feed of all open, non-private issues site-wide."""
    title = 'Code Review - All issues'

    def items(self):
        # RSS_LIMIT is a module-level constant defined below.
        query = models.Issue.gql('WHERE closed = FALSE AND private = FALSE '
                                 'ORDER BY modified DESC')
        return query.fetch(RSS_LIMIT)
class OneIssueFeed(BaseFeed):
    """Feed of the patch sets and messages belonging to a single issue."""

    title = 'Code Review'

    def link(self):
        return reverse('codereview.views.index')

    def get_object(self, bits):
        # Expect exactly one URL path element: the numeric issue id.
        if len(bits) != 1:
            raise ObjectDoesNotExist
        issue = models.Issue.get_by_id(int(bits[0]))
        if not issue:
            raise ObjectDoesNotExist
        return issue

    def title(self, obj):
        return 'Code review - Issue %d: %s' % (obj.key().id(), obj.subject)

    def items(self, obj):
        # Interleave patch sets and messages, ordered by publication date.
        entries = list(obj.patchset_set) + list(obj.message_set)
        entries.sort(key=self.item_pubdate)
        return entries
### RSS feeds ###

# Maximum number of issues reported by RSS feeds
RSS_LIMIT = 20


def _rss_helper(email, query_string, use_email=False):
    """Fetch up to RSS_LIMIT non-private issues matching query_string.

    The single bind parameter is the account's email when use_email is
    true, otherwise the account's user object. Returns an empty list if
    no account exists for the given email.
    """
    account = models.Account.get_account_for_email(email)
    if account is None:
        return []
    bind_param = use_email and account.email or account.user
    query = models.Issue.gql('WHERE %s AND private = FALSE '
                             'ORDER BY modified DESC' % query_string,
                             bind_param)
    return query.fetch(RSS_LIMIT)
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Intra-region diff utilities.
Intra-region diff highlights the blocks of code which have been changed or
deleted within a region. So instead of highlighting the whole region marked as
changed, the user can see what exactly was changed within that region.
Terminology:
'region' is a list of consecutive code lines.
'word' is the unit of intra-region diff. Its definition is arbitrary based on
what we think as to be a good unit of difference between two regions.
'block' is a small section of code within a region. It can span multiple
lines. There can be multiple non overlapping blocks within a region. A block
can potentially span the whole region.
The blocks have two representations. One is of the format (offset1, offset2,
size) which is returned by the SequenceMatcher to indicate a match of
length 'size' starting at offset1 in the first/old line and starting at offset2
in the second/new line. We convert this representation to a pair of tuples i.e.
(offset1, size) and (offset2, size) for rendering each side of the diff
separately. This latter representation is also more efficient for doing
compaction of adjacent blocks which reduces the size of the HTML markup. See
CompactBlocks for more details.
SequenceMatcher always returns one special matching block at the end with
contents (len(line1), len(line2), 0). We retain this special block as it
simplifies for loops in rendering the last non-matching block. All functions
which deal with the sequence of blocks assume presence of the special block at
the end of the sequence and retain it.
"""
import cgi
import difflib
import re
# Tag to begin a diff chunk. The %s is filled with a CSS class name.
BEGIN_TAG = "<span class=\"%s\">"

# Tag to end a diff block.
END_TAG = "</span>"

# Tag used for visual tab indication.
TAB_TAG = "<span class=\"visualtab\">»</span>"

# Color scheme to govern the display properties of diff blocks and matching
# blocks. Each value e.g. 'oldlight' corresponds to a CSS style.
COLOR_SCHEME = {
    'old': {
        'match': 'oldlight',
        'diff': 'olddark',
        'bckgrnd': 'oldlight',
    },
    'new': {
        'match': 'newlight',
        'diff': 'newdark',
        'bckgrnd': 'newlight',
    },
    'oldmove': {
        'match': 'movelight',
        'diff': 'oldmovedark',
        'bckgrnd': 'movelight'
    },
    'newmove': {
        'match': 'newlight',
        'diff': 'newdark',
        'bckgrnd': 'newlight'
    },
}

# Regular expressions to tokenize lines into 'words'. Default is 'd'.
EXPRS = {
    'a': r'(\w+|[^\w\s]+|\s+)',
    'b': r'([A-Za-z0-9]+|[^A-Za-z0-9])',
    'c': r'([A-Za-z0-9_]+|[^A-Za-z0-9_])',
    'd': r'([^\W_]+|[\W_])',
}

# Maximum total characters in old and new lines for doing intra-region diffs.
# Intra-region diff for larger regions is hard to comprehend and wastes CPU
# time.
MAX_TOTAL_LEN = 10000
def _ExpandTabs(text, column, tabsize, mark_tabs=False):
"""Expand tab characters in a string into spaces.
Args:
text: a string containing tab characters.
column: the initial column for the first character in text
tabsize: tab stops occur at columns that are multiples of tabsize
mark_tabs: if true, leave a tab character as the first character
of the expansion, so that the caller can find where
the tabs were.
Note that calling _ExpandTabs with mark_tabs=True is not idempotent.
"""
expanded = ""
while True:
tabpos = text.find("\t")
if tabpos < 0:
break
fillwidth = tabsize - (tabpos + column) % tabsize
column += tabpos + fillwidth
if mark_tabs:
fill = "\t" + " " * (fillwidth - 1)
else:
fill = " " * fillwidth
expanded += text[0:tabpos] + fill
text = text[tabpos+1:]
return expanded + text
def Break(text, offset=0, limit=80, brk="\n ", tabsize=8, mark_tabs=False):
    """Break text into lines.

    Break text, which begins at column offset, each time it reaches
    column limit. To break the text, insert brk, which does not count toward
    the column count of the next line and is assumed to be valid HTML.

    During the text breaking process, replaces tabs with spaces up
    to the next column that is a multiple of tabsize.
    If mark_tabs is true, replace the first space of each expanded
    tab with TAB_TAG.

    Input and output are assumed to be in UTF-8; the computation is done
    in Unicode. (Still not good enough if zero-width characters are
    present.) If the input is not valid UTF-8, then the encoding is
    passed through, potentially breaking up multi-byte characters.

    We pass the line through cgi.escape before returning it.
    A trailing newline is always stripped from the input first.
    """
    assert tabsize > 0, tabsize
    if text.endswith("\n"):
        text = text[:-1]
    try:
        text = unicode(text, "utf-8")
    except (TypeError, UnicodeError):
        # TypeError: text is already a unicode object; UnicodeError: not
        # valid UTF-8 -- in both cases pass the value through unchanged.
        # (Was a bare 'except:', which also hid unrelated errors.)
        pass
    # Expand all tabs.
    # If mark_tabs is true, we retain one \t character as a marker during
    # expansion so that we later replace it with an HTML snippet.
    text = _ExpandTabs(text, offset, tabsize, mark_tabs)
    # Perform wrapping.
    if len(text) > limit - offset:
        # The first segment is shortened by the starting column offset.
        parts, text = [text[0:limit-offset]], text[limit-offset:]
        while len(text) > limit:
            parts.append(text[0:limit])
            text = text[limit:]
        parts.append(text)
        text = brk.join([cgi.escape(p) for p in parts])
    else:
        text = cgi.escape(text)
    # Colorize tab markers
    text = text.replace("\t", TAB_TAG)
    if isinstance(text, unicode):
        return text.encode("utf-8", "replace")
    return text
def CompactBlocks(blocks):
    """Compacts adjacent code blocks.

    Two blocks are merged whenever the first ends exactly where the
    second begins, allowing further processing on larger spans.

    Args:
      blocks: [(offset1, size), ...]

    Returns:
      A list with the same structure as the input with adjacent blocks
      merged. However, the last block (which is always assumed to have
      a zero size) is never merged. For example, the input
      [(0, 2), (2, 8), (10, 5), (15, 0)]
      will produce the output [(0, 15), (15, 0)].
    """
    if len(blocks) == 1:
        return blocks
    merged = [blocks[0]]
    for start, size in blocks[1:-1]:
        prev_start, prev_size = merged[-1]
        if prev_start + prev_size == start:
            # Adjacent: grow the previous block in place.
            merged[-1] = (prev_start, prev_size + size)
        else:
            merged.append((start, size))
    # The zero-size sentinel block is appended untouched.
    merged.append(blocks[-1])
    return merged
def FilterBlocks(blocks, filter_func):
    """Gets rid of any blocks if filter_func evaluates false for them.

    Args:
      blocks: [(offset1, offset2, size), ...]; must have at least 1 entry
      filter_func: a boolean function taking a single argument of the form
        (offset1, offset2, size)

    Returns:
      A list with the same structure with entries for which filter_func()
      returns false removed. However, the last block is always included.
    """
    kept = []
    for candidate in blocks[:-1]:
        if filter_func(candidate):
            kept.append(candidate)
    # The trailing 'special' sentinel block always survives filtering.
    kept.append(blocks[-1])
    return kept
def GetDiffParams(expr='d', min_match_ratio=0.6, min_match_size=2, dbg=False):
    """Returns a tuple of various parameters which affect intra region diffs.

    Args:
      expr: regular expression id to use to identify 'words' in the intra region
        diff
      min_match_ratio: minimum similarity between regions to qualify for intra
        region diff
      min_match_size: the smallest matching block size to use. Blocks smaller
        than this are ignored.
      dbg: to turn on generation of debugging information for the diff

    Returns:
      4 tuple (expr, min_match_ratio, min_match_size, dbg) that can be used to
      customize diff. It can be passed to functions like WordDiff and
      IntraLineDiff.
    """
    assert expr in EXPRS
    # Same semantics as the old 'in xrange(1, 5)' membership test (including
    # rejecting non-integers) but portable to Python 3 and without building
    # or scanning a range object.
    assert min_match_size in (1, 2, 3, 4)
    assert 0.0 < min_match_ratio < 1.0
    return (expr, min_match_ratio, min_match_size, dbg)
def CanDoIRDiff(old_lines, new_lines):
    """Tells if it would be worth computing the intra region diff.

    Calculating IR diff is costly and is usually helpful only for small regions.
    We use a heuristic that if the total number of characters is more than a
    certain threshold then we assume it is not worth computing the IR diff.

    Args:
      old_lines: an array of strings containing old text
      new_lines: an array of strings containing new text

    Returns:
      True if we think it is worth computing IR diff for the region defined
      by old_lines and new_lines, False otherwise.

    TODO: Let GetDiffParams handle MAX_TOTAL_LEN param also.
    """
    size = sum(len(line) for line in old_lines)
    size += sum(len(line) for line in new_lines)
    return size <= MAX_TOTAL_LEN
def WordDiff(line1, line2, diff_params):
    """Returns blocks with positions indicating word level diffs.

    Args:
      line1: string representing the left part of the diff
      line2: string representing the right part of the diff
      diff_params: return value of GetDiffParams

    Returns:
      A tuple (blocks, ratio) where:
        blocks: [(offset1, offset2, size), ...] such that
                line1[offset1:offset1+size] == line2[offset2:offset2+size]
                and the last block is always (len(line1), len(line2), 0)
        ratio: a float giving the diff ratio computed by SequenceMatcher.
    """
    match_expr, min_match_ratio, min_match_size, dbg = diff_params
    exp = EXPRS[match_expr]
    # Strings may have been left undecoded up to now. Assume UTF-8.
    # NOTE(review): the bare excepts below also swallow TypeError when the
    # input is already a unicode object; that appears to be the intended
    # pass-through behavior -- confirm before narrowing them.
    try:
        line1 = unicode(line1, "utf8")
    except:
        pass
    try:
        line2 = unicode(line2, "utf8")
    except:
        pass
    # Tokenize both lines into 'words' using the selected expression.
    a = re.findall(exp, line1, re.U)
    b = re.findall(exp, line2, re.U)
    s = difflib.SequenceMatcher(None, a, b)
    matching_blocks = s.get_matching_blocks()
    ratio = s.ratio()
    # Don't show intra region diffs if both lines are too different and there is
    # more than one block of difference. If there is only one change then we
    # still show the intra region diff regardless of how different the blocks
    # are.
    # Note: We compare len(matching_blocks) with 3 because one block of change
    # results in 2 matching blocks. We add the one special block and we get 3
    # matching blocks per one block of change.
    if ratio < min_match_ratio and len(matching_blocks) > 3:
        return ([(0, 0, 0)], ratio)
    # For now convert to character level blocks because we already have
    # the code to deal with folding across lines for character blocks.
    # Create arrays lena and lenb which have cumulative word lengths
    # corresponding to word positions in a and b.
    lena = []
    last = 0
    for w in a:
        lena.append(last)
        last += len(w)
    lenb = []
    last = 0
    for w in b:
        lenb.append(last)
        last += len(w)
    # Sentinel entries so lena[s1+blen] below is valid for a match that
    # extends through the final word.
    lena.append(len(line1))
    lenb.append(len(line2))
    # Convert to character blocks
    blocks = []
    for s1, s2, blen in matching_blocks[:-1]:
        apos = lena[s1]
        bpos = lenb[s2]
        block_len = lena[s1+blen] - apos
        blocks.append((apos, bpos, block_len))
    # Recreate the special block.
    blocks.append((len(line1), len(line2), 0))
    # Filter any matching blocks which are smaller than the desired threshold.
    # We don't remove matching blocks with only a newline character as doing so
    # results in showing the matching newline character as non matching which
    # doesn't look good.
    blocks = FilterBlocks(blocks, lambda b: (b[2] >= min_match_size or
                                             line1[b[0]:b[0]+b[2]] == '\n'))
    return (blocks, ratio)
def IntraLineDiff(line1, line2, diff_params, diff_func=WordDiff):
    """Computes intraline diff blocks.

    Args:
      line1: string representing the left part of the diff
      line2: string representing the right part of the diff
      diff_params: return value of GetDiffParams
      diff_func: a function whose signature matches that of WordDiff() above

    Returns:
      A tuple of (blocks1, blocks2) corresponding to line1 and line2.
      Each element of the tuple is an array of (start_pos, length)
      tuples denoting a diff block.
    """
    blocks, ratio = diff_func(line1, line2, diff_params)
    # Project the 3-tuples onto each side of the diff.
    left = [(start1, length) for (start1, start2, length) in blocks]
    right = [(start2, length) for (start1, start2, length) in blocks]
    return (left, right, ratio)
def DumpDiff(blocks, line1, line2):
    """Helper function to debug diff related problems.

    Prints each block and the text it covers in both lines to stdout
    (Python 2 print statements).

    Args:
      blocks: [(offset1, offset2, size), ...]
      line1: string representing the left part of the diff
      line2: string representing the right part of the diff
    """
    for offset1, offset2, size in blocks:
        print offset1, offset2, size
        print offset1, size, ": ", line1[offset1:offset1+size]
        print offset2, size, ": ", line2[offset2:offset2+size]
def RenderIntraLineDiff(blocks, line, tag, dbg_info=None, limit=80, indent=5,
                        tabsize=8, mark_tabs=False):
    """Renders the diff blocks returned by IntraLineDiff function.

    Args:
      blocks: [(start_pos, size), ...]
      line: line of code on which the blocks are to be rendered.
      tag: 'new' or 'old' to control the color scheme.
      dbg_info: a string that holds debugging information header. Debug
        information is rendered only if dbg_info is not None.
      limit: folding limit to be passed to the Fold function.
      indent: indentation size to be passed to the Fold function.
      tabsize: tab stops occur at columns that are multiples of tabsize
      mark_tabs: if True, mark the first character of each expanded tab visually

    Returns:
      A 3-tuple (rendered, has_newline, debug_info). 'rendered' is the
      rendered version of the input 'line'; 'has_newline' tells if the line
      has a matching newline character; 'debug_info' is the (possibly
      extended) debug string, or the original dbg_info value.
    """
    res = ""
    prev_start, prev_len = 0, 0
    has_newline = False
    debug_info = dbg_info
    if dbg_info:
        debug_info += "\nBlock Count: %d\nBlocks: " % (len(blocks) - 1)
    for curr_start, curr_len in blocks:
        if dbg_info and curr_len > 0:
            debug_info += Fold("\n(%d, %d):|%s|" %
                               (curr_start, curr_len,
                                line[curr_start:curr_start+curr_len]),
                               limit, indent, tabsize, mark_tabs)
        # Render the non-matching gap before this block, then the matching
        # block itself.
        res += FoldBlock(line, prev_start + prev_len, curr_start, limit, indent,
                         tag, 'diff', tabsize, mark_tabs)
        res += FoldBlock(line, curr_start, curr_start + curr_len, limit, indent,
                         tag, 'match', tabsize, mark_tabs)
        # TODO: This test should be out of loop rather than inside. Once we
        # filter out some junk from blocks (e.g. some empty blocks) we should do
        # this test only on the last matching block.
        if line[curr_start:curr_start+curr_len].endswith('\n'):
            has_newline = True
        prev_start, prev_len = curr_start, curr_len
    return (res, has_newline, debug_info)
def FoldBlock(src, start, end, limit, indent, tag, btype, tabsize=8,
              mark_tabs=False):
    """Folds and renders a block.

    Args:
      src: line of code
      start: starting position of the block within 'src'.
      end: ending position of the block within 'src'.
      limit: folding limit
      indent: indentation to use for folding.
      tag: 'new' or 'old' to control the color scheme.
      btype: block type i.e. 'match' or 'diff' to control the color scheme.
      tabsize: tab stops occur at columns that are multiples of tabsize
      mark_tabs: if True, mark the first character of each expanded tab visually

    Returns:
      A string representing the rendered block.
    """
    text = src[start:end]
    # Empty blocks and lone newlines render as nothing; newline management
    # is handled by the caller.
    if start >= end or text == '\n':
        return ""
    fbegin, lend, nl_plus_indent = GetTags(tag, btype, indent)
    # 'bol' is beginning of line. The text we care about begins at byte
    # offset 'start', but tabs before it can push it to a larger column,
    # so expand them first to find the true column offset.
    bol_prefix = _ExpandTabs(src[0:start], 0, tabsize)
    offset_from_bol = len(bol_prefix) % limit
    text = Break(text, offset_from_bol, limit,
                 lend + nl_plus_indent + fbegin, tabsize, mark_tabs)
    if text:
        text = fbegin + text + lend
    # A block that begins exactly at a fold boundary (and is not at the very
    # start of the line) needs the newline + indent prefix itself.
    if offset_from_bol == 0 and start != 0:
        text = nl_plus_indent + text
    return text
def GetTags(tag, btype, indent):
    """Returns various tags for rendering diff blocks.

    Args:
      tag: a key from COLOR_SCHEME
      btype: 'match' or 'diff'
      indent: indentation to use

    Returns:
      A 3 tuple (begin_tag, end_tag, formatted_indent_block)
    """
    assert tag in COLOR_SCHEME
    assert btype in ['match', 'diff']
    scheme = COLOR_SCHEME[tag]
    fbegin = BEGIN_TAG % scheme[btype]
    lend = END_TAG
    nl_plus_indent = '\n'
    if indent > 0:
        # Wrap the indent spaces in the background style for this scheme.
        bbegin = BEGIN_TAG % scheme['bckgrnd']
        nl_plus_indent += bbegin + cgi.escape(" "*indent) + lend
    return fbegin, lend, nl_plus_indent
def ConvertToSingleLine(lines):
    """Transforms a sequence of strings into a single line.

    Returns the state that can be used to reconstruct the original lines with
    the newline separators placed at the original place.

    Args:
      lines: sequence of strings

    Returns:
      Returns (single_line, state) tuple. 'state' shouldn't be modified by the
      caller. It is only used to pass to other functions which will do certain
      operations on this state.

      'state' is an array containing a dictionary for each item in lines. Each
      dictionary has two elements 'pos' and 'blocks'. 'pos' is the end position
      of each line in the final converted string. 'blocks' is an array of blocks
      for each line of code. These blocks are added using MarkBlock function.
    """
    state = []
    running_total = 0
    for line in lines:
        running_total += len(line)
        # TODO: Use a tuple instead of a dict for each entry.
        state.append({'pos': running_total,  # the line split point
                      'blocks': []})         # blocks which belong to this line
    joined = "".join(lines)
    assert len(state) == len(lines)
    return (joined, state)
def MarkBlock(state, begin, end):
    """Marks a block on a region such that it doesn't cross line boundaries.

    It is an operation that can be performed on the single line which was
    returned by the ConvertToSingleLine function. This operation marks arbitrary
    block [begin,end) on the text. It also ensures that if [begin,end) crosses
    line boundaries in the original region then it splits the section up in 2 or
    more blocks such that no block crosses the boundaries.

    Args:
      state: the state returned by ConvertToSingleLine function. The state
        contained is modified by this function.
      begin: Beginning of the block.
      end: End of the block (exclusive).

    Returns:
      None.
    """
    # TODO: Make sure already existing blocks don't overlap.
    if begin == end:
        # Zero-length blocks are never recorded.
        return
    line_start = 0
    for entry in state:
        line_end = entry['pos']
        if line_start <= begin < line_end:
            if end < line_end:
                # The block fits entirely within this line.
                entry['blocks'].append((begin, end))
            else:
                # The block crosses the line boundary: record the portion on
                # this line and recurse for the remainder.
                entry['blocks'].append((begin, line_end))
                MarkBlock(state, line_end, end)
            break
        line_start = line_end
def GetBlocks(state):
    """Returns all the blocks corresponding to the lines in the region.

    Args:
      state: the state returned by ConvertToSingleLine().

    Returns:
      An array of [(start_pos, length), ..] with an entry for each line in the
      region.
    """
    per_line = []
    line_start = 0
    for entry in state:
        line_end = entry['pos']
        # Re-base block start points relative to this line's beginning and
        # convert (begin, end) pairs into (offset, length) pairs.
        line_blocks = [(b - line_start, e - b) for (b, e) in entry['blocks']]
        # Terminate each line's list with an end-marker block.
        line_blocks.append((line_end - line_start, 0))
        per_line.append(line_blocks)
        line_start = line_end
    return per_line
def IntraRegionDiff(old_lines, new_lines, diff_params):
    """Computes intra region diff.

    Args:
      old_lines: array of strings
      new_lines: array of strings
      diff_params: return value of GetDiffParams

    Returns:
      A tuple (old_blocks, new_blocks, ratio) containing matching blocks for
      old and new lines.
    """
    # Flatten each region into a single string, remembering line boundaries.
    old_text, old_state = ConvertToSingleLine(old_lines)
    new_text, new_state = ConvertToSingleLine(new_lines)
    old_side, new_side, ratio = IntraLineDiff(old_text, new_text, diff_params)
    # Mark each matching span, splitting it at line boundaries as needed.
    for block_start, block_len in old_side:
        MarkBlock(old_state, block_start, block_start + block_len)
    for block_start, block_len in new_side:
        MarkBlock(new_state, block_start, block_start + block_len)
    return (GetBlocks(old_state), GetBlocks(new_state), ratio)
def NormalizeBlocks(blocks, line):
  """Normalizes block representation of an intra line diff.

  The same diff can be expressed by several equivalent block lists: when a
  matching run of spaces borders a non-matching run of spaces, difflib may
  anchor the match at either end of the run, which makes visually identical
  lines render differently.  To keep the rendering stable this routine
  pushes such all-space matching blocks towards the end of the space run.
  With S a space character and () marking a matching block:

    SSSS(SS)(ABCD) => SSSS(SS)(ABCD)
    (SS)SSSS(ABCD) => SSSS(SS)(ABCD)
    (SSSS)SS(ABCD) => SS(SSSS)(ABCD)

  Only this single space-run case is handled; a fully generic
  normalization would be too expensive.

  Args:
    blocks: An array of (offset, len) tuples defined on 'line'.  These
      blocks mark the matching areas; anything between them is considered
      non-matching.
    line: The text string on which the blocks are defined.

  Returns:
    An array of (offset, len) tuples representing the same diff in
    normalized form.
  """
  normalized = []
  prev_off, prev_size = blocks[0]
  for cur_off, cur_size in blocks[1:]:
    # gap is the non-matching stretch between the previous match and the
    # current one; span covers the previous match plus that gap.
    gap = cur_off - (prev_off + prev_size)
    span = line[prev_off:cur_off]
    if prev_size > 0 and gap > 0 and span == ' ' * len(span):
      # Both parts are non-empty and all spaces: slide the matching block
      # to the end of the span, i.e. normalize.
      normalized.append((prev_off + gap, prev_size))
    else:
      # Keep the existing matching block.
      normalized.append((prev_off, prev_size))
    prev_off, prev_size = cur_off, cur_size
  normalized.append(blocks[-1])
  assert len(normalized) == len(blocks)
  return normalized
def RenderIntraRegionDiff(lines, diff_blocks, tag, ratio, limit=80, indent=5,
                          tabsize=8, mark_tabs=False, dbg=False):
  """Renders intra region diff for one side.

  Args:
    lines: list of strings representing source code in the region.
    diff_blocks: blocks returned for this region by IntraRegionDiff().
    tag: 'new' or 'old'.
    ratio: similarity ratio returned by the diff computing function.
    limit: folding limit.
    indent: indentation size.
    tabsize: tab stops occur at columns that are multiples of tabsize.
    mark_tabs: if True, mark the first character of each expanded tab
      visually.
    dbg: indicates if debug information should be rendered.

  Returns:
    A list of strings representing the rendered version of each item in
    input 'lines'.
  """
  dbg_info = ('Ratio: %.1f' % ratio) if dbg else None
  rendered = []
  for src_line, raw_blocks in zip(lines, diff_blocks):
    # Normalize first so equal-looking lines render identically, then
    # merge adjacent blocks before rendering.
    cooked = CompactBlocks(NormalizeBlocks(raw_blocks, src_line))
    rendered.append(RenderIntraLineDiff(cooked,
                                        src_line,
                                        tag,
                                        dbg_info=dbg_info,
                                        limit=limit,
                                        indent=indent,
                                        tabsize=tabsize,
                                        mark_tabs=mark_tabs))
  assert len(rendered) == len(lines)
  return rendered
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to read and apply a unified diff without forking patch(1).
For a discussion of the unified diff format, see my blog on Artima:
http://www.artima.com/weblogs/viewpost.jsp?thread=164293
"""
import difflib
import logging
import re
import sys
_CHUNK_RE = re.compile(r"""
@@
\s+
-
(?: (\d+) (?: , (\d+) )?)
\s+
\+
(?: (\d+) (?: , (\d+) )?)
\s+
@@
""", re.VERBOSE)
def PatchLines(old_lines, patch_lines, name="<patch>"):
  """Patches the old_lines with patches read from patch_lines.

  This only reads unified diffs; header lines are ignored.

  Yields (tag, old, new) tuples where old and new are lists of lines.  The
  tag either starts with "error" or is a tag from difflib: "equal",
  "insert", "delete", "replace".  After "error" is yielded, no more tuples
  are yielded.  It is possible that consecutive "equal" tuples are yielded.
  """
  chunks = ParsePatchToChunks(patch_lines, name)
  if chunks is None:
    # Report the parse failure in the same (tag, old, new) shape.
    return iter([("error: ParsePatchToChunks failed", [], [])])
  return PatchChunks(old_lines, chunks)
def PatchChunks(old_lines, chunks):
  """Patches old_lines with parsed chunks.

  Yields (tag, old, new) tuples where old and new are lists of lines.  The
  tag either starts with "error" or is a tag from difflib: "equal",
  "insert", "delete", "replace".  After "error" is yielded, no more tuples
  are yielded.  It is possible that consecutive "equal" tuples are yielded.
  """
  if not chunks:
    # The patch is a no-op.
    yield ("equal", old_lines, old_lines)
    return

  cursor = 0
  for (old_i, old_j), (new_i, new_j), old_chunk, new_chunk in chunks:
    # Emit the untouched lines preceding this chunk.
    unchanged = old_lines[cursor:old_i]
    if unchanged:
      yield "equal", unchanged, unchanged
    cursor = old_i
    # The chunk must match the target file exactly.
    target = old_lines[old_i:old_j]
    if target != old_chunk:
      logging.warning("mismatch:%s.%s.", target, old_chunk)
      yield ("error: old chunk mismatch", target, old_chunk)
      return
    # TODO(guido): ParsePatch knows the diff details, but throws the info away
    matcher = difflib.SequenceMatcher(None, old_chunk, new_chunk)
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
      yield tag, old_chunk[i1:i2], new_chunk[j1:j2]
    cursor = old_j
  # Copy the final matching chunk if any.
  trailing = old_lines[cursor:]
  if trailing:
    yield ("equal", trailing, trailing)
def ParseRevision(lines):
  """Parse the revision number out of the raw lines of the patch.

  Only the first ten lines are inspected, and the scan stops at the first
  chunk header.  Returns 0 (new file) if no revision number was found.
  """
  for line in lines[:10]:
    if line.startswith('@'):
      # Chunk headers start here; no revision info follows.
      return 0
    match = re.match(r'---\s.*\(.*\s(\d+)\)\s*$', line)
    if match:
      return int(match.group(1))
  return 0
_NO_NEWLINE_MESSAGE = "\\ No newline at end of file"


def _SplitRawChunk(raw_chunk):
  """Split accumulated [(tag, line), ...] pairs into (old_lines, new_lines).

  Context lines (" ") appear on both sides, "-" lines only on the old side
  and "+" lines only on the new side.
  """
  old_chunk = [rest for tag, rest in raw_chunk if tag in (" ", "-")]
  new_chunk = [rest for tag, rest in raw_chunk if tag in (" ", "+")]
  return old_chunk, new_chunk


def ParsePatchToChunks(lines, name="<patch>"):
  """Parses a patch from a list of lines.

  Each chunk is a tuple (old_range, new_range, old_lines, new_lines) where
  the ranges are (i, j) list-index pairs into the old and new file.

  Returns a list of chunks (possibly empty); or None if there's a problem.
  """
  lineno = 0
  raw_chunk = []
  chunks = []
  old_range = new_range = None
  old_last = new_last = 0
  in_prelude = True
  for line in lines:
    lineno += 1
    if in_prelude:
      # Skip leading lines until after we've seen one starting with '+++'
      if line.startswith("+++"):
        in_prelude = False
      continue
    match = _CHUNK_RE.match(line)
    if match:
      if raw_chunk:
        # Process the lines in the previous chunk.
        old_chunk, new_chunk = _SplitRawChunk(raw_chunk)
        # Check consistency against the previous @@ header.
        old_i, old_j = old_range
        new_i, new_j = new_range
        if len(old_chunk) != old_j - old_i or len(new_chunk) != new_j - new_i:
          logging.warn("%s:%s: previous chunk has incorrect length",
                       name, lineno)
          return None
        chunks.append((old_range, new_range, old_chunk, new_chunk))
        raw_chunk = []
      # Parse the @@ header.  A missing count defaults to 1.  int() is used
      # instead of the historical long(): identical in py2 (ints
      # auto-promote) and valid in py3.
      old_ln, old_n, new_ln, new_n = match.groups()
      old_ln, old_n, new_ln, new_n = map(int,
                                         (old_ln, old_n or 1,
                                          new_ln, new_n or 1))
      # Convert the numbers to list indices we can use.
      if old_n == 0:
        old_i = old_ln
      else:
        old_i = old_ln - 1
      old_j = old_i + old_n
      old_range = old_i, old_j
      if new_n == 0:
        new_i = new_ln
      else:
        new_i = new_ln - 1
      new_j = new_i + new_n
      new_range = new_i, new_j
      # Check header consistency with previous header.
      if old_i < old_last or new_i < new_last:
        logging.warn("%s:%s: chunk header out of order: %r",
                     name, lineno, line)
        return None
      if old_i - old_last != new_i - new_last:
        logging.warn("%s:%s: inconsistent chunk header: %r",
                     name, lineno, line)
        return None
      old_last = old_j
      new_last = new_j
    else:
      # line[:1] instead of line[0] so a completely empty line can't raise
      # IndexError; it falls through to the garbage handling below.
      tag, rest = line[:1], line[1:]
      if tag in (" ", "-", "+"):
        raw_chunk.append((tag, rest))
      elif line.startswith(_NO_NEWLINE_MESSAGE):
        # TODO(guido): need to check that no more lines follow for this file
        if raw_chunk:
          last_tag, last_rest = raw_chunk[-1]
          if last_rest.endswith("\n"):
            raw_chunk[-1] = (last_tag, last_rest[:-1])
      else:
        # Only log if it's a non-blank line.  Blank lines we see a lot.
        if line and line.strip():
          logging.warn("%s:%d: indecypherable input: %r", name, lineno, line)
        if chunks or raw_chunk:
          break  # Trailing garbage isn't so bad
        return None
  if raw_chunk:
    # Process the lines in the last chunk.
    old_chunk, new_chunk = _SplitRawChunk(raw_chunk)
    old_i, old_j = old_range
    new_i, new_j = new_range
    if len(old_chunk) != old_j - old_i or len(new_chunk) != new_j - new_i:
      # Report through logging like every other error path in this function
      # (this used to print to sys.stderr, inconsistently).
      logging.warn("%s:%s: last chunk has incorrect length", name, lineno)
      return None
    chunks.append((old_range, new_range, old_chunk, new_chunk))
  return chunks
def ParsePatchToLines(lines):
  """Parses a patch from a list of lines.

  Returns None on error, otherwise a list of 3-tuples:
    (old_line_no, new_line_no, line)
  A line number can be 0 if it doesn't exist in the old/new file.
  """
  # TODO: can we share some of this code with ParsePatchToChunks?
  result = []
  in_prelude = True
  # Initialized so a malformed patch with a content line before the first
  # @@ header can't raise NameError; such lines count from 0.
  old_ln = new_ln = 0
  for line in lines:
    if in_prelude:
      result.append((0, 0, line))
      # Skip leading lines until after we've seen one starting with '+++'
      if line.startswith("+++"):
        in_prelude = False
    elif line.startswith("@"):
      result.append((0, 0, line))
      match = _CHUNK_RE.match(line)
      if not match:
        logging.warn("ParsePatchToLines match failed on %s", line)
        return None
      old_ln = int(match.groups()[0])
      new_ln = int(match.groups()[2])
    else:
      # startswith() instead of line[0] so an empty line can't raise
      # IndexError; it falls through to the catch-all branch.
      if line.startswith("-"):
        result.append((old_ln, 0, line))
        old_ln += 1
      elif line.startswith("+"):
        result.append((0, new_ln, line))
        new_ln += 1
      elif line.startswith(" "):
        result.append((old_ln, new_ln, line))
        old_ln += 1
        new_ln += 1
      elif line.startswith(_NO_NEWLINE_MESSAGE):
        continue
      else:  # Something else, could be property changes etc.
        result.append((0, 0, line))
  return result
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""URL mappings for the codereview package."""
# NOTE: Must import *, since Django looks for things here, e.g. handler500.
from django.conf.urls.defaults import *
import django.views.defaults
from codereview import feeds
# URL dispatch table.  Each entry maps a URL regex to a view callable in
# codereview.views; positional regex groups (issue id, patchset id, patch
# id, ...) are passed to the view as positional arguments.
urlpatterns = patterns(
    'codereview.views',
    (r'^$', 'index'),
    (r'^all$', 'all'),
    (r'^mine$', 'mine'),
    (r'^starred$', 'starred'),
    (r'^new$', 'new'),
    (r'^upload$', 'upload'),
    (r'^(\d+)$', 'show', {}, 'show_bare_issue_number'),
    (r'^(\d+)/(show)?$', 'show'),
    (r'^(\d+)/add$', 'add'),
    (r'^(\d+)/edit$', 'edit'),
    (r'^(\d+)/delete$', 'delete'),
    (r'^(\d+)/close$', 'close'),
    (r'^(\d+)/mail$', 'mailissue'),
    (r'^(\d+)/publish$', 'publish'),
    (r'^download/issue(\d+)_(\d+)\.diff', 'download'),
    (r'^download/issue(\d+)_(\d+)_(\d+)\.diff', 'download_patch'),
    (r'^(\d+)/patch/(\d+)/(\d+)$', 'patch'),
    (r'^(\d+)/image/(\d+)/(\d+)/(\d+)$', 'image'),
    (r'^(\d+)/diff/(\d+)/(.+)$', 'diff'),
    (r'^(\d+)/diff2/(\d+):(\d+)/(.+)$', 'diff2'),
    (r'^(\d+)/diff_skipped_lines/(\d+)/(\d+)/(\d+)/(\d+)/([tba])/(\d+)$',
     'diff_skipped_lines'),
    # Prefix-only URLs exist so {% url %} reversing works; hitting them
    # directly is a 404.
    (r'^(\d+)/diff_skipped_lines/(\d+)/(\d+)/$',
     django.views.defaults.page_not_found, {}, 'diff_skipped_lines_prefix'),
    (r'^(\d+)/diff2_skipped_lines/(\d+):(\d+)/(\d+)/(\d+)/(\d+)/([tba])/(\d+)$',
     'diff2_skipped_lines'),
    (r'^(\d+)/diff2_skipped_lines/(\d+):(\d+)/(\d+)/$',
     django.views.defaults.page_not_found, {}, 'diff2_skipped_lines_prefix'),
    (r'^(\d+)/upload_content/(\d+)/(\d+)$', 'upload_content'),
    (r'^(\d+)/upload_patch/(\d+)$', 'upload_patch'),
    (r'^(\d+)/description$', 'description'),
    (r'^(\d+)/fields', 'fields'),
    (r'^(\d+)/star$', 'star'),
    (r'^(\d+)/unstar$', 'unstar'),
    (r'^(\d+)/draft_message$', 'draft_message'),
    (r'^api/(\d+)/?$', 'api_issue'),
    (r'^api/(\d+)/(\d+)/?$', 'api_patchset'),
    (r'^user/(.+)$', 'show_user'),
    (r'^inline_draft$', 'inline_draft'),
    (r'^repos$', 'repos'),
    (r'^repo_new$', 'repo_new'),
    (r'^repo_init$', 'repo_init'),
    (r'^branch_new/(\d+)$', 'branch_new'),
    (r'^branch_edit/(\d+)$', 'branch_edit'),
    (r'^branch_delete/(\d+)$', 'branch_delete'),
    (r'^settings$', 'settings'),
    (r'^account_delete$', 'account_delete'),
    (r'^user_popup/(.+)$', 'user_popup'),
    (r'^(\d+)/patchset/(\d+)$', 'patchset'),
    (r'^(\d+)/patchset/(\d+)/delete$', 'delete_patchset'),
    (r'^account$', 'account'),
    (r'^use_uploadpy$', 'use_uploadpy'),
    (r'^_ah/xmpp/message/chat/', 'incoming_chat'),
    (r'^_ah/mail/(.*)', 'incoming_mail'),
    (r'^xsrf_token$', 'xsrf_token'),
    # patching upload.py on the fly
    (r'^static/upload.py$', 'customized_upload_py'),
    (r'^search$', 'search'),
    )
# Feeds served by Django's syndication framework; the trailing part of the
# /rss/... URL selects an entry from this dict.
feed_dict = {
  'reviews': feeds.ReviewsFeed,
  'closed': feeds.ClosedFeed,
  'mine' : feeds.MineFeed,
  'all': feeds.AllFeed,
  'issue' : feeds.OneIssueFeed,
}
urlpatterns += patterns(
    '',
    (r'^rss/(?P<url>.*)$', 'django.contrib.syndication.views.feed',
     {'feed_dict': feed_dict}),
    )
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom middleware. Some of this may be generally useful."""
from google.appengine.api import users
import models
class AddUserToRequestMiddleware(object):
  """Add a user object and a user_is_admin flag to each request."""

  def process_request(self, request):
    """Annotate the request and refresh the cached current Account."""
    user = users.get_current_user()
    request.user = user
    request.user_is_admin = users.is_current_user_admin()
    # Update the cached value of the current user's Account; None when
    # nobody is signed in.
    account = None
    if user is not None:
      account = models.Account.get_account_for_user(user)
    models.Account.current_user_account = account
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Django template library for Rietveld."""
import cgi
import logging
from google.appengine.api import memcache
from google.appengine.api import users
import django.template
import django.utils.safestring
from django.core.urlresolvers import reverse
import models
# Library instance on which the filters/tags below register themselves.
register = django.template.Library()
# Process-local cache of email -> rendered user-link HTML; complements
# memcache (see get_links_for_users).
user_cache = {}
def get_links_for_users(user_emails):
  """Return a dictionary of email->link to user page and fill caches.

  Lookups go through three layers, cheapest first: the module-level
  user_cache dict, memcache, and finally Account entities in the
  datastore.  Results found in slower layers are written back to the
  faster ones.  Emails with no Account (or no user-selected nickname)
  fall back to the escaped local part of the address as plain text.
  """
  link_dict = {}
  remaining_emails = set(user_emails)
  # Seed every entry with the escaped email username as a plain-text
  # fallback; entries resolved below overwrite this.
  for email in remaining_emails:
    nick = email.split('@', 1)[0]
    link_dict[email] = cgi.escape(nick)
  # First layer: the per-process dict cache.
  for email in remaining_emails:
    if email in user_cache:
      link_dict[email] = user_cache[email]
  remaining_emails = remaining_emails - set(user_cache)
  if not remaining_emails:
    return link_dict
  # Second layer: memcache; hits are copied into the local cache too.
  memcache_results = memcache.get_multi(remaining_emails,
                                        key_prefix="show_user:")
  for email in memcache_results:
    link_dict[email] = memcache_results[email]
    user_cache[email] = memcache_results[email]
  remaining_emails = remaining_emails - set(memcache_results)
  if not remaining_emails:
    return link_dict
  # Third layer: the datastore.  Only accounts whose owner picked a
  # nickname get a real profile link.
  accounts = models.Account.get_accounts_for_emails(remaining_emails)
  for account in accounts:
    if account and account.user_has_selected_nickname:
      ret = ('<a href="%s" onMouseOver="M_showUserInfoPopup(this)">%s</a>' %
             (reverse('codereview.views.show_user', args=[account.nickname]),
              cgi.escape(account.nickname)))
      link_dict[account.email] = ret
  # Backfill both caches with everything the datastore pass produced,
  # including the plain-text fallbacks for unknown accounts (300s TTL).
  datastore_results = dict((e, link_dict[e]) for e in remaining_emails)
  memcache.set_multi(datastore_results, 300, key_prefix='show_user:')
  user_cache.update(datastore_results)
  return link_dict
def get_link_for_user(email):
  """Return the HTML link fragment for a single user's profile page."""
  return get_links_for_users([email])[email]
@register.filter
def show_user(email, arg=None, autoescape=None, memcache_results=None):
  """Render a link to the user's dashboard, with text being the nickname."""
  if isinstance(email, users.User):
    email = email.email()
  if not arg:
    # Unless suppressed via arg, the signed-in user sees themselves as 'me'.
    viewer = users.get_current_user()
    if viewer is not None and email == viewer.email():
      return 'me'
  return django.utils.safestring.mark_safe(get_link_for_user(email))
@register.filter
def show_users(email_list, arg=None):
  """Render list of links to each user's dashboard."""
  emails = [e.email() if isinstance(e, users.User) else e
            for e in email_list]
  links = get_links_for_users(emails)
  if not arg:
    # Unless suppressed via arg, the signed-in user appears as 'me'.
    viewer = users.get_current_user()
    if viewer is not None:
      links[viewer.email()] = 'me'
  return django.utils.safestring.mark_safe(', '.join(
      links[email] for email in email_list))
class UrlAppendViewSettingsNode(django.template.Node):
  """Django template tag that appends context and column_width parameter.

  This tag should be used after any URL that requires view settings.

  Example:
    <a href='{%url /foo%}{%urlappend_view_settings%}'>

  The tag tries to get the current column width and context from the
  template context and if they're present it returns '?param1&param2',
  otherwise it returns an empty string.
  """

  def __init__(self):
    """Constructor."""
    self.view_context = django.template.Variable('context')
    self.view_colwidth = django.template.Variable('column_width')

  def render(self, context):
    """Returns a HTML fragment."""
    params = []
    try:
      ctx = self.view_context.resolve(context)
    except django.template.VariableDoesNotExist:
      ctx = -1
    if ctx is None:
      # 'context' present but None means "whole file".
      params.append('context=')
    elif isinstance(ctx, int) and ctx > 0:
      params.append('context=%d' % ctx)
    try:
      colwidth = self.view_colwidth.resolve(context)
    except django.template.VariableDoesNotExist:
      colwidth = None
    if colwidth is not None:
      params.append('column_width=%d' % colwidth)
    if params:
      return '?%s' % '&'.join(params)
    return ''
@register.tag
def urlappend_view_settings(parser, token):
  """Template tag entry point; the logic lives in UrlAppendViewSettingsNode."""
  return UrlAppendViewSettingsNode()
def get_nickname(email, never_me=False, request=None):
  """Return a nickname for an email address.

  If 'never_me' is True, 'me' is not returned if 'email' belongs to the
  current logged in user.  If 'request' is a HttpRequest, it is used to
  cache the nickname returned by models.Account.get_nickname_for_email().
  """
  if isinstance(email, users.User):
    email = email.email()
  if not never_me:
    # Prefer the user cached on the request (set by middleware).
    viewer = request.user if request is not None else users.get_current_user()
    if viewer is not None and email == viewer.email():
      return 'me'
  if request is None:
    return models.Account.get_nickname_for_email(email)
  cache = getattr(request, '_nicknames', None)
  if cache is None:
    cache = request._nicknames = {}
  if email not in cache:
    cache[email] = models.Account.get_nickname_for_email(email)
  return cache[email]
class NicknameNode(django.template.Node):
  """Renders a nickname for a given email address.

  The return value is cached if a HttpRequest is available in a
  'request' template variable.

  The template tag accepts one or two arguments.  The first argument is
  the template variable for the email address.  If the optional second
  argument evaluates to True, 'me' as nickname is never rendered.

  Example usage:
    {% cached_nickname msg.sender %}
    {% cached_nickname msg.sender True %}
  """

  def __init__(self, email_address, never_me=''):
    """Constructor.

    'email_address' is the name of the template variable that holds an
    email address.  If 'never_me' is a non-blank string, 'me' won't be
    returned.
    """
    self.email_address = django.template.Variable(email_address)
    self.never_me = bool(never_me.strip())
    self.is_multi = False

  def render(self, context):
    """Resolve the email variable and return the (possibly cached) nickname."""
    try:
      resolved = self.email_address.resolve(context)
    except django.template.VariableDoesNotExist:
      return ''
    request = context.get('request')
    if not self.is_multi:
      return get_nickname(resolved, self.never_me, request)
    # is_multi: the variable holds an iterable of addresses.
    return ', '.join(get_nickname(addr, self.never_me, request)
                     for addr in resolved)
@register.tag
def nickname(parser, token):
  """Almost the same as nickname filter but the result is cached."""
  bits = token.split_contents()
  if len(bits) == 3:
    tag_name, email_address, never_me = bits
  elif len(bits) == 2:
    tag_name, email_address = bits
    never_me = ''
  else:
    raise django.template.TemplateSyntaxError(
        "%r requires exactly one or two arguments" % token.contents.split()[0])
  return NicknameNode(email_address, never_me)
@register.tag
def nicknames(parser, token):
  """Wrapper for the nickname tag with the is_multi flag enabled."""
  multi_node = nickname(parser, token)
  multi_node.is_multi = True
  return multi_node
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views for Rietveld."""
### Imports ###
# Python imports
import binascii
import datetime
import email # see incoming_mail()
import email.utils
import logging
import md5
import os
import random
import re
import urllib
from cStringIO import StringIO
from xml.etree import ElementTree
# AppEngine imports
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.api import xmpp
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
from google.appengine.runtime import DeadlineExceededError
from google.appengine.runtime import apiproxy_errors
# Django imports
# TODO(guido): Don't import classes/functions directly.
from django import forms
# Import settings as django_settings to avoid name conflict with settings().
from django.conf import settings as django_settings
from django.http import HttpResponse, HttpResponseRedirect
from django.http import HttpResponseForbidden, HttpResponseNotFound
from django.http import HttpResponseBadRequest
from django.shortcuts import render_to_response
import django.template
from django.template import RequestContext
from django.utils import simplejson
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
# Local imports
import models
import engine
import library
import patching
# Add our own template library.
# Registering it as a builtin makes its tags/filters available in every
# template without an explicit {% load %}.
_library_name = __name__.rsplit('.', 1)[0] + '.library'
if not django.template.libraries.get(_library_name, None):
  django.template.add_to_builtins(_library_name)
### Constants ###
IS_DEV = os.environ['SERVER_SOFTWARE'].startswith('Dev')  # Development server
### Form classes ###
class AccountInput(forms.TextInput):
  """Text input widget with jQuery autocompletion of account emails."""
  # Associates the necessary css/js files for the control. See
  # http://docs.djangoproject.com/en/dev/topics/forms/media/.
  #
  # Don't forget to place {{formname.media}} into html header
  # when using this html control.
  class Media:
    css = {
      'all': ('autocomplete/jquery.autocomplete.css',)
    }
    js = (
      'autocomplete/lib/jquery.js',
      'autocomplete/lib/jquery.bgiframe.min.js',
      'autocomplete/lib/jquery.ajaxQueue.js',
      'autocomplete/jquery.autocomplete.js'
    )

  def render(self, name, value, attrs=None):
    """Render the plain <input> plus a script wiring up autocompletion.

    The script is only emitted for signed-in users; suggestions are
    fetched from the 'account' view.  Pass attrs={'multiple': False} to
    restrict the field to a single address.
    """
    output = super(AccountInput, self).render(name, value, attrs)
    if models.Account.current_user_account is not None:
      # TODO(anatoli): move this into .js media for this form
      data = {'name': name, 'url': reverse(account),
              'multiple': 'true'}
      if self.attrs.get('multiple', True) == False:
        data['multiple'] = 'false'
      output += mark_safe(u'''
      <script type="text/javascript">
          jQuery("#id_%(name)s").autocomplete("%(url)s", {
          max: 10,
          highlight: false,
          multiple: %(multiple)s,
          multipleSeparator: ", ",
          scroll: true,
          scrollHeight: 300,
          matchContains: true,
          formatResult : function(row) {
          return row[0].replace(/ .+/gi, '');
          }
          });
      </script>''' % data)
    return output
class IssueBaseForm(forms.Form):
  """Fields shared by the new-issue and edit-issue forms."""

  subject = forms.CharField(max_length=100,
                            widget=forms.TextInput(attrs={'size': 60}))
  description = forms.CharField(required=False,
                                max_length=10000,
                                widget=forms.Textarea(attrs={'cols': 60}))
  branch = forms.ChoiceField(required=False, label='Base URL')
  base = forms.CharField(required=False,
                         max_length=1000,
                         widget=forms.TextInput(attrs={'size': 60}))
  reviewers = forms.CharField(required=False,
                              max_length=1000,
                              widget=AccountInput(attrs={'size': 60}))
  cc = forms.CharField(required=False,
                      max_length=2000,
                      label = 'CC',
                      widget=AccountInput(attrs={'size': 60}))
  private = forms.BooleanField(required=False, initial=False)

  def set_branch_choices(self, base=None):
    """Populate the 'branch' dropdown from Branch entities.

    If 'base' matches a branch URL, that branch is preselected.  Branch
    entities missing the denormalized repo_name get it backfilled here.
    """
    branches = models.Branch.all()
    bound_field = self['branch']
    choices = []
    default = None
    for b in branches:
      if not b.repo_name:
        # Backfill the denormalized repository name on first sight.
        b.repo_name = b.repo.name
        b.put()
      pair = (b.key(), '%s - %s - %s' % (b.repo_name, b.category, b.name))
      choices.append(pair)
      if default is None and (base is None or b.url == base):
        default = b.key()
    choices.sort(key=lambda pair: pair[1].lower())
    choices.insert(0, ('', '[See Base]'))
    bound_field.field.choices = choices
    if default is not None:
      self.initial['branch'] = default

  def get_base(self):
    """Return the base URL from the 'base' field or the chosen branch.

    Records a form error and returns None if neither yields a value.
    """
    base = self.cleaned_data.get('base')
    if not base:
      key = self.cleaned_data['branch']
      if key:
        branch = models.Branch.get(key)
        if branch is not None:
          base = branch.url
    if not base:
      self.errors['base'] = ['You must specify a base']
    return base or None
class NewForm(IssueBaseForm):
  """Form for creating a new issue via the web UI."""

  # The diff can either be uploaded as a file or fetched from a URL.
  data = forms.FileField(required=False)
  url = forms.URLField(required=False,
                       max_length=2083,
                       widget=forms.TextInput(attrs={'size': 60}))
  send_mail = forms.BooleanField(required=False, initial=True)
class AddForm(forms.Form):
  """Form for adding another patchset to an existing issue."""

  message = forms.CharField(max_length=100,
                            widget=forms.TextInput(attrs={'size': 60}))
  # The diff can either be uploaded as a file or fetched from a URL.
  data = forms.FileField(required=False)
  url = forms.URLField(required=False,
                       max_length=2083,
                       widget=forms.TextInput(attrs={'size': 60}))
  reviewers = forms.CharField(max_length=1000, required=False,
                              widget=AccountInput(attrs={'size': 60}))
  send_mail = forms.BooleanField(required=False, initial=True)
class UploadForm(forms.Form):
  """Form used by upload.py to create an issue or add a patchset."""

  subject = forms.CharField(max_length=100)
  content_upload = forms.BooleanField(required=False)
  separate_patches = forms.BooleanField(required=False)
  base = forms.CharField(max_length=2000, required=False)
  data = forms.FileField(required=False)
  issue = forms.IntegerField(required=False)
  # NOTE: 'description' used to be declared twice with identical arguments;
  # the later duplicate silently overrode the first.  The surviving
  # declaration keeps the later position so Django's creation-counter
  # based field ordering is unchanged.
  description = forms.CharField(max_length=10000, required=False)
  reviewers = forms.CharField(max_length=1000, required=False)
  cc = forms.CharField(max_length=1000, required=False)
  private = forms.BooleanField(required=False, initial=False)
  send_mail = forms.BooleanField(required=False)
  base_hashes = forms.CharField(required=False)

  def clean_base(self):
    """Require a base URL unless file contents are uploaded directly."""
    base = self.cleaned_data.get('base')
    if not base and not self.cleaned_data.get('content_upload', False):
      raise forms.ValidationError('Base URL is required.')
    return self.cleaned_data.get('base')

  def get_base(self):
    """Return the base URL (may be empty for content uploads)."""
    return self.cleaned_data.get('base')
class UploadContentForm(forms.Form):
  """Form used by upload.py to upload the content of a single file."""

  filename = forms.CharField(max_length=255)
  status = forms.CharField(required=False, max_length=20)
  checksum = forms.CharField(max_length=32)
  file_too_large = forms.BooleanField(required=False)
  is_binary = forms.BooleanField(required=False)
  is_current = forms.BooleanField(required=False)

  def clean(self):
    """Check presence of 'data'.  We cannot use FileField because
    it disallows empty files.
    """
    super(UploadContentForm, self).clean()
    # Bug fix: the old test was `not self.files and 'data' not in
    # self.files`, which could never fire once ANY file was uploaded, so
    # a post missing the 'data' key escaped validation and later raised
    # KeyError in get_uploaded_content().  Checking for the key alone
    # covers the empty-files case as well.
    if 'data' not in self.files:
      raise forms.ValidationError('No content uploaded.')
    return self.cleaned_data

  def get_uploaded_content(self):
    """Return the raw bytes of the uploaded file."""
    return self.files['data'].read()
class UploadPatchForm(forms.Form):
  """Form used by upload.py to upload one patch of a patchset."""

  filename = forms.CharField(max_length=255)
  content_upload = forms.BooleanField(required=False)

  def get_uploaded_patch(self):
    # The raw patch bytes are posted under the 'data' key.
    return self.files['data'].read()
class EditForm(IssueBaseForm):
  """Form for editing an issue that has a repository base."""

  closed = forms.BooleanField(required=False)
class EditLocalBaseForm(forms.Form):
  """Form for editing an issue whose base is local (content was uploaded).

  Unlike EditForm there is no branch/base field; get_base() always
  returns None.
  """

  subject = forms.CharField(max_length=100,
                            widget=forms.TextInput(attrs={'size': 60}))
  description = forms.CharField(required=False,
                                max_length=10000,
                                widget=forms.Textarea(attrs={'cols': 60}))
  reviewers = forms.CharField(required=False,
                              max_length=1000,
                              widget=AccountInput(attrs={'size': 60}))
  cc = forms.CharField(required=False,
                      max_length=1000,
                      label = 'CC',
                      widget=AccountInput(attrs={'size': 60}))
  private = forms.BooleanField(required=False, initial=False)
  closed = forms.BooleanField(required=False)

  def get_base(self):
    # Local base: there is never a base URL to report.
    return None
class RepoForm(djangoforms.ModelForm):
  """ModelForm for Repository entities; 'owner' is filled in by the view."""

  class Meta:
    model = models.Repository
    exclude = ['owner']
class BranchForm(djangoforms.ModelForm):
  """ModelForm for Branch entities.

  'owner' is filled in by the view; 'repo_name' is denormalized from the
  repo elsewhere.
  """

  class Meta:
    model = models.Branch
    exclude = ['owner', 'repo_name']
class PublishForm(forms.Form):
  """Form for publishing drafted comments (issue owner's variant)."""

  subject = forms.CharField(max_length=100,
                            widget=forms.TextInput(attrs={'size': 60}))
  reviewers = forms.CharField(required=False,
                              max_length=1000,
                              widget=AccountInput(attrs={'size': 60}))
  cc = forms.CharField(required=False,
                      max_length=1000,
                      label = 'CC',
                      widget=AccountInput(attrs={'size': 60}))
  send_mail = forms.BooleanField(required=False)
  message = forms.CharField(required=False,
                            max_length=10000,
                            widget=forms.Textarea(attrs={'cols': 60}))
  # Hidden flags driven by the UI rather than typed by the user.
  message_only = forms.BooleanField(required=False,
                                    widget=forms.HiddenInput())
  no_redirect = forms.BooleanField(required=False,
                                   widget=forms.HiddenInput())
class MiniPublishForm(forms.Form):
  """Form for publishing drafted comments (non-owner variant, no subject)."""

  reviewers = forms.CharField(required=False,
                              max_length=1000,
                              widget=AccountInput(attrs={'size': 60}))
  cc = forms.CharField(required=False,
                      max_length=1000,
                      label = 'CC',
                      widget=AccountInput(attrs={'size': 60}))
  send_mail = forms.BooleanField(required=False)
  message = forms.CharField(required=False,
                            max_length=10000,
                            widget=forms.Textarea(attrs={'cols': 60}))
  # Hidden flags driven by the UI rather than typed by the user.
  message_only = forms.BooleanField(required=False,
                                    widget=forms.HiddenInput())
  no_redirect = forms.BooleanField(required=False,
                                   widget=forms.HiddenInput())
# Choices for the 'context' dropdown: each configured context size plus a
# "Whole file" option represented by the empty string.
FORM_CONTEXT_VALUES = [(x, '%d lines' % x) for x in models.CONTEXT_CHOICES]
FORM_CONTEXT_VALUES.append(('', 'Whole file'))
class SettingsForm(forms.Form):
  """Per-account settings: nickname, diff context, column width, and
  notification preferences."""

  nickname = forms.CharField(max_length=30)
  context = forms.IntegerField(
      widget=forms.Select(choices=FORM_CONTEXT_VALUES),
      required=False,
      label='Context')
  column_width = forms.IntegerField(initial=engine.DEFAULT_COLUMN_WIDTH,
                                    min_value=engine.MIN_COLUMN_WIDTH,
                                    max_value=engine.MAX_COLUMN_WIDTH)
  notify_by_email = forms.BooleanField(required=False,
                                       widget=forms.HiddenInput())
  notify_by_chat = forms.BooleanField(
      required=False,
      help_text='You must accept the invite for this to work.')

  def clean_nickname(self):
    """Validate the nickname field; raise ValidationError on any problem."""
    nickname = self.cleaned_data.get('nickname')
    # Check for allowed characters
    match = re.match(r'[\w\.\-_\(\) ]+$', nickname, re.UNICODE|re.IGNORECASE)
    if not match:
      raise forms.ValidationError('Allowed characters are letters, digits, '
                                  '".-_()" and spaces.')
    # Check for sane whitespaces
    if re.search(r'\s{2,}', nickname):
      raise forms.ValidationError('Use single spaces between words.')
    if len(nickname) != len(nickname.strip()):
      raise forms.ValidationError('Leading and trailing whitespaces are '
                                  'not allowed.')
    # 'me' is reserved (rejected outright).
    if nickname.lower() == 'me':
      raise forms.ValidationError('Choose a different nickname.')
    # Look for existing nicknames; the current account may keep its own.
    accounts = list(models.Account.gql('WHERE lower_nickname = :1',
                                       nickname.lower()))
    for account in accounts:
      if account.key() == models.Account.current_user_account.key():
        continue
      raise forms.ValidationError('This nickname is already in use.')
    return nickname
class SearchForm(forms.Form):
  """Issue search parameters: output format, paging cursor/limit, and
  owner/reviewer/base/closed/private filters."""

  format = forms.ChoiceField(
      required=False,
      choices=(
          ('html', 'html'),
          ('json', 'json')),
      widget=forms.HiddenInput(attrs={'value': 'html'}))
  keys_only = forms.BooleanField(
      required=False,
      widget=forms.HiddenInput(attrs={'value': 'False'}))
  with_messages = forms.BooleanField(
      required=False,
      widget=forms.HiddenInput(attrs={'value': 'False'}))
  cursor = forms.CharField(
      required=False,
      widget=forms.HiddenInput(attrs={'value': ''}))
  limit = forms.IntegerField(
      required=False,
      min_value=1,
      max_value=1000,
      initial=10,
      widget=forms.HiddenInput(attrs={'value': '10'}))
  closed = forms.NullBooleanField(required=False)
  owner = forms.CharField(required=False,
                          max_length=1000,
                          widget=AccountInput(attrs={'size': 60,
                                                     'multiple': False}))
  reviewer = forms.CharField(required=False,
                             max_length=1000,
                             widget=AccountInput(attrs={'size': 60,
                                                        'multiple': False}))
  base = forms.CharField(required=False, max_length=550)
  private = forms.NullBooleanField(required=False)

  def _clean_accounts(self, key):
    """Cleans up autocomplete field.

    The input is validated to be zero or one name/email and it's
    validated that the users exists.

    Args:
      key: the field name.

    Returns an User instance or raises ValidationError.
    """
    # Split a comma-separated list and drop empty pieces.
    accounts = filter(None,
                      (x.strip()
                       for x in self.cleaned_data.get(key, '').split(',')))
    if len(accounts) > 1:
      raise forms.ValidationError('Only one user name is allowed.')
    elif not accounts:
      return None
    account = accounts[0]
    # '@' distinguishes an email address from a nickname.
    if '@' in account:
      acct = models.Account.get_account_for_email(account)
    else:
      acct = models.Account.get_account_for_nickname(account)
    if not acct:
      raise forms.ValidationError('Unknown user')
    return acct.user

  def clean_owner(self):
    return self._clean_accounts('owner')

  def clean_reviewer(self):
    # Note: implicitly returns None when no reviewer was supplied.
    user = self._clean_accounts('reviewer')
    if user:
      return user.email()
### Exceptions ###
class InvalidIncomingEmailError(Exception):
  """Exception raised by incoming mail handler when a problem occurs.

  Carries no extra state; used purely as a signal.
  """
### Helper functions ###
# Counter displayed (by respond() below) on every page showing how
# many requests the current incarnation has handled, not counting
# redirects.  Rendered by templates/base.html.
counter = 0
def respond(request, template, params=None):
"""Helper to render a response, passing standard stuff to the response.
Args:
request: The request object.
template: The template name; '.html' is appended automatically.
params: A dict giving the template parameters; modified in-place.
Returns:
Whatever render_to_response(template, params) returns.
Raises:
Whatever render_to_response(template, params) raises.
"""
global counter
counter += 1
if params is None:
params = {}
must_choose_nickname = False
uploadpy_hint = False
if request.user is not None:
account = models.Account.current_user_account
must_choose_nickname = not account.user_has_selected_nickname()
uploadpy_hint = account.uploadpy_hint
params['request'] = request
params['counter'] = counter
params['user'] = request.user
params['is_admin'] = request.user_is_admin
params['is_dev'] = IS_DEV
params['media_url'] = django_settings.MEDIA_URL
full_path = request.get_full_path().encode('utf-8')
if request.user is None:
params['sign_in'] = users.create_login_url(full_path)
else:
params['sign_out'] = users.create_logout_url(full_path)
account = models.Account.current_user_account
if account is not None:
params['xsrf_token'] = account.get_xsrf_token()
params['must_choose_nickname'] = must_choose_nickname
params['uploadpy_hint'] = uploadpy_hint
params['rietveld_revision'] = django_settings.RIETVELD_REVISION
try:
return render_to_response(template, params,
context_instance=RequestContext(request))
except DeadlineExceededError:
logging.exception('DeadlineExceededError')
return HttpResponse('DeadlineExceededError', status=503)
except apiproxy_errors.CapabilityDisabledError, err:
logging.exception('CapabilityDisabledError: %s', err)
return HttpResponse('Rietveld: App Engine is undergoing maintenance. '
'Please try again in a while. ' + str(err),
status=503)
except MemoryError:
logging.exception('MemoryError')
return HttpResponse('MemoryError', status=503)
except AssertionError:
logging.exception('AssertionError')
return HttpResponse('AssertionError')
finally:
library.user_cache.clear() # don't want this sticking around
def _random_bytes(n):
  """Helper returning a string of random bytes of given length."""
  return ''.join(chr(random.randrange(256)) for _ in xrange(n))
def _clean_int(value, default, min_value=None, max_value=None):
  """Helper to cast value to int and to clip it to min or max_value.

  Args:
    value: Any value (preferably something that can be cast to int).
    default: Default value to be used when type casting fails.
    min_value: Minimum allowed value (default: None).
    max_value: Maximum allowed value (default: None).

  Returns:
    An integer between min_value and max_value.
  """
  if not isinstance(value, (int, long)):
    try:
      value = int(value)
    except (TypeError, ValueError):
      # Unparseable input (e.g. None or a garbage query parameter);
      # the bound exception name was unused, so it is no longer captured.
      value = default
  if min_value is not None:
    value = max(min_value, value)
  if max_value is not None:
    value = min(value, max_value)
  return value
def _can_view_issue(user, issue):
if user is None:
return not issue.private
user_email = db.Email(user.email().lower())
return (not issue.private
or issue.owner == user
or user_email in issue.cc
or user_email in issue.reviewers)
def _notify_issue(request, issue, message):
  """Try sending an XMPP (chat) message.

  Args:
    request: The request object.
    issue: Issue whose owner, reviewers, CC are to be notified.
    message: Text of message to send, e.g. 'Created'.

  The current user and the issue's subject and URL are appended to the message.

  Returns:
    True if the message was (apparently) delivered, False if not.
  """
  iid = issue.key().id()
  # Gather everyone attached to the issue: owner, reviewers, CC.
  emails = [issue.owner.email()]
  if issue.reviewers:
    emails.extend(issue.reviewers)
  if issue.cc:
    emails.extend(issue.cc)
  accounts = models.Account.get_multiple_accounts_by_email(emails)
  # Keep only accounts that opted in to chat notifications.
  jids = []
  for account in accounts.itervalues():
    logging.debug('email=%r,chat=%r', account.email, account.notify_by_chat)
    if account.notify_by_chat:
      jids.append(account.email)
  if not jids:
    logging.debug('No XMPP jids to send to for issue %d', iid)
    return True  # Nothing to do.
  jids_str = ', '.join(jids)
  logging.debug('Sending XMPP for issue %d to %s', iid, jids_str)
  # Prefer the account nickname as sender; fall back to the raw email.
  sender = '?'
  if models.Account.current_user_account:
    sender = models.Account.current_user_account.nickname
  elif request.user:
    sender = request.user.email()
  message = '%s by %s: %s\n%s' % (message,
                                  sender,
                                  issue.subject,
                                  request.build_absolute_uri(
                                      reverse(show, args=[iid])))
  try:
    sts = xmpp.send_message(jids, message)
  except Exception, err:
    logging.exception('XMPP exception %s sending for issue %d to %s',
                      err, iid, jids_str)
    return False
  else:
    # Success only when every recipient reported NO_ERROR (the comparison
    # expects one status code per jid).
    if sts == [xmpp.NO_ERROR] * len(jids):
      logging.info('XMPP message sent for issue %d to %s', iid, jids_str)
      return True
    else:
      logging.error('XMPP error %r sending for issue %d to %s',
                    sts, iid, jids_str)
      return False
### Decorators for request handlers ###
def post_required(func):
  """Decorator that returns an error unless request.method == 'POST'."""
  def wrapper(request, *args, **kwds):
    if request.method == 'POST':
      return func(request, *args, **kwds)
    return HttpResponse('This requires a POST request.', status=405)
  return wrapper
def login_required(func):
  """Decorator that redirects to the login page if you're not logged in."""
  def wrapper(request, *args, **kwds):
    if request.user is not None:
      return func(request, *args, **kwds)
    return HttpResponseRedirect(
        users.create_login_url(request.get_full_path().encode('utf-8')))
  return wrapper
def xsrf_required(func):
  """Decorator to check XSRF token.

  This only checks if the method is POST; it lets other method go
  through unchallenged.  Apply after @login_required and (if
  applicable) @post_required.  This decorator is mutually exclusive
  with @upload_required.
  """
  def wrapper(request, *args, **kwds):
    if request.method != 'POST':
      # Only POSTs are challenged.
      return func(request, *args, **kwds)
    supplied = request.POST.get('xsrf_token')
    if not supplied:
      return HttpResponse('Missing XSRF token.', status=403)
    account = models.Account.current_user_account
    if not account:
      return HttpResponse('Must be logged in for XSRF check.', status=403)
    if supplied != account.get_xsrf_token():
      # Grace period: the previous hour's token is still accepted.
      if supplied != account.get_xsrf_token(-1):
        return HttpResponse('Invalid XSRF token.', status=403)
    return func(request, *args, **kwds)
  return wrapper
def upload_required(func):
  """Decorator for POST requests from the upload.py script.

  Currently a pass-through that exists for documentation; the intent
  (per the original note) is to eventually demand a special header that
  JavaScript cannot add, preventing XSRF on these URLs.  Mutually
  exclusive with @xsrf_required.
  """
  return func
def admin_required(func):
  """Decorator that insists the caller is logged in as an administrator."""
  def wrapper(request, *args, **kwds):
    if request.user is not None:
      if request.user_is_admin:
        return func(request, *args, **kwds)
      return HttpResponseForbidden('You must be admin in for this function')
    return HttpResponseRedirect(
        users.create_login_url(request.get_full_path().encode('utf-8')))
  return wrapper
def issue_required(func):
  """Decorator that processes the issue_id handler argument.

  Loads the issue, enforces visibility of private issues, and stores it
  on request.issue before calling through.
  """
  def wrapper(request, issue_id, *args, **kwds):
    issue = models.Issue.get_by_id(int(issue_id))
    if issue is None:
      return HttpResponseNotFound('No issue exists with that id (%s)' %
                                  issue_id)
    if issue.private and request.user is None:
      # Anonymous viewers of a private issue are sent to login first.
      return HttpResponseRedirect(
          users.create_login_url(request.get_full_path().encode('utf-8')))
    if issue.private and not _can_view_issue(request.user, issue):
      return HttpResponseForbidden('You do not have permission to '
                                   'view this issue')
    request.issue = issue
    return func(request, *args, **kwds)
  return wrapper
def user_key_required(func):
  """Decorator that processes the user handler argument.

  Resolves a URL-quoted email or nickname into request.user_to_show.
  """
  def wrapper(request, user_key, *args, **kwds):
    key = urllib.unquote(user_key)
    if '@' in key:
      # An email address identifies the user directly.
      request.user_to_show = users.User(key)
      return func(request, *args, **kwds)
    account = models.Account.get_account_for_nickname(key)
    if not account:
      logging.info("account not found for nickname %s" % key)
      return HttpResponseNotFound('No user found with that key (%s)' %
                                  urllib.quote(key))
    request.user_to_show = account.user
    return func(request, *args, **kwds)
  return wrapper
def owner_required(func):
  """Decorator that insists you own the issue.

  It must appear after issue_required or equivalent, like patchset_required.
  """
  @login_required
  def wrapper(request, *args, **kwds):
    if request.issue.owner == request.user:
      return func(request, *args, **kwds)
    return HttpResponseForbidden('You do not own this issue')
  return wrapper
def issue_owner_required(func):
  """Decorator that processes the issue_id argument and insists you own it."""
  @issue_required
  @owner_required
  def wrapper(request, *args, **kwds):
    # Both checks happen in the stacked decorators above.
    return func(request, *args, **kwds)
  return wrapper
def issue_editor_required(func):
  """Decorator that processes the issue_id argument and insists the user has
  permission to edit it."""
  @login_required
  @issue_required
  def wrapper(request, *args, **kwds):
    if request.issue.user_can_edit(request.user):
      return func(request, *args, **kwds)
    return HttpResponseForbidden('You do not have permission to '
                                 'edit this issue')
  return wrapper
def patchset_required(func):
  """Decorator that processes the patchset_id argument.

  Loads the patch set (child of request.issue) onto request.patchset.
  """
  @issue_required
  def wrapper(request, patchset_id, *args, **kwds):
    ps = models.PatchSet.get_by_id(int(patchset_id), parent=request.issue)
    if ps is None:
      return HttpResponseNotFound('No patch set exists with that id (%s)' %
                                  patchset_id)
    # Back-reference the issue so templates/handlers can navigate upward.
    ps.issue = request.issue
    request.patchset = ps
    return func(request, *args, **kwds)
  return wrapper
def patchset_owner_required(func):
  """Decorator that processes the patchset_id argument and insists you own the
  issue."""
  @patchset_required
  @owner_required
  def wrapper(request, *args, **kwds):
    # Both checks happen in the stacked decorators above.
    return func(request, *args, **kwds)
  return wrapper
def patch_required(func):
  """Decorator that processes the patch_id argument.

  Loads the patch (child of request.patchset) onto request.patch.
  """
  @patchset_required
  def wrapper(request, patch_id, *args, **kwds):
    found = models.Patch.get_by_id(int(patch_id), parent=request.patchset)
    if found is None:
      return HttpResponseNotFound('No patch exists with that id (%s/%s)' %
                                  (request.patchset.key().id(), patch_id))
    # Back-reference the patch set for upward navigation.
    found.patchset = request.patchset
    request.patch = found
    return func(request, *args, **kwds)
  return wrapper
def patch_filename_required(func):
  """Decorator that processes the patch_id argument.

  Looks the patch up by filename within request.patchset; falls back to
  a numeric patch ID for old-style URLs; renders diff_missing.html when
  neither resolves.
  """
  @patchset_required
  def patch_wrapper(request, patch_filename, *args, **kwds):
    patch = models.Patch.gql('WHERE patchset = :1 AND filename = :2',
                             request.patchset, patch_filename).get()
    if patch is None and patch_filename.isdigit():
      # It could be an old URL which has a patch ID instead of a filename
      patch = models.Patch.get_by_id(int(patch_filename),
                                     parent=request.patchset)
    if patch is None:
      return respond(request, 'diff_missing.html',
                     {'issue': request.issue,
                      'patchset': request.patchset,
                      'patch': None,
                      'patchsets': request.issue.patchset_set,
                      'filename': patch_filename})
    patch.patchset = request.patchset
    request.patch = patch
    return func(request, *args, **kwds)
  return patch_wrapper
def image_required(func):
  """Decorator that processes the image argument.

  Attributes set on the request:
    content: a Content entity.
  """
  @patch_required
  def wrapper(request, image_type, *args, **kwds):
    if image_type == "0":
      content = request.patch.content
    elif image_type == "1":
      content = request.patch.patched_content
    else:
      # Unknown image types carry no content and fall through to the
      # placeholder redirect below.
      content = None
    if not content or not content.data:
      return HttpResponseRedirect(django_settings.MEDIA_URL + "blank.jpg")
    request.content = content
    return func(request, *args, **kwds)
  return wrapper
def json_response(func):
  """Decorator that converts into JSON any returned value that is not an
  HttpResponse. It handles `pretty` URL parameter to tune JSON response for
  either performance or readability."""
  def wrapper(request, *args, **kwds):
    result = func(request, *args, **kwds)
    if isinstance(result, HttpResponse):
      # Already a full response; pass it through untouched.
      return result
    pretty = request.REQUEST.get('pretty', '0').lower() in ('1', 'true', 'on')
    if pretty:
      payload = simplejson.dumps(result, indent='  ', sort_keys=True)
    else:
      # Compact separators for minimal payload size.
      payload = simplejson.dumps(result, separators=(',', ':'))
    return HttpResponse(payload, content_type='application/json')
  return wrapper
### Request handlers ###
def index(request):
  """/ - Show a list of patches."""
  # Anonymous visitors get the public listing; signed-in users get theirs.
  handler = all if request.user is None else mine
  return handler(request)
# Default page size used by the paginated issue listings below.
DEFAULT_LIMIT = 10
def _url(path, **kwargs):
"""Format parameters for query string.
Args:
path: Path of URL.
kwargs: Keyword parameters are treated as values to add to the query
parameter of the URL. If empty no query parameters will be added to
path and '?' omitted from the URL.
"""
if kwargs:
encoded_parameters = urllib.urlencode(kwargs)
if path.endswith('?'):
# Trailing ? on path. Append parameters to end.
return '%s%s' % (path, encoded_parameters)
elif '?' in path:
# Append additional parameters to existing query parameters.
return '%s&%s' % (path, encoded_parameters)
else:
# Add query parameters to path with no query parameters.
return '%s?%s' % (path, encoded_parameters)
else:
return path
def _inner_paginate(request, issues, template, extra_template_params):
  """Display paginated list of issues.

  Takes care of the private bit.

  Args:
    request: Request containing offset and limit parameters.
    issues: Issues to be displayed.
    template: Name of template that renders issue page.
    extra_template_params: Dictionary of extra parameters to pass to page
      rendering.

  Returns:
    Response for sending back to browser.
  """
  # Filter out issues the viewer may not see before any further work.
  visible = [issue for issue in issues
             if _can_view_issue(request.user, issue)]
  _optimize_draft_counts(visible)
  _load_users_for_issues(visible)
  params = dict.fromkeys(('limit', 'newest', 'prev', 'next'), None)
  params.update({
      'issues': visible,
      'nexttext': '',
      'first': '',
      'last': '',
  })
  if extra_template_params:
    params.update(extra_template_params)
  return respond(request, template, params)
def _paginate_issues(page_url,
                     request,
                     query,
                     template,
                     extra_nav_parameters=None,
                     extra_template_params=None):
  """Display paginated list of issues.

  Args:
    page_url: Base URL of issue page that is being paginated.  Typically
      generated by calling 'reverse' with a name and arguments of a view
      function.
    request: Request containing offset and limit parameters.
    query: Query over issues.
    template: Name of template that renders issue page.
    extra_nav_parameters: Dictionary of extra parameters to append to the
      navigation links.
    extra_template_params: Dictionary of extra parameters to pass to page
      rendering.

  Returns:
    Response for sending back to browser.
  """
  # Offset defaults to 0; limit is clamped to [1, 100].
  offset = _clean_int(request.GET.get('offset'), 0, 0)
  limit = _clean_int(request.GET.get('limit'), DEFAULT_LIMIT, 1, 100)
  nav_parameters = {'limit': str(limit)}
  if extra_nav_parameters is not None:
    nav_parameters.update(extra_nav_parameters)
  params = {
      'limit': limit,
      'first': offset + 1,
      'nexttext': 'Older',
  }
  # Fetch one more to see if there should be a 'next' link
  issues = query.fetch(limit+1, offset)
  if len(issues) > limit:
    del issues[limit:]
    params['next'] = _url(page_url, offset=offset + limit, **nav_parameters)
  # Py2 and/or idiom: offset+len(issues) when multiple issues, else None.
  # NOTE(review): a page holding exactly one issue yields last=None —
  # confirm this is intended and not an off-by-one in the truthiness trick.
  params['last'] = len(issues) > 1 and offset+len(issues) or None
  if offset > 0:
    params['prev'] = _url(page_url, offset=max(0, offset - limit),
                          **nav_parameters)
  if offset > limit:
    params['newest'] = _url(page_url, **nav_parameters)
  if extra_template_params:
    params.update(extra_template_params)
  return _inner_paginate(request, issues, template, params)
def _paginate_issues_with_cursor(page_url,
                                 request,
                                 query,
                                 limit,
                                 template,
                                 extra_nav_parameters=None,
                                 extra_template_params=None):
  """Display paginated list of issues using a cursor instead of offset.

  Args:
    page_url: Base URL of issue page that is being paginated.  Typically
      generated by calling 'reverse' with a name and arguments of a view
      function.
    request: Request containing offset and limit parameters.
    query: Query over issues.
    limit: Maximum number of issues to return.
    template: Name of template that renders issue page.
    extra_nav_parameters: Dictionary of extra parameters to append to the
      navigation links.
    extra_template_params: Dictionary of extra parameters to pass to page
      rendering.

  Returns:
    Response for sending back to browser.
  """
  issues = query.fetch(limit)
  nav_parameters = {}
  if extra_nav_parameters:
    nav_parameters.update(extra_nav_parameters)
  # After the fetch, the query cursor marks where the next page begins.
  nav_parameters['cursor'] = query.cursor()
  params = {
      'limit': limit,
      'cursor': nav_parameters['cursor'],
      'nexttext': 'Newer',
  }
  # Fetch one more to see if there should be a 'next' link. Do it in a separate
  # request so we have a valid cursor.
  if query.fetch(1):
    params['next'] = _url(page_url, **nav_parameters)
  if extra_template_params:
    params.update(extra_template_params)
  return _inner_paginate(request, issues, template, params)
def all(request):
  """/all - Show a list of up to DEFAULT_LIMIT recent issues.

  Note: shadows the builtin all() by design — the name is referenced by
  reverse() and index(), so it cannot be renamed here.
  """
  closed = request.GET.get('closed') or ''
  nav_parameters = {}
  if closed:
    # Closed view: include closed issues, keep the flag in the nav links.
    nav_parameters['closed'] = '1'
    query = db.GqlQuery('SELECT * FROM Issue '
                        'WHERE private = FALSE '
                        'ORDER BY modified DESC')
  else:
    query = db.GqlQuery('SELECT * FROM Issue '
                        'WHERE closed = FALSE AND private = FALSE '
                        'ORDER BY modified DESC')
  return _paginate_issues(reverse(all),
                          request,
                          query,
                          'all.html',
                          extra_nav_parameters=nav_parameters,
                          extra_template_params=dict(closed=closed))
def _optimize_draft_counts(issues):
  """Force _num_drafts to zero for issues that are known to have no drafts.

  Args:
    issues: list of model.Issue instances.

  This inspects the drafts attribute of the current user's Account
  instance, and forces the draft count to zero of those issues in the
  list that aren't mentioned there.

  If there is no current user, all draft counts are forced to 0.
  """
  account = models.Account.current_user_account
  issue_ids = account.drafts if account is not None else None
  for issue in issues:
    if issue_ids is None or issue.key().id() not in issue_ids:
      issue._num_drafts = 0
@login_required
def mine(request):
  """/mine - Show a list of issues created by the current user."""
  # Reuse the generic dashboard renderer with the signed-in user as subject.
  request.user_to_show = request.user
  return _show_user(request)
@login_required
def starred(request):
  """/starred - Show a list of issues starred by the current user."""
  stars = models.Account.current_user_account.stars
  issues = []
  if stars:
    # Drop stars pointing at deleted issues or ones the viewer may not see.
    issues = [issue for issue in models.Issue.get_by_id(stars)
              if issue is not None
              and _can_view_issue(request.user, issue)]
  _load_users_for_issues(issues)
  _optimize_draft_counts(issues)
  return respond(request, 'starred.html', {'issues': issues})
def _load_users_for_issues(issues):
  """Load all user links for a list of issues in one go."""
  email_counts = {}
  for issue in issues:
    for email in issue.reviewers + issue.cc + [issue.owner.email()]:
      # keeping a count lets you track total vs. distinct if you want
      email_counts[email] = email_counts.get(email, 0) + 1
  library.get_links_for_users(email_counts.keys())
@user_key_required
def show_user(request):
  """/user - Show the user's dashboard"""
  # user_key_required has already resolved request.user_to_show.
  return _show_user(request)
def _show_user(request):
  """Render the dashboard (user.html) for request.user_to_show.

  Collects up to 100 each of: open issues owned by the user, open issues
  the user reviews, issues closed within the last week, issues where the
  user is CC'd, and — only when viewing one's own dashboard — issues
  carrying the viewer's draft comments.
  """
  user = request.user_to_show
  if user == request.user:
    # Find issues on which the viewer has draft comments; these are
    # listed separately and excluded from the other lists below.
    query = models.Comment.all().filter('draft =', True)
    query = query.filter('author =', request.user).fetch(100)
    # Three parent hops from a comment key — presumably
    # comment -> patch -> patchset -> issue; confirm against models.
    draft_keys = set(d.parent_key().parent().parent() for d in query)
    draft_issues = models.Issue.get(draft_keys)
  else:
    draft_issues = draft_keys = []
  my_issues = [
      issue for issue in db.GqlQuery(
          'SELECT * FROM Issue '
          'WHERE closed = FALSE AND owner = :1 '
          'ORDER BY modified DESC '
          'LIMIT 100',
          user)
      if issue.key() not in draft_keys and _can_view_issue(request.user, issue)]
  review_issues = [
      issue for issue in db.GqlQuery(
          'SELECT * FROM Issue '
          'WHERE closed = FALSE AND reviewers = :1 '
          'ORDER BY modified DESC '
          'LIMIT 100',
          user.email().lower())
      if (issue.key() not in draft_keys and issue.owner != user
          and _can_view_issue(request.user, issue))]
  closed_issues = [
      issue for issue in db.GqlQuery(
          'SELECT * FROM Issue '
          'WHERE closed = TRUE AND modified > :1 AND owner = :2 '
          'ORDER BY modified DESC '
          'LIMIT 100',
          datetime.datetime.now() - datetime.timedelta(days=7),
          user)
      if issue.key() not in draft_keys and _can_view_issue(request.user, issue)]
  cc_issues = [
      issue for issue in db.GqlQuery(
          'SELECT * FROM Issue '
          'WHERE closed = FALSE AND cc = :1 '
          'ORDER BY modified DESC '
          'LIMIT 100',
          user.email())
      if (issue.key() not in draft_keys and issue.owner != user
          and _can_view_issue(request.user, issue))]
  all_issues = my_issues + review_issues + closed_issues + cc_issues
  _load_users_for_issues(all_issues)
  _optimize_draft_counts(all_issues)
  return respond(request, 'user.html',
                 {'email': user.email(),
                  'my_issues': my_issues,
                  'review_issues': review_issues,
                  'closed_issues': closed_issues,
                  'cc_issues': cc_issues,
                  'draft_issues': draft_issues,
                  })
@login_required
@xsrf_required
def new(request):
  """/new - Upload a new patch set.

  GET shows a blank form, POST processes it.
  """
  if request.method == 'POST':
    form = NewForm(request.POST, request.FILES)
    form.set_branch_choices()
    issue = _make_new(request, form)
    if issue is not None:
      return HttpResponseRedirect(reverse(show, args=[issue.key().id()]))
    # Fall through: re-render the form with its validation errors.
  else:
    form = NewForm()
    form.set_branch_choices()
  return respond(request, 'new.html', {'form': form})
@login_required
@xsrf_required
def use_uploadpy(request):
  """Show an intermediate page about upload.py."""
  if request.method != 'POST':
    return respond(request, 'use_uploadpy.html')
  if 'disable_msg' in request.POST:
    # Persist the user's choice to stop seeing the hint.
    models.Account.current_user_account.uploadpy_hint = False
    models.Account.current_user_account.put()
  if 'download' in request.POST:
    target = reverse(customized_upload_py)
  else:
    target = reverse(new)
  return HttpResponseRedirect(target)
@post_required
@upload_required
def upload(request):
  """/upload - Like new() or add(), but from the upload.py script.

  This generates a text/plain response.
  """
  if request.user is None:
    if IS_DEV:
      # Dev server: trust a user supplied in the POST for testing.
      request.user = users.User(request.POST.get('user', 'test@example.com'))
    else:
      return HttpResponse('Login required', status=401)
  # Check against old upload.py usage.
  # NOTE(review): POST values are strings; under Python 2 a str always
  # compares greater than an int, so any request still sending num_parts
  # is rejected here — confirm that is the intended behavior.
  if request.POST.get('num_parts') > 1:
    return HttpResponse('Upload.py is too old, get the latest version.',
                        content_type='text/plain')
  form = UploadForm(request.POST, request.FILES)
  issue = None
  patchset = None
  if form.is_valid():
    issue_id = form.cleaned_data['issue']
    if issue_id:
      # An issue id means a new patch set on an existing issue.
      action = 'updated'
      issue = models.Issue.get_by_id(issue_id)
      if issue is None:
        form.errors['issue'] = ['No issue exists with that id (%s)' %
                                issue_id]
      elif issue.local_base and not form.cleaned_data.get('content_upload'):
        form.errors['issue'] = ['Base files upload required for that issue.']
        issue = None
      else:
        if request.user != issue.owner:
          form.errors['user'] = ['You (%s) don\'t own this issue (%s)' %
                                 (request.user, issue_id)]
          issue = None
        else:
          patchset = _add_patchset_from_form(request, issue, form, 'subject',
                                             emails_add_only=True)
          if not patchset:
            issue = None
    else:
      action = 'created'
      issue = _make_new(request, form)
      if issue is not None:
        patchset = issue.patchset
  if issue is None:
    msg = 'Issue creation errors: %s' % repr(form.errors)
  else:
    msg = ('Issue %s. URL: %s' %
           (action,
            request.build_absolute_uri(
                reverse('show_bare_issue_number', args=[issue.key().id()]))))
    if (form.cleaned_data.get('content_upload') or
        form.cleaned_data.get('separate_patches')):
      # Extend the response message: 2nd line is patchset id.
      msg += "\n%d" % patchset.key().id()
      if form.cleaned_data.get('content_upload'):
        # Extend the response: additional lines are the expected filenames.
        issue.local_base = True
        issue.put()
        # base_hashes arrives as "checksum:filename|checksum:filename|...".
        base_hashes = {}
        for file_info in form.cleaned_data.get('base_hashes').split("|"):
          if not file_info:
            break
          checksum, filename = file_info.split(":", 1)
          base_hashes[filename] = checksum
        content_entities = []
        new_content_entities = []
        patches = list(patchset.patch_set)
        existing_patches = {}
        patchsets = list(issue.patchset_set)
        if len(patchsets) > 1:
          # Only check the last uploaded patchset for speed.
          last_patch_set = patchsets[-2].patch_set
          patchsets = None  # Reduce memory usage.
          for opatch in last_patch_set:
            if opatch.content:
              existing_patches[opatch.filename] = opatch
        for patch in patches:
          content = None
          # Check if the base file is already uploaded in another patchset.
          if (patch.filename in base_hashes and
              patch.filename in existing_patches and
              (base_hashes[patch.filename] ==
               existing_patches[patch.filename].content.checksum)):
            content = existing_patches[patch.filename].content
            patch.status = existing_patches[patch.filename].status
            patch.is_binary = existing_patches[patch.filename].is_binary
          if not content:
            content = models.Content(is_uploaded=True, parent=patch)
            new_content_entities.append(content)
          content_entities.append(content)
        existing_patches = None  # Reduce memory usage.
        if new_content_entities:
          db.put(new_content_entities)
        for patch, content_entity in zip(patches, content_entities):
          patch.content = content_entity
          id_string = patch.key().id()
          if content_entity not in new_content_entities:
            # Base file not needed since we reused a previous upload.  Send its
            # patch id in case it's a binary file and the new content needs to
            # be uploaded.  We mark this by prepending 'nobase' to the id.
            id_string = "nobase_" + str(id_string)
          msg += "\n%s %s" % (id_string, patch.filename)
        db.put(patches)
  return HttpResponse(msg, content_type='text/plain')
@post_required
@patch_required
@upload_required
def upload_content(request):
  """/<issue>/upload_content/<patchset>/<patch> - Upload base file contents.

  Used by upload.py to upload base files.
  """
  form = UploadContentForm(request.POST, request.FILES)
  if not form.is_valid():
    return HttpResponse('ERROR: Upload content errors:\n%s' % repr(form.errors),
                        content_type='text/plain')
  if request.user is None:
    if IS_DEV:
      # Dev server: trust a user supplied in the POST for testing.
      request.user = users.User(request.POST.get('user', 'test@example.com'))
    else:
      return HttpResponse('Error: Login required', status=401)
  if request.user != request.issue.owner:
    return HttpResponse('ERROR: You (%s) don\'t own this issue (%s).' %
                        (request.user, request.issue.key().id()))
  patch = request.patch
  patch.status = form.cleaned_data['status']
  patch.is_binary = form.cleaned_data['is_binary']
  patch.put()

  if form.cleaned_data['is_current']:
    # is_current means the post-patch ("patched") version of the file.
    if patch.patched_content:
      return HttpResponse('ERROR: Already have current content.')
    content = models.Content(is_uploaded=True, parent=patch)
    content.put()
    patch.patched_content = content
    patch.put()
  else:
    content = patch.content

  if form.cleaned_data['file_too_large']:
    content.file_too_large = True
  else:
    data = form.get_uploaded_content()
    # Verify integrity of the upload before storing it.
    checksum = md5.new(data).hexdigest()
    if checksum != request.POST.get('checksum'):
      content.is_bad = True
      content.put()
      return HttpResponse('ERROR: Checksum mismatch.',
                          content_type='text/plain')
    if patch.is_binary:
      content.data = data
    else:
      content.text = engine.ToText(engine.UnifyLinebreaks(data))
    content.checksum = checksum
  content.put()
  return HttpResponse('OK', content_type='text/plain')
@post_required
@patchset_required
@upload_required
def upload_patch(request):
  """/<issue>/upload_patch/<patchset> - Upload patch to patchset.

  Used by upload.py to upload a patch when the diff is too large to upload all
  together.
  """
  if request.user is None:
    if IS_DEV:
      # Dev server: trust a user supplied in the POST for testing.
      request.user = users.User(request.POST.get('user', 'test@example.com'))
    else:
      return HttpResponse('Error: Login required', status=401)
  if request.user != request.issue.owner:
    return HttpResponse('ERROR: You (%s) don\'t own this issue (%s).' %
                        (request.user, request.issue.key().id()))
  form = UploadPatchForm(request.POST, request.FILES)
  if not form.is_valid():
    return HttpResponse('ERROR: Upload patch errors:\n%s' % repr(form.errors),
                        content_type='text/plain')
  patchset = request.patchset
  # Per-file uploads only apply to patch sets created without inline data.
  if patchset.data:
    return HttpResponse('ERROR: Can\'t upload patches to patchset with data.',
                        content_type='text/plain')
  text = engine.ToText(engine.UnifyLinebreaks(form.get_uploaded_patch()))
  patch = models.Patch(patchset=patchset,
                       text=text,
                       filename=form.cleaned_data['filename'], parent=patchset)
  patch.put()
  if form.cleaned_data.get('content_upload'):
    # Reserve a Content entity for the base file upload that follows.
    content = models.Content(is_uploaded=True, parent=patch)
    content.put()
    patch.content = content
    patch.put()
  # Response: 'OK' then the new patch id on the second line.
  msg = 'OK\n' + str(patch.key().id())
  return HttpResponse(msg, content_type='text/plain')
class EmptyPatchSet(Exception):
  """Exception used inside _make_new() to break out of the transaction.

  Raised when the parsed patch set contains no recognizable patches.
  """
def _make_new(request, form):
  """Helper for new().

  Return a valid Issue, or None.

  On any validation problem the error is recorded in form.errors and None
  is returned; the Issue, its first PatchSet and the parsed Patch entities
  are created atomically inside one datastore transaction.
  """
  if not form.is_valid():
    return None

  data_url = _get_data_url(form)
  if data_url is None:
    # _get_data_url() already recorded the problem in form.errors.
    return None
  data, url, separate_patches = data_url

  reviewers = _get_emails(form, 'reviewers')
  if not form.is_valid() or reviewers is None:
    return None

  cc = _get_emails(form, 'cc')
  if not form.is_valid():
    return None

  base = form.get_base()
  if base is None:
    return None

  def txn():
    # Runs inside a datastore transaction; raising aborts everything.
    issue = models.Issue(subject=form.cleaned_data['subject'],
                         description=form.cleaned_data['description'],
                         base=base,
                         reviewers=reviewers,
                         cc=cc,
                         private=form.cleaned_data.get('private', False),
                         n_comments=0)
    issue.put()

    patchset = models.PatchSet(issue=issue, data=data, url=url, parent=issue)
    patchset.put()
    # In-memory link for the caller; issue itself was already put() above.
    issue.patchset = patchset

    if not separate_patches:
      patches = engine.ParsePatchSet(patchset)
      if not patches:
        raise EmptyPatchSet  # Abort the transaction
      db.put(patches)
    return issue

  try:
    issue = db.run_in_transaction(txn)
  except EmptyPatchSet:
    # Report the failure on whichever field supplied the diff.
    errkey = url and 'url' or 'data'
    form.errors[errkey] = ['Patch set contains no recognizable patches']
    return None

  if form.cleaned_data.get('send_mail'):
    msg = _make_message(request, issue, '', '', True)
    msg.put()
  _notify_issue(request, issue, 'Created')
  return issue
def _get_data_url(form):
  """Helper for _make_new() above and add() below.

  Args:
    form: Django form object.

  Returns:
    3-tuple (data, url, separate_patches).
      data: the diff content, if available.
      url: the url of the diff, if given.
      separate_patches: True iff the patches will be uploaded separately for
        each file.

    Returns None on error, after recording a message in form.errors.
  """
  cleaned_data = form.cleaned_data

  data = cleaned_data['data']
  url = cleaned_data.get('url')
  separate_patches = cleaned_data.get('separate_patches')
  # Exactly one diff source is allowed: inline data, a URL, or a promise of
  # separately uploaded patches.
  if not (data or url or separate_patches):
    form.errors['data'] = ['You must specify a URL or upload a file (< 1 MB).']
    return None
  if data and url:
    form.errors['data'] = ['You must specify either a URL or upload a file '
                           'but not both.']
    return None
  if separate_patches and (data or url):
    form.errors['data'] = ['If the patches will be uploaded separately later, '
                           'you can\'t send some data or a url.']
    return None

  if data is not None:
    # Uploaded file: normalize line endings and store as a blob.
    data = db.Blob(engine.UnifyLinebreaks(data.read()))
    url = None
  elif url:
    # Fetch the diff from the URL; any fetch problem becomes a form error
    # instead of propagating as an exception.
    try:
      fetch_result = urlfetch.fetch(url)
    except Exception, err:
      form.errors['url'] = [str(err)]
      return None
    if fetch_result.status_code != 200:
      form.errors['url'] = ['HTTP status code %s' % fetch_result.status_code]
      return None
    data = db.Blob(engine.UnifyLinebreaks(fetch_result.content))

  return data, url, separate_patches
@post_required
@issue_owner_required
@xsrf_required
def add(request):
  """/<issue>/add - Add a new PatchSet to an existing Issue."""
  issue = request.issue
  form = AddForm(request.POST, request.FILES)
  if _add_patchset_from_form(request, issue, form):
    # Success: redirect back to the issue page (POST-redirect-GET).
    return HttpResponseRedirect(reverse(show, args=[issue.key().id()]))
  # Validation failed: re-render the issue page with the bound form so the
  # user can see the errors.
  return show(request, issue.key().id(), form)
def _add_patchset_from_form(request, issue, form, message_key='message',
                            emails_add_only=False):
  """Helper for add() and upload().

  Args:
    request: Django request object.
    issue: The Issue the new PatchSet is attached to.
    form: Bound form carrying the diff (data/url) and recipients.
    message_key: Name of the form field holding the patchset message.
    emails_add_only: If True, merge reviewers/cc into the existing lists
      instead of replacing them.

  Returns:
    The new PatchSet, or None on error (recorded in form.errors).
  """
  # TODO(guido): use a transaction like in _make_new(); may be share more code?
  if form.is_valid():
    data_url = _get_data_url(form)
  if not form.is_valid():
    # Either the form was invalid from the start, or _get_data_url() found
    # a problem and recorded it in form.errors (which makes is_valid() False).
    return None
  if request.user != issue.owner:
    # This check is done at each call site but check again as a safety measure.
    return None
  data, url, separate_patches = data_url
  message = form.cleaned_data[message_key]
  patchset = models.PatchSet(issue=issue, message=message, data=data, url=url,
                             parent=issue)
  patchset.put()

  if not separate_patches:
    patches = engine.ParsePatchSet(patchset)
    if not patches:
      # Nothing recognizable in the diff: undo the patchset creation.
      patchset.delete()
      errkey = url and 'url' or 'data'
      form.errors[errkey] = ['Patch set contains no recognizable patches']
      return None
    db.put(patches)

  if emails_add_only:
    # Append-only mode: keep existing recipients, add any new ones.
    emails = _get_emails(form, 'reviewers')
    if not form.is_valid():
      return None
    issue.reviewers += [reviewer for reviewer in emails
                        if reviewer not in issue.reviewers]
    emails = _get_emails(form, 'cc')
    if not form.is_valid():
      return None
    issue.cc += [cc for cc in emails if cc not in issue.cc]
  else:
    issue.reviewers = _get_emails(form, 'reviewers')
    issue.cc = _get_emails(form, 'cc')
  issue.put()

  if form.cleaned_data.get('send_mail'):
    msg = _make_message(request, issue, message, '', True)
    msg.put()
  _notify_issue(request, issue, 'Updated')
  return patchset
def _get_emails(form, label):
"""Helper to return the list of reviewers, or None for error."""
raw_emails = form.cleaned_data.get(label)
if raw_emails:
return _get_emails_from_raw(raw_emails.split(','), form=form, label=label)
return []
def _get_emails_from_raw(raw_emails, form=None, label=None):
emails = []
for email in raw_emails:
email = email.strip()
if email:
try:
if '@' not in email:
account = models.Account.get_account_for_nickname(email)
if account is None:
raise db.BadValueError('Unknown user: %s' % email)
db_email = db.Email(account.user.email().lower())
elif email.count('@') != 1:
raise db.BadValueError('Invalid email address: %s' % email)
else:
head, tail = email.split('@')
if '.' not in tail:
raise db.BadValueError('Invalid email address: %s' % email)
db_email = db.Email(email.lower())
except db.BadValueError, err:
if form:
form.errors[label] = [unicode(err)]
return None
if db_email not in emails:
emails.append(db_email)
return emails
def _calculate_delta(patch, patchset_id, patchsets):
"""Calculates which files in earlier patchsets this file differs from.
Args:
patch: The file to compare.
patchset_id: The file's patchset's key id.
patchsets: A list of existing patchsets.
Returns:
A list of patchset ids.
"""
delta = []
if patch.no_base_file:
return delta
for other in patchsets:
if patchset_id == other.key().id():
break
if other.data or other.parsed_patches:
# Loading all the Patch entities in every PatchSet takes too long
# (DeadLineExceeded) and consumes a lot of memory (MemoryError) so instead
# just parse the patchset's data. Note we can only do this if the
# patchset was small enough to fit in the data property.
if other.parsed_patches is None:
# PatchSet.data is stored as db.Blob (str). Try to convert it
# to unicode so that Python doesn't need to do this conversion
# when comparing text and patch.text, which is db.Text
# (unicode).
try:
other.parsed_patches = engine.SplitPatch(other.data.decode('utf-8'))
except UnicodeDecodeError: # Fallback to str - unicode comparison.
other.parsed_patches = engine.SplitPatch(other.data)
other.data = None # Reduce memory usage.
for filename, text in other.parsed_patches:
if filename == patch.filename:
if text != patch.text:
delta.append(other.key().id())
break
else:
# We could not find the file in the previous patchset. It must
# be new wrt that patchset.
delta.append(other.key().id())
else:
# other (patchset) is too big to hold all the patches inside itself, so
# we need to go to the datastore. Use the index to see if there's a
# patch against our current file in other.
query = models.Patch.all()
query.filter("filename =", patch.filename)
query.filter("patchset =", other.key())
other_patches = query.fetch(100)
if other_patches and len(other_patches) > 1:
logging.info("Got %s patches with the same filename for a patchset",
len(other_patches))
for op in other_patches:
if op.text != patch.text:
delta.append(other.key().id())
break
else:
# We could not find the file in the previous patchset. It must
# be new wrt that patchset.
delta.append(other.key().id())
return delta
def _get_patchset_info(request, patchset_id):
  """ Returns a list of patchsets for the issue.

  Args:
    request: Django Request object.
    patchset_id: The id of the patchset that the caller is interested in. This
      is the one that we generate delta links to if they're not available. We
      can't generate for all patchsets because it would take too long on issues
      with many patchsets. Passing in None is equivalent to doing it for the
      last patchset.

  Returns:
    A 3-tuple of (issue, patchsets, HttpResponse).
    If HttpResponse is not None, further processing should stop and it should
    be returned.

  Side effects: annotates the issue (draft_count), each patchset (n_drafts,
  patches, parsed_patches) and each patch (comment/draft counts, delta info)
  in memory, and persists freshly calculated deltas via patch.put().
  """
  issue = request.issue
  patchsets = list(issue.patchset_set.order('created'))
  response = None
  if not patchset_id and patchsets:
    # Default to the most recent patchset.
    patchset_id = patchsets[-1].key().id()

  if request.user:
    drafts = list(models.Comment.gql('WHERE ANCESTOR IS :1 AND draft = TRUE'
                                     ' AND author = :2',
                                     issue, request.user))
  else:
    drafts = []
  comments = list(models.Comment.gql('WHERE ANCESTOR IS :1 AND draft = FALSE',
                                     issue))
  issue.draft_count = len(drafts)
  for c in drafts:
    # Cache each draft's patchset key so per-patchset counting below does
    # not hit the datastore repeatedly.
    c.ps_key = c.patch.patchset.key()

  patchset_id_mapping = {}  # Maps from patchset id to its ordering number.
  for patchset in patchsets:
    patchset_id_mapping[patchset.key().id()] = len(patchset_id_mapping) + 1
    patchset.n_drafts = sum(c.ps_key == patchset.key() for c in drafts)
    patchset.patches = None
    patchset.parsed_patches = None
    if patchset_id == patchset.key().id():
      # Only the requested patchset gets its patches loaded and deltas
      # computed (too expensive to do for all of them).
      patchset.patches = list(patchset.patch_set.order('filename'))
      try:
        # 'attempt' counts redirect retries after deadline errors below.
        attempt = _clean_int(request.GET.get('attempt'), 0, 0)
        if attempt < 0:
          response = HttpResponse('Invalid parameter', status=404)
          break
        for patch in patchset.patches:
          pkey = patch.key()
          patch._num_comments = sum(c.parent_key() == pkey for c in comments)
          patch._num_drafts = sum(c.parent_key() == pkey for c in drafts)
          if not patch.delta_calculated:
            if attempt > 2:
              # Too many patchsets or files and we're not able to generate the
              # delta links. Instead of giving a 500, try to render the page
              # without them.
              patch.delta = []
            else:
              # Compare each patch to the same file in earlier patchsets to see
              # if they differ, so that we can generate the delta patch urls.
              # We do this once and cache it after. It's specifically not done
              # on upload because we're already doing too much processing there.
              # NOTE: this function will clear out patchset.data to reduce
              # memory so don't ever call patchset.put() after calling it.
              patch.delta = _calculate_delta(patch, patchset_id, patchsets)
              patch.delta_calculated = True
              # A multi-entity put would be quicker, but it fails when the
              # patches have content that is large. App Engine throws
              # RequestTooLarge. This way, although not as efficient, allows
              # multiple refreshes on an issue to get things done, as opposed to
              # an all-or-nothing approach.
              patch.put()
          # Reduce memory usage: if this patchset has lots of added/removed
          # files (i.e. > 100) then we'll get MemoryError when rendering the
          # response. Each Patch entity is using a lot of memory if the files
          # are large, since it holds the entire contents. Call num_chunks and
          # num_drafts first though since they depend on text.
          patch.num_chunks
          patch.num_drafts
          patch.num_added
          patch.num_removed
          patch.text = None
          patch._lines = None
          patch.parsed_deltas = []
          for delta in patch.delta:
            patch.parsed_deltas.append([patchset_id_mapping[delta], delta])
      except DeadlineExceededError:
        logging.exception('DeadlineExceededError in _get_patchset_info')
        if attempt > 2:
          response = HttpResponse('DeadlineExceededError - create a new issue.')
        else:
          # Retry via redirect with an incremented attempt counter.
          response = HttpResponseRedirect('%s?attempt=%d' %
                                          (request.path, attempt + 1))
        break
  # Reduce memory usage (see above comment).
  for patchset in patchsets:
    patchset.parsed_patches = None
  return issue, patchsets, response
@issue_required
def show(request, form=None):
  """/<issue> - Show an issue."""
  # None -> generate delta links only for the most recent patchset.
  issue, patchsets, response = _get_patchset_info(request, None)
  if response:
    return response
  if not form:
    form = AddForm(initial={'reviewers': ', '.join(issue.reviewers)})

  last_patchset = first_patch = None
  if patchsets:
    last_patchset = patchsets[-1]
    loaded_patches = last_patchset.patches
    if loaded_patches:
      first_patch = loaded_patches[0]

  # Partition the issue's messages: published ones go into the list, and we
  # remember whether the current user still has an unsent draft message.
  messages = []
  has_draft_message = False
  user_email = request.user.email() if request.user else None
  for msg in issue.message_set.order('date'):
    if not msg.draft:
      messages.append(msg)
    elif msg.draft and user_email is not None and msg.sender == user_email:
      has_draft_message = True

  return respond(request, 'issue.html',
                 {'issue': issue, 'patchsets': patchsets,
                  'messages': messages, 'form': form,
                  'last_patchset': last_patchset,
                  'num_patchsets': len(patchsets),
                  'first_patch': first_patch,
                  'has_draft_message': has_draft_message,
                  })
@patchset_required
def patchset(request):
  """/patchset/<key> - Returns patchset information."""
  ps = request.patchset
  issue, patchsets, response = _get_patchset_info(request, ps.key().id())
  if response:
    return response
  # Swap in the annotated copy produced by _get_patchset_info() (it carries
  # .patches, .n_drafts, ...) in place of the bare entity from the decorator.
  target_id = ps.key().id()
  for candidate in patchsets:
    if candidate.key().id() == target_id:
      ps = candidate
  return respond(request, 'patchset.html',
                 {'issue': issue,
                  'patchset': ps,
                  'patchsets': patchsets,
                  })
@login_required
def account(request):
  """/account/?q=blah&limit=10&timestamp=blah - Used for autocomplete.

  Returns a plain-text list of 'email (nickname)' lines matching the query
  prefix, matching on email first and then nickname, preferring the app's
  own auth domain.
  """
  # Hoisted out of searchAccounts: the query string and limit are the same
  # for every call, so compute them once.
  query = request.GET.get('q').lower()
  limit = _clean_int(request.GET.get('limit'), 10, 10, 100)

  def searchAccounts(prop, domain, added, response):
    """Accumulate accounts whose lower_<prop> starts with the query.

    Skips accounts already in *added*, optionally restricts to *domain*,
    and stops once *limit* accounts have been collected.
    """
    accounts = models.Account.all()
    # Prefix match: [query, query + U+FFFD) covers all strings starting
    # with the query.
    accounts.filter("lower_%s >= " % prop, query)
    accounts.filter("lower_%s < " % prop, query + u"\ufffd")
    accounts.order("lower_%s" % prop)
    for account in accounts:
      if account.key() in added:
        continue
      if domain and not account.email.endswith(domain):
        continue
      if len(added) >= limit:
        break
      added.add(account.key())
      response += '%s (%s)\n' % (account.email, account.nickname)
    return added, response

  added = set()
  response = ''
  domain = os.environ['AUTH_DOMAIN']
  if domain != 'gmail.com':
    # 'gmail.com' is the value AUTH_DOMAIN is set to if the app is running
    # on appspot.com and shouldn't prioritize the custom domain.
    added, response = searchAccounts("email", domain, added, response)
    added, response = searchAccounts("nickname", domain, added, response)
  added, response = searchAccounts("nickname", "", added, response)
  added, response = searchAccounts("email", "", added, response)
  return HttpResponse(response)
@issue_editor_required
@xsrf_required
def edit(request):
  """/<issue>/edit - Edit an issue."""
  issue = request.issue
  base = issue.base

  # Issues created with a local base revision have no branch selector.
  if issue.local_base:
    form_cls = EditLocalBaseForm
  else:
    form_cls = EditForm

  if request.method != 'POST':
    # GET: render the edit form pre-filled with the current values,
    # showing nicknames instead of raw email addresses where known.
    reviewers = [models.Account.get_nickname_for_email(reviewer,
                                                       default=reviewer)
                 for reviewer in issue.reviewers]
    ccs = [models.Account.get_nickname_for_email(cc, default=cc)
           for cc in issue.cc]
    form = form_cls(initial={'subject': issue.subject,
                             'description': issue.description,
                             'base': base,
                             'reviewers': ', '.join(reviewers),
                             'cc': ', '.join(ccs),
                             'closed': issue.closed,
                             'private': issue.private,
                             })
    if not issue.local_base:
      form.set_branch_choices(base)
    return respond(request, 'edit.html', {'issue': issue, 'form': form})

  form = form_cls(request.POST)
  if not issue.local_base:
    form.set_branch_choices()

  # Each _get_emails() call may record errors on the form (making
  # is_valid() False), so re-check validity between steps.
  if form.is_valid():
    reviewers = _get_emails(form, 'reviewers')

  if form.is_valid():
    cc = _get_emails(form, 'cc')

  if form.is_valid() and not issue.local_base:
    base = form.get_base()

  if not form.is_valid():
    return respond(request, 'edit.html', {'issue': issue, 'form': form})
  cleaned_data = form.cleaned_data

  was_closed = issue.closed
  issue.subject = cleaned_data['subject']
  issue.description = cleaned_data['description']
  issue.closed = cleaned_data['closed']
  issue.private = cleaned_data.get('private', False)
  base_changed = (issue.base != base)
  issue.base = base
  issue.reviewers = reviewers
  issue.cc = cc
  if base_changed:
    # Cached diff contents were computed against the old base; drop them.
    for patchset in issue.patchset_set:
      db.run_in_transaction(_delete_cached_contents, list(patchset.patch_set))
  issue.put()

  # Pick the notification verb based on the open/closed transition.
  if issue.closed == was_closed:
    message = 'Edited'
  elif issue.closed:
    message = 'Closed'
  else:
    message = 'Reopened'
  _notify_issue(request, issue, message)

  return HttpResponseRedirect(reverse(show, args=[issue.key().id()]))
def _delete_cached_contents(patch_set):
  """Transactional helper for edit() to delete cached contents."""
  # TODO(guido): No need to do this in a transaction.
  def _safe_ref(patch, attr):
    """Dereference a content property, treating a broken ref as missing."""
    try:
      return getattr(patch, attr)
    except db.Error:
      return None

  patches = []
  contents = []
  for patch in patch_set:
    for attr in ('content', 'patched_content'):
      referenced = _safe_ref(patch, attr)
      if referenced is not None:
        contents.append(referenced)
    # Clear both references so the patches no longer point at the
    # soon-to-be-deleted Content entities.
    patch.content = None
    patch.patched_content = None
    patches.append(patch)
  if contents:
    logging.info("Deleting %d contents", len(contents))
    db.delete(contents)
  if patches:
    logging.info("Updating %d patches", len(patches))
    db.put(patches)
@post_required
@issue_owner_required
@xsrf_required
def delete(request):
  """/<issue>/delete - Delete an issue. There is no way back."""
  issue = request.issue
  # Collect the issue plus every dependent entity under it, then delete
  # everything in a single batch call.
  tbd = [issue]
  for cls in (models.PatchSet, models.Patch, models.Comment,
              models.Message, models.Content):
    tbd.extend(cls.gql('WHERE ANCESTOR IS :1', issue))
  db.delete(tbd)
  _notify_issue(request, issue, 'Deleted')
  return HttpResponseRedirect(reverse(mine))
@post_required
@patchset_owner_required
@xsrf_required
def delete_patchset(request):
  """/<issue>/patch/<patchset>/delete - Delete a patchset.

  There is no way back.
  """
  issue = request.issue
  ps_delete = request.patchset
  ps_id = ps_delete.key().id()
  # Later patchsets may carry delta links pointing at the doomed patchset;
  # gather those patches so the transaction below can scrub the links.
  patches = []
  for later_ps in issue.patchset_set.filter('created >', ps_delete.created):
    for patch in later_ps.patch_set:
      if patch.delta_calculated and ps_id in patch.delta:
        patches.append(patch)
  db.run_in_transaction(_patchset_delete, ps_delete, patches)
  _notify_issue(request, issue, 'Patchset deleted')
  return HttpResponseRedirect(reverse(show, args=[issue.key().id()]))
def _patchset_delete(ps_delete, patches):
  """Transactional helper for delete_patchset.

  Args:
    ps_delete: The patchset to be deleted.
    patches: Patches that have delta against patches of ps_delete.
  """
  patchset_id = ps_delete.key().id()
  # First scrub delta references to the doomed patchset from later patches.
  to_update = []
  for patch in patches:
    patch.delta.remove(patchset_id)
    to_update.append(patch)
  if to_update:
    db.put(to_update)
  # Then delete the patchset together with its patches and comments.
  doomed = [ps_delete]
  for cls in (models.Patch, models.Comment):
    doomed.extend(cls.gql('WHERE ANCESTOR IS :1', ps_delete))
  db.delete(doomed)
@post_required
@issue_editor_required
@xsrf_required
def close(request):
  """/<issue>/close - Close an issue."""
  issue = request.issue
  issue.closed = True
  # NOTE: @post_required already guarantees POST, so this check is
  # redundant but harmless; kept for safety.
  if request.method == 'POST':
    # Optionally update the description in the same request.
    new_description = request.POST.get('description')
    if new_description:
      issue.description = new_description
  issue.put()
  _notify_issue(request, issue, 'Closed')
  return HttpResponse('Closed', content_type='text/plain')
@post_required
@issue_required
@upload_required
def mailissue(request):
  """/<issue>/mail - Send mail for an issue.

  Used by upload.py.
  """
  issue = request.issue
  # Only the issue owner may trigger mail, except on the dev server.
  if issue.owner != request.user and not IS_DEV:
    return HttpResponse('Login required', status=401)
  msg = _make_message(request, issue, '', '', True)
  msg.put()
  _notify_issue(request, issue, 'Mailed')
  return HttpResponse('OK', content_type='text/plain')
@patchset_required
def download(request):
  """/download/<issue>_<patchset>.diff - Download a patch set."""
  patchset = request.patchset
  if patchset.data is None:
    # Oversized patchsets store their patches individually, not inline.
    return HttpResponseNotFound('Patch set (%s) is too large.'
                                % patchset.key().id())
  user_agent = request.META.get('HTTP_USER_AGENT')
  if user_agent and 'MSIE' in user_agent:
    # Add 256+ bytes of padding to prevent XSS attacks on Internet Explorer.
    padding = ('='*67 + '\n') * 4
  else:
    padding = ''
  return HttpResponse(padding + patchset.data, content_type='text/plain')
@issue_required
@upload_required
def description(request):
  """/<issue>/description - Gets/Sets an issue's description.

  Used by upload.py or similar scripts.
  """
  issue = request.issue
  if request.method != 'POST':
    # GET: return the current description as plain text.
    return HttpResponse(issue.description or "", content_type='text/plain')
  # POST: only editors may change it (the dev server skips the check).
  if not issue.user_can_edit(request.user) and not IS_DEV:
    return HttpResponse('Login required', status=401)
  issue.description = request.POST.get('description')
  issue.put()
  _notify_issue(request, issue, 'Changed')
  return HttpResponse('')
@issue_required
@upload_required
@json_response
def fields(request):
  """/<issue>/fields - Gets/Sets fields on the issue.

  Used by upload.py or similar scripts for partial updates of the issue
  without a patchset.
  """
  # Only recognizes a few fields for now.
  if request.method != 'POST':
    # GET: return the requested fields as a dict (serialized by
    # @json_response).
    fields = request.GET.getlist('field')
    response = {}
    if 'reviewers' in fields:
      response['reviewers'] = request.issue.reviewers or []
    if 'description' in fields:
      response['description'] = request.issue.description
    if 'subject' in fields:
      response['subject'] = request.issue.subject
    return response

  # POST: apply a JSON-encoded partial update.
  if not request.issue.user_can_edit(request.user):
    if not IS_DEV:
      return HttpResponse('Login required', status=401)
  fields = simplejson.loads(request.POST.get('fields'))
  issue = request.issue
  if 'description' in fields:
    issue.description = fields['description']
  if 'reviewers' in fields:
    issue.reviewers = _get_emails_from_raw(fields['reviewers'])
  if 'subject' in fields:
    issue.subject = fields['subject']
  issue.put()
  _notify_issue(request, issue, 'Changed')
  return HttpResponse('')
@patch_required
def patch(request):
  """/<issue>/patch/<patchset>/<patch> - View a raw patch."""
  # Thin wrapper: patch_helper() does the work and renders patch.html.
  return patch_helper(request)
def patch_helper(request, nav_type='patch'):
  """Returns a unified diff.

  Args:
    request: Django Request object.
    nav_type: the navigation used in the url (i.e. patch/diff/diff2). Normally
      the user looks at either unified or side-by-side diffs at one time, going
      through all the files in the same mode. However, if side-by-side is not
      available for some files, we temporarily switch them to unified view,
      then switch them back when we can. This way they don't miss any files.

  Returns:
    Whatever respond() returns.
  """
  # Annotate the patch with prev/next links for file navigation.
  _add_next_prev(request.patchset, request.patch)
  request.patch.nav_type = nav_type
  parsed_lines = patching.ParsePatchToLines(request.patch.lines)
  if parsed_lines is None:
    return HttpResponseNotFound('Can\'t parse the patch to lines')
  rows = engine.RenderUnifiedTableRows(request, parsed_lines)
  return respond(request, 'patch.html',
                 {'patch': request.patch,
                  'patchset': request.patchset,
                  'view_style': 'patch',
                  'rows': rows,
                  'issue': request.issue,
                  'context': _clean_int(request.GET.get('context'), -1),
                  'column_width': _clean_int(request.GET.get('column_width'),
                                             None),
                  })
@image_required
def image(request):
  """/<issue>/content/<patchset>/<patch>/<content> - Return patch's content."""
  # @image_required resolved the Content entity; serve its raw bytes.
  return HttpResponse(request.content.data)
@patch_required
def download_patch(request):
  """/download/issue<issue>_<patchset>_<patch>.diff - Download patch."""
  # Raw patch text, as plain text for scripted download.
  return HttpResponse(request.patch.text, content_type='text/plain')
def _issue_as_dict(issue, messages, request=None):
  """Converts an issue into a dict.

  Args:
    issue: models.Issue entity.
    messages: If true, also include the issue's messages.
    request: Optional request, used for nickname resolution.
  """
  values = dict(
      owner=library.get_nickname(issue.owner, True, request),
      owner_email=issue.owner.email(),
      modified=str(issue.modified),
      created=str(issue.created),
      closed=issue.closed,
      cc=issue.cc,
      reviewers=issue.reviewers,
      patchsets=[p.key().id() for p in issue.patchset_set.order('created')],
      description=issue.description,
      subject=issue.subject,
      issue=issue.key().id(),
      base_url=issue.base,
      private=issue.private,
  )
  if messages:
    values['messages'] = [
        dict(sender=m.sender,
             recipients=m.recipients,
             date=str(m.date),
             text=m.text,
             approval=m.approval)
        for m in models.Message.gql('WHERE ANCESTOR IS :1', issue)
    ]
  return values
def _patchset_as_dict(patchset, request=None):
  """Converts a patchset into a dict.

  Args:
    patchset: models.PatchSet entity.
    request: Optional request, used for nickname resolution.
  """
  issue = patchset.issue
  values = dict(
      patchset=patchset.key().id(),
      issue=issue.key().id(),
      owner=library.get_nickname(issue.owner, True, request),
      owner_email=issue.owner.email(),
      message=patchset.message,
      url=patchset.url,
      created=str(patchset.created),
      modified=str(patchset.modified),
      num_comments=patchset.num_comments,
      files={},
  )
  for patch in models.Patch.gql("WHERE patchset = :1", patchset):
    # num_comments and num_drafts are left out for performance reason:
    # they cause a datastore query on first access. They could be added
    # optionally if the need ever arises.
    values['files'][patch.filename] = dict(
        id=patch.key().id(),
        is_binary=patch.is_binary,
        no_base_file=patch.no_base_file,
        num_added=patch.num_added,
        num_chunks=patch.num_chunks,
        num_removed=patch.num_removed,
        status=patch.status,
        property_changes='\n'.join(patch.property_changes),
    )
  return values
@issue_required
@json_response
def api_issue(request):
  """/api/<issue> - Gets issue's data as a JSON-encoded dictionary."""
  # Include messages only when ?messages=true is passed explicitly.
  want_messages = (request.GET.get('messages', '').lower() == 'true')
  return _issue_as_dict(request.issue, want_messages, request)
@patchset_required
@json_response
def api_patchset(request):
  """/api/<issue>/<patchset> - Gets an issue's patchset data as a JSON-encoded
  dictionary.
  """
  # @json_response serializes the returned dict.
  return _patchset_as_dict(request.patchset, request)
def _get_context_for_user(request):
  """Returns the context setting for a user.

  The value is validated against models.CONTEXT_CHOICES.
  If an invalid value is found, the value is overwritten with
  engine.DEFAULT_CONTEXT.

  Returns None when the user explicitly asked for the whole file
  (?context= with an empty value).
  """
  get_param = request.GET.get('context') or None
  if 'context' in request.GET and get_param is None:
    # User wants to see whole file. No further processing is needed.
    return get_param
  if request.user:
    # Logged-in users carry a persisted preference on their Account.
    account = models.Account.current_user_account
    default_context = account.default_context
  else:
    default_context = engine.DEFAULT_CONTEXT
  context = _clean_int(get_param, default_context)
  if context is not None and context not in models.CONTEXT_CHOICES:
    context = engine.DEFAULT_CONTEXT
  return context
def _get_column_width_for_user(request):
  """Returns the column width setting for a user."""
  if request.user:
    # Logged-in users carry a persisted preference on their Account.
    account = models.Account.current_user_account
    preferred = account.default_column_width
  else:
    preferred = engine.DEFAULT_COLUMN_WIDTH
  # The query parameter, when valid, overrides the preference; the result
  # is clamped to the allowed range.
  return _clean_int(request.GET.get('column_width'), preferred,
                    engine.MIN_COLUMN_WIDTH, engine.MAX_COLUMN_WIDTH)
@patch_filename_required
def diff(request):
  """/<issue>/diff/<patchset>/<patch> - View a patch as a side-by-side diff"""
  if request.patch.no_base_file:
    # Can't show side-by-side diff since we don't have the base file. Show the
    # unified diff instead.
    return patch_helper(request, 'diff')

  patchset = request.patchset
  patch = request.patch

  patchsets = list(request.issue.patchset_set.order('created'))

  context = _get_context_for_user(request)
  column_width = _get_column_width_for_user(request)
  if patch.is_binary:
    # No textual rows to render for binary files.
    rows = None
  else:
    try:
      rows = _get_diff_table_rows(request, patch, context, column_width)
    except engine.FetchError, err:
      return HttpResponseNotFound(str(err))

  # Annotate the patch with prev/next links for file navigation.
  _add_next_prev(patchset, patch)
  return respond(request, 'diff.html',
                 {'issue': request.issue,
                  'patchset': patchset,
                  'patch': patch,
                  'view_style': 'diff',
                  'rows': rows,
                  'context': context,
                  'context_values': models.CONTEXT_CHOICES,
                  'column_width': column_width,
                  'patchsets': patchsets,
                  })
def _get_diff_table_rows(request, patch, context, column_width):
  """Helper function that returns rendered rows for a patch.

  Args:
    request: Django Request object.
    patch: The Patch whose diff is rendered.
    context: Number of context lines, or None for the whole file.
    column_width: Maximum width of each side-by-side column.

  Raises:
    engine.FetchError if patch parsing or download of base files fails.

  Side effect: a trailing None row signals bad cached content, which is
  then marked bad or deleted here.
  """
  chunks = patching.ParsePatchToChunks(patch.lines, patch.filename)
  if chunks is None:
    raise engine.FetchError('Can\'t parse the patch to chunks')

  # Possible engine.FetchErrors are handled in diff() and diff_skipped_lines().
  content = request.patch.get_content()

  rows = list(engine.RenderDiffTableRows(request, content.lines,
                                         chunks, patch,
                                         context=context,
                                         colwidth=column_width))
  if rows and rows[-1] is None:
    del rows[-1]
    # Get rid of content, which may be bad
    if content.is_uploaded and content.text != None:
      # Don't delete uploaded content, otherwise get_content()
      # will fetch it.
      content.is_bad = True
      content.text = None
      content.put()
    else:
      content.delete()
      request.patch.content = None
      request.patch.put()

  return rows
@patch_required
@json_response
def diff_skipped_lines(request, id_before, id_after, where, column_width):
  """/<issue>/diff/<patchset>/<patch> - Returns a fragment of skipped lines.

  *where* indicates which lines should be expanded:
    'b' - move marker line to bottom and expand above
    't' - move marker line to top and expand below
    'a' - expand all skipped lines
  """
  patchset = request.patchset
  patch = request.patch
  if where == 'a':
    # Expand everything: no line-count limit.
    context = None
  else:
    context = _get_context_for_user(request) or 100

  column_width = _clean_int(column_width, engine.DEFAULT_COLUMN_WIDTH,
                            engine.MIN_COLUMN_WIDTH, engine.MAX_COLUMN_WIDTH)

  try:
    # Render the whole file (context=None here); trimming down to *context*
    # lines happens in _get_skipped_lines_response() below.
    rows = _get_diff_table_rows(request, patch, None, column_width)
  except engine.FetchError, err:
    return HttpResponse('Error: %s; please report!' % err, status=500)
  return _get_skipped_lines_response(rows, id_before, id_after, where, context)
# there's no easy way to put a control character into a regex, so brute-force it
# this is all control characters except \r, \n, and \t
_badchars_re = re.compile(r'[\000\001\002\003\004\005\006\007\010\013\014\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037]')
def _strip_invalid_xml(s):
"""Remove control chars other than \r\n\t from a string to be put in XML."""
if _badchars_re.search(s):
return ''.join(c for c in s if c >= ' ' or c in '\r\n\t')
else:
return s
def _get_skipped_lines_response(rows, id_before, id_after, where, context):
  """Helper function that returns response data for skipped lines.

  Args:
    rows: Rendered HTML table rows for the whole file.
    id_before: Row id of the marker; used for expanding above it.
    id_after: Row id of the marker; used for expanding below it.
    where: 'b' (expand above marker), 't' (expand below marker) or
      'a' (expand all skipped lines).
    context: Max lines to expand in each direction, or None for all.

  Returns:
    A list of [attributes, content] pairs consumable by the JS side.
  """
  response_rows = []
  id_before_start = int(id_before)
  id_after_end = int(id_after)
  if context is not None:
    id_before_end = id_before_start+context
    id_after_start = id_after_end-context
  else:
    id_before_end = id_after_start = None

  # Raw string so \d reaches the regex engine instead of being treated as a
  # (deprecated) string escape; compiled once instead of per row.
  pair_re = re.compile(r'^<tr( name="hook")? id="pair-(?P<rowcount>\d+)">')
  for row in rows:
    m = pair_re.match(row)
    if m:
      curr_id = int(m.groupdict().get("rowcount"))
      # expand below marker line
      if (where == 'b'
          and curr_id > id_after_start and curr_id <= id_after_end):
        response_rows.append(row)
      # expand above marker line
      elif (where == 't'
            and curr_id >= id_before_start and curr_id < id_before_end):
        response_rows.append(row)
      # expand all skipped lines
      elif (where == 'a'
            and curr_id >= id_before_start and curr_id <= id_after_end):
        response_rows.append(row)
      if context is not None and len(response_rows) >= 2*context:
        break

  # Create a usable structure for the JS part
  response = []
  response_rows = [_strip_invalid_xml(r) for r in response_rows]
  dom = ElementTree.parse(StringIO('<div>%s</div>' % "".join(response_rows)))
  # Iterate elements directly instead of the long-deprecated (removed in
  # Python 3.9) getchildren(); behavior is identical.
  for node in dom.getroot():
    content = [[x.items(), x.text] for x in node]
    response.append([node.items(), content])
  return response
def _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,
                    column_width, patch_filename=None):
  """Helper function that returns objects for diff2 views.

  Args:
    request: Django Request object (request.issue must be set).
    ps_left_id: Key id of the left-hand patchset.
    ps_right_id: Key id of the right-hand patchset.
    patch_id: Key id of the right-hand patch, or None.
    context: Number of context lines, or None for the whole file.
    column_width: Maximum width of each side-by-side column.
    patch_filename: Filename to compare; derived from the right patch
      when omitted.

  Returns:
    A dict with patch_left, patch_right, ps_left, ps_right and rows, or
    an HttpResponseNotFound on lookup/fetch failure (callers must check).
  """
  ps_left = models.PatchSet.get_by_id(int(ps_left_id), parent=request.issue)
  if ps_left is None:
    return HttpResponseNotFound('No patch set exists with that id (%s)' %
                                ps_left_id)
  ps_left.issue = request.issue
  ps_right = models.PatchSet.get_by_id(int(ps_right_id), parent=request.issue)
  if ps_right is None:
    return HttpResponseNotFound('No patch set exists with that id (%s)' %
                                ps_right_id)
  ps_right.issue = request.issue
  if patch_id is not None:
    patch_right = models.Patch.get_by_id(int(patch_id), parent=ps_right)
  else:
    patch_right = None
  if patch_right is not None:
    patch_right.patchset = ps_right
    if patch_filename is None:
      patch_filename = patch_right.filename
  # Now find the corresponding patch in ps_left
  patch_left = models.Patch.gql('WHERE patchset = :1 AND filename = :2',
                                ps_left, patch_filename).get()

  # Each side shows its patched content; when a side lacks the file, fall
  # back to the other side's *base* content so the diff still renders.
  if patch_left:
    try:
      new_content_left = patch_left.get_patched_content()
    except engine.FetchError, err:
      return HttpResponseNotFound(str(err))
    lines_left = new_content_left.lines
  elif patch_right:
    lines_left = patch_right.get_content().lines
  else:
    lines_left = []

  if patch_right:
    try:
      new_content_right = patch_right.get_patched_content()
    except engine.FetchError, err:
      return HttpResponseNotFound(str(err))
    lines_right = new_content_right.lines
  elif patch_left:
    lines_right = patch_left.get_content().lines
  else:
    lines_right = []

  rows = engine.RenderDiff2TableRows(request,
                                     lines_left, patch_left,
                                     lines_right, patch_right,
                                     context=context,
                                     colwidth=column_width)
  rows = list(rows)
  if rows and rows[-1] is None:
    # A trailing None row is a sentinel from the renderer; drop it.
    del rows[-1]

  return dict(patch_left=patch_left, patch_right=patch_right,
              ps_left=ps_left, ps_right=ps_right, rows=rows)
@issue_required
def diff2(request, ps_left_id, ps_right_id, patch_filename):
  """/<issue>/diff2/... - View the delta between two different patch sets."""
  context = _get_context_for_user(request)
  column_width = _get_column_width_for_user(request)

  # Resolve the right-hand patch from the filename; the id is only needed
  # for _get_diff2_data() and the template.
  ps_right = models.PatchSet.get_by_id(int(ps_right_id), parent=request.issue)
  patch_right = None

  if ps_right:
    patch_right = models.Patch.gql('WHERE patchset = :1 AND filename = :2',
                                   ps_right, patch_filename).get()

  if patch_right:
    patch_id = patch_right.key().id()
  elif patch_filename.isdigit():
    # Perhaps it's an ID that's passed in, based on the old URL scheme.
    patch_id = int(patch_filename)
  else:  # patch doesn't exist in this patchset
    patch_id = None

  data = _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,
                         column_width, patch_filename)
  if isinstance(data, HttpResponseNotFound):
    return data

  patchsets = list(request.issue.patchset_set.order('created'))

  if data["patch_right"]:
    # Annotate with prev/next links for cross-patchset file navigation.
    _add_next_prev2(data["ps_left"], data["ps_right"], data["patch_right"])
  return respond(request, 'diff2.html',
                 {'issue': request.issue,
                  'ps_left': data["ps_left"],
                  'patch_left': data["patch_left"],
                  'ps_right': data["ps_right"],
                  'patch_right': data["patch_right"],
                  'rows': data["rows"],
                  'patch_id': patch_id,
                  'context': context,
                  'context_values': models.CONTEXT_CHOICES,
                  'column_width': column_width,
                  'patchsets': patchsets,
                  'filename': patch_filename,
                  })
@issue_required
@json_response
def diff2_skipped_lines(request, ps_left_id, ps_right_id, patch_id,
                        id_before, id_after, where, column_width):
  """/<issue>/diff2/... - Returns a fragment of skipped lines"""
  # Clamp the requested column width to the engine's allowed range.
  column_width = _clean_int(column_width, engine.DEFAULT_COLUMN_WIDTH,
                            engine.MIN_COLUMN_WIDTH, engine.MAX_COLUMN_WIDTH)
  # 'a' means expand all skipped lines (no context cap); otherwise use the
  # user's preference, defaulting to 100.
  context = None if where == 'a' else (_get_context_for_user(request) or 100)
  # Render with a huge context so every skipped row is available to slice.
  diff_data = _get_diff2_data(request, ps_left_id, ps_right_id, patch_id,
                              10000, column_width)
  if isinstance(diff_data, HttpResponseNotFound):
    return diff_data
  return _get_skipped_lines_response(diff_data["rows"], id_before, id_after,
                                     where, context)
def _get_comment_counts(account, patchset):
  """Helper to get comment counts for all patches in a single query.

  The helper returns two dictionaries comments_by_patch and
  drafts_by_patch with patch key as key and comment count as
  value. Patches without comments or drafts are not present in those
  dictionaries.
  """
  # A key-only query won't work because we need to fetch the patch key
  # for every comment in the loop below.
  query = models.Comment.all()
  query.ancestor(patchset)
  # Tally everything with one query rather than one query per patch.
  comments_by_patch = {}
  drafts_by_patch = {}
  for comment in query:
    pkey = models.Comment.patch.get_value_for_datastore(comment)
    if not comment.draft:
      comments_by_patch[pkey] = comments_by_patch.get(pkey, 0) + 1
    elif account and comment.author == account.user:
      # Drafts are only visible to (and counted for) their own author.
      drafts_by_patch[pkey] = drafts_by_patch.get(pkey, 0) + 1
  return comments_by_patch, drafts_by_patch
def _add_next_prev(patchset, patch):
  """Helper to add .next and .prev attributes to a patch object.

  Also sets .prev_with_comment / .next_with_comment to the nearest
  patches (by filename order) that carry comments or drafts.
  """
  patch.prev = patch.next = None
  patches = list(models.Patch.gql("WHERE patchset = :1 ORDER BY filename",
                                  patchset))
  patchset.patches = patches  # Required to render the jump to select.
  comments_by_patch, drafts_by_patch = _get_comment_counts(
     models.Account.current_user_account, patchset)
  last_patch = None
  next_patch = None
  last_patch_with_comment = None
  next_patch_with_comment = None
  found_patch = False
  # Single pass over the filename-ordered list: everything before the
  # current patch updates the "last" trackers, everything after fills the
  # "next" trackers.
  for p in patches:
    if p.filename == patch.filename:
      found_patch = True
      continue
    # NOTE(review): num_comments/num_drafts below presumably read these
    # cached _num_* values -- confirm against the Patch model.
    p._num_comments = comments_by_patch.get(p.key(), 0)
    p._num_drafts = drafts_by_patch.get(p.key(), 0)
    if not found_patch:
      last_patch = p
      if p.num_comments > 0 or p.num_drafts > 0:
        last_patch_with_comment = p
    else:
      if next_patch is None:
        next_patch = p
      if p.num_comments > 0 or p.num_drafts > 0:
        next_patch_with_comment = p
        # safe to stop scanning now because the next with out a comment
        # will already have been filled in by some earlier patch
        break
  patch.prev = last_patch
  patch.next = next_patch
  patch.prev_with_comment = last_patch_with_comment
  patch.next_with_comment = next_patch_with_comment
def _add_next_prev2(ps_left, ps_right, patch_right):
  """Helper to add .next and .prev attributes to a patch object.

  diff2 variant of _add_next_prev(): the *_with_comment neighbors are
  additionally required to have a delta against ps_left.
  """
  patch_right.prev = patch_right.next = None
  patches = list(models.Patch.gql("WHERE patchset = :1 ORDER BY filename",
                                  ps_right))
  ps_right.patches = patches  # Required to render the jump to select.
  n_comments, n_drafts = _get_comment_counts(
     models.Account.current_user_account, ps_right)
  last_patch = None
  next_patch = None
  last_patch_with_comment = None
  next_patch_with_comment = None
  found_patch = False
  # Single pass over the filename-ordered list; see _add_next_prev().
  for p in patches:
    if p.filename == patch_right.filename:
      found_patch = True
      continue
    p._num_comments = n_comments.get(p.key(), 0)
    p._num_drafts = n_drafts.get(p.key(), 0)
    if not found_patch:
      last_patch = p
      if ((p.num_comments > 0 or p.num_drafts > 0) and
          ps_left.key().id() in p.delta):
        last_patch_with_comment = p
    else:
      if next_patch is None:
        next_patch = p
      if ((p.num_comments > 0 or p.num_drafts > 0) and
          ps_left.key().id() in p.delta):
        next_patch_with_comment = p
        # safe to stop scanning now because the next with out a comment
        # will already have been filled in by some earlier patch
        break
  patch_right.prev = last_patch
  patch_right.next = next_patch
  patch_right.prev_with_comment = last_patch_with_comment
  patch_right.next_with_comment = next_patch_with_comment
@post_required
def inline_draft(request):
  """/inline_draft - Ajax handler to submit an in-line draft comment.

  This wraps _inline_draft(); all exceptions are logged and cause an
  abbreviated response indicating something went wrong.

  Note: creating or editing draft comments is *not* XSRF-protected,
  because it is not unusual to come back after hours; the XSRF tokens
  time out after 1 or 2 hours. The final submit of the drafts for
  others to view *is* XSRF-protected.
  """
  try:
    return _inline_draft(request)
  except Exception as err:
    logging.exception('Exception in inline_draft processing:')
    # TODO(guido): return some kind of error instead?
    # The JS caller expects a 200 status code, hence a plain HttpResponse.
    return HttpResponse('<font color="red">Error: %s; please report!</font>' %
                        err.__class__.__name__)
def _inline_draft(request):
  """Helper to submit an in-line draft comment.

  Reads snapshot/side/issue/patchset/patch/lineno/text from request.POST,
  creates, updates or deletes the draft comment, and returns the rendered
  inline-comment fragment for that line.
  """
  # TODO(guido): turn asserts marked with XXX into errors
  # Don't use @login_required, since the JS doesn't understand redirects.
  if not request.user:
    # Don't log this, spammers have started abusing this.
    return HttpResponse('Not logged in')
  snapshot = request.POST.get('snapshot')
  assert snapshot in ('old', 'new'), repr(snapshot)
  left = (snapshot == 'old')
  side = request.POST.get('side')
  assert side in ('a', 'b'), repr(side)  # Display left (a) or right (b)
  issue_id = int(request.POST['issue'])
  issue = models.Issue.get_by_id(issue_id)
  assert issue  # XXX
  # The patchset/patch may come in directly, or per-side for diff2 views.
  patchset_id = int(request.POST.get('patchset') or
                    request.POST[side == 'a' and 'ps_left' or 'ps_right'])
  patchset = models.PatchSet.get_by_id(int(patchset_id), parent=issue)
  assert patchset  # XXX
  patch_id = int(request.POST.get('patch') or
                 request.POST[side == 'a' and 'patch_left' or 'patch_right'])
  patch = models.Patch.get_by_id(int(patch_id), parent=patchset)
  assert patch  # XXX
  text = request.POST.get('text')
  lineno = int(request.POST['lineno'])
  message_id = request.POST.get('message_id')
  comment = None
  if message_id:
    comment = models.Comment.get_by_key_name(message_id, parent=patch)
    # Only the author may edit, and only while it is still a draft.
    if comment is None or not comment.draft or comment.author != request.user:
      comment = None
      message_id = None
  if not message_id:
    # Prefix with 'z' to avoid key names starting with digits.
    message_id = 'z' + binascii.hexlify(_random_bytes(16))
  if not text.rstrip():
    # Empty text means "delete this draft".
    if comment is not None:
      assert comment.draft and comment.author == request.user
      comment.delete()  # Deletion
      comment = None
      # Re-query the comment count.
      models.Account.current_user_account.update_drafts(issue)
  else:
    if comment is None:
      comment = models.Comment(key_name=message_id, parent=patch)
    comment.patch = patch
    comment.lineno = lineno
    comment.left = left
    comment.text = db.Text(text)
    comment.message_id = message_id
    comment.put()
    # The actual count doesn't matter, just that there's at least one.
    models.Account.current_user_account.update_drafts(issue, 1)
  query = models.Comment.gql(
      'WHERE patch = :patch AND lineno = :lineno AND left = :left '
      'ORDER BY date',
      patch=patch, lineno=lineno, left=left)
  # Other users' drafts must stay invisible.
  comments = list(c for c in query if not c.draft or c.author == request.user)
  if comment is not None and comment.author is None:
    # Show anonymous draft even though we don't save it
    comments.append(comment)
  if not comments:
    return HttpResponse(' ')
  for c in comments:
    c.complete(patch)
  return render_to_response('inline_comment.html',
                            {'user': request.user,
                             'patch': patch,
                             'patchset': patchset,
                             'issue': issue,
                             'comments': comments,
                             'lineno': lineno,
                             'snapshot': snapshot,
                             'side': side,
                             },
                            context_instance=RequestContext(request))
def _get_affected_files(issue, full_diff=False):
  """Helper to return a list of affected files from the latest patchset.

  Args:
    issue: Issue instance.
    full_diff: If true, include the entire diff even if it exceeds 100 lines.

  Returns:
    2-tuple containing a list of affected files, and the diff contents if it
    is less than 100 lines (otherwise the second item is an empty string).
  """
  files = []
  modified_count = 0
  diff = ''
  patchsets = list(issue.patchset_set.order('created'))
  if patchsets:
    latest = patchsets[-1]
    for patch in latest.patch_set.order('filename'):
      # Each entry is "<status> <filename>", or just the filename when the
      # patch carries no status.
      if patch.status:
        files.append(patch.status + ' ' + patch.filename)
      else:
        files.append(patch.filename)
      # No point in adding up sizes once the patchset is already known to
      # be too large for email.
      if full_diff or modified_count < 100:
        modified_count += patch.num_added + patch.num_removed
    if full_diff or modified_count < 100:
      diff = latest.data
  return files, diff
def _get_mail_template(request, issue, full_diff=False):
  """Helper to return the template and context for an email.

  If this is the first email sent by the owner, a template that lists the
  reviewers, description and files is used.
  """
  template = 'mails/comment.txt'
  context = {}
  if request.user == issue.owner:
    prior = db.GqlQuery(
        'SELECT * FROM Message WHERE ANCESTOR IS :1 AND sender = :2',
        issue, db.Email(request.user.email()))
    if prior.count(1) == 0:
      # First message from the owner: use the full review announcement.
      template = 'mails/review.txt'
      files, patch = _get_affected_files(issue, full_diff)
      context.update({'files': files, 'patch': patch, 'base': issue.base})
  return template, context
@login_required
@issue_required
@xsrf_required
def publish(request):
  """ /<issue>/publish - Publish draft comments and send mail.

  GET renders the publish form pre-filled with reviewers/cc and a preview
  of the pending drafts; POST validates the form, updates the issue,
  promotes drafts to published comments and sends the notification mail.
  """
  issue = request.issue
  # Owners get the full form; others get the reduced one.
  if request.user == issue.owner:
    form_class = PublishForm
  else:
    form_class = MiniPublishForm
  draft_message = None
  if not request.POST.get('message_only', None):
    query = models.Message.gql(('WHERE issue = :1 AND sender = :2 '
                                'AND draft = TRUE'), issue,
                               request.user.email())
    draft_message = query.get()
  if request.method != 'POST':
    reviewers = issue.reviewers[:]
    cc = issue.cc[:]
    if request.user != issue.owner and (request.user.email()
                                        not in issue.reviewers):
      # A commenting non-owner becomes a reviewer (and leaves the CC list).
      reviewers.append(request.user.email())
      if request.user.email() in cc:
        cc.remove(request.user.email())
    reviewers = [models.Account.get_nickname_for_email(reviewer,
                                                       default=reviewer)
                 for reviewer in reviewers]
    ccs = [models.Account.get_nickname_for_email(cc, default=cc) for cc in cc]
    tbd, comments = _get_draft_comments(request, issue, True)
    preview = _get_draft_details(request, comments)
    if draft_message is None:
      msg = ''
    else:
      msg = draft_message.text
    form = form_class(initial={'subject': issue.subject,
                               'reviewers': ', '.join(reviewers),
                               'cc': ', '.join(ccs),
                               'send_mail': True,
                               'message': msg,
                               })
    return respond(request, 'publish.html', {'form': form,
                                             'issue': issue,
                                             'preview': preview,
                                             'draft_message': draft_message,
                                             })
  form = form_class(request.POST)
  if not form.is_valid():
    return respond(request, 'publish.html', {'form': form, 'issue': issue})
  if request.user == issue.owner:
    issue.subject = form.cleaned_data['subject']
  # MiniPublishForm has no reviewers/cc fields; fall back to the issue's.
  if form.is_valid() and not form.cleaned_data.get('message_only', False):
    reviewers = _get_emails(form, 'reviewers')
  else:
    reviewers = issue.reviewers
    if request.user != issue.owner and request.user.email() not in reviewers:
      reviewers.append(db.Email(request.user.email()))
  if form.is_valid() and not form.cleaned_data.get('message_only', False):
    cc = _get_emails(form, 'cc')
  else:
    cc = issue.cc
    # The user is in the reviewer list, remove them from CC if they're there.
    if request.user.email() in cc:
      cc.remove(request.user.email())
  # _get_emails() may have added form errors; re-check.
  if not form.is_valid():
    return respond(request, 'publish.html', {'form': form, 'issue': issue})
  issue.reviewers = reviewers
  issue.cc = cc
  if not form.cleaned_data.get('message_only', False):
    tbd, comments = _get_draft_comments(request, issue)
  else:
    tbd = []
    comments = []
  issue.update_comment_count(len(comments))
  tbd.append(issue)
  if comments:
    logging.warn('Publishing %d comments', len(comments))
  msg = _make_message(request, issue,
                      form.cleaned_data['message'],
                      comments,
                      form.cleaned_data['send_mail'],
                      draft=draft_message)
  tbd.append(msg)
  # Persist every touched entity in one pass.
  for obj in tbd:
    db.put(obj)
  _notify_issue(request, issue, 'Comments published')
  # There are now no comments here (modulo race conditions)
  models.Account.current_user_account.update_drafts(issue, 0)
  if form.cleaned_data.get('no_redirect', False):
    return HttpResponse('OK', content_type='text/plain')
  return HttpResponseRedirect(reverse(show, args=[issue.key().id()]))
def _encode_safely(s):
  """Helper to turn a unicode string into 8-bit bytes."""
  if isinstance(s, unicode):
    return s.encode('utf-8')
  return s
def _get_draft_comments(request, issue, preview=False):
  """Helper to return objects to put() and a list of draft comments.

  If preview is True, the list of objects to put() is empty to avoid changes
  to the datastore.

  Args:
    request: Django Request object.
    issue: Issue instance.
    preview: Preview flag (default: False).

  Returns:
    2-tuple (put_objects, comments).

  Note: the returned comments have draft=False and .patch resolved even
  in preview mode; only the datastore is left untouched then.
  """
  comments = []
  tbd = []
  # XXX Should request all drafts for this issue once, now we can.
  for patchset in issue.patchset_set.order('created'):
    ps_comments = list(models.Comment.gql(
        'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE',
        patchset, request.user))
    if ps_comments:
      # Preload the patchset's patches so every comment's .patch can be
      # resolved without per-comment datastore fetches.
      patches = dict((p.key(), p) for p in patchset.patch_set)
      for p in patches.itervalues():
        p.patchset = patchset
      for c in ps_comments:
        c.draft = False
        # Get the patch key value without loading the patch entity.
        # NOTE: Unlike the old version of this code, this is the
        # recommended and documented way to do this!
        pkey = models.Comment.patch.get_value_for_datastore(c)
        if pkey in patches:
          patch = patches[pkey]
          c.patch = patch
      if not preview:
        tbd.append(ps_comments)
        patchset.update_comment_count(len(ps_comments))
        tbd.append(patchset)
      # Stable presentation order: by file, right side before left,
      # then line number and creation date.
      ps_comments.sort(key=lambda c: (c.patch.filename, not c.left,
                                      c.lineno, c.date))
      comments += ps_comments
  return tbd, comments
def _get_draft_details(request, comments):
  """Helper to display comments with context in the email message.

  Expects *comments* grouped by (patch, side) -- as produced by
  _get_draft_comments() -- so the per-file header and line cache are only
  built once per group.
  """
  last_key = None
  output = []
  linecache = {}  # Maps (c.patch.key(), c.left) to list of lines
  modified_patches = []
  for c in comments:
    if (c.patch.key(), c.left) != last_key:
      # New (file, side) group: emit the file header and cache its lines.
      url = request.build_absolute_uri(
          reverse(diff, args=[request.issue.key().id(),
                              c.patch.patchset.key().id(),
                              c.patch.filename]))
      output.append('\n%s\nFile %s (%s):' % (url, c.patch.filename,
                                             c.left and "left" or "right"))
      last_key = (c.patch.key(), c.left)
      patch = c.patch
      if patch.no_base_file:
        # Without a base file, fall back to parsing the patch text itself.
        linecache[last_key] = patching.ParsePatchToLines(patch.lines)
      else:
        if c.left:
          old_lines = patch.get_content().text.splitlines(True)
          linecache[last_key] = old_lines
        else:
          new_lines = patch.get_patched_content().text.splitlines(True)
          linecache[last_key] = new_lines
    file_lines = linecache[last_key]
    context = ''
    if patch.no_base_file:
      # Parsed patch lines are (old_lineno, new_lineno, text) triples.
      for old_line_no, new_line_no, line_text in file_lines:
        if ((c.lineno == old_line_no and c.left) or
            (c.lineno == new_line_no and not c.left)):
          context = line_text.strip()
          break
    else:
      if 1 <= c.lineno <= len(file_lines):
        context = file_lines[c.lineno - 1].strip()
    url = request.build_absolute_uri(
        '%s#%scode%d' % (reverse(diff, args=[request.issue.key().id(),
                                             c.patch.patchset.key().id(),
                                             c.patch.filename]),
                         c.left and "old" or "new",
                         c.lineno))
    output.append('\n%s\n%s:%d: %s\n%s' % (url, c.patch.filename, c.lineno,
                                           context, c.text.rstrip()))
  # NOTE(review): modified_patches is never populated in this version, so
  # this put() is currently a no-op -- confirm before removing.
  if modified_patches:
    db.put(modified_patches)
  return '\n'.join(output)
def _make_message(request, issue, message, comments=None, send_mail=False,
                  draft=None):
  """Helper to create a Message instance and optionally send an email.

  Args:
    request: Django Request object.
    issue: Issue the message belongs to.
    message: The message text entered by the user.
    comments: Optional list of (formerly draft) comments to include.
    send_mail: Whether to actually send the email.
    draft: An existing draft Message to promote instead of creating one.

  Returns:
    The (unsaved) Message entity; the caller is responsible for put().
  """
  attach_patch = request.POST.get("attach_patch") == "yes"
  template, context = _get_mail_template(request, issue, full_diff=attach_patch)
  # Decide who should receive mail
  my_email = db.Email(request.user.email())
  to = [db.Email(issue.owner.email())] + issue.reviewers
  cc = issue.cc[:]
  if django_settings.RIETVELD_INCOMING_MAIL_ADDRESS:
    cc.append(db.Email(django_settings.RIETVELD_INCOMING_MAIL_ADDRESS))
  reply_to = to + cc
  if my_email in to and len(to) > 1:  # send_mail() wants a non-empty to list
    to.remove(my_email)
  if my_email in cc:
    cc.remove(my_email)
  subject = '%s (issue %d)' % (issue.subject, issue.key().id())
  patch = None
  if attach_patch:
    subject = 'PATCH: ' + subject
    if 'patch' in context:
      # The diff is attached to the mail, not rendered in the body.
      patch = context['patch']
      del context['patch']
  if issue.message_set.count(1) > 0:
    subject = 'Re: ' + subject
  if comments:
    details = _get_draft_details(request, comments)
  else:
    details = ''
  message = message.replace('\r\n', '\n')
  text = ((message.strip() + '\n\n' + details.strip())).strip()
  if draft is None:
    msg = models.Message(issue=issue,
                         subject=subject,
                         sender=my_email,
                         recipients=reply_to,
                         text=db.Text(text),
                         parent=issue)
  else:
    # Promote the existing draft message in place.
    msg = draft
    msg.subject = subject
    msg.recipients = reply_to
    msg.text = db.Text(text)
    msg.draft = False
    msg.date = datetime.datetime.now()
  if send_mail:
    # Limit the list of files in the email to approximately 200
    if 'files' in context and len(context['files']) > 210:
      num_trimmed = len(context['files']) - 200
      del context['files'][200:]
      context['files'].append('[[ %d additional files ]]' % num_trimmed)
    url = request.build_absolute_uri(reverse(show, args=[issue.key().id()]))
    reviewer_nicknames = ', '.join(library.get_nickname(rev_temp, True,
                                                        request)
                                   for rev_temp in issue.reviewers)
    cc_nicknames = ', '.join(library.get_nickname(cc_temp, True, request)
                             for cc_temp in cc)
    my_nickname = library.get_nickname(request.user, True, request)
    reply_to = ', '.join(reply_to)
    description = (issue.description or '').replace('\r\n', '\n')
    home = request.build_absolute_uri(reverse(index))
    context.update({'reviewer_nicknames': reviewer_nicknames,
                    'cc_nicknames': cc_nicknames,
                    'my_nickname': my_nickname, 'url': url,
                    'message': message, 'details': details,
                    'description': description, 'home': home,
                    })
    body = django.template.loader.render_to_string(
        template, context, context_instance=RequestContext(request))
    logging.warn('Mail: to=%s; cc=%s', ', '.join(to), ', '.join(cc))
    send_args = {'sender': my_email,
                 'to': [_encode_safely(address) for address in to],
                 'subject': _encode_safely(subject),
                 'body': _encode_safely(body),
                 'reply_to': _encode_safely(reply_to)}
    if cc:
      send_args['cc'] = [_encode_safely(address) for address in cc]
    if patch:
      send_args['attachments'] = [('issue_%s_patch.diff' % issue.key().id(),
                                   patch)]
    attempts = 0
    # Retry transient mail-API deadline errors up to 3 times.
    while True:
      try:
        mail.send_mail(**send_args)
        break
      except apiproxy_errors.DeadlineExceededError:
        # apiproxy_errors.DeadlineExceededError is raised when the
        # deadline of an API call is reached (e.g. for mail it's
        # something about 5 seconds). It's not the same as the lethal
        # runtime.DeadlineExeededError.
        attempts += 1
        if attempts >= 3:
          raise
    if attempts:
      logging.warning("Retried sending email %s times", attempts)
  return msg
@post_required
@login_required
@xsrf_required
@issue_required
def star(request):
  """Add a star to an Issue.

  Appends the issue's id to the current account's stars list (creating
  the list on first use) and re-renders the star widget.
  """
  account = models.Account.current_user_account
  account.user_has_selected_nickname()  # This will preserve account.fresh.
  if account.stars is None:
    account.stars = []
  # Renamed from 'id' to avoid shadowing the builtin id().
  issue_id = request.issue.key().id()
  if issue_id not in account.stars:
    account.stars.append(issue_id)
    account.put()
  return respond(request, 'issue_star.html', {'issue': request.issue})
@post_required
@login_required
@issue_required
@xsrf_required
def unstar(request):
  """Remove the star from an Issue.

  Removes every occurrence of the issue's id from the account's stars
  list and re-renders the star widget.
  """
  account = models.Account.current_user_account
  account.user_has_selected_nickname()  # This will preserve account.fresh.
  if account.stars is None:
    account.stars = []
  # Renamed from 'id' to avoid shadowing the builtin id().
  issue_id = request.issue.key().id()
  if issue_id in account.stars:
    account.stars[:] = [i for i in account.stars if i != issue_id]
    account.put()
  return respond(request, 'issue_star.html', {'issue': request.issue})
@login_required
@issue_required
def draft_message(request):
  """/<issue>/draft_message - Retrieve, modify and delete draft messages.

  Note: creating or editing draft messages is *not* XSRF-protected,
  because it is not unusual to come back after hours; the XSRF tokens
  time out after 1 or 2 hours. The final submit of the drafts for
  others to view *is* XSRF-protected.
  """
  query = models.Message.gql(('WHERE issue = :1 AND sender = :2 '
                              'AND draft = TRUE'),
                             request.issue, request.user.email())
  # get() returns None when there is no draft, so the extra count()
  # round-trip the old code performed first was redundant.
  draft_message = query.get()
  if request.method == 'GET':
    return _get_draft_message(request, draft_message)
  elif request.method == 'POST':
    return _post_draft_message(request, draft_message)
  elif request.method == 'DELETE':
    return _delete_draft_message(request, draft_message)
  return HttpResponse('An error occurred.', content_type='text/plain',
                      status=500)
def _get_draft_message(request, draft):
  """Handles GET requests to /<issue>/draft_message.

  Arguments:
    request: The current request.
    draft: A Message instance or None.

  Returns the content of a draft message or an empty string if draft is None.
  """
  text = '' if draft is None else draft.text
  return HttpResponse(text, content_type='text/plain')
def _post_draft_message(request, draft):
  """Handles POST requests to /<issue>/draft_message.

  Creates a fresh draft Message when *draft* is None, then stores the
  posted 'reviewmsg' text and saves the entity.

  Arguments:
    request: The current request.
    draft: A Message instance or None.
  """
  if draft is None:
    draft = models.Message(issue=request.issue, parent=request.issue,
                           sender=request.user.email(), draft=True)
  draft.text = request.POST.get('reviewmsg')
  draft.put()
  return HttpResponse(draft.text, content_type='text/plain')
def _delete_draft_message(request, draft):
  """Handles DELETE requests to /<issue>/draft_message.

  Deletes the draft message; a missing draft is not treated as an error.

  Arguments:
    request: The current request.
    draft: A Message instance or None.
  """
  if draft is not None:
    draft.delete()
  return HttpResponse('OK', content_type='text/plain')
@json_response
def search(request):
  """/search - Search for issues or patchset.

  GET with invalid/empty parameters renders the search form; otherwise
  builds a filtered Issue query and returns either paginated HTML
  (format='html') or a dict for @json_response with a continuation
  'cursor' and 'results' (issue dicts, or bare ids when keys_only).
  """
  if request.method == 'GET':
    form = SearchForm(request.GET)
    if not form.is_valid() or not request.GET:
      return respond(request, 'search.html', {'form': form})
  else:
    form = SearchForm(request.POST)
    if not form.is_valid():
      return HttpResponseBadRequest('Invalid arguments',
                                    content_type='text/plain')
  logging.info('%s', form.cleaned_data)
  keys_only = form.cleaned_data['keys_only'] or False
  format = form.cleaned_data.get('format') or 'html'
  if format == 'html':
    # HTML rendering needs the full entities.
    keys_only = False
  q = models.Issue.all(keys_only=keys_only)
  if form.cleaned_data.get('cursor'):
    q.with_cursor(form.cleaned_data['cursor'])
  if form.cleaned_data.get('closed') is not None:
    q.filter('closed = ', form.cleaned_data['closed'])
  if form.cleaned_data.get('owner'):
    q.filter('owner = ', form.cleaned_data['owner'])
  if form.cleaned_data.get('reviewer'):
    q.filter('reviewers = ', form.cleaned_data['reviewer'])
  if form.cleaned_data.get('private') is not None:
    q.filter('private = ', form.cleaned_data['private'])
  if form.cleaned_data.get('base'):
    q.filter('base = ', form.cleaned_data['base'])
  # Update the cursor value in the result.
  if format == 'html':
    nav_params = dict(
        (k, v) for k, v in form.cleaned_data.iteritems() if v is not None)
    return _paginate_issues_with_cursor(
        reverse(search),
        request,
        q,
        form.cleaned_data['limit'] or DEFAULT_LIMIT,
        'search_results.html',
        extra_nav_parameters=nav_params)
  results = q.fetch(form.cleaned_data['limit'] or 100)
  form.cleaned_data['cursor'] = q.cursor()
  if keys_only:
    # There's not enough information to filter. The only thing that is leaked
    # is the issue's key.
    filtered_results = results
  else:
    filtered_results = [i for i in results if _can_view_issue(request.user, i)]
  data = {
    'cursor': form.cleaned_data['cursor'],
  }
  if keys_only:
    data['results'] = [i.id() for i in filtered_results]
  else:
    messages = form.cleaned_data['with_messages']
    # BUG FIX: a stray trailing comma here previously wrapped the list in
    # a 1-tuple, corrupting the JSON 'results' payload.
    data['results'] = [_issue_as_dict(i, messages, request)
                       for i in filtered_results]
  return data
### Repositories and Branches ###
def repos(request):
  """/repos - Show the list of known Subversion repositories."""
  # Clean up garbage created by buggy edits
  bad_branches = list(models.Branch.gql('WHERE owner = :1', None))
  if bad_branches:
    db.delete(bad_branches)
  # Map stringified repository keys to entities for fast joins below.
  repo_map = dict((str(repo.key()), repo)
                  for repo in models.Repository.all())
  branches = []
  for branch in models.Branch.all():
    # Attach the owning repository so the template can show its name.
    branch.repository = repo_map[str(branch._repo)]
    branches.append(branch)
  # Sort case-insensitively by repository, then category, then branch name.
  branches.sort(key=lambda b: map(
      unicode.lower, (b.repository.name, b.category, b.name)))
  return respond(request, 'repos.html', {'branches': branches})
@login_required
@xsrf_required
def repo_new(request):
  """/repo_new - Create a new Subversion repository record.

  On success a default '*trunk*' Branch pointing at <url>/trunk/ is
  created alongside the repository.
  """
  if request.method != 'POST':
    form = RepoForm()
    return respond(request, 'repo_new.html', {'form': form})
  form = RepoForm(request.POST)
  # Accessing form.errors triggers validation; save() failures are folded
  # into the same dict so a single respond() handles both error kinds.
  errors = form.errors
  if not errors:
    try:
      repo = form.save(commit=False)
    except ValueError, err:
      errors['__all__'] = unicode(err)
  if errors:
    return respond(request, 'repo_new.html', {'form': form})
  repo.put()
  branch_url = repo.url
  if not branch_url.endswith('/'):
    branch_url += '/'
  branch_url += 'trunk/'
  branch = models.Branch(repo=repo, repo_name=repo.name,
                         category='*trunk*', name='Trunk',
                         url=branch_url)
  branch.put()
  return HttpResponseRedirect(reverse(repos))
# Seed data used by repo_init() below to register the well-known Python
# Subversion repository and its main branches.
SVN_ROOT = 'http://svn.python.org/view/*checkout*/python/'
BRANCHES = [
    # category, name, url suffix
    ('*trunk*', 'Trunk', 'trunk/'),
    ('branch', '2.5', 'branches/release25-maint/'),
    ('branch', 'py3k', 'branches/py3k/'),
    ]
# TODO: Make this a POST request to avoid XSRF attacks.
@admin_required
def repo_init(request):
  """/repo_init - Initialize the list of known Subversion repositories.

  Idempotent: creates the Python repository and any BRANCHES entries
  that do not exist yet.
  """
  python = models.Repository.gql("WHERE name = 'Python'").get()
  if python is None:
    python = models.Repository(name='Python', url=SVN_ROOT)
    python.put()
    pybranches = []
  else:
    pybranches = list(models.Branch.gql('WHERE repo = :1', python))
  for category, name, suffix in BRANCHES:
    url = python.url + suffix
    already_known = any(
        (br.category, br.name, br.url) == (category, name, url)
        for br in pybranches)
    if not already_known:
      models.Branch(repo=python, repo_name='Python',
                    category=category, name=name, url=url).put()
  return HttpResponseRedirect(reverse(repos))
@login_required
@xsrf_required
def branch_new(request, repo_id):
  """/branch_new/<repo> - Add a new Branch to a Repository record."""
  repo = models.Repository.get_by_id(int(repo_id))
  if request.method != 'POST':
    # XXX Use repo.key() so that the default gets picked up
    form = BranchForm(initial={'repo': repo.key(),
                               'url': repo.url,
                               'category': 'branch',
                               })
    return respond(request, 'branch_new.html', {'form': form, 'repo': repo})
  form = BranchForm(request.POST)
  # Accessing form.errors triggers validation; save() failures are folded
  # into the same dict so a single respond() handles both error kinds.
  errors = form.errors
  if not errors:
    try:
      branch = form.save(commit=False)
    except ValueError, err:
      errors['__all__'] = unicode(err)
  if errors:
    return respond(request, 'branch_new.html', {'form': form, 'repo': repo})
  branch.repo_name = repo.name
  branch.put()
  return HttpResponseRedirect(reverse(repos))
@login_required
@xsrf_required
def branch_edit(request, branch_id):
  """/branch_edit/<branch> - Edit a Branch record.

  Only the branch owner may edit; others get a 403.
  """
  branch = models.Branch.get_by_id(int(branch_id))
  if branch.owner != request.user:
    return HttpResponseForbidden('You do not own this branch')
  if request.method != 'POST':
    form = BranchForm(instance=branch)
    return respond(request, 'branch_edit.html',
                   {'branch': branch, 'form': form})
  form = BranchForm(request.POST, instance=branch)
  # Accessing form.errors triggers validation; save() failures are folded
  # into the same dict so a single respond() handles both error kinds.
  errors = form.errors
  if not errors:
    try:
      branch = form.save(commit=False)
    except ValueError, err:
      errors['__all__'] = unicode(err)
  if errors:
    return respond(request, 'branch_edit.html',
                   {'branch': branch, 'form': form})
  branch.repo_name = branch.repo.name
  branch.put()
  return HttpResponseRedirect(reverse(repos))
@post_required
@login_required
@xsrf_required
def branch_delete(request, branch_id):
  """/branch_delete/<branch> - Delete a Branch record.

  Deleting the last branch of a repository also deletes the repository.
  """
  branch = models.Branch.get_by_id(int(branch_id))
  if branch.owner != request.user:
    return HttpResponseForbidden('You do not own this branch')
  repo = branch.repo
  branch.delete()
  remaining = models.Branch.gql('WHERE repo = :1', repo).count()
  if remaining == 0:
    # Even if we don't own the repository? Yes, I think so! Empty
    # repositories have no representation on screen.
    repo.delete()
  return HttpResponseRedirect(reverse(repos))
### User Profiles ###
@login_required
@xsrf_required
def settings(request):
  """/settings - Show and process the current user's account settings."""
  account = models.Account.current_user_account
  if request.method != 'POST':
    nickname = account.nickname
    default_context = account.default_context
    default_column_width = account.default_column_width
    form = SettingsForm(initial={'nickname': nickname,
                                 'context': default_context,
                                 'column_width': default_column_width,
                                 'notify_by_email': account.notify_by_email,
                                 'notify_by_chat': account.notify_by_chat,
                                 })
    chat_status = None
    if account.notify_by_chat:
      # Best-effort XMPP presence probe; failures only affect the display.
      try:
        presence = xmpp.get_presence(account.email)
      except Exception, err:
        logging.error('Exception getting XMPP presence: %s', err)
        chat_status = 'Error (%s)' % err
      else:
        if presence:
          chat_status = 'online'
        else:
          chat_status = 'offline'
    return respond(request, 'settings.html', {'form': form,
                                              'chat_status': chat_status})
  form = SettingsForm(request.POST)
  if form.is_valid():
    account.nickname = form.cleaned_data.get('nickname')
    account.default_context = form.cleaned_data.get('context')
    account.default_column_width = form.cleaned_data.get('column_width')
    account.notify_by_email = form.cleaned_data.get('notify_by_email')
    notify_by_chat = form.cleaned_data.get('notify_by_chat')
    # Send an XMPP invite only when chat notification is newly enabled.
    must_invite = notify_by_chat and not account.notify_by_chat
    account.notify_by_chat = notify_by_chat
    account.fresh = False
    account.put()
    if must_invite:
      logging.info('Sending XMPP invite to %s', account.email)
      try:
        xmpp.send_invite(account.email)
      except Exception, err:
        # XXX How to tell user it failed?
        logging.error('XMPP invite to %s failed', account.email)
  else:
    return respond(request, 'settings.html', {'form': form})
  return HttpResponseRedirect(reverse(mine))
@post_required
@login_required
@xsrf_required
def account_delete(request):
  """/account_delete - Delete the current user's Account record."""
  models.Account.current_user_account.delete()
  # Log the user out and land them back on the index page.
  return HttpResponseRedirect(users.create_logout_url(reverse(index)))
@user_key_required
def user_popup(request):
  """/user_popup - Pop up to show the user info."""
  try:
    return _user_popup(request)
  except Exception as err:
    logging.exception('Exception in user_popup processing:')
    # Return HttpResponse because the JS part expects a 200 status code.
    return HttpResponse('<font color="red">Error: %s; please report!</font>' %
                        err.__class__.__name__)
def _user_popup(request):
  """Render the user-info popup fragment, memcached for 60 seconds."""
  user = request.user_to_show
  cache_key = 'user_popup:' + user.email()
  popup_html = memcache.get(cache_key)
  if popup_html is None:
    num_issues_created = db.GqlQuery(
        'SELECT * FROM Issue '
        'WHERE closed = FALSE AND owner = :1',
        user).count()
    num_issues_reviewed = db.GqlQuery(
        'SELECT * FROM Issue '
        'WHERE closed = FALSE AND reviewers = :1',
        user.email()).count()
    user.nickname = models.Account.get_nickname_for_email(user.email())
    popup_html = render_to_response(
        'user_popup.html',
        {'user': user,
         'num_issues_created': num_issues_created,
         'num_issues_reviewed': num_issues_reviewed,
         },
        context_instance=RequestContext(request))
    # Use time expired cache because the number of issues will change over time
    memcache.add(cache_key, popup_html, 60)
  return popup_html
@post_required
def incoming_chat(request):
  """/_ah/xmpp/message/chat/

  This handles incoming XMPP (chat) messages.

  Just reply saying we ignored the chat.  Always returns an empty 200
  response so App Engine does not retry delivery.
  """
  try:
    msg = xmpp.Message(request.POST)
  except xmpp.InvalidMessageError, err:
    # Malformed XMPP payload: log and drop it.
    logging.warn('Incoming invalid chat message: %s' % err)
    return HttpResponse('')
  sts = msg.reply('Sorry, Rietveld does not support chat input')
  logging.debug('XMPP status %r', sts)
  return HttpResponse('')
@post_required
def incoming_mail(request, recipients):
  """/_ah/mail/(.*)

  Handle incoming mail messages.

  A Message entity is stored on the matching issue and the sender may be
  added to the issue's reviewers (see _process_incoming_mail).  Invalid
  messages are logged and dropped; the response is always an empty 200 so
  App Engine does not retry delivery.
  """
  try:
    _process_incoming_mail(request.raw_post_data, recipients)
  except InvalidIncomingEmailError, err:
    logging.debug(str(err))
  return HttpResponse('')
def _process_incoming_mail(raw_message, recipients):
  """Process an incoming email message.

  Args:
    raw_message: The raw RFC-2822 message text.
    recipients: The raw recipients header value (parsed with
      email.utils.getaddresses()).

  Raises:
    InvalidIncomingEmailError: if the message was sent by App Engine
      itself, carries no "(issue NNN)" subject marker, references an
      unknown issue, or has an empty text/plain body.

  Side effects:
    Stores a models.Message on the issue; if the sender is not already the
    owner, a reviewer or a CC, appends the sender to issue.reviewers.
  """
  recipients = [x[1] for x in email.utils.getaddresses([recipients])]

  incoming_msg = mail.InboundEmailMessage(raw_message)

  if 'X-Google-Appengine-App-Id' in incoming_msg.original:
    raise InvalidIncomingEmailError('Mail sent by App Engine')

  subject = incoming_msg.subject or ''
  match = re.search(r'\(issue *(?P<id>\d+)\)$', subject)
  if match is None:
    # Format the message here: the two-argument form produced a tuple repr
    # when the caller logged str(err), unlike every other raise below.
    raise InvalidIncomingEmailError('No issue id found: %s' % subject)
  issue_id = int(match.groupdict()['id'])
  issue = models.Issue.get_by_id(issue_id)
  if issue is None:
    raise InvalidIncomingEmailError('Unknown issue ID: %d' % issue_id)
  sender = email.utils.parseaddr(incoming_msg.sender)[1]

  # Use the first text/plain body part only.
  body = None
  for content_type, payload in incoming_msg.bodies('text/plain'):
    body = payload.decode()
    break
  if body is None or not body.strip():
    raise InvalidIncomingEmailError('Ignoring empty message.')
  elif len(body) > django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE:
    # see issue325, truncate huge bodies
    trunc_msg = '... (message truncated)'
    body = body[:django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE - len(trunc_msg)]
    body += trunc_msg

  # If the subject is long, this might come wrapped into more than one line.
  subject = ' '.join([x.strip() for x in subject.splitlines()])
  msg = models.Message(issue=issue, parent=issue,
                       subject=subject,
                       sender=db.Email(sender),
                       recipients=[db.Email(x) for x in recipients],
                       date=datetime.datetime.now(),
                       text=db.Text(body),
                       draft=False)
  msg.put()

  # Add sender to reviewers if needed.
  all_emails = [str(x).lower()
                for x in [issue.owner.email()]+issue.reviewers+issue.cc]
  if sender.lower() not in all_emails:
    query = models.Account.all().filter('lower_email =', sender.lower())
    account = query.get()
    if account is not None:
      issue.reviewers.append(account.email)  # e.g. account.email is CamelCase
    else:
      issue.reviewers.append(db.Email(sender))
    issue.put()
@login_required
def xsrf_token(request):
  """/xsrf_token - Return the user's XSRF token.

  This is used by tools like git-cl that need to be able to interact with the
  site on the user's behalf. A custom header named X-Requesting-XSRF-Token must
  be included in the HTTP request; an error is returned otherwise.
  """
  # dict.has_key() is deprecated; membership test via 'in' is the idiom.
  if 'HTTP_X_REQUESTING_XSRF_TOKEN' not in request.META:
    return HttpResponse('Please include a header named X-Requesting-XSRF-Token '
                        '(its content doesn\'t matter).', status=400)
  return HttpResponse(models.Account.current_user_account.get_xsrf_token(),
                      mimetype='text/plain')
def customized_upload_py(request):
  """/static/upload.py - Return patched upload.py with appropriate auth type
  and default review server setting.

  This is used to let the user download a customized upload.py script
  for hosted Rietveld instances.
  """
  f = open(django_settings.UPLOAD_PY_SOURCE)
  source = f.read()
  f.close()

  # When served from a Google Apps instance, the account namespace needs to be
  # switched to "Google Apps only".
  if ('AUTH_DOMAIN' in request.META
      and request.META['AUTH_DOMAIN'] != 'gmail.com'):
    source = source.replace('AUTH_ACCOUNT_TYPE = "GOOGLE"',
                            'AUTH_ACCOUNT_TYPE = "HOSTED"')

  # On a non-standard instance, the default review server is changed to the
  # current hostname. This might give weird results when using versioned appspot
  # URLs (eg. 1.latest.codereview.appspot.com), but this should only affect
  # testing.
  if request.META['HTTP_HOST'] != 'codereview.appspot.com':
    review_server = request.META['HTTP_HOST']
    if request.is_secure():
      review_server = 'https://' + review_server
    source = source.replace('DEFAULT_REVIEW_SERVER = "codereview.appspot.com"',
                            'DEFAULT_REVIEW_SERVER = "%s"' % review_server)

  return HttpResponse(source, content_type='text/x-python')
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Diff rendering in HTML for Rietveld."""
# Python imports
import re
import cgi
import difflib
import logging
import urlparse
# AppEngine imports
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import db
# Django imports
from django.template import loader, RequestContext
# Local imports
import models
import patching
import intra_region_diff
class FetchError(Exception):
  """Exception raised by FetchBase() when a URL problem occurs."""
# NOTE: The SplitPatch function is duplicated in upload.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  pieces = []
  current_name = None
  current_lines = []
  for raw_line in data.splitlines(True):
    started_name = None
    if raw_line.startswith('Index:'):
      # "Index: path/to/file" introduces the diff for a new file.
      started_name = raw_line.split(':', 1)[1].strip()
    elif raw_line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows.  Normalize to '/'
      # so the same file does not show up twice.
      candidate = raw_line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # File has property changes but no modifications: start a new piece.
        started_name = candidate
    if started_name:
      # Flush the previous file's diff before starting the next one.
      if current_name and current_lines:
        pieces.append((current_name, ''.join(current_lines)))
      current_name = started_name
      current_lines = [raw_line]
    else:
      current_lines.append(raw_line)
  if current_name and current_lines:
    pieces.append((current_name, ''.join(current_lines)))
  return pieces
def ParsePatchSet(patchset):
  """Parse a patch set into individual patches.

  Args:
    patchset: a models.PatchSet instance.

  Returns:
    A list of models.Patch instances, one per file in the patch set.
  """
  return [models.Patch(patchset=patchset, text=ToText(text),
                       filename=filename, parent=patchset)
          for filename, text in SplitPatch(patchset.data)]
def FetchBase(base, patch):
  """Fetch the content of the file to which the file is relative.

  Args:
    base: the base property of the Issue to which the Patch belongs.
    patch: a models.Patch instance.

  Returns:
    A models.Content instance.

  Raises:
    FetchError: For any kind of problem fetching the content.
  """
  filename, lines = patch.filename, patch.lines
  rev = patching.ParseRevision(lines)
  if rev is not None:
    if rev == 0:
      # rev=0 means it's a new file.
      return models.Content(text=db.Text(u''), parent=patch)

  # AppEngine can only fetch URLs that db.Link() thinks are OK,
  # so try converting to a db.Link() here.
  try:
    base = db.Link(base)
  except db.BadValueError:
    msg = 'Invalid base URL for fetching: %s' % base
    logging.warn(msg)
    raise FetchError(msg)

  url = _MakeUrl(base, filename, rev)
  logging.info('Fetching %s', url)
  try:
    result = urlfetch.fetch(url)
  except Exception, err:
    # urlfetch can raise a variety of errors; wrap them all in FetchError.
    msg = 'Error fetching %s: %s: %s' % (url, err.__class__.__name__, err)
    logging.warn('FetchBase: %s', msg)
    raise FetchError(msg)
  if result.status_code != 200:
    msg = 'Error fetching %s: HTTP status %s' % (url, result.status_code)
    logging.warn('FetchBase: %s', msg)
    raise FetchError(msg)
  # Normalize line endings before storing the fetched base content.
  return models.Content(text=ToText(UnifyLinebreaks(result.content)),
                        parent=patch)
def _MakeUrl(base, filename, rev):
"""Helper for FetchBase() to construct the URL to fetch.
Args:
base: The base property of the Issue to which the Patch belongs.
filename: The filename property of the Patch instance.
rev: Revision number, or None for head revision.
Returns:
A URL referring to the given revision of the file.
"""
scheme, netloc, path, params, query, fragment = urlparse.urlparse(base)
if netloc.endswith(".googlecode.com"):
# Handle Google code repositories
if rev is None:
raise FetchError("Can't access googlecode.com without a revision")
if not path.startswith("/svn/"):
raise FetchError( "Malformed googlecode.com URL (%s)" % base)
path = path[5:] # Strip "/svn/"
url = "%s://%s/svn-history/r%d/%s/%s" % (scheme, netloc, rev,
path, filename)
return url
elif netloc.endswith("sourceforge.net") and rev is not None:
if path.strip().endswith("/"):
path = path.strip()[:-1]
else:
path = path.strip()
splitted_path = path.split("/")
url = "%s://%s/%s/!svn/bc/%d/%s/%s" % (scheme, netloc,
"/".join(splitted_path[1:3]), rev,
"/".join(splitted_path[3:]),
filename)
return url
# Default for viewvc-based URLs (svn.python.org)
url = base
if not url.endswith('/'):
url += '/'
url += filename
if rev is not None:
url += '?rev=%s' % rev
return url
# Number of context lines shown around a change before contraction kicks in.
DEFAULT_CONTEXT = 10
# Default and allowed bounds for the user-configurable diff column width.
DEFAULT_COLUMN_WIDTH = 80
MIN_COLUMN_WIDTH = 3
MAX_COLUMN_WIDTH = 2000
def RenderDiffTableRows(request, old_lines, chunks, patch,
                        colwidth=DEFAULT_COLUMN_WIDTH, debug=False,
                        context=DEFAULT_CONTEXT):
  """Render the HTML table rows for a side-by-side diff for a patch.

  Args:
    request: Django Request object.
    old_lines: List of lines representing the original file.
    chunks: List of chunks as returned by patching.ParsePatchToChunks().
    patch: A models.Patch instance.
    colwidth: Optional column width (default DEFAULT_COLUMN_WIDTH).
    debug: Optional debugging flag (default False).
    context: Maximum number of rows surrounding a change (default
      DEFAULT_CONTEXT).

  Yields:
    Strings, each of which represents the text rendering one complete
    pair of lines of the side-by-side diff, possibly including comments.
    Each yielded string may consist of several <tr> elements.
  """
  # Generate the raw rows, then contract long runs of unchanged lines.
  rows = _RenderDiffTableRows(request, old_lines, chunks, patch,
                              colwidth, debug)
  return _CleanupTableRowsGenerator(rows, context)
def RenderDiff2TableRows(request, old_lines, old_patch, new_lines, new_patch,
                         colwidth=DEFAULT_COLUMN_WIDTH, debug=False,
                         context=DEFAULT_CONTEXT):
  """Render the HTML table rows for a side-by-side diff between two patches.

  Args:
    request: Django Request object.
    old_lines: List of lines representing the patched file on the left.
    old_patch: The models.Patch instance corresponding to old_lines.
    new_lines: List of lines representing the patched file on the right.
    new_patch: The models.Patch instance corresponding to new_lines.
    colwidth: Optional column width (default DEFAULT_COLUMN_WIDTH).
    debug: Optional debugging flag (default False).
    context: Maximum number of visible context lines (default DEFAULT_CONTEXT).

  Yields:
    Strings, each of which represents the text rendering one complete
    pair of lines of the side-by-side diff, possibly including comments.
    Each yielded string may consist of several <tr> elements.
  """
  # Generate the raw rows, then contract long runs of unchanged lines.
  rows = _RenderDiff2TableRows(request, old_lines, old_patch,
                               new_lines, new_patch, colwidth, debug)
  return _CleanupTableRowsGenerator(rows, context)
def _CleanupTableRowsGenerator(rows, context):
"""Cleanup rows returned by _TableRowGenerator for output.
Args:
rows: List of tuples (tag, text)
context: Maximum number of visible context lines.
Yields:
Rows marked as 'equal' are possibly contracted using _ShortenBuffer().
Stops on rows marked as 'error'.
"""
buffer = []
for tag, text in rows:
if tag == 'equal':
buffer.append(text)
continue
else:
for t in _ShortenBuffer(buffer, context):
yield t
buffer = []
yield text
if tag == 'error':
yield None
break
if buffer:
for t in _ShortenBuffer(buffer, context):
yield t
def _ShortenBuffer(buffer, context):
"""Render a possibly contracted series of HTML table rows.
Args:
buffer: a list of strings representing HTML table rows.
context: Maximum number of visible context lines. If None all lines are
returned.
Yields:
If the buffer has fewer than 3 times context items, yield all
the items. Otherwise, yield the first context items, a single
table row representing the contraction, and the last context
items.
"""
if context is None or len(buffer) < 3*context:
for t in buffer:
yield t
else:
last_id = None
for t in buffer[:context]:
m = re.match('^<tr( name="hook")? id="pair-(?P<rowcount>\d+)">', t)
if m:
last_id = int(m.groupdict().get("rowcount"))
yield t
skip = len(buffer) - 2*context
expand_link = []
if skip > 3*context:
expand_link.append(('<a href="javascript:M_expandSkipped(%(before)d, '
'%(after)d, \'t\', %(skip)d)">'
'Expand %(context)d before'
'</a> | '))
expand_link.append(('<a href="javascript:M_expandSkipped(%(before)d, '
'%(after)d, \'a\', %(skip)d)">Expand all</a>'))
if skip > 3*context:
expand_link.append((' | '
'<a href="javascript:M_expandSkipped(%(before)d, '
'%(after)d, \'b\', %(skip)d)">'
'Expand %(context)d after'
'</a>'))
expand_link = ''.join(expand_link) % {'before': last_id+1,
'after': last_id+skip,
'skip': last_id,
'context': max(context, None)}
yield ('<tr id="skip-%d"><td colspan="2" align="center" '
'style="background:lightblue">'
'(...skipping <span id="skipcount-%d">%d</span> matching lines...) '
'<span id="skiplinks-%d">%s</span> '
'<span id="skiploading-%d" style="visibility:hidden;">Loading...</span>'
'</td></tr>\n' % (last_id, last_id, skip,
last_id, expand_link, last_id))
for t in buffer[-context:]:
yield t
def _RenderDiff2TableRows(request, old_lines, old_patch, new_lines, new_patch,
                          colwidth=DEFAULT_COLUMN_WIDTH, debug=False):
  """Internal version of RenderDiff2TableRows().

  Args:
    The same as for RenderDiff2TableRows.

  Yields:
    Tuples (tag, row) where tag is an indication of the row type.
  """
  old_dict = {}
  new_dict = {}
  # Collect right-side (left = FALSE) comments for each patch, keyed by
  # line number.  Drafts are only visible to their author.
  for patch, dct in [(old_patch, old_dict), (new_patch, new_dict)]:
    # XXX GQL doesn't support OR yet... Otherwise we'd be using that.
    for comment in models.Comment.gql(
        'WHERE patch = :1 AND left = FALSE ORDER BY date', patch):
      if comment.draft and comment.author != request.user:
        continue  # Only show your own drafts
      comment.complete(patch)
      lst = dct.setdefault(comment.lineno, [])
      lst.append(comment)
  # NOTE(review): both snapshots are 'new' — presumably because each side of
  # a diff2 is the patched (right-hand) version of its patch set; confirm
  # against the inline-comment form handling before changing.
  return _TableRowGenerator(old_patch, old_dict, len(old_lines)+1, 'new',
                            new_patch, new_dict, len(new_lines)+1, 'new',
                            _GenerateTriples(old_lines, new_lines),
                            colwidth, debug, request)
def _GenerateTriples(old_lines, new_lines):
"""Helper for _RenderDiff2TableRows yielding input for _TableRowGenerator.
Args:
old_lines: List of lines representing the patched file on the left.
new_lines: List of lines representing the patched file on the right.
Yields:
Tuples (tag, old_slice, new_slice) where tag is a tag as returned by
difflib.SequenceMatchser.get_opcodes(), and old_slice and new_slice
are lists of lines taken from old_lines and new_lines.
"""
sm = difflib.SequenceMatcher(None, old_lines, new_lines)
for tag, i1, i2, j1, j2 in sm.get_opcodes():
yield tag, old_lines[i1:i2], new_lines[j1:j2]
def _GetComments(request):
  """Helper that returns comments for a patch.

  Args:
    request: Django Request object; request.patch and request.user are read.

  Returns:
    A 2-tuple of (old, new) where old/new are dictionaries that holds comments
    for that file, mapping from line number to a list of Comment entities
    (old = left side, new = right side).
  """
  old_dict = {}
  new_dict = {}
  # XXX GQL doesn't support OR yet... Otherwise we'd be using
  # .gql('WHERE patch = :1 AND (draft = FALSE OR author = :2) ORDER BY data',
  #      patch, request.user)
  for comment in models.Comment.gql('WHERE patch = :1 ORDER BY date',
                                    request.patch):
    if comment.draft and comment.author != request.user:
      continue  # Only show your own drafts
    comment.complete(request.patch)
    # 'left' selects the old (left) column; otherwise the new (right) one.
    if comment.left:
      dct = old_dict
    else:
      dct = new_dict
    dct.setdefault(comment.lineno, []).append(comment)
  return old_dict, new_dict
def _RenderDiffTableRows(request, old_lines, chunks, patch,
                         colwidth=DEFAULT_COLUMN_WIDTH, debug=False):
  """Internal version of RenderDiffTableRows().

  Args:
    The same as for RenderDiffTableRows.

  Yields:
    Tuples (tag, row) where tag is an indication of the row type.
  """
  old_dict = {}
  new_dict = {}
  if patch:
    # Existing comments, keyed by line number, for each side of the diff.
    old_dict, new_dict = _GetComments(request)
  old_max, new_max = _ComputeLineCounts(old_lines, chunks)
  return _TableRowGenerator(patch, old_dict, old_max, 'old',
                            patch, new_dict, new_max, 'new',
                            patching.PatchChunks(old_lines, chunks),
                            colwidth, debug, request)
def _TableRowGenerator(old_patch, old_dict, old_max, old_snapshot,
                       new_patch, new_dict, new_max, new_snapshot,
                       triple_iterator, colwidth=DEFAULT_COLUMN_WIDTH,
                       debug=False, request=None):
  """Helper function to render side-by-side table rows.

  Args:
    old_patch: First models.Patch instance.
    old_dict: Dictionary with line numbers as keys and comments as values (left)
    old_max: Line count of the patch on the left.
    old_snapshot: A tag used in the comments form.
    new_patch: Second models.Patch instance.
    new_dict: Same as old_dict, but for the right side.
    new_max: Line count of the patch on the right.
    new_snapshot: A tag used in the comments form.
    triple_iterator: Iterator that yields (tag, old, new) triples.
    colwidth: Optional column width (default 80).
    debug: Optional debugging flag (default False).
    request: Optional Django Request object, passed down for rendering.

  Yields:
    Tuples (tag, row) where tag is an indication of the row type and
    row is an HTML fragment representing one or more <td> elements.
  """
  diff_params = intra_region_diff.GetDiffParams(dbg=debug)
  # Width of the line-number gutter: enough digits for the longer side.
  ndigits = 1 + max(len(str(old_max)), len(str(new_max)))
  indent = 1 + ndigits
  old_offset = new_offset = 0
  row_count = 0

  # Render a row with a message if a side is empty or both sides are equal.
  if old_patch == new_patch and (old_max == 0 or new_max == 0):
    if old_max == 0:
      msg_old = '(Empty)'
    else:
      msg_old = ''
    if new_max == 0:
      msg_new = '(Empty)'
    else:
      msg_new = ''
    yield '', ('<tr><td class="info">%s</td>'
               '<td class="info">%s</td></tr>' % (msg_old, msg_new))
  elif old_patch is None or new_patch is None:
    msg_old = msg_new = ''
    if old_patch is None:
      msg_old = '(no file at all)'
    if new_patch is None:
      msg_new = '(no file at all)'
    yield '', ('<tr><td class="info">%s</td>'
               '<td class="info">%s</td></tr>' % (msg_old, msg_new))
  elif old_patch != new_patch and old_patch.lines == new_patch.lines:
    yield '', ('<tr><td class="info" colspan="2">'
               '(Both sides are equal)</td></tr>')

  for tag, old, new in triple_iterator:
    if tag.startswith('error'):
      yield 'error', '<tr><td><h3>%s</h3></td></tr>\n' % cgi.escape(tag)
      return
    # Track absolute line offsets on each side as chunks are consumed.
    old1 = old_offset
    old_offset = old2 = old1 + len(old)
    new1 = new_offset
    new_offset = new2 = new1 + len(new)
    old_buff = []
    new_buff = []
    frag_list = []
    # Intra-region diff is only attempted for 'replace' chunks that
    # CanDoIRDiff() considers worthwhile.
    do_ir_diff = tag == 'replace' and intra_region_diff.CanDoIRDiff(old, new)

    for i in xrange(max(len(old), len(new))):
      row_count += 1
      old_lineno = old1 + i + 1
      new_lineno = new1 + i + 1
      # A side is 'valid' while it still has a real line in this chunk;
      # the shorter side is padded with blank cells.
      old_valid = old1+i < old2
      new_valid = new1+i < new2

      # Start rendering the first row
      frags = []
      if i == 0 and tag != 'equal':
        # Mark the first row of each non-equal chunk as a 'hook'.
        frags.append('<tr name="hook"')
      else:
        frags.append('<tr')
      frags.append(' id="pair-%d">' % row_count)

      old_intra_diff = ''
      new_intra_diff = ''
      if old_valid:
        old_intra_diff = old[i]
      if new_valid:
        new_intra_diff = new[i]

      frag_list.append(frags)
      if do_ir_diff:
        # Don't render yet. Keep saving state necessary to render the whole
        # region until we have encountered all the lines in the region.
        old_buff.append([old_valid, old_lineno, old_intra_diff])
        new_buff.append([new_valid, new_lineno, new_intra_diff])
      else:
        # We render line by line as usual if do_ir_diff is false
        old_intra_diff = intra_region_diff.Break(
            old_intra_diff, 0, colwidth, "\n" + " "*indent)
        new_intra_diff = intra_region_diff.Break(
            new_intra_diff, 0, colwidth, "\n" + " "*indent)
        # (text, has_newline, debug_info) is the shape _RenderDiffInternal
        # expects for each line.
        old_buff_out = [[old_valid, old_lineno,
                         (old_intra_diff, True, None)]]
        new_buff_out = [[new_valid, new_lineno,
                         (new_intra_diff, True, None)]]
        for tg, frag in _RenderDiffInternal(old_buff_out, new_buff_out,
                                            ndigits, tag, frag_list,
                                            do_ir_diff,
                                            old_dict, new_dict,
                                            old_patch, new_patch,
                                            old_snapshot, new_snapshot,
                                            colwidth, debug, request):
          yield tg, frag
        frag_list = []

    if do_ir_diff:
      # So this was a replace block which means that the whole region still
      # needs to be rendered.
      old_lines = [b[2] for b in old_buff]
      new_lines = [b[2] for b in new_buff]
      ret = intra_region_diff.IntraRegionDiff(old_lines, new_lines,
                                              diff_params)
      old_chunks, new_chunks, ratio = ret
      old_tag = 'old'
      new_tag = 'new'

      old_diff_out = intra_region_diff.RenderIntraRegionDiff(
          old_lines, old_chunks, old_tag, ratio,
          limit=colwidth, indent=indent, mark_tabs=True,
          dbg=debug)
      new_diff_out = intra_region_diff.RenderIntraRegionDiff(
          new_lines, new_chunks, new_tag, ratio,
          limit=colwidth, indent=indent, mark_tabs=True,
          dbg=debug)

      # Substitute the rendered intra-region output back into the buffers.
      for (i, b) in enumerate(old_buff):
        b[2] = old_diff_out[i]
      for (i, b) in enumerate(new_buff):
        b[2] = new_diff_out[i]

      for tg, frag in _RenderDiffInternal(old_buff, new_buff,
                                          ndigits, tag, frag_list,
                                          do_ir_diff,
                                          old_dict, new_dict,
                                          old_patch, new_patch,
                                          old_snapshot, new_snapshot,
                                          colwidth, debug, request):
        yield tg, frag
      old_buff = []
      new_buff = []
def _CleanupTableRows(rows):
"""Cleanup rows returned by _TableRowGenerator.
Args:
rows: Sequence of (tag, text) tuples.
Yields:
Rows marked as 'equal' are possibly contracted using _ShortenBuffer().
Stops on rows marked as 'error'.
"""
buffer = []
for tag, text in rows:
if tag == 'equal':
buffer.append(text)
continue
else:
for t in _ShortenBuffer(buffer):
yield t
buffer = []
yield text
if tag == 'error':
yield None
break
if buffer:
for t in _ShortenBuffer(buffer):
yield t
def _RenderDiffInternal(old_buff, new_buff, ndigits, tag, frag_list,
                        do_ir_diff, old_dict, new_dict,
                        old_patch, new_patch,
                        old_snapshot, new_snapshot,
                        colwidth, debug, request):
  """Helper for _TableRowGenerator().

  Renders one buffered region: for each line pair it emits the code row,
  an optional debug row, and (when a patch is present) the inline-comments
  row.  old_buff/new_buff entries are [valid, lineno, (text, has_newline,
  debug_info)] as prepared by _TableRowGenerator().

  Yields:
    Tuples (tag, html) where html contains one or more complete <tr>
    elements; tag gets a '_comment' suffix when the row has comments.
  """
  # Markup used to highlight the line number on 'replace' rows.
  obegin = (intra_region_diff.BEGIN_TAG %
            intra_region_diff.COLOR_SCHEME['old']['match'])
  nbegin = (intra_region_diff.BEGIN_TAG %
            intra_region_diff.COLOR_SCHEME['new']['match'])
  oend = intra_region_diff.END_TAG
  nend = oend
  user = users.get_current_user()

  for i in xrange(len(old_buff)):
    tg = tag
    old_valid, old_lineno, old_out = old_buff[i]
    new_valid, new_lineno, new_out = new_buff[i]
    old_intra_diff, old_has_newline, old_debug_info = old_out
    new_intra_diff, new_has_newline, new_debug_info = new_out

    # frag_list[i] already holds the opening '<tr ...>' for this pair.
    frags = frag_list[i]
    # Render left text column
    frags.append(_RenderDiffColumn(old_patch, old_valid, tag, ndigits,
                                   old_lineno, obegin, oend, old_intra_diff,
                                   do_ir_diff, old_has_newline, 'old'))
    # Render right text column
    frags.append(_RenderDiffColumn(new_patch, new_valid, tag, ndigits,
                                   new_lineno, nbegin, nend, new_intra_diff,
                                   do_ir_diff, new_has_newline, 'new'))
    # End rendering the first row
    frags.append('</tr>\n')

    if debug:
      # Extra row exposing the intra-region diff debug info.
      frags.append('<tr>')
      if old_debug_info:
        frags.append('<td class="debug-info">%s</td>' %
                     old_debug_info.replace('\n', '<br>'))
      else:
        frags.append('<td></td>')
      if new_debug_info:
        frags.append('<td class="debug-info">%s</td>' %
                     new_debug_info.replace('\n', '<br>'))
      else:
        frags.append('<td></td>')
      frags.append('</tr>\n')

    if old_patch or new_patch:
      # Start rendering the second row
      if ((old_valid and old_lineno in old_dict) or
          (new_valid and new_lineno in new_dict)):
        # Rows carrying comments are themselves navigation 'hooks'.
        tg += '_comment'
        frags.append('<tr class="inline-comments" name="hook">')
      else:
        frags.append('<tr class="inline-comments">')

      # Render left inline comments
      frags.append(_RenderInlineComments(old_valid, old_lineno, old_dict,
                                         user, old_patch, old_snapshot, 'old',
                                         request))
      # Render right inline comments
      frags.append(_RenderInlineComments(new_valid, new_lineno, new_dict,
                                         user, new_patch, new_snapshot, 'new',
                                         request))

      # End rendering the second row
      frags.append('</tr>\n')

    # Yield the combined fragments
    yield tg, ''.join(frags)
def _RenderDiffColumn(patch, line_valid, tag, ndigits, lineno, begin, end,
intra_diff, do_ir_diff, has_newline, prefix):
"""Helper function for _RenderDiffInternal().
Returns:
A rendered column.
"""
if line_valid:
cls_attr = '%s%s' % (prefix, tag)
if tag == 'equal':
lno = '%*d' % (ndigits, lineno)
else:
lno = _MarkupNumber(ndigits, lineno, 'u')
if tag == 'replace':
col_content = ('%s%s %s%s' % (begin, lno, end, intra_diff))
# If IR diff has been turned off or there is no matching new line at
# the end then switch to dark background CSS style.
if not do_ir_diff or not has_newline:
cls_attr = cls_attr + '1'
else:
col_content = '%s %s' % (lno, intra_diff)
return '<td class="%s" id="%scode%d">%s</td>' % (cls_attr, prefix,
lineno, col_content)
else:
return '<td class="%sblank"></td>' % prefix
def _RenderInlineComments(line_valid, lineno, data, user,
patch, snapshot, prefix, request):
"""Helper function for _RenderDiffInternal().
Returns:
Rendered comments.
"""
comments = []
if line_valid:
comments.append('<td id="%s-line-%s">' % (prefix, lineno))
if lineno in data:
comments.append(
_ExpandTemplate('inline_comment.html',
request,
user=user,
patch=patch,
patchset=patch.patchset,
issue=patch.patchset.issue,
snapshot=snapshot,
side='a' if prefix == 'old' else 'b',
comments=data[lineno],
lineno=lineno,
))
comments.append('</td>')
else:
comments.append('<td></td>')
return ''.join(comments)
def RenderUnifiedTableRows(request, parsed_lines):
  """Render the HTML table rows for a unified diff for a patch.

  Args:
    request: Django Request object; request.patch and request.user are read.
    parsed_lines: List of tuples for each line that contain the line number,
      if they exist, for the old and new file.

  Returns:
    A list of html table rows.
  """
  old_dict, new_dict = _GetComments(request)

  rows = []
  for old_line_no, new_line_no, line_text in parsed_lines:
    row1_id = row2_id = ''
    # When a line is unchanged (i.e. both old_line_no and new_line_no aren't 0)
    # pick the old column line numbers when adding a comment.
    if old_line_no:
      row1_id = 'id="oldcode%d"' % old_line_no
      row2_id = 'id="old-line-%d"' % old_line_no
    elif new_line_no:
      row1_id = 'id="newcode%d"' % new_line_no
      row2_id = 'id="new-line-%d"' % new_line_no
    # The leading diff character selects the add/remove styling.
    if line_text[0] == '+':
      style = 'udiffadd'
    elif line_text[0] == '-':
      style = 'udiffremove'
    else:
      style = ''
    rows.append('<tr><td class="udiff %s" %s>%s</td></tr>' %
                (style, row1_id, cgi.escape(line_text)))

    # Second row: inline comments (old side wins when both sides have some).
    frags = []
    if old_line_no in old_dict or new_line_no in new_dict:
      frags.append('<tr class="inline-comments" name="hook">')
      if old_line_no in old_dict:
        dct = old_dict
        line_no = old_line_no
        snapshot = 'old'
      else:
        dct = new_dict
        line_no = new_line_no
        snapshot = 'new'
      frags.append(_RenderInlineComments(True, line_no, dct, request.user,
                   request.patch, snapshot, snapshot, request))
    else:
      frags.append('<tr class="inline-comments">')
      frags.append('<td ' + row2_id +'></td>')
    frags.append('</tr>')
    rows.append(''.join(frags))
  return rows
def _ComputeLineCounts(old_lines, chunks):
"""Compute the length of the old and new sides of a diff.
Args:
old_lines: List of lines representing the original file.
chunks: List of chunks as returned by patching.ParsePatchToChunks().
Returns:
A tuple (old_len, new_len) representing len(old_lines) and
len(new_lines), where new_lines is the list representing the
result of applying the patch chunks to old_lines, however, without
actually computing new_lines.
"""
old_len = len(old_lines)
new_len = old_len
if chunks:
(old_a, old_b), (new_a, new_b), old_lines, new_lines = chunks[-1]
new_len += new_b - old_b
return old_len, new_len
def _MarkupNumber(ndigits, number, tag):
"""Format a number in HTML in a given width with extra markup.
Args:
ndigits: the total width available for formatting
number: the number to be formatted
tag: HTML tag name, e.g. 'u'
Returns:
An HTML string that displays as ndigits wide, with the
number right-aligned and surrounded by an HTML tag; for example,
_MarkupNumber(42, 4, 'u') returns ' <u>42</u>'.
"""
formatted_number = str(number)
space_prefix = ' ' * (ndigits - len(formatted_number))
return '%s<%s>%s</%s>' % (space_prefix, tag, formatted_number, tag)
def _ExpandTemplate(name, request, **params):
  """Wrapper around django.template.loader.render_to_string().

  For convenience, this takes keyword arguments instead of a dict.
  """
  rendered = loader.render_to_string(name, params,
                                     context_instance=RequestContext(request))
  # Templates render to unicode; callers join byte strings.
  return rendered.encode('utf-8')
def ToText(text):
  """Helper to turn a string into a db.Text instance.

  Args:
    text: a string (str or unicode).

  Returns:
    A db.Text instance.
  """
  if isinstance(text, unicode):
    # A TypeError is raised if text is unicode and an encoding is given.
    return db.Text(text)
  else:
    try:
      return db.Text(text, encoding='utf-8')
    except UnicodeDecodeError:
      # Fall back to latin-1, which accepts any 8-bit input.
      return db.Text(text, encoding='latin-1')
def UnifyLinebreaks(text):
  """Helper to return a string with all line breaks converted to LF.

  Args:
    text: a string.

  Returns:
    A string with all line breaks converted to LF.
  """
  # Replace CRLF first so a Windows line ending does not become two LFs.
  without_crlf = text.replace('\r\n', '\n')
  return without_crlf.replace('\r', '\n')
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options] [path...]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
Perforce
CVS
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import ConfigParser
import cookielib
import errno
import fnmatch
import getpass
import logging
import marshal
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1

# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"

# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.appspot.com"

# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024

# Constants for version control names.  Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_PERFORCE = "Perforce"
VCS_CVS = "CVS"
VCS_UNKNOWN = "Unknown"

# Maps lowercase aliases (as accepted on the command line) to the
# canonical VCS names above.
VCS_ABBREVIATIONS = {
    VCS_MERCURIAL.lower(): VCS_MERCURIAL,
    "hg": VCS_MERCURIAL,
    VCS_SUBVERSION.lower(): VCS_SUBVERSION,
    "svn": VCS_SUBVERSION,
    VCS_PERFORCE.lower(): VCS_PERFORCE,
    "p4": VCS_PERFORCE,
    VCS_GIT.lower(): VCS_GIT,
    VCS_CVS.lower(): VCS_CVS,
}

# The result of parsing Subversion's [auto-props] setting; filled in lazily.
svn_auto_props_map = None
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a
  suggestion to the user. If the user presses enter without typing in
  anything the last used email address is used. If the user enters a new
  address, it is saved for next time we prompt.
  """
  cache_path = os.path.expanduser("~/.last_codereview_email_address")
  previous = ""
  if os.path.exists(cache_path):
    try:
      fh = open(cache_path, "r")
      previous = fh.readline().strip("\n")
      fh.close()
      prompt += " [%s]" % previous
    except IOError:
      # Best effort only: prompt without a suggestion.
      pass
  entered = raw_input(prompt + ": ").strip()
  if not entered:
    # Empty input means "keep the previous address".
    return previous
  try:
    fh = open(cache_path, "w")
    fh.write(entered)
    fh.close()
  except IOError:
    # Failing to persist the address for next time is not fatal.
    pass
  return entered
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit the process with status 1."""
  sys.stderr.write(msg + "\n")
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    # fp is None: there is no response body stream to expose for this error.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.args = args
    # ClientLogin reports the failure kind under the "Error" key; the
    # optional "Info" key carries extra detail (e.g. "InvalidSecondFactor").
    self.reason = args["Error"]
    self.info = args.get("Info", None)
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False, account_type=AUTH_ACCOUNT_TYPE):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
      account_type: Account type used for authentication. Defaults to
        AUTH_ACCOUNT_TYPE.
    """
    # NOTE(review): the mutable default for extra_headers is shared across
    # instances; it is never mutated here, so this is safe but fragile.
    self.host = host
    if (not self.host.startswith("http://") and
        not self.host.startswith("https://")):
      # Bare host[:port] given -- assume plain HTTP.
      self.host = "http://" + self.host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    self.account_type = account_type
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request, applying host_override and extra_headers."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email:    The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = self.account_type
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # The ClientLogin response body is newline-separated "key=value" pairs.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 carries a parseable error body; surface it as ClientLoginError.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener ignores redirects, so a 302 arrives as an HTTPError;
      # that is the expected success case (redirect to continue_location).
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response (or a 302) and
    directs us to authenticate ourselves with ClientLogin.
    """
    # Up to three attempts: each failed attempt falls through to the
    # "continue" at the bottom of the except block and re-prompts.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        print >>sys.stderr, ''
        if e.reason == "BadAuthentication":
          if e.info == "InvalidSecondFactor":
            print >>sys.stderr, (
                "Use an application-specific password instead "
                "of your regular account password.\n"
                "See http://www.google.com/"
                "support/accounts/bin/answer.py?answer=185833")
          else:
            print >>sys.stderr, "Invalid username or password."
        elif e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.\n"
              "If you are using a Google Apps account the URL is:\n"
              "https://www.google.com/a/yourdomain.com/UnlockCaptcha")
        elif e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
        elif e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
        elif e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
        elif e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          # A disabled account can never succeed: abandon the retry loop
          # without authenticating.
          break
        elif e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
        elif e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
        else:
          # Unknown error.
          raise
        print >>sys.stderr, ''
        continue
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           extra_headers=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      extra_headers: Dict containing additional HTTP headers that should be
        included in the request (string header names mapped to their values),
        or None to not include any additional headers.
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()

    # The timeout is applied process-wide via the socket default; restore
    # the previous value in the finally clause below.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        if extra_headers:
          for header, value in extra_headers.items():
            req.add_header(header, value)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401 or e.code == 302:
            # Auth cookie expired or missing: re-authenticate and retry.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          elif e.code == 301:
            # Handle permanent redirect manually.
            url = e.info()["location"]
            url_loc = urlparse.urlparse(url)
            self.host = '%s://%s' % (url_loc[0], url_loc[1])
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Redirects are deliberately NOT followed: _GetAuthCookie relies on seeing
    the raw 302 response.

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # A loadable cookie file means we may already hold a valid
          # session; skip re-authentication until the server rejects us.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file: it holds credentials.
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface definition. add_help_option=False because -h is
# re-added below as a store_true flag (presumably so help can be printed
# after full setup elsewhere in the file -- TODO confirm against main()).
parser = optparse.OptionParser(
    usage="%prog [options] [-- diff_options] [path...]",
    add_help_option=False
)
parser.add_option("-h", "--help", action="store_true",
                  help="Show this help message and exit.")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs.")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
group.add_option("--print_diffs", dest="print_diffs", action="store_true",
                 help="Print full diffs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default=DEFAULT_REVIEW_SERVER,
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
group.add_option("--account_type", action="store", dest="account_type",
                 metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
                 choices=["GOOGLE", "HOSTED"],
                 help=("Override the default account type "
                       "(defaults to '%default', "
                       "valid choices are 'GOOGLE' and 'HOSTED')."))
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
                 default=False,
                 help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
# dest defaults to "issue" (derived by optparse from the long option name).
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
                 help="Base repository URL (listed as \"Base URL\" when "
                      "viewing issue). If omitted, will be guessed "
                      "automatically for SVN repos and left blank for "
                      "others.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Base revision/branch/tree to diff against. Use "
                      "rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
group.add_option("-p", "--send_patch", action="store_true",
                 dest="send_patch", default=False,
                 help="Send notification email to reviewers, with a diff of "
                      "the changes included as an attachment instead of "
                      "inline. Also prepends 'PATCH:' to the email subject. "
                      "(implies --send_mail)")
group.add_option("--vcs", action="store", dest="vcs",
                 metavar="VCS", default=None,
                 help=("Version control system (optional, usually upload.py "
                       "already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
                 dest="emulate_svn_auto_props", default=False,
                 help=("Emulate Subversion's auto properties feature."))
# Perforce-specific
group = parser.add_option_group("Perforce-specific options "
                                "(overrides P4 environment variables)")
group.add_option("--p4_port", action="store", dest="p4_port",
                 metavar="P4_PORT", default=None,
                 help=("Perforce server and port (optional)"))
group.add_option("--p4_changelist", action="store", dest="p4_changelist",
                 metavar="P4_CHANGELIST", default=None,
                 help=("Perforce changelist id"))
group.add_option("--p4_client", action="store", dest="p4_client",
                 metavar="P4_CLIENT", default=None,
                 help=("Perforce client/workspace"))
group.add_option("--p4_user", action="store", dest="p4_user",
                 metavar="P4_USER", default=None,
                 help=("Perforce user"))
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if re.match(r'(http://)?localhost([:/]|$)', host):
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
def GetUserCredentials():
"""Prompts the user for a username and password."""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
global keyring
local_email = email
if local_email is None:
local_email = GetEmail("Email (login for uploading to %s)" % server)
password = None
if keyring:
try:
password = keyring.get_password(host, local_email)
except:
# Sadly, we have to trap all errors here as
# gnomekeyring.IOError inherits from object. :/
print "Failed to get password from keyring"
keyring = None
if password is not None:
print "Using password from system keyring."
else:
password = getpass.getpass("Password for %s: " % local_email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(host, local_email, password)
return (local_email, password)
return rpc_server_class(server,
GetUserCredentials,
host_override=host_override,
save_cookies=save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.
  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'

  def _as_bytes(value):
    # The body must be a byte string; encode unicode parts as UTF-8.
    if isinstance(value, unicode):
      return value.encode('utf-8')
    return value

  parts = []
  for (name, value) in fields:
    parts.append('--' + BOUNDARY)
    parts.append('Content-Disposition: form-data; name="%s"' % name)
    parts.append('')
    parts.append(_as_bytes(value))
  for (name, filename, value) in files:
    parts.append('--' + BOUNDARY)
    parts.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
                 (name, filename))
    parts.append('Content-Type: %s' % GetContentType(filename))
    parts.append('')
    parts.append(_as_bytes(value))
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  body = CRLF.join(parts)
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, body
def GetContentType(filename):
  """Guess the MIME content-type from *filename*, with a generic fallback."""
  guessed, _encoding = mimetypes.guess_type(filename)
  if guessed:
    return guessed
  return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (Passed as the shell= argument to subprocess.Popen below.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCodeAndStderr(command, print_output=False,
                                    universal_newlines=True,
                                    env=os.environ):
  """Executes a command and returns the output from stdout, stderr and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).
    env: Environment for the subprocess (defaults to this process's).

  Returns:
    Tuple (stdout, stderr, return code)
  """
  logging.info("Running %s", command)
  env = env.copy()
  # Force untranslated (English) tool messages: callers pattern-match on
  # them (e.g. the svn error regex in SubversionVCS.GetStatus).
  env['LC_MESSAGES'] = 'C'
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines,
                       env=env)
  if print_output:
    # Echo stdout line by line as it arrives, while also capturing it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # NOTE(review): stderr is read only after the process exits; a subprocess
  # that fills the stderr pipe buffer could deadlock here -- TODO confirm.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, errout, p.returncode
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True,
                           env=os.environ):
  """Executes a command and returns the output from stdout and the return code."""
  stdout, _stderr, exit_code = RunShellWithReturnCodeAndStderr(
      command, print_output, universal_newlines, env)
  return stdout, exit_code
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
  """Runs *command* and returns its stdout, exiting the program on failure.

  Exits via ErrorExit() on a nonzero return code, or on empty output
  unless silent_ok is True.
  """
  output, exit_code = RunShellWithReturnCode(command, print_output,
                                             universal_newlines, env)
  if exit_code:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not (silent_ok or output):
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""

  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options

  def PostProcessDiff(self, diff):
    """Return the diff with any special post processing this VCS needs, e.g.
    to include an svn-style "Index:"."""
    return diff

  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")

  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty. For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple. Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files

  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""

    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      # Oversized files are replaced by an empty body plus a marker field
      # so the server still records the file's existence.
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate("  --> %s" % response_body)
        sys.exit(1)

    # Invert patch_list into {filename: file_id_str}; setdefault keeps the
    # first id seen for a given filename.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker in the id string means the server does not want
      # the base file; strip everything up to the trailing "_<id>".
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)

  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")

  def IsBinaryData(self, data):
    """Returns true if data contains a null byte."""
    # Derived from how Mercurial's heuristic, see
    # http://selenic.com/hg/file/848a6658069e/mercurial/util.py#l229
    return bool(data and "\0" in data)
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
  super(SubversionVCS, self).__init__(options)
  if self.options.revision:
    # Accept "rev" or "rev1:rev2"; group(3) is the optional end revision.
    m = re.match(r"(\d+)(:(\d+))?", self.options.revision)
    if not m:
      ErrorExit("Invalid Subversion revision %s." % self.options.revision)
    self.rev_start = m.group(1)
    self.rev_end = m.group(3)
  else:
    self.rev_start = None
    self.rev_end = None
  # Cache output from "svn list -r REVNO dirname".
  # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
  self.svnls_cache = {}
  # Base URL is required to fetch files deleted in an older revision.
  # Result is cached to not guess it over and over again in GetBaseFile().
  required = self.options.download_base or self.options.revision is not None
  self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
  """Wrapper for _GuessBase.

  Returns the base URL cached by __init__ instead of re-running "svn info".
  The *required* argument is ignored here: the strictness decision was
  already made when the cache was filled.
  """
  return self.svn_base
def _GuessBase(self, required):
  """Returns base URL for current diff.

  Args:
    required: If true, exits if the url can't be guessed, otherwise None is
      returned.
  """
  info = RunShell(["svn", "info"])
  for line in info.splitlines():
    if line.startswith("URL: "):
      url = line.split()[1]
      scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
      guess = ""
      # Rewrite well-known ssh/read-only hosts to their public HTTP form.
      if netloc == "svn.python.org" and scheme == "svn+ssh":
        path = "projects" + path
        scheme = "http"
        guess = "Python "
      elif netloc.endswith(".googlecode.com"):
        scheme = "http"
        guess = "Google Code "
      path = path + "/"
      base = urlparse.urlunparse((scheme, netloc, path, params,
                                  query, fragment))
      logging.info("Guessed %sbase = %s", guess, base)
      return base
  if required:
    ErrorExit("Can't find URL in output from svn info")
  return None
def _EscapeFilename(self, filename):
  """Escapes filename for SVN commands.

  SVN treats "@" in a target as a peg-revision separator, so a filename
  containing "@" must itself be terminated with "@" to be taken literally.
  """
  if filename.endswith("@") or "@" not in filename:
    return filename
  return "%s@" % filename
def GenerateDiff(self, args):
  """Runs "svn diff" (plus any extra args) and returns its output.

  Exits via ErrorExit() if the output contains no file sections.
  """
  cmd = ["svn", "diff"]
  if self.options.revision:
    cmd.extend(["-r", self.options.revision])
  cmd.extend(args)
  data = RunShell(cmd)
  file_sections = 0
  for line in data.splitlines():
    if line.startswith("Index:") or line.startswith("Property changes on:"):
      file_sections += 1
      logging.info(line)
  if file_sections == 0:
    ErrorExit("No valid patches found in output from svn diff")
  return data
def _CollapseKeywords(self, content, keyword_str):
  """Collapses SVN keywords."""
  # svn cat translates keywords but svn diff doesn't. As a result of this
  # behavior patching.PatchChunks() fails with a chunk mismatch error.
  # This part was originally written by the Review Board development team
  # who had the same problem (http://reviews.review-board.org/r/276/).
  # Mapping of keywords to known aliases
  svn_keywords = {
      # Standard keywords
      'Date': ['Date', 'LastChangedDate'],
      'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
      'Author': ['Author', 'LastChangedBy'],
      'HeadURL': ['HeadURL', 'URL'],
      'Id': ['Id'],

      # Aliases
      'LastChangedDate': ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy': ['LastChangedBy', 'Author'],
      'URL': ['URL', 'HeadURL'],
  }

  def _collapse(match):
    # Fixed-length keywords ("$Kw:: value $") keep their padding so the
    # file layout is preserved; plain keywords collapse to "$Kw$".
    if match.group(2):
      return "$%s::%s$" % (match.group(1), " " * len(match.group(3)))
    return "$%s$" % match.group(1)

  # Expand the configured keyword names into every known alias.
  names = []
  for name in keyword_str.split(" "):
    names.extend(svn_keywords.get(name, []))
  return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(names), _collapse, content)
def GetUnknownFiles(self):
  """Returns the "svn status" lines for files not under version control."""
  status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
  # Unversioned entries are flagged with "?" in the first status column.
  return [line for line in status.split("\n")
          if line and line[0] == "?"]
def ReadFile(self, filename):
  """Returns the entire contents of *filename*, read in binary mode."""
  # Renamed the handle from "file" to avoid shadowing the builtin.
  f = open(filename, 'rb')
  try:
    return f.read()
  finally:
    f.close()
def GetStatus(self, filename):
  """Returns the status of a file."""
  if not self.options.revision:
    status = RunShell(["svn", "status", "--ignore-externals",
                       self._EscapeFilename(filename)])
    if not status:
      ErrorExit("svn status returned no output for %s" % filename)
    status_lines = status.splitlines()
    # If file is in a cl, the output will begin with
    # "\n--- Changelist 'cl_name':\n". See
    # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
    if (len(status_lines) == 3 and
        not status_lines[0] and
        status_lines[1].startswith("--- Changelist")):
      status = status_lines[2]
    else:
      status = status_lines[0]
  # If we have a revision to diff against we need to run "svn list"
  # for the old and the new revision and compare the results to get
  # the correct status for a file.
  else:
    dirname, relfilename = os.path.split(filename)
    if dirname not in self.svnls_cache:
      cmd = ["svn", "list", "-r", self.rev_start,
             self._EscapeFilename(dirname) or "."]
      out, err, returncode = RunShellWithReturnCodeAndStderr(cmd)
      if returncode:
        # Directory might not yet exist at start revision.
        # svn: Unable to find repository location for 'abc' in revision nnn
        # (this message is matched in English; see LC_MESSAGES='C' in
        # RunShellWithReturnCodeAndStderr)
        if re.match('^svn: Unable to find repository location for .+ in revision \d+', err):
          old_files = ()
        else:
          ErrorExit("Failed to get status for %s:\n%s" % (filename, err))
      else:
        old_files = out.splitlines()
      args = ["svn", "list"]
      if self.rev_end:
        args += ["-r", self.rev_end]
      cmd = args + [self._EscapeFilename(dirname) or "."]
      out, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        ErrorExit("Failed to run command %s" % cmd)
      self.svnls_cache[dirname] = (old_files, out.splitlines())
    old_files, new_files = self.svnls_cache[dirname]
    # Synthesize an svn-status-style string (4+ columns wide, since
    # GetBaseFile indexes status[3]) from presence in the two listings.
    if relfilename in old_files and relfilename not in new_files:
      status = "D   "
    elif relfilename in old_files and relfilename in new_files:
      status = "M   "
    else:
      status = "A   "
  return status
def GetBaseFile(self, filename):
  """Returns (base_content, new_content, is_binary, status) for |filename|.

  base_content is the file's content at the base revision (fetched via
  "svn cat" when needed), new_content is the working-copy content for
  binary images (other new content is reconstructed from the diff on the
  server side), is_binary follows Subversion's mime-type rule, and
  status is the first five columns of the svn status string.
  """
  status = self.GetStatus(filename)
  base_content = None
  new_content = None

  # If a file is copied its status will be "A  +", which signifies
  # "addition-with-history". See "svn st" for more information. We need to
  # upload the original file or else diff parsing will fail if the file was
  # edited.
  if status[0] == "A" and status[3] != "+":
    # We'll need to upload the new content if we're adding a binary file
    # since diff's output won't contain it.
    mimetype = RunShell(["svn", "propget", "svn:mime-type",
                         self._EscapeFilename(filename)], silent_ok=True)
    base_content = ""
    is_binary = bool(mimetype) and not mimetype.startswith("text/")
    if is_binary and self.IsImage(filename):
      new_content = self.ReadFile(filename)
  elif (status[0] in ("M", "D", "R") or
        (status[0] == "A" and status[3] == "+") or  # Copied file.
        (status[0] == " " and status[1] == "M")):  # Property change.
    args = []
    if self.options.revision:
      # filename must not be escaped. We already add an ampersand here.
      url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
    else:
      # Don't change filename, it's needed later.
      url = filename
      args += ["-r", "BASE"]
    cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
    mimetype, returncode = RunShellWithReturnCode(cmd)
    if returncode:
      # File does not exist in the requested revision.
      # Reset mimetype, it contains an error message.
      mimetype = ""
    else:
      mimetype = mimetype.strip()
    get_base = False
    # this test for binary is exactly the test prescribed by the
    # official SVN docs at
    # http://subversion.apache.org/faq.html#binary-files
    is_binary = (bool(mimetype) and
                 not mimetype.startswith("text/") and
                 mimetype not in ("image/x-xbitmap", "image/x-xpixmap"))
    if status[0] == " ":
      # Empty base content just to force an upload.
      base_content = ""
    elif is_binary:
      if self.IsImage(filename):
        get_base = True
        if status[0] == "M":
          if not self.rev_end:
            new_content = self.ReadFile(filename)
          else:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
            new_content = RunShell(["svn", "cat", url],
                                   universal_newlines=True, silent_ok=True)
      else:
        # Non-image binaries: server can't display them, upload nothing.
        base_content = ""
    else:
      get_base = True

    if get_base:
      # Binary content must not go through newline translation.
      if is_binary:
        universal_newlines = False
      else:
        universal_newlines = True
      if self.rev_start:
        # "svn cat -r REV delete_file.txt" doesn't work. cat requires
        # the full URL with "@REV" appended instead of using "-r" option.
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
        base_content = RunShell(["svn", "cat", url],
                                universal_newlines=universal_newlines,
                                silent_ok=True)
      else:
        base_content, ret_code = RunShellWithReturnCode(
          ["svn", "cat", self._EscapeFilename(filename)],
          universal_newlines=universal_newlines)
        if ret_code and status[0] == "R":
          # It's a replaced file without local history (see issue208).
          # The base file needs to be fetched from the server.
          url = "%s/%s" % (self.svn_base, filename)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        elif ret_code:
          ErrorExit("Got error status from 'svn cat %s'" % filename)
      if not is_binary:
        # Collapse svn:keywords (e.g. $Id$) in the base so it matches the
        # working copy's expanded-keyword diff context.
        args = []
        if self.rev_start:
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
        else:
          url = filename
          args += ["-r", "BASE"]
        cmd = ["svn"] + args + ["propget", "svn:keywords", url]
        keywords, returncode = RunShellWithReturnCode(cmd)
        if keywords and not returncode:
          base_content = self._CollapseKeywords(base_content, keywords)
  else:
    StatusUpdate("svn status returned unexpected output: %s" % status)
    sys.exit(1)
  return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> (hash before, hash after) of base file.
    # Hashes for "no such file" are represented as None.
    self.hashes = {}
    # Map of new filename -> old filename for renames.
    self.renames = {}

  def PostProcessDiff(self, gitdiff):
    """Converts the diff output to include an svn-style "Index:" line as well
    as record the hashes of the files, so we can upload them along with our
    diff."""
    # Special used by git to indicate "no such content".
    NULL_HASH = "0"*40

    def IsFileNew(filename):
      # A file is "new" when its before-hash was the null hash (None here).
      return filename in self.hashes and self.hashes[filename][0] is None

    def AddSubversionPropertyChange(filename):
      """Add svn's property change information into the patch if given file is
      new file.

      We use Subversion's auto-props setting to retrieve its property.
      See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
      Subversion's [auto-props] setting.
      """
      if self.options.emulate_svn_auto_props and IsFileNew(filename):
        svnprops = GetSubversionPropertyChanges(filename)
        if svnprops:
          svndiff.append("\n" + svnprops + "\n")

    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/(.*)$", line)
      if match:
        # Add auto property here for previously seen file.
        if filename is not None:
          AddSubversionPropertyChange(filename)
        filecount += 1
        # Intentionally use the "after" filename so we can show renames.
        filename = match.group(2)
        svndiff.append("Index: %s\n" % filename)
        if match.group(1) != match.group(2):
          self.renames[match.group(2)] = match.group(1)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.(\w+)", line)
        if match:
          before, after = (match.group(1), match.group(2))
          if before == NULL_HASH:
            before = None
          if after == NULL_HASH:
            after = None
          self.hashes[filename] = (before, after)
      # Every original diff line is kept; the Index: line is prepended.
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    # Add auto property for the last seen file.
    assert filename is not None
    AddSubversionPropertyChange(filename)
    return "".join(svndiff)

  def GenerateDiff(self, extra_args):
    extra_args = extra_args[:]
    if self.options.revision:
      if ":" in self.options.revision:
        extra_args = self.options.revision.split(":", 1) + extra_args
      else:
        extra_args = [self.options.revision] + extra_args

    # --no-ext-diff is broken in some versions of Git, so try to work around
    # this by overriding the environment (but there is still a problem if the
    # git config key "diff.external" is used).
    env = os.environ.copy()
    if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
    return RunShell(["git", "diff", "--no-ext-diff", "--full-index",
                     "--ignore-submodules", "-M"] + extra_args, env=env)

  def GetUnknownFiles(self):
    # Files not tracked and not ignored, one path per line.
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetFileContent(self, file_hash, is_binary):
    """Returns the content of a file identified by its git hash."""
    data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
                                            universal_newlines=not is_binary)
    if retcode:
      ErrorExit("Got error status from 'git show %s'" % file_hash)
    return data

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for |filename|.

    Uses the hashes recorded by PostProcessDiff to decide whether the file
    was added, deleted, modified, or renamed.
    """
    hash_before, hash_after = self.hashes.get(filename, (None,None))
    base_content = None
    new_content = None
    status = None

    if filename in self.renames:
      status = "A +"  # Match svn attribute name for renames.
      if filename not in self.hashes:
        # If a rename doesn't change the content, we never get a hash.
        base_content = RunShell(["git", "show", "HEAD:" + filename])
    elif not hash_before:
      status = "A"
      base_content = ""
    elif not hash_after:
      status = "D"
    else:
      status = "M"

    # NOTE(review): base_content may still be None here for "M"/"D" files;
    # assumes IsBinaryData handles None — confirm against its definition.
    is_binary = self.IsBinaryData(base_content)
    is_image = self.IsImage(filename)

    # Grab the before/after content if we need it.
    # We should include file contents if it's text or it's an image.
    if not is_binary or is_image:
      # Grab the base content if we don't have it already.
      if base_content is None and hash_before:
        base_content = self.GetFileContent(hash_before, is_binary)
      # Only include the "after" file if it's an image; otherwise it
      # it is reconstructed from the diff.
      if is_image and hash_after:
        new_content = self.GetFileContent(hash_after, is_binary)

    return (base_content, new_content, is_binary, status)
class CVSVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for CVS."""

  def __init__(self, options):
    super(CVSVCS, self).__init__(options)

  def GetOriginalContent_(self, filename):
    """Returns the checked-in content of |filename| with LF line endings.

    Runs "cvs up" to restore the pristine copy, so callers must move any
    locally modified file out of the way first.
    """
    RunShell(["cvs", "up", filename], silent_ok=True)
    # TODO need detect file content encoding
    # Use a context manager so the file handle is always closed.
    with open(filename) as f:
      content = f.read()
    return content.replace("\r\n", "\n")

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for |filename|.

    Bug fix: the previous code used "output.find(...)" directly as a
    boolean, but str.find returns -1 (truthy) when the substring is
    absent, so the "Locally Added" branch was taken for *any* status that
    wasn't "Locally Modified" — including "Needs Checkout" (deletions).
    Substring membership tests ("in") give the intended behavior.
    """
    base_content = None
    new_content = None
    status = "A"
    output, retcode = RunShellWithReturnCode(["cvs", "status", filename])
    if retcode:
      ErrorExit("Got error status from 'cvs status %s'" % filename)

    if "Status: Locally Modified" in output:
      status = "M"
      # Move the modified file aside so "cvs up" restores the pristine
      # base revision, then put the modified file back.
      temp_filename = "%s.tmp123" % filename
      os.rename(filename, temp_filename)
      base_content = self.GetOriginalContent_(filename)
      os.rename(temp_filename, filename)
    elif "Status: Locally Added" in output:
      status = "A"
      base_content = ""
    elif "Status: Needs Checkout" in output:
      status = "D"
      base_content = self.GetOriginalContent_(filename)

    return (base_content, new_content, self.IsBinaryData(base_content), status)

  def GenerateDiff(self, extra_args):
    """Returns unified "cvs diff" output; exits if it contains no patches."""
    cmd = ["cvs", "diff", "-u", "-N"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(extra_args)
    data, retcode = RunShellWithReturnCode(cmd)
    count = 0
    # cvs diff exits 0 (no differences) or 1 (differences found).
    if retcode in [0, 1]:
      for line in data.splitlines():
        if line.startswith("Index:"):
          count += 1
          logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from cvs diff")
    return data

  def GetUnknownFiles(self):
    """Returns the "? filename" lines cvs prints for untracked files."""
    data, retcode = RunShellWithReturnCode(["cvs", "diff"])
    if retcode not in [0, 1]:
      ErrorExit("Got error status from 'cvs diff':\n%s" % (data,))
    unknown_files = []
    for line in data.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default base is the working directory's parent, taken from
      # "hg parent -q" output of the form "rev:changeset-id".
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), (filename, self.subdir)
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Returns "hg diff --git" output rewritten to look like svn diff."""
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    # "-u" limits output to unknown files; lines look like "? filename".
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
        silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for |filename|."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    status, _ = out[0].split(' ', 1)
    if len(out) > 1 and status == "A":
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    # A "start:end" revision range: the base is the start revision.
    if ":" in self.base_rev:
      base_rev = self.base_rev.split(":", 1)[0]
    else:
      base_rev = self.base_rev
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
        silent_ok=True)
      is_binary = self.IsBinaryData(base_content)
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or self.IsBinaryData(new_content)
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
        silent_ok=True, universal_newlines=False)
    # Text files are reconstructed from the diff server-side; only binary
    # images need their new content uploaded.
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
class PerforceVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Perforce."""

  def __init__(self, options):
    def ConfirmLogin():
      # Make sure we have a valid perforce session
      while True:
        data, retcode = self.RunPerforceCommandWithReturnCode(
            ["login", "-s"], marshal_output=True)
        if not data:
          ErrorExit("Error checking perforce login")
        if not retcode and (not "code" in data or data["code"] != "error"):
          break
        print "Enter perforce password: "
        self.RunPerforceCommandWithReturnCode(["login"])

    super(PerforceVCS, self).__init__(options)

    self.p4_changelist = options.p4_changelist
    if not self.p4_changelist:
      ErrorExit("A changelist id is required")
    if (options.revision):
      ErrorExit("--rev is not supported for perforce")

    self.p4_port = options.p4_port
    self.p4_client = options.p4_client
    self.p4_user = options.p4_user

    ConfirmLogin()

    # Default the issue message to the changelist description's first line.
    if not options.message:
      description = self.RunPerforceCommand(["describe", self.p4_changelist],
                                            marshal_output=True)
      if description and "desc" in description:
        # Rietveld doesn't support multi-line descriptions
        raw_message = description["desc"].strip()
        lines = raw_message.splitlines()
        if len(lines):
          options.message = lines[0]

  def RunPerforceCommandWithReturnCode(self, extra_args, marshal_output=False,
                                       universal_newlines=True):
    """Runs "p4 <extra_args>" and returns (data, retcode).

    With marshal_output, data is the unmarshalled python object p4 -G
    emits; otherwise it is raw command output.
    """
    args = ["p4"]
    if marshal_output:
      # -G makes perforce format its output as marshalled python objects
      args.extend(["-G"])
    if self.p4_port:
      args.extend(["-p", self.p4_port])
    if self.p4_client:
      args.extend(["-c", self.p4_client])
    if self.p4_user:
      args.extend(["-u", self.p4_user])
    args.extend(extra_args)

    data, retcode = RunShellWithReturnCode(
        args, print_output=False, universal_newlines=universal_newlines)
    if marshal_output and data:
      data = marshal.loads(data)
    return data, retcode

  def RunPerforceCommand(self, extra_args, marshal_output=False,
                         universal_newlines=True):
    """Like RunPerforceCommandWithReturnCode but exits on command failure."""
    # This might be a good place to cache call results, since things like
    # describe or fstat might get called repeatedly.
    data, retcode = self.RunPerforceCommandWithReturnCode(
        extra_args, marshal_output, universal_newlines)
    if retcode:
      ErrorExit("Got error status from %s:\n%s" % (extra_args, data))
    return data

  def GetFileProperties(self, property_key_prefix = "", command = "describe"):
    """Maps depot filename -> property value (e.g. action or type) for the
    changelist, by walking the numbered keys of "p4 describe" output."""
    description = self.RunPerforceCommand(["describe", self.p4_changelist],
                                          marshal_output=True)

    changed_files = {}
    file_index = 0
    # Try depotFile0, depotFile1, ... until we don't find a match
    while True:
      file_key = "depotFile%d" % file_index
      if file_key in description:
        filename = description[file_key]
        change_type = description[property_key_prefix + str(file_index)]

        changed_files[filename] = change_type
        file_index += 1
      else:
        break
    return changed_files

  def GetChangedFiles(self):
    # Maps filename -> perforce action ("edit", "add", ...).
    return self.GetFileProperties("action")

  def GetUnknownFiles(self):
    # Perforce doesn't detect new files, they have to be explicitly added
    return []

  def IsBaseBinary(self, filename):
    """True if the base revision of |filename| has a non-text p4 type."""
    base_filename = self.GetBaseFilename(filename)
    return self.IsBinaryHelper(base_filename, "files")

  def IsPendingBinary(self, filename):
    """True if the pending revision of |filename| has a non-text p4 type."""
    return self.IsBinaryHelper(filename, "describe")

  def IsBinaryHelper(self, filename, command):
    file_types = self.GetFileProperties("type", command)
    if not filename in file_types:
      ErrorExit("Trying to check binary status of unknown file %s." % filename)
    # This treats symlinks, macintosh resource files, temporary objects, and
    # unicode as binary. See the Perforce docs for more details:
    # http://www.perforce.com/perforce/doc.current/manuals/cmdref/o.ftypes.html
    return not file_types[filename].endswith("text")

  def GetFileContent(self, filename, revision, is_binary):
    """Returns the content of |filename| at |revision| via "p4 print"."""
    file_arg = filename
    if revision:
      file_arg += "#" + revision
    # -q suppresses the initial line that displays the filename and revision
    return self.RunPerforceCommand(["print", "-q", file_arg],
                                   universal_newlines=not is_binary)

  def GetBaseFilename(self, filename):
    """Returns the depot file this change was integrated/moved from, or
    |filename| itself when the base is the same file."""
    actionsWithDifferentBases = [
        "move/add", # p4 move
        "branch", # p4 integrate (to a new file), similar to hg "add"
        "add", # p4 integrate (to a new file), after modifying the new file
    ]

    # We only see a different base for "add" if this is a downgraded branch
    # after a file was branched (integrated), then edited.
    if self.GetAction(filename) in actionsWithDifferentBases:
      # -Or shows information about pending integrations/moves
      fstat_result = self.RunPerforceCommand(["fstat", "-Or", filename],
                                             marshal_output=True)

      baseFileKey = "resolveFromFile0" # I think it's safe to use only file0
      if baseFileKey in fstat_result:
        return fstat_result[baseFileKey]

    return filename

  def GetBaseRevision(self, filename):
    """Returns the workspace's revision of |filename|, or None if absent."""
    base_filename = self.GetBaseFilename(filename)

    have_result = self.RunPerforceCommand(["have", base_filename],
                                          marshal_output=True)
    if "haveRev" in have_result:
      return have_result["haveRev"]

  def GetLocalFilename(self, filename):
    """Returns the local filesystem path for a depot path, or None."""
    where = self.RunPerforceCommand(["where", filename], marshal_output=True)
    if "path" in where:
      return where["path"]

  def GenerateDiff(self, args):
    """Builds an svn-style unified diff for the whole changelist."""
    # Per-file scratchpad shared by the nested diff generators below.
    class DiffData:
      def __init__(self, perforceVCS, filename, action):
        self.perforceVCS = perforceVCS
        self.filename = filename
        self.action = action
        self.base_filename = perforceVCS.GetBaseFilename(filename)

        self.file_body = None
        self.base_rev = None
        self.prefix = None
        self.working_copy = True
        self.change_summary = None

    def GenerateDiffHeader(diffData):
      # Emit the svn-style "Index:" header plus ---/+++ lines.
      header = []
      header.append("Index: %s" % diffData.filename)
      header.append("=" * 67)

      if diffData.base_filename != diffData.filename:
        if diffData.action.startswith("move"):
          verb = "rename"
        else:
          verb = "copy"
        header.append("%s from %s" % (verb, diffData.base_filename))
        header.append("%s to %s" % (verb, diffData.filename))

      suffix = "\t(revision %s)" % diffData.base_rev
      header.append("--- " + diffData.base_filename + suffix)
      if diffData.working_copy:
        suffix = "\t(working copy)"
      header.append("+++ " + diffData.filename + suffix)
      if diffData.change_summary:
        header.append(diffData.change_summary)
      return header

    def GenerateMergeDiff(diffData, args):
      # -du generates a unified diff, which is nearly svn format
      diffData.file_body = self.RunPerforceCommand(
          ["diff", "-du", diffData.filename] + args)
      diffData.base_rev = self.GetBaseRevision(diffData.filename)
      diffData.prefix = ""

      # We have to replace p4's file status output (the lines starting
      # with +++ or ---) to match svn's diff format
      lines = diffData.file_body.splitlines()
      first_good_line = 0
      while (first_good_line < len(lines) and
            not lines[first_good_line].startswith("@@")):
        first_good_line += 1
      diffData.file_body = "\n".join(lines[first_good_line:])
      return diffData

    def GenerateAddDiff(diffData):
      fstat = self.RunPerforceCommand(["fstat", diffData.filename],
                                      marshal_output=True)
      if "headRev" in fstat:
        diffData.base_rev = fstat["headRev"] # Re-adding a deleted file
      else:
        diffData.base_rev = "0" # Brand new file
      diffData.working_copy = False
      rel_path = self.GetLocalFilename(diffData.filename)
      diffData.file_body = open(rel_path, 'r').read()
      # Replicate svn's list of changed lines
      line_count = len(diffData.file_body.splitlines())
      diffData.change_summary = "@@ -0,0 +1"
      if line_count > 1:
        diffData.change_summary += ",%d" % line_count
      diffData.change_summary += " @@"
      diffData.prefix = "+"
      return diffData

    def GenerateDeleteDiff(diffData):
      diffData.base_rev = self.GetBaseRevision(diffData.filename)
      is_base_binary = self.IsBaseBinary(diffData.filename)

      # For deletes, base_filename == filename
      diffData.file_body = self.GetFileContent(diffData.base_filename,
          None,
          is_base_binary)
      # Replicate svn's list of changed lines
      line_count = len(diffData.file_body.splitlines())
      diffData.change_summary = "@@ -1"
      if line_count > 1:
        diffData.change_summary += ",%d" % line_count
      diffData.change_summary += " +0,0 @@"
      diffData.prefix = "-"
      return diffData

    changed_files = self.GetChangedFiles()

    svndiff = []
    filecount = 0
    for (filename, action) in changed_files.items():
      svn_status = self.PerforceActionToSvnStatus(action)
      if svn_status == "SKIP":
        continue

      diffData = DiffData(self, filename, action)
      # Is it possible to diff a branched file? Stackoverflow says no:
      # http://stackoverflow.com/questions/1771314/in-perforce-command-line-how-to-diff-a-file-reopened-for-add
      if svn_status == "M":
        diffData = GenerateMergeDiff(diffData, args)
      elif svn_status == "A":
        diffData = GenerateAddDiff(diffData)
      elif svn_status == "D":
        diffData = GenerateDeleteDiff(diffData)
      else:
        ErrorExit("Unknown file action %s (svn action %s)." % \
                  (action, svn_status))

      svndiff += GenerateDiffHeader(diffData)

      for line in diffData.file_body.splitlines():
        svndiff.append(diffData.prefix + line)
      filecount += 1
    if not filecount:
      ErrorExit("No valid patches found in output from p4 diff")
    return "\n".join(svndiff) + "\n"

  def PerforceActionToSvnStatus(self, status):
    """Maps a perforce action to the svn-style status letter Rietveld
    expects ("SKIP" means: emit nothing for this file)."""
    # Mirroring the list at http://permalink.gmane.org/gmane.comp.version-control.mercurial.devel/28717
    # Is there something more official?
    return {
            "add" : "A",
            "branch" : "A",
            "delete" : "D",
            "edit" : "M", # Also includes changing file types.
            "integrate" : "M",
            "move/add" : "M",
            "move/delete": "SKIP",
            "purge" : "D", # How does a file's status become "purge"?
           }[status]

  def GetAction(self, filename):
    """Returns the perforce action for |filename|; exits if unknown."""
    changed_files = self.GetChangedFiles()
    if not filename in changed_files:
      ErrorExit("Trying to get base version of unknown file %s." % filename)

    return changed_files[filename]

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for |filename|."""
    base_filename = self.GetBaseFilename(filename)
    base_content = ""
    new_content = None

    status = self.PerforceActionToSvnStatus(self.GetAction(filename))

    if status != "A":
      revision = self.GetBaseRevision(base_filename)
      if not revision:
        ErrorExit("Couldn't find base revision for file %s" % filename)
      is_base_binary = self.IsBaseBinary(base_filename)
      base_content = self.GetFileContent(base_filename,
                                         revision,
                                         is_base_binary)

    is_binary = self.IsPendingBinary(filename)
    if status != "D" and status != "SKIP":
      relpath = self.GetLocalFilename(filename)
      # Only binary images need their new content uploaded; text is
      # reconstructed from the diff server-side.
      if is_binary and self.IsImage(relpath):
        new_content = open(relpath, "rb").read()

    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  patches = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    next_name = None
    if line.startswith('Index:'):
      next_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the same
      # otherwise the file shows up twice.
      candidate = line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # File has property changes but no modifications, create a new diff.
        next_name = candidate
    if not next_name:
      # Ordinary diff line: accumulate it under the current file.
      current_lines.append(line)
      continue
    # A new file starts here; flush the previous one first.
    if current_name and current_lines:
      patches.append((current_name, ''.join(current_lines)))
    current_name = next_name
    current_lines = [line]
  if current_name and current_lines:
    patches.append((current_name, ''.join(current_lines)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Splits the diff with SplitPatch, skips any single-file patch larger
  than MAX_UPLOAD_SIZE, and POSTs each remaining patch to the issue's
  upload_patch endpoint.  Exits if the server response is not "OK".

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # patch is (filename, diff_text).
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      # Tell the server the base content will be uploaded separately.
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    lines = response_body.splitlines()
    # Expected response: "OK" followed by the new patch key.
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCSName(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an string indicating which VCS is detected.

  Returns:
    A pair (vcs, output). vcs is a string indicating which VCS was detected
    and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, VCS_PERFORCE,
    VCS_CVS, or VCS_UNKNOWN.
    Since local perforce repositories can't be easily detected, this method
    will only guess VCS_PERFORCE if any perforce options have been specified.
    output is a string containing any interesting output from the vcs
    detection routine, or None if there is nothing interesting.
  """
  # Any explicitly-set p4_* option short-circuits detection to Perforce.
  for attribute, value in options.__dict__.iteritems():
    if attribute.startswith("p4") and value != None:
      return (VCS_PERFORCE, None)

  def RunDetectCommand(vcs_type, command):
    """Helper to detect VCS by executing command.

    Returns:
      A pair (vcs, output) or None. Throws exception on error.
    """
    try:
      out, returncode = RunShellWithReturnCode(command)
      if returncode == 0:
        return (vcs_type, out.strip())
    except OSError, (errcode, message):
      if errcode != errno.ENOENT:  # command not found code
        raise
    # Implicitly returns None when the command fails or is not installed.

  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  res = RunDetectCommand(VCS_MERCURIAL, ["hg", "root"])
  if res != None:
    return res

  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return (VCS_SUBVERSION, None)

  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  res = RunDetectCommand(VCS_GIT, ["git", "rev-parse",
                                   "--is-inside-work-tree"])
  if res != None:
    return res

  # detect CVS repos use `cvs status && $? == 0` rules
  res = RunDetectCommand(VCS_CVS, ["cvs", "status"])
  if res != None:
    return res

  return (VCS_UNKNOWN, None)
def GuessVCS(options):
  """Return a VersionControlSystem instance for the current checkout.

  Honors an explicitly requested VCS (the --vcs option or the
  CODEREVIEW_VCS environment variable); otherwise auto-detects one via
  GuessVCSName().

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  requested = options.vcs or os.environ.get("CODEREVIEW_VCS")
  if requested:
    resolved = VCS_ABBREVIATIONS.get(requested.lower())
    if resolved is None:
      ErrorExit("Unknown version control system %r specified." % requested)
    vcs, extra_output = resolved, None
  else:
    vcs, extra_output = GuessVCSName(options)

  if vcs == VCS_MERCURIAL:
    # MercurialVCS needs the repository root; compute it when detection
    # didn't already provide it.
    if extra_output is None:
      extra_output = RunShell(["hg", "root"]).strip()
    return MercurialVCS(options, extra_output)
  if vcs == VCS_SUBVERSION:
    return SubversionVCS(options)
  if vcs == VCS_PERFORCE:
    return PerforceVCS(options)
  if vcs == VCS_GIT:
    return GitVCS(options)
  if vcs == VCS_CVS:
    return CVSVCS(options)

  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def CheckReviewer(reviewer):
  """Validate a reviewer -- either a nickname or an email address.

  A reviewer without an "@" is treated as a nickname and accepted as-is.
  An email address must contain exactly one "@" and a "." in its domain
  part.  Calls ErrorExit() if it is an invalid email address.

  Args:
    reviewer: A nickname or an email address.
  """
  if "@" not in reviewer:
    return  # Assume nickname
  local_part, _, domain = reviewer.partition("@")
  if "@" in domain:
    # More than one "@" in the address.
    ErrorExit("Invalid email address: %r" % reviewer)
  if "." not in domain:
    ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
  """Returns the content of [auto-props] section of Subversion's config file as
  a dictionary.

  Returns:
    A dictionary whose key-value pair corresponds the [auto-props] section's
    key-value pair.
    In following cases, returns empty dictionary:
    - config file doesn't exist, or
    - 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
  """
  if os.name == 'nt':
    # NOTE(review): assumes APPDATA is set on Windows; os.environ.get()
    # returning None would make this concatenation raise — confirm.
    subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
  else:
    subversion_config = os.path.expanduser("~/.subversion/config")
  if not os.path.exists(subversion_config):
    return {}
  config = ConfigParser.ConfigParser()
  config.read(subversion_config)
  if (config.has_section("miscellany") and
      config.has_option("miscellany", "enable-auto-props") and
      config.getboolean("miscellany", "enable-auto-props") and
      config.has_section("auto-props")):
    props = {}
    # Each option maps a file glob to one or more svn property settings.
    for file_pattern in config.options("auto-props"):
      props[file_pattern] = ParseSubversionPropertyValues(
        config.get("auto-props", file_pattern))
    return props
  else:
    return {}
def ParseSubversionPropertyValues(props):
  """Parse the given property value which comes from [auto-props] section and
  returns a list whose element is a (svn_prop_key, svn_prop_value) pair.

  See the following doctest for example.

  >>> ParseSubversionPropertyValues('svn:eol-style=LF')
  [('svn:eol-style', 'LF')]
  >>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
  [('svn:mime-type', 'image/jpeg')]
  >>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
  [('svn:eol-style', 'LF'), ('svn:executable', '*')]
  """
  key_value_pairs = []
  for prop in props.split(";"):
    pieces = prop.split("=")
    assert len(pieces) <= 2
    key = pieces[0]
    # If value is not given, use '*' as a Subversion's convention.
    value = pieces[1] if len(pieces) == 2 else "*"
    key_value_pairs.append((key, value))
  return key_value_pairs
def GetSubversionPropertyChanges(filename):
  """Return a Subversion's 'Property changes on ...' string, which is used in
  the patch file.

  Args:
    filename: filename whose property might be set by [auto-props] config.

  Returns:
    A string like 'Property changes on |filename| ...' if given |filename|
    matches any entries in [auto-props] section. None, otherwise.
  """
  global svn_auto_props_map
  # Lazily load and cache the user's [auto-props] configuration.
  if svn_auto_props_map is None:
    svn_auto_props_map = LoadSubversionAutoProperties()

  all_props = []
  # Collect properties from every glob pattern that matches the filename.
  for file_pattern, props in svn_auto_props_map.items():
    if fnmatch.fnmatch(filename, file_pattern):
      all_props.extend(props)
  if all_props:
    return FormatSubversionPropertyChanges(filename, all_props)
  return None
def FormatSubversionPropertyChanges(filename, props):
  """Build Subversion's 'Property changes on ...' block for a patch file.

  Args:
    filename: filename
    props: A list whose element is a (svn_prop_key, svn_prop_value) pair.

  Returns:
    A string which can be used in the patch file for Subversion: a header
    naming the file, an underscore separator line, then one "Added:" line
    and one value line per property, terminated by a newline.
  """
  lines = [
      "Property changes on: %s" % filename,
      "___________________________________________________________________"]
  for key, value in props:
    lines.extend(["Added: " + key, " + " + value])
  return "\n".join(lines) + "\n"
def RealMain(argv, data=None):
  """The real main function.

  Parses command-line options, computes (or accepts) a diff, and uploads it
  to the Rietveld server as a new issue or a new patchset of an existing one.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  options, args = parser.parse_args(argv[1:])
  if options.help:
    if options.verbose < 2:
      # hide Perforce options
      parser.epilog = "Use '--help -v' to show additional Perforce options."
      parser.option_groups.remove(parser.get_option_group('--p4_port'))
    parser.print_help()
    sys.exit(0)
  # Map -v/-vv to logging levels; module-level 'verbosity' is read elsewhere.
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  base = options.base_url
  if isinstance(vcs, SubversionVCS):
    # Guessing the base field is only supported for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    guessed_base = vcs.GuessBase(options.download_base)
    if base:
      if guessed_base and base != guessed_base:
        print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
            (base, guessed_base)
    else:
      base = guessed_base
  # Without a base URL the server cannot fetch base files itself, so force
  # uploading them from this script.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  data = vcs.PostProcessDiff(data)
  if options.print_diffs:
    print "Rietveld diff start:*****"
    print data
    print "Rietveld diff end:*****"
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  # An issue id means we are adding a patchset; otherwise a new issue.
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options.server,
                            options.email,
                            options.host,
                            options.save_cookies,
                            options.account_type)
  # Build the multipart form describing the issue/patchset.
  form_fields = [("subject", message)]
  if base:
    # Strip any embedded credentials before sending the base URL along.
    b = urlparse.urlparse(base)
    username, netloc = urllib.splituser(b.netloc)
    if username:
      logging.info("Removed username from base URL")
      base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
                                  b.query, b.fragment))
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      CheckReviewer(reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      CheckReviewer(cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  if options.private:
    if options.issue:
      print "Warning: Private flag ignored when updating an existing issue."
    else:
      form_fields.append(("private", "1"))
  if options.send_patch:
    options.send_mail = True
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  # Oversized diffs are uploaded file-by-file after the issue is created.
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  # When content is uploaded the server replies: message line, patchset id
  # line, then one "<file_id> <filename>" line per patch.
  if not options.download_base or not uploaded_diff_file:
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the last path component of the issue URL in the message.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    payload = ""
    if options.send_patch:
      payload=urllib.urlencode({"attach_patch": "yes"})
    rpc_server.Send("/" + issue + "/mail", payload=payload)
  return issue, patchset
def main():
  """Script entry point: set up logging, run RealMain, handle Ctrl-C."""
  log_format = ("%(asctime).19s %(levelname)s %(filename)s:"
                "%(lineno)s %(message)s ")
  try:
    logging.basicConfig(format=log_format)
    # Force a stable POSIX locale so VCS command output parses predictably.
    os.environ['LC_ALL'] = 'C'
    RealMain(sys.argv)
  except KeyboardInterrupt:
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)
# Allow use both as a standalone script and as an importable module.
if __name__ == "__main__":
  main()
| Python |
import BasicEditor | Python |
'''
Created on 2010-04-19
@author: Philippe Beaudoin
'''
from distutils.core import setup
import py2exe
# py2exe build: produce a windowed (no-console) Windows executable from
# BasicEditor.py.
setup(windows=['BasicEditor.py'],
      options={
          "py2exe": {
              # ctypes and logging are force-included because py2exe's
              # dependency scan can miss dynamically imported modules.
              # OpenGL is excluded from the bundle -- NOTE(review): confirm
              # the frozen app never imports OpenGL at runtime.
              "includes": ["ctypes", "logging"],
              "excludes": ["OpenGL"],
          }
      }
)
import MathLib
# Simple smoke test for the MathLib.Vector3d wrapper: exercises setValues,
# _print and addScaledVector.  (Python 2: the trailing comma in the print
# statements suppresses the newline so the vector prints next to its label.)
x = MathLib.Vector3d()
x.setValues(10,0.44,-132)
print "x = ",
x._print()
y = MathLib.Vector3d()
y.setValues(3,11,2)
print "y = ",
y._print()
# addScaledVector mutates x in place: x becomes x + 0.5 * y.
x.addScaledVector(y,0.5)
print "x + 0.5y = ",
x._print()
| Python |
import Utils
# Invoke the Utils module's test() entry point -- presumably a built-in
# smoke test; confirm against the Utils extension's definition.
Utils.test()
'''
Created on 2009-09-29
@author: beaudoin
'''
import PyUtils
class Curve(PyUtils.Observable):
    """This class contains a named curve that can be observed."""

    def __init__(self, name, trajectory1d, phiPtr = None):
        """Initializes a curve with the given name and attached to the given trajectory."""
        # BUG FIX: initialize the Observable base class.  Every other
        # Observable subclass in the application (ObservableList, Snapshot,
        # SnapshotBranch) does this, and setName() calls notifyObservers(),
        # which relies on the base class having been set up.
        super(Curve,self).__init__()
        self._name = str(name) # No unicode string pass this point
        self._trajectory1d = trajectory1d
        self._phiPtr = phiPtr

    def getName(self):
        """Returns the curve name."""
        return self._name

    def setName(self, name):
        """Sets the curve name and notifies observers."""
        self._name = str(name) # No unicode string pass this point
        self.notifyObservers()

    def getTrajectory1d(self):
        """Returns the attached trajectory."""
        return self._trajectory1d

    def getPhiPtr(self):
        """Returns the attached pointer to the phase."""
        return self._phiPtr
'''
Created on 2009-08-26
@author: beaudoin
'''
from OpenGL.GL import *
from OpenGL.GLU import *
import PyUtils, wx, Physics, Utils, time, math, sys, Core
from ObservableList import ObservableList
from Curve import Curve
from SnapshotTree import SnapshotBranch
from MathLib import Vector3d, Point3d
def _printout( text ):
"""Private. Redirect the passed text to stdout."""
sys.stdout.write(text)
# Route printouts from the native (DLL) side through Python's stdout so
# that C++ log messages appear in the same stream as Python's own output.
Utils.registerPrintFunction( _printout )
class SNMApp(wx.App):
"""A simple class that should handle almost everything a simbicon application typically needs."""
def __init__(self, appTitle="Simbicon Application",
fps = 30.0,
dt = 1/2000.0,
glCanvasSize=wx.DefaultSize,
size=wx.DefaultSize, redirect=False, filename=None,
useBestVisual=False, clearSigInt=True, showConsole=True):
"""
appTitle is the window title
fps is the desired number of frames per seconds
dt is the desired simulation timestep
:see: wx.BasicApp.__init__`
"""
wx.App.__init__(self, redirect, filename, useBestVisual, clearSigInt)
# No annoying error logging window
wx.Log.SetActiveTarget(wx.LogStderr())
import UI
# Setup the main window style
style = wx.DEFAULT_FRAME_STYLE
if size == wx.DefaultSize :
size = wx.GetDisplaySize()
size.height *= 0.75
size.width *= 0.75
if glCanvasSize == wx.DefaultSize :
style |= wx.MAXIMIZE
# Setup the environment for the python interactive console
consoleEnvironment = {
"wx" : wx,
"Physics" : Physics,
"Utils" : Utils }
exec "from MathLib import *\n" + \
"app = wx.GetApp()\n" + \
"from PyUtils import load" in consoleEnvironment, consoleEnvironment
# Create the main window
self._frame = UI.MainWindow(None, -1, appTitle, size = size, style = style,
fps = fps, glCanvasSize = glCanvasSize,
showConsole = showConsole,
consoleEnvironment = consoleEnvironment)
# Define GL callbacks
self._glCanvas = self._frame.getGLCanvas()
self._glCanvas.addDrawCallback( self.draw )
self._glCanvas.addPostDrawCallback( self.postDraw )
self._glCanvas.addOncePerFrameCallback( self.advanceAnimation )
self._glCanvas.setDrawAxes(False)
self._glCanvas.setPrintLoad(True)
self._glCanvas.setCameraTargetFunction( self.cameraTargetFunction )
# Get the tool panel
self._toolPanel = self._frame.getToolPanel()
# Show the application
self._frame.Show()
# Set-up starting state
self._dt = dt
self._drawShadows = True
self._simulationSecondsPerSecond = 1 # 1 = real time, 2 = twice real time, 1/2 = half real time
self._animationRunning = False
self._cameraFollowCharacter = False
self._drawCollisionVolumes = False
self._followedCharacter = None # Pointer to focused character
self._captureScreenShots = False
self._printStepReport = True
self._screenShotNumber = 0
self._worldOracle = Core.WorldOracle()
self._worldOracle.initializeWorld( Physics.world() )
self._kinematicMotion = False
# Set-up starting list of characters and controllers
self._characters = []
# Define the observables
self._controllerList = ObservableList()
self._characterObservable = PyUtils.Observable()
self._animationObservable = PyUtils.Observable()
self._cameraObservable = PyUtils.Observable()
self._optionsObservable = PyUtils.Observable()
self._curveList = ObservableList()
self._snapshotTree = SnapshotBranch()
#
# Private methods
def draw(self):
"""Draw the content of the world"""
world = Physics.world()
glEnable(GL_LIGHTING)
if self._drawCollisionVolumes:
world.drawRBs(Physics.SHOW_MESH|Physics.SHOW_CD_PRIMITIVES)
else:
world.drawRBs(Physics.SHOW_MESH|Physics.SHOW_COLOURS)
# world.drawRBs(Physics.SHOW_MESH|Physics.SHOW_CD_PRIMITIVES)
glDisable(GL_LIGHTING);
if self._drawShadows:
self._glCanvas.beginShadows()
world.drawRBs(Physics.SHOW_MESH)
self._glCanvas.endShadows()
def postDraw(self):
"""Perform some operation once the entire OpenGL window has been drawn"""
if self._captureScreenShots:
self._glCanvas.saveScreenshot("../screenShots/%04d.bmp" % self._screenShotNumber )
self._screenShotNumber += 1
def advanceAnimation(self):
"""Called once per frame"""
if self._animationRunning :
self.simulationFrame()
def simulationFrame(self):
"""Performs enough simulation steps to fill one frame"""
# Enough time elapsed perform simulation loop and render
simulationSeconds = 1.0/self._glCanvas.getFps() * self._simulationSecondsPerSecond
nbSteps = int( math.ceil( simulationSeconds / self._dt ) )
for i in range(0,nbSteps):
self.simulationStep()
def advanceAnimationUntilControllerEnds(self, controller):
"""Advances the animation until the specified controller reaches the end.
Specify either a name, an index, or an instance of a controller object."""
controller = self.getController(controller)
initialPhi = controller.getPhase()
currPhi = initialPhi + 1
while currPhi > initialPhi :
self.simulationStep()
currPhi = controller.getPhase()
def simulationStep(self):
"""Performs a single simulation step"""
# TODO Quite hacky
if self._kinematicMotion:
import KeyframeEditor
from MathLib import Trajectory3dv
try:
pc = self._posableCharacter
traj = self._stanceFootToSwingFootTrajectory
except AttributeError:
pc = self._posableCharacter = KeyframeEditor.PosableCharacter.PosableCharacter(self.getCharacter(0),self.getController(0))
traj = self._stanceFootToSwingFootTrajectory = Trajectory3dv()
traj.addKnot(0,Vector3d(-0.13,0,-0.4))
traj.addKnot(0.5,Vector3d(-0.13,0.125,0))
traj.addKnot(1,Vector3d(-0.13,0,0.4))
self._phase = 0
self._stance = Core.LEFT_STANCE
stanceToSwing = traj.evaluate_catmull_rom(self._phase)
if self._stance == Core.RIGHT_STANCE:
stanceToSwing.x = stanceToSwing.x * -1
pc.updatePose( self._phase, stanceToSwing, self._stance, True )
self._phase += 0.00069
if self._phase >= 1.0:
self._phase = 0
if self._stance == Core.LEFT_STANCE:
self._stance = Core.RIGHT_STANCE
else:
self._stance = Core.LEFT_STANCE
return
world = Physics.world()
controllers = self._controllerList._objects
contactForces = world.getContactForces()
for controller in controllers :
controller.performPreTasks(self._dt, contactForces)
world.advanceInTime(self._dt)
contactForces = world.getContactForces()
for controller in controllers :
if controller.performPostTasks(self._dt, contactForces) :
step = Vector3d (controller.getStanceFootPos(), controller.getSwingFootPos())
step = controller.getCharacterFrame().inverseRotate(step);
v = controller.getV()
phi = controller.getPhase()
if self._printStepReport:
print "step: %3.5f %3.5f %3.5f. Vel: %3.5f %3.5f %3.5f phi = %f" % ( step.x, step.y, step.z, v.x, v.y, v.z, phi)
def cameraTargetFunction(self, currentTarget):
"""Private! Return the point to target, or None if nothing to target."""
if not self._cameraFollowCharacter or self._followedCharacter == None:
return None
pos = self._followedCharacter.getRoot().getCMPosition()
pos.y = currentTarget.y
return pos
#
# Accessors
def getWorldOracle(self):
"""Return the world oracle for the application."""
return self._worldOracle
def getFrame(self):
"""Returns the application frame."""
return self._frame
def setDrawShadows(self, drawShadows):
"""Indicates whether the app should draw shadows or not"""
self._drawShadows = drawShadows
def getDrawShadows(self):
"""Checks if the app is drawing shadows"""
return self._drawShadows
def getGLCanvas(self):
"""Returns the GL canvas"""
return self._glCanvas
def getToolPanel(self):
"""Returns the tool panel"""
return self._toolPanel
def setAnimationRunning(self, animationRunning):
"""Indicates whether the animation should run or not"""
self._animationRunning = animationRunning
self._animationObservable.notifyObservers()
def isAnimationRunning(self):
"""Return true if the animation is currently running"""
return self._animationRunning
def setSimulationSecondsPerSecond(self, simulationSecondsPerSecond):
"""Sets the speed of the playback. 1 is realtime, 0.5 is slower, 2 is faster"""
self._simulationSecondsPerSecond = simulationSecondsPerSecond
self._animationObservable.notifyObservers()
def getSimulationSecondsPerSecond(self):
"""Return the speed of the playback"""
return self._simulationSecondsPerSecond
def setCameraFollowCharacter(self, follow):
"""Indicates whether the camera should follow a character or not"""
if follow != self._cameraFollowCharacter:
# Need to toggle
self._cameraFollowCharacter = follow
if self._followedCharacter == None :
try: self._followedCharacter = self._characters[0]
except IndexError: pass
self._cameraObservable.notifyObservers()
def doesCameraFollowCharacter(self):
"""Checks if the camera is currently following a character."""
return self._cameraFollowCharacter and self._followedCharacter != None
def setFollowedCharacter(self, character):
"""Indicates which character the camera should be following. Pass an index of a string."""
character = self.getCharacter(character)
self._cameraFollowCharacter = True
self._followedCharacter = character
self._cameraObservable.notifyObservers()
def setCameraAutoOrbit(self, autoOrbit):
"""Indicates whether the camera should automatically orbit or not"""
self._glCanvas.setCameraAutoOrbit(autoOrbit)
def doesCameraAutoOrbit(self):
"""Checks if the camera is currently automatically orbiting."""
return self._glCanvas.doesCameraAutoOrbit()
def drawCollisionVolumes(self, draw):
"""Indicates whether the application should draw collision volumes"""
if draw != self._drawCollisionVolumes:
self._drawCollisionVolumes = draw
self._optionsObservable.notifyObservers()
def getDrawCollisionVolumes(self):
"""Does the application draw collision volumes?"""
return self._drawCollisionVolumes
def setKinematicMotion(self, kinematicMotion):
"""Indicates whether the application should animate only kinematic motion"""
if kinematicMotion != self._kinematicMotion:
self._kinematicMotion = kinematicMotion
self._optionsObservable.notifyObservers()
def getKinematicMotion(self):
"""Does the application animate only kinematic motion?"""
return self._kinematicMotion
def captureScreenShots(self, capture):
"""Indicates whether the application should capture a screenshot at every frame."""
if capture != self._captureScreenShots:
self._captureScreenShots = capture
self._optionsObservable.notifyObservers()
def getCaptureScreenShots(self):
"""Does the application capture a screenshot at every frame?"""
return self._captureScreenShots
#
# Public methods
def deleteAllObjects(self):
"""Delete all objects: characters, rigid bodies, snapshots, etc."""
if self._followedCharacter is not None :
self._followedCharacter = None
self._cameraFollowCharacter = False
self._cameraObservable.notifyObservers()
self._characters = []
self._controllerList.clear()
import Physics
Physics.world().destroyAllObjects()
self.deleteAllSnapshots()
def addCharacter(self, character):
"""Adds a character to the application and the world"""
import Physics
if PyUtils.sameObjectInList(character, self._characters) :
raise KeyError ('Cannot add the same character twice to application.')
Physics.world().addArticulatedFigure( character )
self._characters.append( character )
if self._followedCharacter is None :
self._followedCharacter = character
self._cameraFollowCharacter = True
self._cameraObservable.notifyObservers()
self._characterObservable.notifyObservers()
def deleteCharacter(self, character):
"""Removes a character from the application. Specify either a name, an index, or an instance of a character object."""
character = self.getCharacter(character)
if self._followedCharacter is character :
self._followedCharacter = None
self._cameraFollowCharacter = False
self._cameraObservable.notifyObservers()
self._characters.remove(character)
self._characterObservable.notifyObservers()
def getCharacter(self, description ):
"""Returns a character. Specify either a name, an index. Anything else will be returned unmodified."""
if isinstance(description,basestring) :
try:
description = [char.getName() for char in self._characters].index(description)
except ValueError: raise ValueError( "No character found with the specified name." )
if isinstance(description,int) :
return self._characters[description]
return description
def getCharacterCount(self):
"""Returns the number of characters."""
return len( self._character )
def recenterCharacter(self, character):
"""Reposition the character at the center of the world in X,Z. Specify either a name, an index, or an instance of a character object."""
character = self.getCharacter(character)
character.recenter()
def addController(self, controller):
"""Adds a controller to the application"""
return self._controllerList.add(controller)
def deleteController(self, controller):
"""Removes a controller from the application. Specify either a name, an index, or an instance of a controller object."""
return self._controllerList.delete(controller)
def getController(self, description):
"""Returns a controller. Specify either a name, an index. Anything else will be returned unmodified."""
return self._controllerList.get(description)
def getControllerCount(self):
"""Returns the number of controllers."""
return self._controllerList.getCount()
def getControllerList(self):
"""Returns the controller list object. Useful for observation."""
return self._controllerList
def addCurve(self, name, trajectory1d, phiPtr = None):
"""Adds a curve to the application"""
return self._curveList.add( Curve(name, trajectory1d, phiPtr) )
def deleteCurve(self, curve):
"""Removes a curve from the application. Specify either a name, an index, or an instance of a controller object."""
return self._curveList.delete(curve)
def clearCurves(self):
"""Remove all the curves from the application."""
self._curveList.clear();
def getCurve(self, description):
"""Returns a curve. Specify either a name, an index. Anything else will be returned unmodified."""
return self._curveList.get(description)
def getCurveCount(self):
"""Returns the number of curves."""
return self._curveList.getCount()
def getCurveList(self):
"""Returns the curve list object. Useful for observation."""
return self._curveList
def getSnapshotTree(self):
"""Returns the top-level SnapshotBranch that can be observed."""
return self._snapshotTree
def takeSnapshot(self):
"""Take a snapshot of the world.
The snapshot will be returned and added to the snapshot tree."""
return self._snapshotTree.takeSnapshot()
def restoreActiveSnapshot(self, restoreControllerParams = True):
"""Restores the current snapshot. Return it."""
return self._snapshotTree.restoreActive(restoreControllerParams)
def previousSnapshot(self, restoreControllerParams = True):
"""Navigate to the previous snapshot. Return it, or None if failed."""
return self._snapshotTree.previousSnapshot(restoreControllerParams)
def nextSnapshot(self, restoreControllerParams = True):
"""Navigate to the next snapshot. Return it, or None if failed."""
return self._snapshotTree.nextSnapshot(restoreControllerParams)
def deleteAllSnapshots(self):
"""Delete all the snapshots of the world."""
self._snapshotTree = SnapshotBranch()
#
# For observers
#
def addCharacterObserver(self, observer):
self._characterObservable.addObserver(observer)
def deleteCharacterObserver(self, observer):
self._characterObservable.deleteObserver(observer)
def addControllerObserver(self, observer):
self._controllerList.addObserver(observer)
def deleteControllerObserver(self, observer):
self._controllerList.deleteObserver(observer)
def addAnimationObserver(self, observer):
self._animationObservable.addObserver(observer)
def deleteAnimationObserver(self, observer):
self._animationObservable.deleteObserver(observer)
def addCameraObserver(self, observer):
self._cameraObservable.addObserver(observer)
def deleteCameraObserver(self, observer):
self._cameraObservable.deleteObserver(observer)
def addOptionsObserver(self, observer):
self._optionsObservable.addObserver(observer)
def deleteOptionsObserver(self, observer):
self._optionsObservable.deleteObserver(observer)
| Python |
'''
Created on 2009-09-28
@author: beaudoin
'''
import PyUtils
class ObservableList(PyUtils.Observable):
    """A collection of application objects (controllers, characters, ...)
    that notifies its observers whenever the collection changes."""

    def __init__(self):
        super(ObservableList,self).__init__()
        self._objects = []

    def add(self, object):
        """Appends an object, rejecting duplicates, and notifies observers."""
        if PyUtils.sameObjectInList(object, self._objects):
            raise KeyError ('Cannot add the same object twice to application.')
        self._objects.append(object)
        self.notifyObservers()
        return object

    def delete(self, object):
        """Removes an object. Accepts a name, an index, or the object itself."""
        target = self.get(object)
        self._objects.remove(target)
        self.notifyObservers()

    def clear(self):
        """Empties the list in place and notifies observers."""
        self._objects[:] = []
        self.notifyObservers()

    def get(self, description):
        """
        Gets the object at the specified index, or with the specified name.
        If description is a string, an object with the specified name will be searched.
        If search fails or objects do not have a getName method, a ValueError is raised.
        If description is an int, the object at the specified index will be returned.
        If the index is invalid, an IndexError is raised.
        Otherwise, the input is returned unmodified. (So, if an object is passed, it is returned.)
        """
        if isinstance(description, basestring):
            try:
                position = [item.getName() for item in self._objects].index(description)
            except (AttributeError, ValueError):
                raise ValueError( "No object found with the specified name." )
            return self._objects[position]
        if isinstance(description, int):
            return self._objects[description]
        return description

    def getCount(self):
        """Returns the number of objects in the list."""
        return len(self._objects)
| Python |
'''
Created on 2009-10-02
@author: beaudoin
'''
import PyUtils, Core, time, Physics, Utils, wx
class Snapshot(PyUtils.Observable):
    """This class contains a snapshot. That is, the entire state of the world, the controllers and the character."""
    def __init__(self, parentBranch):
        """Takes a shot of a world, add it to the specified branch."""
        super(Snapshot,self).__init__()
        # Capture time, used only for the display name (see getName()).
        self._time = time.localtime()
        # Save the world
        world = Physics.world()
        self._worldState = Utils.DynamicArrayDouble()
        world.getState( self._worldState )
        # Save the controllers
        app = wx.GetApp()
        self._controllers = []
        for i in range(app.getControllerCount()):
            controller = app.getController(i)
            controllerState = Core.SimBiControllerState()
            controller.getControllerState( controllerState )
            # Keep both the runtime controller state and a wrapped copy of
            # the controller (its parameters) so either can be restored.
            self._controllers.append( (controllerState, PyUtils.wrapCopy( controller )) )
        self._parentBranch = parentBranch
        # Child branches fork the history below this snapshot; _activeIndex
        # selects the active child branch, or -1 when none is active.
        self._childBranches = []
        self._activeIndex = -1
    #
    # Public methods
    #
    def restore(self, restoreControllerParams = True):
        """Restores this snapshot, sets it as active."""
        self._parentBranch._setActiveSnapshot( self )
        self._restore(restoreControllerParams)
        self.notifyObservers()
    def getBranch(self, index):
        """Retrieves a branch."""
        return self._childBranches[index]
    def getBranchCount(self):
        """Retrieves the number of branches."""
        return len( self._childBranches )
    def getName(self):
        """Retrieves a display string (capture time) for that snapshot."""
        return "Snapshot at %(time)s" % { "time" : time.strftime("%H:%M:%S", self._time) }
    #
    # Private methods
    #
    def _takeSnapshot(self):
        """Takes a snapshot in the active branch, or add a new active branch if required."""
        if self._activeIndex != -1 :
            snapshot = self._childBranches[self._activeIndex].takeSnapshot()
            if snapshot is not None : return snapshot
        # Need to add a new branch
        self._childBranches.append( SnapshotBranch(self) )
        self._activeIndex = len(self._childBranches) - 1
        self.notifyObservers()
        return self._childBranches[self._activeIndex].takeSnapshot()
    def _restore(self, restoreControllerParams = True):
        """Restores this snapshot, without setting it as active. Should only be called by SnapshotBranch."""
        if self._activeIndex >= 0 :
            # Make sure the selected branch is deactivated
            self._childBranches[self._activeIndex]._deactivate()
        # Restore the world
        world = Physics.world()
        world.setState( self._worldState )
        # Restore the controllers
        app = wx.GetApp()
        assert app.getControllerCount() == len(self._controllers), "Controller list doesn't match snapshot!"
        for i in range(app.getControllerCount()):
            controller = app.getController(i)
            controllerState, wrappedController = self._controllers[i]
            if restoreControllerParams :
                # Batch the parameter updates so observers fire only once,
                # and endBatchChanges() runs even if fillObject() raises.
                controller.beginBatchChanges()
                try: wrappedController.fillObject(controller)
                finally: controller.endBatchChanges()
            controller.setControllerState( controllerState )
        self.notifyObservers()
    def _deactivateAllBranches(self):
        """Indicates that no branch should be active. In other words, the main branch is active."""
        self._activeIndex = -1
    def _setActiveBranch(self, branch):
        """Indicates that the specified branch should be the active one."""
        self._parentBranch._setActiveSnapshot(self)
        if self._activeIndex < 0 or branch is not self._childBranches[self._activeIndex] :
            # Locate the branch by identity among the children.
            for i, myBranch in enumerate( self._childBranches ) :
                if myBranch is branch :
                    self._activeIndex = i
                    return
            raise RuntimeError( "Desired active branch not found!" )
    def _getCurrentSnapshot(self):
        # Walk down the active branch if any; otherwise this snapshot is it.
        if self._activeIndex < 0 : return self
        return self._childBranches[self._activeIndex].getCurrentSnapshot()
    def _restoreActive(self, restoreControllerParams = True):
        """Restores the active snapshot. Can be this one or one in its subbranches."""
        if self._activeIndex >= 0 :
            snapshot = self._childBranches[self._activeIndex].restoreActive(restoreControllerParams)
            if snapshot is not None:
                return snapshot
        self._restore(restoreControllerParams)
        return self
    def _previousSnapshot(self, restoreControllerParams = True):
        """Restore previous snapshot and set it as active. Return None if no previous snapshot could be restored,
        in which case the enclosing branch should navigate."""
        if self._activeIndex < 0 : return None
        return self._childBranches[self._activeIndex].previousSnapshot(restoreControllerParams)
    def _nextSnapshot(self, restoreControllerParams = True):
        """Navigates down active branch. Return None if no active branch."""
        if self._activeIndex < 0 : return None
        return self._childBranches[self._activeIndex].nextSnapshot(restoreControllerParams)
class SnapshotBranch(PyUtils.Observable):
    """A list of world snapshots, including controller states and parameters."""
    def __init__(self, parentSnapshot = None):
        """Pass the parent snapshot or None if this is the root branch."""
        super(SnapshotBranch,self).__init__()
        self._parentSnapshot = parentSnapshot
        self._snapshots = []
        # Index of the current snapshot in this branch; -1 means the parent
        # snapshot (or nothing, for the root branch) is current.
        self._activeIndex = -1
    #
    # Public methods
    #
    def takeSnapshot(self):
        """Take a new snapshot and add it at the correct position in this branch, or one of its subbranches.
        Return the snapshot if it was taken, None otherwise."""
        if self._activeIndex == len(self._snapshots) - 1:
            # At the tip of this branch: append a brand new snapshot here.
            snapshot = Snapshot(self)
            self._snapshots.append( snapshot )
            self._activeIndex = len(self._snapshots) - 1
            self.notifyObservers()
            return snapshot
        elif self._activeIndex > -1 :
            # In the middle of the branch: delegate so history forks off
            # the active snapshot instead of being overwritten.
            return self._snapshots[self._activeIndex]._takeSnapshot()
        else:
            return None
    def getCurrentSnapshot(self):
        """Returns the currently active snapshot."""
        if self._activeIndex < 0 : return self._parentSnapshot
        return self._snapshots[self._activeIndex]._getCurrentSnapshot()
    def restoreActive(self, restoreControllerParams = True):
        """Restores the currently active snapshot. Returns None if nothing was restored. """
        if self._activeIndex < 0 :
            return None
        return self._snapshots[self._activeIndex]._restoreActive(restoreControllerParams)
    def previousSnapshot(self, restoreControllerParams = True):
        """Restore previous snapshot and set it as active. Return None if no previous snapshot could be restored,
        in which case the enclosing scope should navigate."""
        if self._activeIndex < 0 :
            return None
        snapshot = self._snapshots[self._activeIndex]._previousSnapshot(restoreControllerParams)
        if snapshot is not None :
            return snapshot
        # Cannot be handled by active snapshot, go down this branch
        if self._activeIndex == 0 and self._parentSnapshot is None :
            # Already at the very first snapshot of the root branch.
            return self._snapshots[0]
        self._activeIndex -= 1
        if self._activeIndex == -1:
            # Stepped past the start of this branch: fall back to the parent.
            self._parentSnapshot._restore(restoreControllerParams)
            return self._parentSnapshot
        self._snapshots[self._activeIndex]._deactivateAllBranches()
        self._snapshots[self._activeIndex]._restore(restoreControllerParams)
        return self._snapshots[self._activeIndex]
    def nextSnapshot(self, restoreControllerParams = True):
        """Restore next snapshot and set it as active. Return None if no next snapshot could be restored."""
        if self._activeIndex >= 0 :
            snapshot = self._snapshots[self._activeIndex]._nextSnapshot(restoreControllerParams)
            if snapshot is not None:
                return snapshot
        if self._activeIndex < len( self._snapshots ) - 1 :
            self._activeIndex += 1
            self._snapshots[self._activeIndex]._restore(restoreControllerParams)
            return self._snapshots[self._activeIndex]
    def getSnapshot(self, index):
        """Access the snapshot at the specified index. Will not change the active snapshot."""
        return self._snapshots[index]
    def getSnapshotCount(self):
        """Access the total number of snapshots."""
        return len(self._snapshots)
    #
    # Private methods
    #
    def _deactivate(self):
        """Indicates that this branch is not active and the parent snapshot should be selected."""
        self._activeIndex = -1
    def _setActiveSnapshot(self, snapshot):
        """Makes sure the specified snapshot is the active one."""
        if self._parentSnapshot is not None:
            self._parentSnapshot._setActiveBranch(self)
        if self._activeIndex < 0 or snapshot is not self._snapshots[self._activeIndex] :
            # Locate the snapshot by identity within this branch.
            for i, mySnapshot in enumerate( self._snapshots ) :
                if mySnapshot is snapshot :
                    self._activeIndex = i
                    return
            raise RuntimeError( "Desired active snapshot not found!" )
| Python |
'''
Created on 2009-11-20
@author: beaudoin
'''
from OpenGL.GL import *
import wx, GLUtils, PyUtils, math, traceback, sys
import UI
class CharacterScaler(UI.GLUITools.WindowWithControlPoints):
    """Base class for a simple GL window that displays a character along with
    handles used to scale its various elements."""
    def __init__( self, parent, characterDescription, x=0, y=0, width=0, height=0, minWidth=-1, minHeight=-1 ):
        """Set up the GL window (fixed Y bounds, X-driven aspect ratio) and
        remember the character description being edited."""
        super(CharacterScaler,self).__init__(
            parent, x, y, width, height, minWidth, minHeight,
            boundsY = (-0.1,2.1), forceAspectRatio = 'x' )
        self._characterDescription = characterDescription
class CharacterScalerFront(CharacterScaler):
"""A simple GL window that display a character from the front view with handles to scale its various elements."""
def __init__( self, parent, characterDescription, x=0, y=0, width=0, height=0, minWidth=-1, minHeight=-1 ):
super(CharacterScalerFront,self).__init__(parent,characterDescription,x,y,width,height, minWidth, minHeight)
self.addControlPoint( FrontFootControlPoint(characterDescription,-1) )
self.addControlPoint( FrontFootControlPoint(characterDescription,1) )
self.addControlPoint( FrontKneeControlPoint(characterDescription,-1) )
self.addControlPoint( FrontKneeControlPoint(characterDescription,1) )
self.addControlPoint( FrontLegControlPoint(characterDescription,-1) )
self.addControlPoint( FrontLegControlPoint(characterDescription,1) )
self.addControlPoint( FrontLegAnchorControlPoint(characterDescription,-1) )
self.addControlPoint( FrontLegAnchorControlPoint(characterDescription,1) )
self.addControlPoint( FrontPelvisControlPoint(characterDescription) )
self.addControlPoint( FrontChestControlPoint(characterDescription) )
self.addControlPoint( FrontShoulderControlPoint(characterDescription) )
self.addControlPoint( FrontNeckControlPoint(characterDescription) )
self.addControlPoint( FrontHeadTopControlPoint(characterDescription) )
self.addControlPoint( FrontHeadSideControlPoint(characterDescription) )
self.addControlPoint( FrontElbowControlPoint(characterDescription,-1) )
self.addControlPoint( FrontElbowControlPoint(characterDescription,1) )
self.addControlPoint( FrontWristControlPoint(characterDescription,-1) )
self.addControlPoint( FrontWristControlPoint(characterDescription,1) )
def drawContent(self):
"""Draw the character from front view."""
cd = self._characterDescription
glColor3f(1,1,1)
try:
for side in (-1,1):
glColor4f( 0.5, 0.5, 0.5, 0.84 )
glBegin(GL_QUADS)
halfSize = cd.getFootSizeX(side)/2.0
glVertex2d( cd.getLegPosX(side) - halfSize, cd.getGroundPosY() )
glVertex2d( cd.getLegPosX(side) - halfSize, cd.getAnklePosY(side) )
glVertex2d( cd.getLegPosX(side) + halfSize, cd.getAnklePosY(side) )
glVertex2d( cd.getLegPosX(side) + halfSize, cd.getGroundPosY() )
glEnd()
glColor4f( 0.5, 0.6, 0.8, 0.84 )
glBegin(GL_QUADS)
halfSize = cd.getLowerLegDiameter(side)/2.0
glVertex2d( cd.getLegPosX(side) - halfSize, cd.getAnklePosY(side) )
glVertex2d( cd.getLegPosX(side) - halfSize, cd.getKneePosY(side) )
glVertex2d( cd.getLegPosX(side) + halfSize, cd.getKneePosY(side) )
glVertex2d( cd.getLegPosX(side) + halfSize, cd.getAnklePosY(side) )
glEnd()
glColor4f( 0.5, 0.6, 0.8, 0.84 )
glBegin(GL_QUADS)
halfSize = cd.getUpperLegDiameter(side)/2.0
glVertex2d( cd.getLegPosX(side) - halfSize, cd.getKneePosY(side) )
glVertex2d( cd.getLegPosX(side) - halfSize, cd.getHipPosY() )
glVertex2d( cd.getLegPosX(side) + halfSize, cd.getHipPosY() )
glVertex2d( cd.getLegPosX(side) + halfSize, cd.getKneePosY(side) )
glEnd()
glColor4f( 0.5, 0.6, 0.8, 0.84 )
glBegin(GL_QUADS)
glVertex2d( 0, cd.getHipPosY() )
glVertex2d( side*cd.getPelvisDiameter()/2.0, cd.getHipPosY() )
glVertex2d( side*cd.getPelvisDiameter()/2.0, cd.getWaistPosY() )
glVertex2d( 0, cd.getWaistPosY() )
glEnd()
glColor4f( 0.5, 0.8, 0.6, 0.84 )
glBegin(GL_POLYGON)
glVertex2d( 0, cd.getWaistPosY() )
glVertex2d( side*cd.getTorsoDiameter()/4.0, cd.getWaistPosY() )
glVertex2d( side*cd.getTorsoDiameter()/2.0, cd.getChestPosY() )
glVertex2d( side*cd.getTorsoDiameter()/2.0, cd.getShoulderPosY() )
glVertex2d( 0, cd.getShoulderPosY() )
glEnd()
glColor4f( 0.892, 0.716, 0.602, 0.84 )
glBegin(GL_QUADS)
glVertex2d( 0, cd.getShoulderPosY() )
glVertex2d( side*0.1*cd.getHeadSizeX(), cd.getShoulderPosY() )
glVertex2d( side*0.1*cd.getHeadSizeX(), cd.getNeckPosY() )
glVertex2d( 0, cd.getNeckPosY() )
glEnd()
glColor4f( 0.892, 0.716, 0.602, 0.84 )
glVertex2f( 0, cd.getNeckPosY() + cd.getHeadSizeY()/2.0 )
glBegin(GL_TRIANGLE_FAN)
for i in range(21):
theta = math.pi * i/20.0
x = side * math.sin(theta) * cd.getHeadSizeX()/2.0
y = math.cos(theta) * cd.getHeadSizeY()/2.0 + cd.getNeckPosY() + cd.getHeadSizeY()/2.0
glVertex2f( x, y )
glEnd()
glColor4f( 0.5, 0.8, 0.6, 0.84 )
glBegin(GL_QUADS)
halfSize = cd.getUpperArmDiameter(side)/2.0
glVertex2d( cd.getArmPosX(side)-halfSize, cd.getShoulderPosY() )
glVertex2d( cd.getArmPosX(side)-halfSize, cd.getElbowPosY(side) )
glVertex2d( cd.getArmPosX(side)+halfSize, cd.getElbowPosY(side) )
glVertex2d( cd.getArmPosX(side)+halfSize, cd.getShoulderPosY() )
glEnd()
glColor4f( 0, 0, 0, 1 )
glBegin(GL_LINES)
glVertex2d( cd.getArmPosX(side)-side*halfSize, cd.getShoulderPosY() )
glVertex2d( cd.getArmPosX(side)-side*halfSize, cd.getChestPosY() )
glEnd()
glColor4f( 0.892, 0.716, 0.602, 0.84 )
glBegin(GL_QUADS)
halfSize = cd.getLowerArmDiameter(side)/2.0
glVertex2d( cd.getArmPosX(side)-halfSize, cd.getElbowPosY(side) )
glVertex2d( cd.getArmPosX(side)-halfSize, cd.getWristPosY(side) )
glVertex2d( cd.getArmPosX(side)+halfSize, cd.getWristPosY(side) )
glVertex2d( cd.getArmPosX(side)+halfSize, cd.getElbowPosY(side) )
glEnd()
# Draw control points
glPointSize( 8.0 )
glColor3d(1,1,0)
glBegin( GL_POINTS );
for controlPoint in self._controlPoints:
controlPoint.draw()
glEnd()
except Exception as e:
glEnd()
print "Exception while drawing scaled character interface: " + str(e)
traceback.print_exc(file=sys.stdout)
class CharacterControlPoint(UI.GLUITools.ControlPoint):
    """A draggable control point bound to the character description it edits."""
    def __init__( self, characterDescription):
        """Remember the character description this control point manipulates."""
        super(CharacterControlPoint,self).__init__()
        self._characterDescription = characterDescription
class SymmetricCharacterControlPoint(CharacterControlPoint):
    """A control point that exists on both sides of the character.

    While the character is kept symmetric, only the left-side (1) handle is
    drawn; the right-side (-1) one is hidden."""
    def __init__( self, characterDescription, side ):
        super(SymmetricCharacterControlPoint,self).__init__(characterDescription)
        self._side = side
    def draw(self):
        """Draw the handle, except for the hidden right side of a symmetric character."""
        hidden = self._characterDescription.isSymmetric() and self._side == -1
        if hidden : return
        super(SymmetricCharacterControlPoint,self).draw()
class FrontFootControlPoint(SymmetricCharacterControlPoint):
    """Handle at the outer ankle corner: drags foot width and ankle height."""
    def __init__(self, characterDescription, side):
        super(FrontFootControlPoint,self).__init__(characterDescription, side)
    def getPos(self):
        desc = self._characterDescription
        x = desc.getLegPosX(self._side) + self._side*desc.getFootSizeX(self._side)/2.0
        y = desc.getAnklePosY(self._side)
        return ( x, y )
    def setPos(self, pos):
        desc = self._characterDescription
        newWidth = (pos[0] - desc.getLegPosX(self._side)) * 2.0 * self._side
        desc.setFootSizeX( self._side, newWidth )
        desc.setAnklePosY( self._side, pos[1] )
class FrontKneeControlPoint(SymmetricCharacterControlPoint):
    """Handle at the outer knee edge: drags lower-leg diameter and knee height."""
    def __init__(self, characterDescription, side):
        super(FrontKneeControlPoint,self).__init__(characterDescription, side)
    def getPos(self):
        desc = self._characterDescription
        x = desc.getLegPosX(self._side) + self._side*desc.getLowerLegDiameter(self._side)/2.0
        y = desc.getKneePosY(self._side)
        return ( x, y )
    def setPos(self, pos):
        desc = self._characterDescription
        newDiameter = (pos[0] - desc.getLegPosX(self._side)) * 2.0 * self._side
        desc.setLowerLegDiameter( self._side, newDiameter )
        desc.setKneePosY( self._side, pos[1] )
class FrontLegControlPoint(SymmetricCharacterControlPoint):
    """Handle at the outer hip edge: drags upper-leg diameter and hip height."""
    def __init__(self, characterDescription, side):
        super(FrontLegControlPoint,self).__init__(characterDescription, side)
    def getPos(self):
        desc = self._characterDescription
        x = desc.getLegPosX(self._side) + self._side*desc.getUpperLegDiameter(self._side)/2.0
        y = desc.getHipPosY()
        return ( x, y )
    def setPos(self, pos):
        desc = self._characterDescription
        newDiameter = (pos[0] - desc.getLegPosX(self._side)) * 2.0 * self._side
        desc.setUpperLegDiameter( self._side, newDiameter )
        desc.setHipPosY( pos[1] )
class FrontLegAnchorControlPoint(SymmetricCharacterControlPoint):
    """Handle at the leg's hip anchor: drags the horizontal leg position only."""
    def __init__(self, characterDescription, side):
        super(FrontLegAnchorControlPoint,self).__init__(characterDescription, side)
    def getPos(self):
        desc = self._characterDescription
        return ( desc.getLegPosX(self._side), desc.getHipPosY() )
    def setPos(self, pos):
        # Only the X coordinate is editable; Y is ignored.
        self._characterDescription.setLegPosX( self._side, pos[0] )
class FrontPelvisControlPoint(CharacterControlPoint):
    """Handle at the waist's outer edge: drags pelvis width and waist height."""
    def __init__(self, characterDescription):
        super(FrontPelvisControlPoint,self).__init__(characterDescription)
    def getPos(self):
        desc = self._characterDescription
        return ( desc.getPelvisDiameter()/2.0, desc.getWaistPosY() )
    def setPos(self, pos):
        desc = self._characterDescription
        newX, newY = pos[0], pos[1]
        desc.setPelvisDiameter( newX*2.0 )
        desc.setWaistPosY( newY )
class FrontChestControlPoint(CharacterControlPoint):
    """Handle at the chest's outer edge: drags torso width and chest height."""
    def __init__(self, characterDescription):
        super(FrontChestControlPoint,self).__init__(characterDescription)
    def getPos(self):
        desc = self._characterDescription
        return ( desc.getTorsoDiameter()/2.0, desc.getChestPosY() )
    def setPos(self, pos):
        desc = self._characterDescription
        newX, newY = pos[0], pos[1]
        desc.setTorsoDiameter( newX*2.0 )
        desc.setChestPosY( newY )
class FrontShoulderControlPoint(CharacterControlPoint):
    """Handle at the shoulder's outer edge: drags torso width and shoulder height."""
    def __init__(self, characterDescription):
        super(FrontShoulderControlPoint,self).__init__(characterDescription)
    def getPos(self):
        desc = self._characterDescription
        return ( desc.getTorsoDiameter()/2.0, desc.getShoulderPosY() )
    def setPos(self, pos):
        desc = self._characterDescription
        newX, newY = pos[0], pos[1]
        desc.setTorsoDiameter( newX*2.0 )
        desc.setShoulderPosY( newY )
class FrontNeckControlPoint(CharacterControlPoint):
    """Handle on the center line at the neck base: drags neck height only."""
    def __init__(self, characterDescription):
        super(FrontNeckControlPoint,self).__init__(characterDescription)
    def getPos(self):
        return ( 0, self._characterDescription.getNeckPosY() )
    def setPos(self, pos):
        # Only the Y coordinate is editable; X is ignored.
        self._characterDescription.setNeckPosY( pos[1] )
class FrontHeadTopControlPoint(CharacterControlPoint):
    """Handle at the top of the head: drags the head's vertical size."""
    def __init__(self, characterDescription):
        super(FrontHeadTopControlPoint,self).__init__(characterDescription)
    def getPos(self):
        desc = self._characterDescription
        top = desc.getNeckPosY() + desc.getHeadSizeY()
        return ( 0, top )
    def setPos(self, pos):
        desc = self._characterDescription
        desc.setHeadSizeY( pos[1] - desc.getNeckPosY() )
class FrontHeadSideControlPoint(CharacterControlPoint):
    """Handle at the side of the head, mid-height: drags the head's width."""
    def __init__(self, characterDescription):
        super(FrontHeadSideControlPoint,self).__init__(characterDescription)
    def getPos(self):
        desc = self._characterDescription
        middleY = desc.getNeckPosY() + desc.getHeadSizeY()/2.0
        return ( desc.getHeadSizeX()/2.0, middleY )
    def setPos(self, pos):
        # Only the X coordinate is editable; Y is ignored.
        self._characterDescription.setHeadSizeX( pos[0]*2.0 )
class FrontElbowControlPoint(SymmetricCharacterControlPoint):
    """Handle at the elbow: drags upper-arm diameter and elbow height."""
    def __init__(self, characterDescription, side):
        super(FrontElbowControlPoint,self).__init__(characterDescription, side)
    def getPos(self):
        desc = self._characterDescription
        x = self._side * (desc.getTorsoDiameter()/2.0 + desc.getUpperArmDiameter(self._side))
        return ( x, desc.getElbowPosY(self._side) )
    def setPos(self, pos):
        desc = self._characterDescription
        newDiameter = pos[0]*self._side - desc.getTorsoDiameter()/2.0
        desc.setUpperArmDiameter( self._side, newDiameter )
        desc.setElbowPosY( self._side, pos[1] )
class FrontWristControlPoint(SymmetricCharacterControlPoint):
    """Handle at the wrist: drags lower-arm diameter and wrist height."""
    def __init__(self, characterDescription, side):
        super(FrontWristControlPoint,self).__init__(characterDescription, side)
    def getPos(self):
        desc = self._characterDescription
        x = desc.getArmPosX(self._side) + self._side*desc.getLowerArmDiameter(self._side)/2.0
        return ( x, desc.getWristPosY(self._side) )
    def setPos(self, pos):
        desc = self._characterDescription
        newDiameter = (pos[0] - desc.getArmPosX(self._side)) * 2.0 * self._side
        desc.setLowerArmDiameter( self._side, newDiameter )
        desc.setWristPosY( self._side, pos[1] )
| Python |
'''
Created on 2009-11-20
@author: beaudoin
'''
import Core, Physics, PyUtils
from MathLib import Vector3d, Point3d
class CharacterDescription(object):
    """A simple description of a character
    Be careful:
    Left refer to the left-hand-side of the character (which is seen to the right from the front view)
    So Right goes towards negative X, while Left goes towards positive X

    Variables come in two flavours:
      - "native" variables are the editable quantities (sizes, diameters,
        relative positions); setting one marks the derived values dirty.
      - "indirect" variables are absolute positions derived from the native
        ones; they are recomputed lazily by _computeIndirectVars().
    Getters and setters for both flavours are generated dynamically by
    _addNativeVar() / _addIndirectVar() (e.g. '_footSizeX' yields
    getFootSizeX/setFootSizeX).
    """
    def getNativeVar(self, varName):
        """Access a native variable by name."""
        return self.__getattribute__(varName).get()
    def getNativeVarSymmetric(self, varName, side):
        """Access a symmetric native variable by name and side (-1 right, 1 left)."""
        return self.__getattribute__(varName).getSide(side)
    def setNativeVar(self, varName, value):
        """Sets a native variable by name."""
        variable = self.__getattribute__(varName)
        # Unchanged value: keep the cached indirect variables valid.
        if variable.get() == value : return
        self._indirectVarsAreValid = False
        variable.set(value)
    def setNativeVarSymmetric(self, varName, side, value):
        """Sets a symmetric native variable by name and side."""
        variable = self.__getattribute__(varName)
        if variable.getSide(side) == value : return
        # When the character is symmetric, side 0 writes both sides at once.
        if self._isSymmetric : side = 0
        self._indirectVarsAreValid = False
        variable.setSide( side, value )
    def getIndirectVar(self, varName):
        """Access an indirect variable by name, refreshing derived values if needed."""
        if not self._indirectVarsAreValid : self._computeIndirectVars()
        return self.__getattribute__(varName).get()
    def getIndirectVarSymmetric(self, varName, side):
        """Access a symmetric indirect variable by name and side."""
        if not self._indirectVarsAreValid : self._computeIndirectVars()
        return self.__getattribute__(varName).getSide(side)
    def _addNativeVar(self, varName, initialValue):
        """Add a variable with the given name, as well as its getter and setter.

        Symmetric values get getters/setters taking a 'side' argument; plain
        values get zero/one-argument accessors."""
        if isinstance(initialValue, _Symmetric) :
            self.__setattr__( PyUtils.getterName(varName), lambda side: self.getNativeVarSymmetric(varName, side) )
            self.__setattr__( PyUtils.setterName(varName), lambda side, value: self.setNativeVarSymmetric(varName, side, value) )
        else :
            self.__setattr__( PyUtils.getterName(varName), lambda: self.getNativeVar(varName) )
            self.__setattr__( PyUtils.setterName(varName), lambda value: self.setNativeVar(varName, value) )
        self.__setattr__( varName, initialValue )
    def _addIndirectVar(self, varName, initialValue):
        """Add an indirect variable with the given name, as well as its getter.

        Indirect variables are read-only through the generated accessors; some
        have explicit hand-written setters below that redirect to native vars."""
        if isinstance(initialValue, _Symmetric) :
            self.__setattr__( PyUtils.getterName(varName), lambda side: self.getIndirectVarSymmetric(varName, side) )
        else:
            self.__setattr__( PyUtils.getterName(varName), lambda: self.getIndirectVar(varName) )
        # NOTE(review): None is falsy like False, so this marks the cache
        # dirty; elsewhere False is used for the same purpose -- presumably
        # intentional, but confirm.
        self._indirectVarsAreValid = None
        self.__setattr__(varName, initialValue)
    def __init__(self):
        super(CharacterDescription,self).__init__()
        # Roughly based on http://www.idrawdigital.com/wp-content/uploads/2009/01/prop_male.gif
        # 1 head unit = 0.2286 meters
        hu = 0.2286
        self._isSymmetric = True
        self._indirectVarsAreValid = False
        # Native (editable) variables: _Value(initial, min, max) for single
        # values, _Symmetric(...) for per-side values.
        # NOTE(review): '_footSizeX' starts at 0.45*hu, below its own minimum
        # of 0.6*hu -- initial values are stored unclamped; confirm intended.
        self._natives = {
            '_footSizeX' : _Symmetric( 0.45 * hu, 0.6*hu, hu ),
            '_footSizeZ' : _Symmetric(1 * hu, 0.1*hu ),
            '_ankleRelativePosY' : _Symmetric(0.05, 0.05, 0.3 ),
            '_lowerLegDiameter' : _Symmetric( 0.33 * hu, 0.2*hu, hu ),
            '_upperLegDiameter' : _Symmetric( 0.4 * hu, 0.2*hu, hu ),
            '_legSizeY' : _Value( 4*hu, 2*hu, 6*hu ),
            '_kneeRelativePosY' : _Symmetric( 0.52, 0.2, 0.8 ),
            '_legRelativeAnchorX' : _Symmetric( 0.6, 0.2, 0.8 ),
            '_pelvisDiameter' : _Value( 1.1 * hu, 0.1*hu, 2.5*hu ),
            '_torsoDiameter' : _Value( 1.4 * hu, hu, 2.6*hu ),
            '_trunkSizeY' : _Value( 2.66 * hu, 1.8*hu, 3.5*hu ),
            '_waistRelativePosY' : _Value( 0.17, 0.1, 0.4 ),
            '_chestRelativePosY' : _Value( 0.5, 0.2, 0.8 ),
            '_neckSizeY' : _Value( 0.05 * hu, 0.01*hu, 2*hu ),
            '_headSizeX' : _Value( 0.9 * hu, 0.1*hu, 2*hu ),
            '_headSizeY' : _Value( 1.1 * hu, 0.1*hu, 2*hu ),
            '_headSizeZ' : _Value( 1.0 * hu, 0.1*hu, 2*hu ),
            '_upperArmDiameter' : _Symmetric( 0.35 * hu, 0.2*hu, hu ),
            '_lowerArmDiameter' : _Symmetric( 0.28 * hu, 0.2*hu, hu ),
            '_armSizeY' : _Symmetric( 2.7 * hu, 1*hu, 5*hu ),
            '_elbowRelativePosY' : _Symmetric( 0.44444444, 0.01, 0.99 ) }
        for var, val in self._natives.iteritems() :
            self._addNativeVar( var, val )
        # Indirect (derived) variables: the zeros are placeholders; actual
        # values are filled in lazily by _computeIndirectVars().
        self._indirects = {
            '_legPosX' : _Symmetric(0),
            '_groundPosY' : _Value(0),
            '_anklePosY' : _Symmetric(0),
            '_kneePosY' : _Symmetric(0),
            '_hipPosY' : _Value(0),
            '_waistPosY' : _Value(0),
            '_chestPosY' : _Value(0),
            '_shoulderPosY' : _Value(0),
            '_neckPosY' : _Value(0),
            '_armPosX' : _Symmetric(0),
            '_elbowPosY' : _Symmetric(0),
            '_wristPosY' : _Symmetric(0)
        }
        for var, val in self._indirects.iteritems() :
            self._addIndirectVar( var, val )
    # Each _computeXxx() below derives one indirect variable from the native
    # ones; each hand-written setXxx() performs the inverse mapping so that
    # dragging an absolute position updates the underlying relative variable.
    def _computeLegPosX(self, side):
        """Derive the leg's X position from pelvis width and relative anchor."""
        self._legPosX.setSide( side, side*self._pelvisDiameter.get()/2.0*self._legRelativeAnchorX.getSide(side))
    def setLegPosX(self, side, value):
        """Set the leg's absolute X position by adjusting the relative anchor."""
        self.setLegRelativeAnchorX( side, value/self._pelvisDiameter.get()*2.0*side )
    def _computeAnklePosY(self, side):
        """Derive the ankle height from ground level, leg size and relative position."""
        self._anklePosY.setSide( side, self._groundPosY.get() + self._legSizeY.get() * self._ankleRelativePosY.getSide(side) )
    def setAnklePosY(self, side, value):
        """Set the absolute ankle height by adjusting its relative position."""
        self.setAnkleRelativePosY( side, (value - self._groundPosY.get())/self._legSizeY.get() )
    def _computeKneePosY(self, side):
        """Derive the knee height: relative position between ankle and hip."""
        self._kneePosY.setSide( side, self._anklePosY.getSide(side) + (self._hipPosY.get()-self._anklePosY.getSide(side)) * self._kneeRelativePosY.getSide(side) )
    def setKneePosY(self, side, value):
        """Set the absolute knee height by adjusting its relative position."""
        self.setKneeRelativePosY( side, (value - self._anklePosY.getSide(side)) / (self._hipPosY.get()-self._anklePosY.getSide(side)) )
    def _computeHipPosY(self):
        """Derive the hip height: ground level plus leg size."""
        self._hipPosY.set( self._groundPosY.get() + self._legSizeY.get() )
    def setHipPosY(self, value):
        """Set the absolute hip height by adjusting the leg size."""
        self.setLegSizeY( value - self._groundPosY.get() )
    def _computeShoulderPosY(self):
        """Derive the shoulder height: hip height plus trunk size."""
        self._shoulderPosY.set( self._hipPosY.get() + self._trunkSizeY.get() )
    def setShoulderPosY(self, value):
        """Set the absolute shoulder height by adjusting the trunk size."""
        self.setTrunkSizeY( value - self._hipPosY.get() )
    def _computeWaistPosY(self):
        """Derive the waist height: relative position along the trunk."""
        self._waistPosY.set( self._hipPosY.get() + self._trunkSizeY.get() * self._waistRelativePosY.get() )
    def setWaistPosY(self, value):
        """Set the absolute waist height by adjusting its relative position."""
        self.setWaistRelativePosY( (value - self._hipPosY.get())/self._trunkSizeY.get() )
    def _computeChestPosY(self):
        """Derive the chest height: relative position between waist and shoulder."""
        self._chestPosY.set( self._waistPosY.get() + (self._shoulderPosY.get()-self._waistPosY.get()) * self._chestRelativePosY.get() )
    def setChestPosY(self, value):
        """Set the absolute chest height by adjusting its relative position."""
        self.setChestRelativePosY( (value - self._waistPosY.get())/(self._shoulderPosY.get()-self._waistPosY.get()) )
    def _computeNeckPosY(self):
        """Derive the neck-base height: shoulder height plus neck size."""
        self._neckPosY.set( self._shoulderPosY.get() + self._neckSizeY.get() )
    def setNeckPosY(self, value):
        """Set the absolute neck-base height by adjusting the neck size."""
        self.setNeckSizeY( value - self._shoulderPosY.get() )
    def _computeArmPosX(self, side):
        """Derive the arm's X position: just outside the torso."""
        self._armPosX.setSide(side, side*(self._torsoDiameter.get() + self._upperArmDiameter.getSide(side))/2.0)
    def _computeWristPosY(self, side):
        """Derive the wrist height: shoulder height minus full arm length."""
        self._wristPosY.setSide(side, self._shoulderPosY.get() - self._armSizeY.getSide(side))
    def setWristPosY(self, side, value):
        """Set the absolute wrist height by adjusting the arm length."""
        self.setArmSizeY( side, self._shoulderPosY.get() - value )
    def _computeElbowPosY(self, side):
        """Derive the elbow height: relative position along the arm."""
        self._elbowPosY.setSide(side, self._shoulderPosY.get() - self._armSizeY.getSide(side)*self._elbowRelativePosY.getSide(side))
    def setElbowPosY(self, side, value):
        """Set the absolute elbow height by adjusting its relative position."""
        self.setElbowRelativePosY( side, (self._shoulderPosY.get() - value)/self._armSizeY.getSide(side) )
    def _computeIndirectVars(self):
        """Compute all the variables that are not directly part of the editable character description."""
        if self._indirectVarsAreValid : return
        # Careful! The order in which the _compute functions are called is important!
        self._groundPosY.set( 0 )
        # NOTE(review): the next line is redundant -- _computeHipPosY() right
        # below performs exactly the same assignment.
        self._hipPosY.set( self._groundPosY.get() + self._legSizeY.get() )
        self._computeHipPosY()
        self._computeShoulderPosY()
        self._computeWaistPosY()
        self._computeChestPosY()
        self._computeNeckPosY()
        # Per-side values (-1 = right, 1 = left).
        for side in (-1,1) :
            self._computeLegPosX( side )
            self._computeAnklePosY(side)
            self._computeKneePosY(side)
            self._computeArmPosX(side)
            self._computeWristPosY(side)
            self._computeElbowPosY(side)
        self._indirectVarsAreValid = True
    def isSymmetric(self):
        """True if the character should be symmetrical."""
        return self._isSymmetric
    def setSymmetric(self, value):
        """Indicates whether the character should be symmetrical."""
        if value == self._isSymmetric : return
        self._isSymmetric = value
        # Only _Symmetric values define forceSymmetric(); plain _Value raises
        # AttributeError, which is deliberately skipped.
        for val in self._natives.itervalues() :
            try: val.forceSymmetric()
            except AttributeError: pass
        self._indirectVarsAreValid = False
    def createCharacter(self):
        """Creates a 3D character that follows this description.

        Builds the articulated rigid-body hierarchy (pelvis root, trunk, head,
        arms, legs, feet, toes) connected by ball-in-socket, hinge and
        universal joints, and returns the assembled Core.Character."""
        from App import Proxys
        blue = ( 0.5, 0.6, 0.8, 1 )
        green = ( 0.5, 0.8, 0.6, 1 )
        red = ( 0.892, 0.716, 0.602, 1 )
        gray = ( 0.5, 0.5, 0.5, 1 )
        character = Core.Character()
        character.setName("Instant Character")
        # NOTE(review): mass is passed negative everywhere; presumably the
        # createArticulated* helpers interpret a negative mass as a density
        # (kg/m^3) -- confirm against PyUtils.RigidBody.
        massScale = 900
        # --- pelvis (root body) ---
        pelvisSizeY = self.getWaistPosY() - self.getHipPosY()
        pelvisBottomPos = -pelvisSizeY/2.0-self.getLegSizeY()*0.1
        pelvisTopPos = pelvisSizeY/2.0
        pelvisRadius = self.getPelvisDiameter()/2.0
        rootPosY = self.getHipPosY() + pelvisSizeY/2.0 + 0.007
        pelvis = PyUtils.RigidBody.createArticulatedTaperedBox(
            name = "pelvis",
            size = (pelvisRadius*2.0, pelvisSizeY*1.5, pelvisRadius*1.2),
            moiScale = 3,
            exponentBottom = 2,
            exponentSide = 2,
            mass = -massScale, pos=(0, rootPosY, 0), colour = blue )
        character.setRoot( pelvis )
        # --- lower back: waist to chest ---
        totalLowerBackSizeY = self.getChestPosY() - self.getWaistPosY()
        lowerBackOffsetY = 0 #0.15 * totalLowerBackSizeY
        lowerBackSizeX = self.getTorsoDiameter() * 0.7
        lowerBackSizeY = totalLowerBackSizeY - lowerBackOffsetY
        lowerBackSizeZ = lowerBackSizeX * 0.7
        lowerback = PyUtils.RigidBody.createArticulatedTaperedBox(
            name = "lowerBack",
            size = (lowerBackSizeX, lowerBackSizeY, lowerBackSizeZ),
            exponentTop = 3, exponentBottom = 2, exponentSide = 2,
            mass = -massScale, colour = green )
        character.addArticulatedRigidBody( lowerback )
        joint = Proxys.BallInSocketJoint(
            name = "pelvis_lowerback",
            posInParent = (0, pelvisSizeY/2.0, 0),
            posInChild = (0, -lowerBackSizeY/2.0 -lowerBackOffsetY, 0),
            swingAxis1 = (1, 0, 0),
            twistAxis = (0, 0, 1),
            limits = (-1.6, 1.6, -1.6, 1.6, -1.6, 1.6) ).createAndFillObject()
        joint.setParent(pelvis)
        joint.setChild(lowerback)
        character.addJoint(joint)
        # --- torso: chest to shoulders ---
        totalTorsoSizeY = self.getShoulderPosY() - self.getChestPosY()
        torsoOffsetY = -0.2 * totalTorsoSizeY
        torsoSizeX = self.getTorsoDiameter()
        torsoSizeY = totalTorsoSizeY - torsoOffsetY
        torsoSizeZ = torsoSizeX * 0.6
        torso = PyUtils.RigidBody.createArticulatedTaperedBox(
            name = "torso",
            size = (torsoSizeX, torsoSizeY, torsoSizeZ),
            exponentTop = 2.2, exponentBottom = 4, exponentSide = 3,
            mass = -massScale, colour = green )
        character.addArticulatedRigidBody( torso )
        joint = Proxys.BallInSocketJoint(
            name = "lowerback_torso",
            posInParent = (0, lowerBackSizeY/2.0, 0),
            posInChild = (0, -torsoSizeY/2.0 -torsoOffsetY, 0),
            swingAxis1 = (1, 0, 0),
            twistAxis = (0, 0, 1),
            limits = (-1.6, 1.6, -1.6, 1.6, -1.6, 1.6) ).createAndFillObject()
        joint.setParent(lowerback)
        joint.setChild(torso)
        character.addJoint(joint)
        # --- head (ellipsoid body with a display mesh and a neck cylinder) ---
        headOffsetY = self.getNeckSizeY()
        headSizeX = self.getHeadSizeX()
        headSizeY = self.getHeadSizeY()
        headSizeZ = self.getHeadSizeZ()
        head = PyUtils.RigidBody.createArticulatedEllipsoid(
            name = "head",
            radius = (headSizeX/2.0, headSizeY/2.0, headSizeZ/2.0),
            mass = -massScale, withMesh = False )
        character.addArticulatedRigidBody( head )
        head.addMeshObj( "data/StockMeshes/head.obj", Vector3d(0,-0.064,0), Vector3d(headSizeX*6.5,headSizeY*4.6,headSizeZ*5.5) )
        head.setColour( *red )
        head.addMesh( PyUtils.Mesh.createCylinder(
            basePoint = (0,-headSizeY/2.0 - headOffsetY - torsoSizeY*0.1,0),
            tipPoint = (0,-headSizeY*0.2,0),
            radius = 0.12*headSizeX, colour = red ))
        joint = Proxys.BallInSocketJoint(
            name = "torso_head",
            posInParent = (0, torsoSizeY/2.0, 0),
            posInChild = (0, -headSizeY/2.0 - headOffsetY, 0),
            swingAxis1 = (1, 0, 0),
            twistAxis = ( 0, 0, 1),
            limits = (-1.6, 1.6, -1.6, 1.6, -1.6, 1.6) ).createAndFillObject()
        joint.setParent(torso)
        joint.setChild(head)
        character.addJoint(joint)
        # --- left upper arm (side 1) ---
        leftUpperArmSizeY = self.getShoulderPosY() - self.getElbowPosY(1)
        leftUpperArmDiameter = self.getUpperArmDiameter(1)
        lUpperArm = PyUtils.RigidBody.createArticulatedCylinder(
            name = "lUpperArm",
            moiScale = 3,
            axis = 0, basePos = -leftUpperArmSizeY/2.0, tipPos = leftUpperArmSizeY/2.0,
            radius = leftUpperArmDiameter/2.0,
            mass = -massScale, colour = green )
        character.addArticulatedRigidBody( lUpperArm )
        lUpperArm.addMesh( PyUtils.Mesh.createSphere( (-leftUpperArmSizeY/2.0, 0, 0), leftUpperArmDiameter*0.65, colour = green ) )
        lUpperArm.addMesh( PyUtils.Mesh.createSphere( (leftUpperArmSizeY/2.0, 0, 0), leftUpperArmDiameter*0.5, colour = green ) )
        joint = Proxys.BallInSocketJoint(
            name = "lShoulder",
            posInParent = (torsoSizeX*0.52, torsoSizeY*0.32, 0),
            posInChild = (-leftUpperArmSizeY/2.0, 0, 0),
            swingAxis1 = (0, 0, 1),
            twistAxis = (1, 0, 0),
            limits = (-100, 100, -1.5, 1.5, -100, 100) ).createAndFillObject()
        joint.setParent(torso)
        joint.setChild(lUpperArm)
        character.addJoint(joint)
        # --- right upper arm (side -1) ---
        rightUpperArmSizeY = self.getShoulderPosY() - self.getElbowPosY(-1)
        rightUpperArmDiameter = self.getUpperArmDiameter(-1)
        rUpperArm = PyUtils.RigidBody.createArticulatedCylinder(
            name = "rUpperArm",
            moiScale = 3,
            axis = 0, basePos = -rightUpperArmSizeY/2.0, tipPos = rightUpperArmSizeY/2.0,
            radius = rightUpperArmDiameter/2.0,
            mass = -massScale, colour = green )
        character.addArticulatedRigidBody( rUpperArm )
        rUpperArm.addMesh( PyUtils.Mesh.createSphere( (rightUpperArmSizeY/2.0, 0, 0), rightUpperArmDiameter*0.65, colour = green ) )
        rUpperArm.addMesh( PyUtils.Mesh.createSphere( (-rightUpperArmSizeY/2.0, 0, 0), rightUpperArmDiameter*0.5, colour = green ) )
        joint = Proxys.BallInSocketJoint(
            name = "rShoulder",
            posInParent = (-torsoSizeX*0.52, torsoSizeY*0.32, 0),
            posInChild = (rightUpperArmSizeY/2.0, 0, 0),
            swingAxis1 = (0, 0, 1),
            twistAxis = (1, 0, 0),
            limits = (-100, 100, -1.5, 1.5, -100, 100) ).createAndFillObject()
        joint.setParent(torso)
        joint.setChild(rUpperArm)
        character.addJoint(joint)
        # --- left lower arm (with a hand-like tapered box at the tip) ---
        leftLowerArmSizeY = self.getElbowPosY(1) - self.getWristPosY(1)
        leftLowerArmDiameter = self.getLowerArmDiameter(1)
        lLowerArm = PyUtils.RigidBody.createArticulatedCylinder(
            name = "lLowerArm",
            moiScale = 3,
            axis = 0, basePos = -leftLowerArmSizeY/2.0, tipPos = leftLowerArmSizeY/2.0,
            radius = leftLowerArmDiameter/2.0,
            mass = -massScale, colour = red )
        character.addArticulatedRigidBody( lLowerArm )
        lLowerArm.addMesh( PyUtils.Mesh.createTaperedBox(
            position=(leftLowerArmSizeY*0.5+leftLowerArmDiameter*0.8,0,0),
            size=(leftLowerArmDiameter*1.6, leftLowerArmDiameter*0.5, leftLowerArmDiameter*1.15), colour = red ) )
        joint = Proxys.HingeJoint(
            name = "lElbow",
            posInParent = (leftUpperArmSizeY/2.0, 0, 0),
            posInChild = (-leftLowerArmSizeY/2.0, 0, 0),
            axis = (0, 1, 0),
            limits = (-2.7, 0) ).createAndFillObject()
        joint.setParent(lUpperArm)
        joint.setChild(lLowerArm)
        character.addJoint(joint)
        # --- right lower arm ---
        rightLowerArmSizeY = self.getElbowPosY(-1) - self.getWristPosY(-1)
        rightLowerArmDiameter = self.getLowerArmDiameter(-1)
        rLowerArm = PyUtils.RigidBody.createArticulatedCylinder(
            name = "rLowerArm",
            moiScale = 3,
            axis = 0, basePos = -rightLowerArmSizeY/2.0, tipPos = rightLowerArmSizeY/2.0,
            radius = rightLowerArmDiameter/2.0,
            mass = -massScale, colour = red )
        character.addArticulatedRigidBody( rLowerArm )
        rLowerArm.addMesh( PyUtils.Mesh.createTaperedBox(
            position=(-rightLowerArmSizeY*0.5-rightLowerArmDiameter*0.8,0,0),
            size=(rightLowerArmDiameter*1.6, rightLowerArmDiameter*0.5, rightLowerArmDiameter*1.15), colour = red ) )
        joint = Proxys.HingeJoint(
            name = "rElbow",
            posInParent = (-rightUpperArmSizeY/2.0, 0, 0),
            posInChild = (rightLowerArmSizeY/2.0, 0, 0),
            axis = (0, -1, 0),
            limits = (-2.7, 0) ).createAndFillObject()
        joint.setParent(rUpperArm)
        joint.setChild(rLowerArm)
        character.addJoint(joint)
        # --- left upper leg ---
        leftUpperLegSizeY = self.getHipPosY() - self.getKneePosY(1)
        leftUpperLegDiameter = self.getUpperLegDiameter(1)
        lUpperLeg = PyUtils.RigidBody.createArticulatedCylinder(
            name = "lUpperLeg",
            axis = 1, basePos = -leftUpperLegSizeY/2.0, tipPos = leftUpperLegSizeY/2.0,
            radius = leftUpperLegDiameter/2.0,
            moiScale = 4,
            mass = -massScale, colour = blue )
        character.addArticulatedRigidBody( lUpperLeg )
        lUpperLeg.addMesh( PyUtils.Mesh.createSphere( (0, leftUpperLegSizeY/2.0, 0), leftUpperLegDiameter*0.5, colour = blue ) )
        lUpperLeg.addMesh( PyUtils.Mesh.createSphere( (0, -leftUpperLegSizeY/2.0, 0), leftUpperLegDiameter*0.5, colour = blue ) )
        joint = Proxys.BallInSocketJoint(
            name = "lHip",
            posInParent = (pelvisRadius*self.getLegRelativeAnchorX(1), -pelvisSizeY/2.0, 0),
            posInChild = (0, leftUpperLegSizeY/2.0, 0),
            swingAxis1 = (1, 0, 0),
            twistAxis = ( 0, 0, 1),
            limits = (-1.3, 1.9, -1, 1, -1, 1) ).createAndFillObject()
        joint.setParent(pelvis)
        joint.setChild(lUpperLeg)
        character.addJoint(joint)
        # --- right upper leg ---
        rightUpperLegSizeY = self.getHipPosY() - self.getKneePosY(-1)
        rightUpperLegDiameter = self.getUpperLegDiameter(-1)
        rUpperLeg = PyUtils.RigidBody.createArticulatedCylinder(
            name = "rUpperLeg",
            axis = 1, basePos = -rightUpperLegSizeY/2.0, tipPos = rightUpperLegSizeY/2.0,
            radius = rightUpperLegDiameter/2.0,
            moiScale = 4,
            mass = -massScale, colour = blue )
        character.addArticulatedRigidBody( rUpperLeg )
        rUpperLeg.addMesh( PyUtils.Mesh.createSphere( (0, -rightUpperLegSizeY/2.0, 0), rightUpperLegDiameter*0.5, colour = blue ) )
        rUpperLeg.addMesh( PyUtils.Mesh.createSphere( (0, rightUpperLegSizeY/2.0, 0), rightUpperLegDiameter*0.5, colour = blue ) )
        joint = Proxys.BallInSocketJoint(
            name = "rHip",
            posInParent = (-pelvisRadius*self.getLegRelativeAnchorX(-1), -pelvisSizeY/2.0, 0),
            posInChild = (0, rightUpperLegSizeY/2.0, 0),
            swingAxis1 = (1, 0, 0),
            twistAxis = ( 0, 0, 1),
            limits = (-1.3, 1.9, -1, 1, -1, 1) ).createAndFillObject()
        joint.setParent(pelvis)
        joint.setChild(rUpperLeg)
        character.addJoint(joint)
        # --- left lower leg ---
        leftLowerLegSizeY = self.getKneePosY(1) - self.getAnklePosY(1)
        leftLowerLegDiameter = self.getLowerLegDiameter(1)
        lLowerLeg = PyUtils.RigidBody.createArticulatedCylinder(
            name = "lLowerLeg",
            axis = 1, basePos = -leftLowerLegSizeY/2.0, tipPos = leftLowerLegSizeY/2.0,
            radius = leftLowerLegDiameter/2.0,
            moiScale = 4,
            mass = -massScale, colour = blue )
        character.addArticulatedRigidBody( lLowerLeg )
        joint = Proxys.HingeJoint(
            name = "lKnee",
            posInParent = (0, -leftUpperLegSizeY/2.0, 0),
            posInChild = (0, leftLowerLegSizeY/2.0, 0),
            axis = (1, 0, 0),
            limits = (0, 2.5) ).createAndFillObject()
        joint.setParent(lUpperLeg)
        joint.setChild(lLowerLeg)
        character.addJoint(joint)
        # --- right lower leg ---
        rightLowerLegSizeY = self.getKneePosY(-1) - self.getAnklePosY(-1)
        rightLowerLegDiameter = self.getLowerLegDiameter(-1)
        rLowerLeg = PyUtils.RigidBody.createArticulatedCylinder(
            name = "rLowerLeg",
            axis = 1, basePos = -rightLowerLegSizeY/2.0, tipPos = rightLowerLegSizeY/2.0,
            radius = rightLowerLegDiameter/2.0,
            moiScale = 4,
            mass = -massScale, colour = blue )
        character.addArticulatedRigidBody( rLowerLeg )
        joint = Proxys.HingeJoint(
            name = "rKnee",
            posInParent = (0, -rightUpperLegSizeY/2.0, 0),
            posInChild = (0, rightLowerLegSizeY/2.0, 0),
            axis = (1, 0, 0),
            limits = (0, 2.5) ).createAndFillObject()
        joint.setParent(rUpperLeg)
        joint.setChild(rLowerLeg)
        character.addJoint(joint)
        # --- left foot (rear 75% of foot length; toes get the rest) ---
        leftFootSizeX = self.getFootSizeX(1)
        leftFootSizeY = self.getAnklePosY(1) - self.getGroundPosY()
        leftFootSizeZ = self.getFootSizeZ(1) * 0.75
        lFoot = PyUtils.RigidBody.createArticulatedTaperedBox(
            name = "lFoot",
            size = (leftFootSizeX,leftFootSizeY,leftFootSizeZ),
            exponentSide = 20,
            groundCoeffs = (0.0005,0.2),
            moiScale = 3,
            mass = -massScale,
            colour = gray )
        character.addArticulatedRigidBody( lFoot )
        joint = Proxys.UniversalJoint(
            name = "lAnkle",
            posInParent = (0, -leftLowerLegSizeY/2.0, 0),
            posInChild = (0, leftFootSizeY/2.0, -leftFootSizeZ*0.33 + leftLowerLegDiameter/2.0),
            parentAxis = (0, 0, 1),
            childAxis = (1, 0, 0),
            limits = (-0.75, 0.75, -0.75, 0.75) ).createAndFillObject()
        joint.setParent(lLowerLeg)
        joint.setChild(lFoot)
        character.addJoint(joint)
        # --- right foot ---
        rightFootSizeX = self.getFootSizeX(-1)
        rightFootSizeY = self.getAnklePosY(-1) - self.getGroundPosY()
        rightFootSizeZ = self.getFootSizeZ(-1) * 0.75
        rFoot = PyUtils.RigidBody.createArticulatedTaperedBox(
            name = "rFoot",
            size = (rightFootSizeX,rightFootSizeY,rightFootSizeZ),
            exponentSide = 20,
            groundCoeffs = (0.0005,0.2),
            moiScale = 3,
            mass = -massScale, colour = gray )
        character.addArticulatedRigidBody( rFoot )
        joint = Proxys.UniversalJoint(
            name = "rAnkle",
            posInParent = (0, -rightLowerLegSizeY/2.0, 0),
            posInChild = (0, rightFootSizeY/2.0, -rightFootSizeZ*0.33 + rightLowerLegDiameter/2.0),
            parentAxis = (0, 0, -1),
            childAxis = (1, 0, 0),
            limits = (-0.75, 0.75, -0.75, 0.75) ).createAndFillObject()
        joint.setParent(rLowerLeg)
        joint.setChild(rFoot)
        character.addJoint(joint)
        # --- left toes (remaining front part of the foot) ---
        leftToesSizeX = leftFootSizeX
        leftToesSizeY = leftFootSizeY * 0.66
        leftToesSizeZ = self.getFootSizeZ(1) - leftFootSizeZ
        lToes = PyUtils.RigidBody.createArticulatedTaperedBox(
            name = "lToes",
            size = (leftToesSizeX,leftToesSizeY,leftToesSizeZ),
            exponentSide = 20,
            groundCoeffs = (0.0005,0.2),
            moiScale = 3,
            mass = -massScale, colour = gray )
        character.addArticulatedRigidBody( lToes )
        joint = Proxys.HingeJoint(
            name = "lToeJoint",
            posInParent = (0, (leftToesSizeY-leftFootSizeY)/2.0+0.003, leftFootSizeZ/2.0),
            posInChild = (0, 0, -leftToesSizeZ/2.0),
            axis = ( 1, 0, 0 ),
            limits = (-0.52, 0.1) ).createAndFillObject()
        joint.setParent(lFoot)
        joint.setChild(lToes)
        character.addJoint(joint)
        # --- right toes ---
        rightToesSizeX = rightFootSizeX
        rightToesSizeY = rightFootSizeY * 0.66
        rightToesSizeZ = self.getFootSizeZ(-1) - rightFootSizeZ
        rToes = PyUtils.RigidBody.createArticulatedTaperedBox(
            name = "rToes",
            size = (rightToesSizeX,rightToesSizeY,rightToesSizeZ),
            exponentSide = 20,
            groundCoeffs = (0.0005,0.2),
            moiScale = 3,
            mass = -massScale, colour = gray )
        character.addArticulatedRigidBody( rToes )
        joint = Proxys.HingeJoint(
            name = "rToeJoint",
            posInParent = (0, (rightToesSizeY-rightFootSizeY)/2.0+0.003, rightFootSizeZ/2.0),
            posInChild = (0, 0, -rightToesSizeZ/2.0),
            axis = ( 1, 0, 0 ),
            limits = (-0.52, 0.1) ).createAndFillObject()
        joint.setParent(rFoot)
        joint.setChild(rToes)
        character.addJoint(joint)
        return character
class _Value(object):
def __init__(self, value, minValue=None, maxValue=None):
"""Create a native value with a given maximum and minimum"""
self._value = value
if maxValue is not None and minValue is not None and maxValue < minValue :
raise ValueError( 'Max must be equal or greater than minValue!' )
self._minValue = minValue
self._maxValue = maxValue
def get(self):
return self._value
def set(self, value):
self._value = self._clamp(value)
def _clamp(self, value):
if self._minValue is not None and value < self._minValue:
return self._minValue
if self._maxValue is not None and value > self._maxValue:
return self._maxValue
return value
class _Symmetric(_Value):
    """A value stored separately for the right (-1) and left (1) sides.

    Internally the sides live in a 2-element list: index 0 is the right
    side, index 1 is the left side.  Floor division (//) is used for the
    side-to-index mapping so that indexing also works under Python 3,
    where '/' yields a float and a float index raises TypeError.
    """
    def __init__(self, value, minValue=None, maxValue=None ):
        """Create a symmetric native value"""
        super(_Symmetric,self).__init__(None, minValue, maxValue)
        # [right, left] -- both sides start out equal.
        self._side = [value,value]
    def getSide(self, side):
        """Gets the right side (-1) or the left side (1) value."""
        return self._side[(side+1)//2]
    def getRight(self):
        """Gets the right side value."""
        return self._side[0]
    def getLeft(self):
        """Gets the left side value."""
        return self._side[1]
    def setSide(self, side, value):
        """Sets the right side (-1) or the left side (1) value or both at once (0)."""
        value = self._clamp(value)
        if side == 0 :
            self._side = [value,value]
            return
        if side != -1 and side != 1 :
            raise IndexError( 'Side must be -1, 0 or 1.' )
        self._side[(side+1)//2] = value
    def setRight(self, value):
        """Sets the right side value."""
        self._side[0] = self._clamp(value)
    def setLeft(self, value):
        """Sets the left side value."""
        self._side[1] = self._clamp(value)
    def forceSymmetric(self):
        """Make sure the value is symmetric by copying the left side (1) into the right side (-1)."""
        self._side[0] = self._side[1]
| Python |
'''
Created on 2009-11-23
@author: beaudoin
'''
import wx, GLUtils, PyUtils, Core, Physics, Controllers
from CharacterDescription import CharacterDescription
from CharacterScaler import CharacterScalerFront
from ToolSet import ToolSet
class Model(object):
    """Model for the instant-character tool.

    Owns the CharacterDescription being edited, the GLUI container that
    displays the scaling interface, and the tool-panel ToolSet.  Observers
    registered via addOptionsObserver() are notified whenever an option
    changes.
    """
    def __init__(self):
        """Creates a new model for a character description, including the required UI."""
        app = wx.GetApp()
        glCanvas = app.getGLCanvas()
        # The scaling UI lives in a GLUI container on the GL canvas, hidden by default.
        self._container = glCanvas.addGLUITool( GLUtils.GLUIContainer )
        self._container.setVisible(False)
        self._optionsObservable = PyUtils.Observable()
        self._sizer = GLUtils.GLUIBoxSizer(GLUtils.GLUI_HORIZONTAL)
        self._container.setSizer(self._sizer)
        # reset() creates self._characterDescription and the front scaler.
        self.reset()
        self._toolSet = ToolSet(app.getToolPanel(), self)
    def isSymmetric(self):
        """True if the character is forced to be symmetrical."""
        return self._characterDescription.isSymmetric()
    def setSymmetric(self, value):
        """Indicates if the character is forced to be symmetrical."""
        if value == self.isSymmetric() : return
        self._characterDescription.setSymmetric(value)
        self._optionsObservable.notifyObservers()
    def displayInterface(self, display):
        """Indicates whether the interface for scaling the character should be displayed."""
        if display != self.getDisplayInterface() :
            self._container.setVisible(display)
            # Parent must re-layout after a visibility change.
            self._container.getParent().layout()
            self._optionsObservable.notifyObservers()
    def getDisplayInterface(self):
        """Indicates whether the interface for scaling the character is currently displayed."""
        return self._container.isVisible()
    def addOptionsObserver(self, observer):
        """Adds an observer for the options of the instant character model."""
        self._optionsObservable.addObserver(observer)
    def reset(self):
        """Resets character description to default values."""
        self._characterDescription = CharacterDescription()
        self._container.detachAllChildren()
        self._frontScaler = CharacterScalerFront( self._container, characterDescription = self._characterDescription, minWidth = 250, minHeight = 500 )
        # self._sideScaler = CharacterScalerSide( self._container, characterDescription = self._characterDescription, minWidth = 125, minHeight = 250 )
        self._sizer.add(self._frontScaler)
        # self._sizer.add(self._sideScaler)
        self._container.getParent().layout()
        self._optionsObservable.notifyObservers()
    def create(self):
        """Creates the instant character based on the available description. Attach a reasonable controller, if available."""
        app = wx.GetApp()
        # Keep a copy of the current controller (if any) so it can be
        # re-attached to the freshly created character.
        try:
            wrappedController = PyUtils.wrapCopy( app.getController(0) )
        except IndexError:
            wrappedController = None
        # Remember the previous character's mass so controller gains can be
        # rescaled proportionally for the new character.
        try:
            character = app.getCharacter(0)
            previousMass = character.getMass()
        except IndexError:
            previousMass = None
        app.deleteAllObjects()
        PyUtils.load( "RigidBodies.FlatGround" )
        character = self._characterDescription.createCharacter()
        character.computeMass()
        app.addCharacter(character)
        if wrappedController is not None:
            controller = wrappedController.createAndFillObject(None, character)
            if previousMass is not None:
                # Scale PD gains by the mass ratio so the copied controller
                # behaves comparably on the new character.
                massRatio = character.getMass() / previousMass
                controller.scaleGains( massRatio )
            app.addController(controller)
            controller.setStance( Core.LEFT_STANCE )
            self.connectController()
        return character
    def connectController(self, useCurrentSliders = True):
        """Connects the current controller to a behaviour."""
        app = wx.GetApp()
        worldOracle = app.getWorldOracle()
        character = app.getCharacter(0)
        controller = app.getController(0)
        behaviour = Core.TurnController(character, controller, worldOracle)
        behaviour.initializeDefaultParameters()
        controller.setBehaviour(behaviour)
        if useCurrentSliders :
            # Seed the behaviour with the values currently shown on the sliders.
            # NOTE(review): original indentation was lost; the three slider
            # requests are assumed gated by useCurrentSliders -- confirm.
            behaviour.requestVelocities(self._toolSet.getCurrentSpeed(), 0);
            behaviour.requestStepTime(self._toolSet.getCurrentDuration());
            behaviour.requestCoronalStepWidth(self._toolSet.getCurrentStepWidth());
        behaviour.requestHeading(0);
        behaviour.conTransitionPlan();
        self._toolSet.update()
        app.takeSnapshot()
    def getDesiredDuration(self):
        """Return the desired duration of a step."""
        try:
            return wx.GetApp().getController(0).getBehaviour().getDesiredStepTime()
        except Exception:
            # Best effort: no controller/behaviour available yet.
            return 0
    def setDesiredDuration(self, duration):
        """Sets the desired duration of a step."""
        try:
            wx.GetApp().getController(0).getBehaviour().requestStepTime(duration)
        except Exception:
            # Best effort: silently ignore when no behaviour is attached.
            pass
    def getDesiredSpeed(self):
        """Return the desired speed of the character."""
        try:
            return wx.GetApp().getController(0).getBehaviour().getDesiredVelocitySagittal()
        except Exception:
            return 0
    def setDesiredSpeed(self, speed):
        """Sets the desired speed of the character."""
        try:
            wx.GetApp().getController(0).getBehaviour().requestVelocities(speed, 0)
        except Exception:
            pass
    def getDesiredStepWidth(self):
        """Return the desired step width of the character."""
        try:
            return wx.GetApp().getController(0).getBehaviour().getCoronalStepWidth()
        except Exception:
            return 0
    def setDesiredStepWidth(self, stepWidth):
        """Sets the desired step width of the character."""
        try:
            wx.GetApp().getController(0).getBehaviour().requestCoronalStepWidth(stepWidth)
        except Exception:
            pass
from Model import Model
from CharacterDescription import CharacterDescription
from CharacterScaler import CharacterScalerFront
from ToolSet import ToolSet | Python |
'''
Created on 2009-11-23
@author: beaudoin
'''
import UI, wx
class ToolSet(UI.ToolSets.ToolsetBase):
    """Tool-panel UI for the instant character: option toggles, create/reset
    buttons, and the speed/duration/width gait sliders."""
    def __init__(self, toolPanel, model):
        """Adds a tool set for the instant character to a toolpanel."""
        super(ToolSet,self).__init__()
        self._toolPanel = toolPanel
        self._toolSet = toolPanel.createToolSet( "Instant Character" )
        # Options and buttons are wired straight to the model's accessors.
        self.addOption( "Edit character", model.getDisplayInterface, model.displayInterface )
        self.addOption( "Symmetrical", model.isSymmetric, model.setSymmetric )
        self.addButton( "Create", model.create )
        self.addButton( "Reset character", model.reset )
        # Each addSlider returns (slider widget, slider data); the data is
        # what the getCurrent* accessors read back.
        self._speedSlider, self._speedSliderData = self.addSlider( "Speed", min=-1, max=5.0, step=0.01, getter = model.getDesiredSpeed, setter = model.setDesiredSpeed)
        self._durationSlider, self._durationSliderData = self.addSlider( "Duration", min=0.2, max=1.0, step=0.01, getter = model.getDesiredDuration, setter = model.setDesiredDuration)
        self._stepWidthSlider, self._stepWidthSliderData = self.addSlider( "Width", min=0.0, max=0.3, step=0.01, getter = model.getDesiredStepWidth, setter = model.setDesiredStepWidth)
        # Add this as an observer
        model.addOptionsObserver(self)
        # Initial update
        self.update()
    def getCurrentSpeed(self):
        """Gets the speed as currently shown on the slider."""
        return self._speedSliderData.getSliderValue()
    def getCurrentDuration(self):
        """Gets the duration as currently shown on the slider."""
        return self._durationSliderData.getSliderValue()
    def getCurrentStepWidth(self):
        """Gets the step width as currently shown on the slider."""
        return self._stepWidthSliderData.getSliderValue()
'''
Created on 2009-09-02
@author: beaudoin
'''
import Proxy, Member
import App
# Proxy for App.ObservableList: exposes its contents as an object list
# embedded directly under the parent tree node.
cls = App.ObservableList
ObservableList = Proxy.create( cls,
    members = [
        Member.ObjectList( 'objects', None, cls.get, cls.getCount, cls.add, embedInParentNode = True ),
    ] )
# Proxy for App.SnapshotBranch: read-only (no adder) embedded snapshot list.
cls = App.SnapshotBranch
SnapshotBranch = Proxy.create( cls,
    nameGetter = lambda object: "Branch",
    icon = "../data/ui/snapshotBranch.png",
    members = [
        Member.ObjectList( 'snapshots', None, cls.getSnapshot, cls.getSnapshotCount, None, embedInParentNode = True ),
    ] )
# Proxy for App.Snapshot: read-only embedded list of branches.
cls = App.Snapshot
Snapshot = Proxy.create( cls,
    nameGetter = cls.getName,
    icon = "../data/ui/snapshotIcon.png",
    members = [
        Member.ObjectList( 'branches', None, cls.getBranch, cls.getBranchCount, None, embedInParentNode = True ),
    ] )
| Python |
'''
Created on 2009-09-22
@author: beaudoin
This module contains classes representing different possible type of data members within proxy object.
A member knows how to get/set itself from/to the wrapped object.
'''
import MathLib
import PyUtils
class TopLevel(object):
    """Base descriptor for one member of a proxy object.

    Holds the member's name, default value, Python type, editability flag
    and display name; subclasses implement how the member is read from and
    written to the wrapped object.
    """
    def __init__(self, type, name, default, editable = False, fancyName = None, isObject = False ):
        self.name = name
        self.default = default
        self.editable = editable
        self.isObject = isObject
        self.type = type
        self.fancyName = fancyName
        # Identity comparison with None (PEP 8): '== None' can be fooled by
        # objects overriding __eq__.
        if fancyName is None:
            self.fancyName = PyUtils.unCamelCase(name)
    def basicInterpret(self,value):
        """Performs basic interpretation, does not coerce into type."""
        # Strings supplied for non-string members are evaluated as Python
        # expressions (e.g. "(1,2,3)" for a tuple member).
        if self.type not in (str,unicode) and isinstance(value, basestring) :
            return eval(value,dict(__builtins__=__builtins__))
        return value
    def interpret(self, value):
        """This method lets a user have different ways to input a member."""
        value = self.basicInterpret(value)
        if self.type is None or value is None or isinstance(value, self.type): return value
        return self.type(value)
    def format(self, value):
        """This method returns the preferred representation of the data."""
        return value
class Basic(TopLevel):
    """Member accessed through a plain getter/setter callable pair.

    Either accessor may be None, in which case the corresponding operation
    is a no-op (get returns None).
    """
    def __init__( self, type, name, default, getter, setter, editable = True, fancyName = None, isObject = False ):
        super( Basic, self ).__init__( type, name, default, editable, fancyName, isObject )
        self._getter = getter
        self._setter = setter
    def get(self, object):
        """Read the member from the wrapped object (None when no getter exists)."""
        getter = self._getter
        if getter is None:
            return None
        return getter(object)
    def set(self, object, value, container = None):
        """Write value to the wrapped object.

        Skipped silently for None values or a missing setter.  Setters that
        accept the surrounding container get it as a third argument; others
        are retried with just (object, value).
        """
        setter = self._setter
        if setter is None or value is None:
            return
        try:
            setter(object, value, container)
        except ( TypeError, NotImplementedError ):
            setter(object, value)
class Tuple(Basic):
    """Can be used when setter takes multiple parameters and getter returns multiple parameters. Otherwise use Basic with type tuple."""
    def __init__( self, name, default, getter, setter, editable = True, fancyName = None ):
        super( Tuple, self ).__init__( tuple, name, default, getter, setter, editable, fancyName, False )
    def get(self, object):
        if self._getter is None : return None
        # TODO do something here!
        # NOTE(review): reading tuple members is not implemented; this
        # returns None even when a getter is available.
        return None
    def set(self, object, value, container = None):
        if self._setter is None : return
        if value is None : return
        # The value tuple is splatted into the setter's positional parameters.
        self._setter(object, *value)
class Enum(Basic):
    """Member stored as an int but presented through a PyUtils-style enum."""
    def __init__( self, enum, name, default, getter, setter, editable = True, fancyName = None ):
        """Pass the enum class you want to use for this member"""
        super( Enum, self ).__init__( int, name, default, getter, setter, editable, fancyName, False )
        self.enum = enum
    def interpret(self, value):
        """Accept None, an enum name string, or anything convertible to int."""
        if value is None:
            return None
        if isinstance(value, basestring):
            return self.enum.toInt( value )
        return int( value )
    def format(self, value):
        """Render the int value as its enum name."""
        return self.enum.toStr(value)
class Vector3d(Basic):
    """Member of type MathLib.Vector3d, convertible from/to plain sequences."""
    def __init__( self, name, default, getter, setter, editable = True, fancyName = None ):
        super( Vector3d, self ).__init__( MathLib.Vector3d, name, default, getter, setter, editable, fancyName, False )
    def interpret(self, value):
        """Coerce sequences (or strings, via basicInterpret) to Vector3d."""
        value = self.basicInterpret(value)
        if not isinstance(value, MathLib.Vector3d):
            value = PyUtils.toVector3d( value )
        return value
    def format(self, value):
        """Return the vector as a plain Python sequence."""
        return PyUtils.fromVector3d(value)
class Point3d(Basic):
    """Member of type MathLib.Point3d, convertible from/to plain sequences."""
    def __init__( self, name, default, getter, setter, editable = True, fancyName = None ):
        super( Point3d, self ).__init__( MathLib.Point3d, name, default, getter, setter, editable, fancyName, False )
    def interpret(self, value):
        """Coerce sequences (or strings, via basicInterpret) to Point3d."""
        value = self.basicInterpret(value)
        if not isinstance(value, MathLib.Point3d):
            value = PyUtils.toPoint3d( value )
        return value
    def format(self, value):
        """Return the point as a plain Python sequence."""
        return PyUtils.fromPoint3d(value)
class Quaternion(Basic):
    """Member of type MathLib.Quaternion, entered/displayed as angle-axis."""
    def __init__( self, name, default, getter, setter, editable = True, fancyName = None ):
        super( Quaternion, self ).__init__( MathLib.Quaternion, name, default, getter, setter, editable, fancyName, False )
    def interpret(self, value):
        """Coerce an (angle, axis) representation to a Quaternion."""
        value = self.basicInterpret(value)
        if not isinstance(value, MathLib.Quaternion):
            value = PyUtils.angleAxisToQuaternion( value )
        return value
    def format(self, value):
        """Return the quaternion in angle-axis form."""
        return PyUtils.angleAxisFromQuaternion(value)
class Trajectory1d(Basic):
    """Member of type MathLib.Trajectory1d, convertible from plain point data."""
    def __init__( self, name, default, getter, setter, editable = True, fancyName = None ):
        super( Trajectory1d, self ).__init__( MathLib.Trajectory1d, name, default, getter, setter, editable, fancyName, False )
    def interpret(self, value):
        """Coerce point data (or strings, via basicInterpret) to Trajectory1d."""
        value = self.basicInterpret(value)
        if not isinstance(value, MathLib.Trajectory1d):
            value = PyUtils.toTrajectory1d( value )
        return value
    def format(self, value):
        """Return the trajectory as plain point data."""
        return PyUtils.fromTrajectory1d(value)
class Object(Basic):
    """Member holding a single sub-object, exposed through its proxy.

    get() wraps the raw object in its proxy class; set() tries to re-fill
    the existing object in place and only creates a fresh one if that fails.
    """
    def __init__( self, name, default, getter, setter, editable = True, fancyName = None ):
        super( Object, self ).__init__( None, name, default, getter, setter, editable, fancyName, True )
    def getObject(self, object):
        """Return unwrapped object."""
        return super( Object, self ).get(object)
    def get(self, object):
        # Wrap the raw sub-object in its proxy class.
        return PyUtils.wrap( self.getObject(object) )
    def set(self, object, valueProxy, container = None):
        if valueProxy is None : return
        try:
            # Try to re-fill the existing object
            value = self.getObject(object)
            # Batch change notifications while filling, when supported.
            try: value.beginBatchChanges()
            except AttributeError: pass
            try:
                valueProxy.fillObject( value, object )
            finally:
                try: value.endBatchChanges()
                except AttributeError: pass
        except(AttributeError, TypeError):
            # No existing (or compatible) object: create and fill a new one.
            value = valueProxy.createObject()
            valueProxy.fillObject( value, object )
        super( Object, self ).set(object,value,container)
class ObjectList(TopLevel):
    """Member holding a homogeneous list of sub-objects.

    Access goes through index-based getter/count callables; new items are
    appended through addToContainer.  When embedInParentNode is true the
    items are shown directly under the parent's tree node instead of under
    a separate group node.
    """
    def __init__( self, name, default, getAtIndex, getCount, addToContainer, editable = True, fancyName = None, embedInParentNode = False, groupIcon = "../data/ui/objectListIcon.png" ):
        super( ObjectList, self ).__init__( None, name, default, editable, fancyName, True )
        self._getAtIndex = getAtIndex
        self._getCount = getCount
        self._addToContainer = addToContainer
        self.embedInParentNode = embedInParentNode
        self.groupIcon = groupIcon
    def getObject(self, object, i):
        """Return unwrapped object."""
        return self._getAtIndex(object,i)
    def getCount(self, object):
        # Number of items currently in the wrapped container.
        return self._getCount(object)
    def get(self, object):
        # Wrap every contained object in its proxy class.
        result = []
        for i in range( self.getCount(object) ) :
            result.append( PyUtils.wrap( self.getObject(object,i) ) )
        return result
    def _set(self, object, i, valueProxy, container):
        # Fill item i in place when possible; otherwise create and append.
        if valueProxy is None : return
        try:
            # Try to re-fill the existing object
            if i >= self.getCount(object) : raise IndexError;
            value = self.getObject(object, i)
            try: value.beginBatchChanges()
            except AttributeError: pass
            try:
                valueProxy.fillObject( value, object )
            finally:
                try: value.endBatchChanges()
                except AttributeError: pass
        except(AttributeError, TypeError, IndexError, ValueError):
            value = valueProxy.createObject()
            if value is None : return
            valueProxy.fillObject( value, object )
            # Some adders also accept the surrounding container.
            try: self._addToContainer(object, value, container)
            except ( TypeError, NotImplementedError ): self._addToContainer(object, value)
    def set(self, object, valueProxyList, container = None):
        if valueProxyList is None : return
        PyUtils.callOnObjectOrListAndEnumerate( valueProxyList, lambda i, valueProxy: self._set(object,i,valueProxy,container) )
| Python |
'''
Created on 2009-09-02
@author: beaudoin
'''
import Physics
import Proxy, Member
import PyUtils
import Physics
def addMesh( rb, mesh ):
    """Adds a single mesh which can be either:
    - A single string (mesh filename)
    - A tuple or list with a single string (mesh filename)
    - A tuple or list with two elements (mesh filename and a 4-tuple color)
    - A dict with "obj" and "colour" as keys (mesh filename and a 4-tuple color)
    """
    if mesh is None :
        return
    errorStr = "Mesh should be a string or a 2-tupe, string and colour."
    obj = None
    colour = None
    if isinstance( mesh, basestring ):
        obj = mesh
    else:
        try:
            # Maybe a list or tuple?
            if len(mesh) > 2:
                raise TypeError( "More than 2 elements, mesh is probably a dict. This error should be caught." )
            obj = mesh[0]
            if len(mesh) > 1:
                colour = mesh[1]
        # BUG FIX: a dict with <= 2 keys reaches mesh[0] above, which raises
        # KeyError (not TypeError/IndexError); catch it so the documented
        # dict form falls through to the dict handling below.
        except(TypeError, IndexError, KeyError):
            try:
                obj = mesh["obj"]
                if mesh.has_key("colour"):
                    colour = mesh["colour"]
            except(KeyError, AttributeError):
                raise TypeError( errorStr )
    # Validate what was extracted before touching the rigid body.
    if not isinstance( obj, basestring ):
        raise TypeError( errorStr )
    if colour is not None and len(colour) != 4:
        raise TypeError( errorStr )
    rb.addMeshObj( obj )
    if colour is not None:
        # Colour is an RGBA 4-tuple.
        rb.setColour( colour[0], colour[1], colour[2], colour[3] )
def setMeshes( rb, meshes ):
    """Attach a single mesh or a list of meshes to the rigid body rb."""
    PyUtils.callOnObjectOrList( meshes, lambda mesh: addMesh( rb, mesh ) )
def getMeshes( rb ):
    """DUMMY! Meshes are forgotten."""
    # Mesh file names are not retained on the rigid body, so there is
    # nothing to read back; callers always see "no meshes".
    return None
def getGroundCoeffs(object):
    """Return the object's ground-contact coefficients as a (softness, penalty) pair."""
    softness = object.getGroundSoftness()
    penalty = object.getGroundPenalty()
    return (softness, penalty)
def setGroundCoeffs(object, value):
    """Apply the (softness, penalty) pair to the object's ODE ground coefficients."""
    softness, penalty = value
    return object.setODEGroundCoefficients( softness, penalty )
# Proxy for Physics.RigidBody.  'loader' registers newly created bodies
# with the physics world; note it binds Physics.world() at import time.
cls = Physics.RigidBody
RigidBody = Proxy.create( cls, loader = Physics.world().addRigidBody,
    members = [
        Member.Basic( str, 'name', 'UnnamedRigidBody', cls.getName, cls.setName ),
        # Meshes are write-only: getMeshes is a dummy (file names are not retained).
        Member.Basic( list, 'meshes', None, getMeshes, setMeshes ),
        Member.Basic( float, 'mass', 1.0, cls.getMass, cls.setMass ),
        Member.Vector3d( 'moi', (1.0,1.0,1.0), cls.getMOI, cls.setMOI ),
        Member.ObjectList( 'cdps', None, cls.getCollisionDetectionPrimitive, cls.getCollisionDetectionPrimitiveCount, cls.addCollisionDetectionPrimitive ),
        Member.Point3d( 'pos', (0.0,0.0,0.0), cls.getCMPosition, cls.setCMPosition ),
        Member.Vector3d( 'vel', (0.0,0.0,0.0), cls.getCMVelocity, cls.setCMVelocity ),
        Member.Quaternion( 'orientation', (0.0,(1.0,0.0,0.0)), cls.getOrientation, cls.setOrientation ),
        Member.Vector3d( 'angularVel', (0.0,0.0,0.0), cls.getAngularVelocity, cls.setAngularVelocity ),
        Member.Basic( bool, 'locked', False, cls.isLocked, cls.lockBody ),
        Member.Basic( float, 'frictionCoeff', 0.8, cls.getFrictionCoefficient, cls.setFrictionCoefficient ),
        Member.Basic( float, 'restitutionCoeff', 0.35, cls.getRestitutionCoefficient, cls.setRestitutionCoefficient ),
        # (softness, penalty) pair; see getGroundCoeffs/setGroundCoeffs above.
        Member.Basic( tuple, 'groundCoeffs', (0.00001,0.2), getGroundCoeffs, setGroundCoeffs ),
        Member.Basic( bool, 'planar', False, cls.isPlanar, cls.setPlanar )
    ] )
# ArticulatedRigidBody extends RigidBody: same members, plus the proper
# downcast so wrapped objects get the more specific proxy.
cls = Physics.ArticulatedRigidBody
ArticulatedRigidBody = Proxy.create( cls, parent = RigidBody, loader = Physics.world().addRigidBody, caster = Physics.castToArticulatedRigidBody )
| Python |
'''
Created on 2009-09-03
@author: beaudoin
'''
import Proxy
import Member
import Physics
def getArb(af, arbName):
    """Look up an articulated rigid body by name in the articulated figure af.

    Returns None when arbName is None; raises KeyError when the name does
    not resolve to a body.
    """
    if arbName is None:
        return None
    body = af.getARBByName( arbName )
    if body is not None:
        return body
    raise KeyError( "Articulated rigid body '"+arbName+"' not found in articulated figure '"+af.getName()+"'." )
def setParent(joint, arbName, af):
    """Resolve arbName within af and set it as the joint's parent body."""
    parent = getArb(af, arbName)
    joint.setParent( parent )
def setChild(joint, arbName, af):
    """Resolve arbName within af and set it as the joint's child body."""
    child = getArb(af, arbName)
    joint.setChild( child )
def getParent(joint):
    """Return the name of the joint's parent body."""
    parent = joint.getParent()
    return parent.getName()
def getChild(joint):
    """Return the name of the joint's child body."""
    child = joint.getChild()
    return child.getName()
# Base proxy for Physics.Joint: name, parent/child bodies (referenced by
# name and resolved through the owning articulated figure) and the joint's
# attachment position in each body's frame.
cls = Physics.Joint
Joint = Proxy.create( cls,
    members = [
        Member.Basic( str, 'name', 'UnnamedJoint', cls.getName, cls.setName ),
        Member.Basic( str, 'parent', None, getParent, setParent ),
        Member.Basic( str, 'child', None, getChild, setChild ),
        Member.Point3d( 'posInParent', (0.0,0.0,0.0), cls.getParentJointPosition, cls.setParentJointPosition ),
        Member.Point3d( 'posInChild', (0.0,0.0,0.0), cls.getChildJointPosition, cls.setChildJointPosition )
    ] )
# 3-DOF ball-in-socket joint: two swing axes plus a twist axis; six limit values.
cls = Physics.BallInSocketJoint
BallInSocketJoint = Proxy.create( cls, parent = Joint, caster = Physics.castToBallInSocketJoint,
    members = [
        Member.Vector3d( 'swingAxis1', (1.0,0.0,0.0), cls.getSwingAxis1, cls.setSwingAxis1 ),
        Member.Vector3d( 'swingAxis2', None, cls.getSwingAxis2, cls.setSwingAxis2 ),
        Member.Vector3d( 'twistAxis', None, cls.getTwistAxis, cls.setTwistAxis ),
        Member.Tuple( 'limits', (-3.1416,3.1416,-3.1416,3.1416,-3.1416,3.1416), None, cls.setJointLimits )
    ] )
# 2-DOF universal joint: one axis per body; four limit values.
cls = Physics.UniversalJoint
UniversalJoint = Proxy.create( cls, parent = Joint, caster = Physics.castToUniversalJoint,
    members = [
        Member.Vector3d( 'parentAxis', (1.0,0.0,0.0), cls.getParentAxis, cls.setParentAxis ),
        Member.Vector3d( 'childAxis', (1.0,0.0,0.0), cls.getChildAxis, cls.setChildAxis ),
        Member.Tuple( 'limits', (-3.1416,3.1416,-3.1416,3.1416), None, cls.setJointLimits )
    ] )
# 1-DOF hinge joint: a single axis; two limit values.
cls = Physics.HingeJoint
HingeJoint = Proxy.create( cls, parent = Joint, caster = Physics.castToHingeJoint,
    members = [
        Member.Vector3d( 'axis', (1.0,0.0,0.0), cls.getAxis, cls.setAxis ),
        Member.Tuple( 'limits', (-3.1416,3.1416), None, cls.setJointLimits )
    ] )
# 0-DOF stiff joint: no extra members beyond the base Joint proxy.
cls = Physics.StiffJoint
StiffJoint = Proxy.create( cls, parent = Joint, caster = Physics.castToStiffJoint )
| Python |
'''
Created on 2009-09-24
@author: beaudoin
'''
import Utils, wx, PyUtils
import Member
from UI.InfoTree import getIconIndex
class NodeData(Utils.Observer):
    """
    This class wraps one object presented in a node of the tree.
    The wrapped object must be observable (either derived from Utils.Observable for a C++ object
    or App.Observable for a Python object).
    An object keeps an ordered list of its child objects.
    An object also has a list of properties that can be edited or not.
    """
    def __init__(self, memberName, object, tree, treeItemId, container = None, index = None):
        """Attach this NodeData to a tree item and start observing object.

        Pass a string giving the member name for this node (member should be
        App.Proxys.Member.Object or App.Proxys.Member.ObjectList), the object
        to observe, and the tree and treeItemId where this object is shown.
        In container, pass the object that contains this node (HAS_A or
        HAS_MANY relationship); in index, pass this node's index within its
        container (HAS_MANY relationship).
        """
        super( NodeData, self ).__init__()
        self._memberName = memberName
        self._object = object
        self._tree = tree
        self._treeItemId = treeItemId
        self._container = container
        self._index = index
        if object is None :
            # No object: show the generic "None" icon and no proxy.
            self._proxyClass = None
            iconIndex = getIconIndex( '../data/ui/noneIcon.png' )
        else:
            # Observe the object so update() runs on every change.
            object.addObserver( self )
            self._proxyClass = PyUtils.getProxyClass(object)
            iconIndex = getIconIndex( self._proxyClass.getIcon() )
        self._tree.SetItemImage( self._treeItemId, iconIndex )
        # One watcher per object-valued member of the proxy.
        self._subMemberNodes = []
        if self._proxyClass is not None :
            for subMember in self._proxyClass.getObjectMembers():
                self._subMemberNodes.append( _createSubMemberNode(subMember, self._tree, self._treeItemId, object) )
        tree.SetItemPyData( treeItemId, self )
        # Initial update
        self.update()
    def __del__(self):
        # Stop observing; the object may already be gone (AttributeError).
        try: self._object.deleteObserver( self )
        except AttributeError: pass
    def update(self, data = None):
        """Called whenever the attached object is updated."""
        # Lock the tree so intermediate states are not redrawn.
        self._tree.updateLock()
        if self._object is not None :
            name = self._proxyClass.getName(self._object)
        else:
            name = "None (%s)" % self._memberName
        self._tree.SetItemText( self._treeItemId, name )
        # Propagate the refresh to every object-member watcher.
        for subMemberNode in self._subMemberNodes:
            subMemberNode.update( self._object )
        self._tree.updateUnlock()
    def getObject(self):
        """Access the object attached to this NodeData."""
        return self._object
def _createSubMemberNode( subMember, tree, parentTreeItemId, container ):
    """
    Private.
    Builds the watcher node for one object-valued member.  A tree node is
    added for a Member.Object or a non-embedded Member.ObjectList; an
    embedded Member.ObjectList gets no group node of its own.  Returns the
    subMemberNode that watches this member.
    """
    if isinstance( subMember, Member.Object ):
        return subMemberObjectNode(subMember, tree, container, parentTreeItemId)
    if isinstance( subMember, Member.ObjectList ):
        if subMember.embedInParentNode:
            nodeClass = subMemberEmbeddedObjectListNode
        else:
            nodeClass = subMemberObjectListNode
        return nodeClass(subMember, tree, container, parentTreeItemId)
    raise TypeError( "Unknown object member type: '%s'." % subMember.__class__.__name__ )
class subMemberNode(object):
    """Private base class: holds the member descriptor, the tree, and the containing object for one watcher node."""
    def __init__(self, subMember, tree, container):
        self._container = container
        self._tree = tree
        self._member = subMember
class subMemberObjectNode(subMemberNode):
    """Private class. Information for a submember node that wraps a single object."""
    def __init__(self, subMember, tree, container, parentTreeItemId):
        """Create a new node inside the tree that wraps the specified submember"""
        super(subMemberObjectNode,self).__init__(subMember, tree, container)
        # One tree item is created up-front; a NodeData gets attached to it
        # on the first update().
        self._treeItemId = tree.AppendItem( parentTreeItemId, "" )
        if tree.isAutoVisible():
            tree.EnsureVisible(self._treeItemId)
    def getObject(self):
        """Returns the object associated with this node, or None if no object is associated."""
        nodeData = self._tree.GetItemPyData( self._treeItemId )
        if nodeData is None :
            raise AttributeError( 'No data attached to tree node!' );
        return nodeData.getObject()
    def update(self, object):
        """Updates the node so that it contains the specified object."""
        subObject = self._member.getObject(object)
        try:
            # Nothing to do when the node already wraps this very object.
            currentObject = self.getObject()
            if PyUtils.sameObject(currentObject, subObject) : return
        except AttributeError: pass
        # Re-wrap: NodeData attaches itself to the tree item via SetItemPyData.
        nodeData = NodeData( self._member.name, subObject, self._tree, self._treeItemId, self._container )
class subMemberAbstractObjectListNode(subMemberNode):
    """
    Private class. provides some methods shared by embedded and non-embedded node lists.
    Subclasses must define insertChild()
    """
    def __init__(self, subMember, tree, container):
        super(subMemberAbstractObjectListNode,self).__init__(subMember, tree, container)
        # Tree item ids for the currently displayed list items, in order.
        self._subTreeItemIds = []
    def getObject(self, index):
        """Returns the object number index associated with this node, or None if no object is associated.
        Raises a IndexError if the index is out of bound"""
        nodeData = self._tree.GetItemPyData( self._subTreeItemIds[index] )
        if nodeData is None :
            raise AttributeError( 'No data attached to tree node!' );
        return nodeData.getObject()
    def removeChildren(self, start=None, end=None):
        """Private. This method removes objects from a tree and a children list.
        Objects removed are in the range [start,end).
        Runs from object 0 when start==None and until the end when end==None."""
        if start is None : start = 0
        if end is None : end = len( self._subTreeItemIds )
        for child in self._subTreeItemIds[start:end]:
            self._tree.Delete(child)
        del self._subTreeItemIds[start:end]
    def update(self, object):
        """Adjusts the node's _subTreeItemIds list so that it matches an input object list.
        TreeItemId s will be deleted or inserted into _subTreeItemIds based on its current content and
        that of the input object list."""
        count = self._member.getCount(object)
        for i in range(count):
            inObject = self._member.getObject(object,i)
            try:
                outObject = self.getObject(i)
                areSameObject = PyUtils.sameObject(inObject, outObject)
            except IndexError:
                # Displayed list is shorter than the input list.
                outObject = None
                areSameObject = False
            if not areSameObject:
                # Need to delete or insert
                # First, check how many objects we should delete
                delete = 1
                try:
                    # Scan ahead for inObject; the gap before it is stale.
                    while not PyUtils.sameObject( inObject, self.getObject(i+delete) ) :
                        delete += 1
                except IndexError:
                    # inObject is not displayed anywhere ahead: insert instead.
                    delete = 0
                if delete > 0 :
                    # Delete the specified objects
                    self.removeChildren(i,i+delete)
                else :
                    # Insert the specified object
                    self.insertChild(i, inObject)
        # Delete any remaining objects
        self.removeChildren(count)
class subMemberObjectListNode(subMemberAbstractObjectListNode):
    """Private class. Information for a submember node that wraps a single object."""
    def __init__(self, subMember, tree, container, parentTreeItemId):
        """Create a new node inside the tree that wraps the specified submember"""
        super(subMemberObjectListNode,self).__init__(subMember, tree, container)
        # A visible group node is created; list items go underneath it.
        self._listNodeTreeItemId = tree.AppendItem( parentTreeItemId, subMember.fancyName )
        tree.SetItemPyData( self._listNodeTreeItemId, (subMember, container) )
        iconIndex = getIconIndex( subMember.groupIcon )
        tree.SetItemImage( self._listNodeTreeItemId, iconIndex )
        if tree.isAutoVisible():
            tree.EnsureVisible(self._listNodeTreeItemId)
    def insertChild(self, pos, object):
        """Private. Wraps the specified object in a new NodeData and inserts it
        at the specified position in the tree."""
        treeItemId = self._tree.InsertItemBefore( self._listNodeTreeItemId, pos, "" )
        nodeData = NodeData( self._member.name, object, self._tree, treeItemId, self._container, pos )
        self._subTreeItemIds.insert(pos, treeItemId)
        if self._tree.isAutoVisible():
            self._tree.EnsureVisible(treeItemId)
class subMemberEmbeddedObjectListNode(subMemberAbstractObjectListNode):
    """Private class. Watcher for an object-list member whose items are shown
    directly under the parent's tree node (no intermediate group node)."""
    def __init__(self, subMember, tree, container, parentTreeItemId):
        """Create a new node inside the tree that wraps the specified submember"""
        super(subMemberEmbeddedObjectListNode,self).__init__(subMember, tree, container)
        self._parentTreeItemId = parentTreeItemId
        self._previousTreeItemId = tree.GetLastChild(parentTreeItemId) # Will be invalid if there is no last child
    def insertChild(self, pos, object):
        """Private. Wraps the specified object in a new NodeData and inserts it
        at the specified position in the tree."""
        if pos == 0:
            # Insertion at the beginning, tricky.
            if len(self._subTreeItemIds) == 0 :
                # Nothing in the list, add after self._previousTreeItemId
                if not self._previousTreeItemId.IsOk():
                    # At the very top of the tree
                    treeItemId = self._tree.InsertItemBefore( self._parentTreeItemId, 0, "" )
                else:
                    treeItemId = self._tree.InsertItem( self._parentTreeItemId, self._previousTreeItemId, "" )
            else:
                # Insert before first object in the tree.
                # BUG FIX: this used 'self.tree' (no such attribute -> AttributeError;
                # the attribute is '_tree') and passed InsertItemBefore-style
                # arguments; wx GetPrevSibling takes only the item whose previous
                # sibling is wanted.
                prevItemId = self._tree.GetPrevSibling( self._subTreeItemIds[0] )
                if not prevItemId.IsOk():
                    # At the very top of the tree
                    treeItemId = self._tree.InsertItemBefore( self._parentTreeItemId, 0, "" )
                else:
                    treeItemId = self._tree.InsertItem( self._parentTreeItemId, prevItemId, "" )
        else:
            # Insertion in the middle, easy
            treeItemId = self._tree.InsertItem( self._parentTreeItemId, self._subTreeItemIds[pos-1], "" )
        nodeData = NodeData( self._member.name, object, self._tree, treeItemId, self._container, pos )
        self._subTreeItemIds.insert(pos, treeItemId)
        if self._tree.isAutoVisible():
            self._tree.EnsureVisible(treeItemId)
| Python |
'''
Created on 2009-09-03
@author: beaudoin
'''
import Proxy
import Member
from PyUtils import enum
import Controllers
import wx, Core
def getControlParams(controller, i):
    """Index 0 maps to the root control params; i > 0 maps to joint params i-1."""
    if i == 0:
        return controller.getRootControlParams()
    jointIndex = i - 1
    return controller.getControlParams(jointIndex)
def getControlParamsCount(controller):
    """Number of control-params entries, counting the root entry at index 0."""
    return 1 + controller.getControlParamsCount()
# Proxy for Core.SimBiController.  NOTE: loader binds wx.GetApp() at import
# time, so this module must be imported after the wx app exists.
cls = Core.SimBiController
SimBiController = Proxy.create( cls, caster = Core.castToSimBiController, loader = wx.GetApp().addController,
    nameGetter = cls.getName,
    icon = "../data/ui/controllerIcon.png",
    members = [
        Member.Basic( str, 'name', 'UnnamedSimBiController', cls.getName, cls.setName ),
        Member.Basic( int, 'startingState', 0, cls.getStartingState, cls.setStartingState ),
        Member.Basic( float, 'stanceHipDamping', 25, cls.getStanceHipDamping, cls.setStanceHipDamping ),
        Member.Basic( float, 'stanceHipMaxVelocity', 4, cls.getStanceHipMaxVelocity, cls.setStanceHipMaxVelocity ),
        # Index 0 is the root control params; see getControlParams above.
        Member.ObjectList( 'controlParamsList', None, getControlParams, getControlParamsCount, cls.addControlParams ),
        Member.ObjectList( 'states', None, cls.getState, cls.getStateCount, cls.addState, embedInParentNode = True )
    ] )
def setJoint(joint, jointName, controller):
    """Look up jointName on the controller's character and attach that joint
    to the given control-params object; raise if the attachment is refused."""
    target = controller.getCharacter().getJointByName(jointName)
    if not joint.setJoint(target):
        raise ValueError( "Setting the wrong joint." )
# Proxy for one joint's PD control parameters.  The joint name is read-only in
# the UI; writing goes through the setJoint adapter, which resolves the name
# against the controller's character.
cls = Core.ControlParams
ControlParams = Proxy.create( cls, caster = Core.castToControlParams,
                              nameGetter = cls.getJointName,
                              icon = "../data/ui/controlParamsIcon.png",
                              members = [
    Member.Basic( str, 'joint', 'noJointName', cls.getJointName, setJoint, editable = False ),
    Member.Basic( float, 'kp', None, cls.getKp, cls.setKp ),
    Member.Basic( float, 'kd', None, cls.getKd, cls.setKd ),
    Member.Basic( float, 'tauMax', 1000.0, cls.getMaxAbsTorque, cls.setMaxAbsTorque ),
    Member.Vector3d( 'scale', (1.0,1.0,1.0), cls.getScale, cls.setScale )
] )
# The two possible triggers for leaving an FSM state, with the int codes used
# by the Member.Enum adapters below.
TransitionOn = enum( 'FOOT_CONTACT', 'TIME_UP' )
TO_FOOT_CONTACT = TransitionOn.toInt( 'FOOT_CONTACT' )
TO_TIME_UP = TransitionOn.toInt( 'TIME_UP' )
# Stance choices, mapped onto the core SimBiConState constants.
Stance = enum( { 'LEFT' : Core.SimBiConState.STATE_LEFT_STANCE ,
                 'RIGHT' : Core.SimBiConState.STATE_RIGHT_STANCE,
                 'REVERSE' : Core.SimBiConState.STATE_REVERSE_STANCE,
                 'KEEP' : Core.SimBiConState.STATE_KEEP_STANCE } )
def getTransitionOn(state):
    """Map the state's boolean transition flag to a TransitionOn code."""
    return TO_FOOT_CONTACT if state.getTransitionOnFootContact() else TO_TIME_UP
def setTransitionOn(state, value):
    """Translate a TransitionOn code back into the state's boolean flag."""
    if value not in (TO_FOOT_CONTACT, TO_TIME_UP):
        raise ValueError( "SimBiConState member 'transitionOn' has invalid value: '" + str(value) + "'" )
    state.setTransitionOnFootContact( value == TO_FOOT_CONTACT )
# Proxy for one FSM state of the controller; states are embedded directly
# under the controller node in the UI tree.
cls = Core.SimBiConState
SimBiConState = Proxy.create( cls, caster = Core.castToSimBiConState,
                              nameGetter = cls.getName,
                              icon = "../data/ui/fsmstateIcon.png",
                              members = [
    Member.Basic( str, 'name', 'UnnamedSimBiConState', cls.getName, cls.setName ),
    Member.Basic( int, 'nextStateIndex', None, cls.getNextStateIndex, cls.setNextStateIndex ),
    # transitionOn/stance use the module-level adapters to map enum codes
    # onto the core object's boolean/int representation.
    Member.Enum( TransitionOn, 'transitionOn', TO_FOOT_CONTACT, getTransitionOn, setTransitionOn ),
    Member.Enum( Stance, 'stance', Core.SimBiConState.STATE_REVERSE_STANCE, cls.getStance, cls.setStance ),
    Member.Basic( float, 'duration', 0.5, cls.getDuration, cls.setDuration ),
    # d/v trajectories in X and Z -- presumably the SIMBICON balance-feedback
    # position/velocity terms; confirm against the core implementation.
    Member.Trajectory1d( 'dTrajX', None, cls.getDTrajX, cls.setDTrajX ),
    Member.Trajectory1d( 'dTrajZ', None, cls.getDTrajZ, cls.setDTrajZ ),
    Member.Trajectory1d( 'vTrajX', None, cls.getVTrajX, cls.setVTrajX ),
    Member.Trajectory1d( 'vTrajZ', None, cls.getVTrajZ, cls.setVTrajZ ),
    Member.ObjectList( 'externalForces', None, cls.getExternalForce, cls.getExternalForceCount, cls.addExternalForce ),
    Member.ObjectList( 'trajectories', None, cls.getTrajectory, cls.getTrajectoryCount, cls.addTrajectory, embedInParentNode = True )
] )
# Proxy for an external force/torque applied to a named body while a state is
# active; each component is its own 1D trajectory over the state phase.
cls = Core.ExternalForce
ExternalForce = Proxy.create( cls, caster = Core.castToExternalForce,
                              nameGetter = cls.getBodyName,
                              icon = "../data/ui/externalForceIcon.png",
                              members = [
    Member.Basic( str, 'body', 'UnnamedExternalForce', cls.getBodyName, cls.setBodyName ),
    Member.Trajectory1d( 'forceX', None, cls.getForceX, cls.setForceX ),
    Member.Trajectory1d( 'forceY', None, cls.getForceY, cls.setForceY ),
    Member.Trajectory1d( 'forceZ', None, cls.getForceZ, cls.setForceZ ),
    Member.Trajectory1d( 'torqueX', None, cls.getTorqueX, cls.setTorqueX ),
    Member.Trajectory1d( 'torqueY', None, cls.getTorqueY, cls.setTorqueY ),
    Member.Trajectory1d( 'torqueZ', None, cls.getTorqueZ, cls.setTorqueZ )
] )
# Reference-frame codes for trajectory components; mapped to and from the
# trajectory's relative-to-character-frame flag by the adapter functions.
ReferenceFrame = enum( 'PARENT_RELATIVE', 'CHARACTER_RELATIVE' )
RF_PARENT_RELATIVE = ReferenceFrame.toInt( 'PARENT_RELATIVE' )
RF_CHARACTER_RELATIVE = ReferenceFrame.toInt( 'CHARACTER_RELATIVE' )
def getReferenceFrame(trajectory):
    """Map the trajectory's character-relative flag to a ReferenceFrame code."""
    if trajectory.isRelativeToCharacterFrame():
        return RF_CHARACTER_RELATIVE
    return RF_PARENT_RELATIVE
def setReferenceFrame(trajectory, value):
    """Translate a ReferenceFrame code back into the trajectory's flag."""
    if value == RF_CHARACTER_RELATIVE:
        trajectory.setRelativeToCharacterFrame(True)
    elif value == RF_PARENT_RELATIVE:
        trajectory.setRelativeToCharacterFrame(False)
    else:
        raise ValueError( "SimBiConTrajectory member 'referenceFrame' has invalid value: '" + str(value) + "'" )
# Proxy for a per-joint trajectory: strength, reference frame and the per-axis
# components (components are embedded under the trajectory's tree node).
cls = Core.Trajectory
Trajectory = Proxy.create( cls, caster = Core.castToTrajectory,
                           nameGetter = cls.getJointName,
                           icon = "../data/ui/trajectoryIcon.png",
                           members = [
    Member.Basic( str, 'joint', 'UnnamedTrajectory', cls.getJointName, cls.setJointName ),
    Member.Trajectory1d( 'strength', None, cls.getStrengthTrajectory, cls.setStrengthTrajectory ),
    Member.Enum( ReferenceFrame, 'referenceFrame', RF_PARENT_RELATIVE, getReferenceFrame, setReferenceFrame ),
    Member.ObjectList( 'components', None, cls.getTrajectoryComponent, cls.getTrajectoryComponentCount, cls.addTrajectoryComponent, embedInParentNode = True )
] )
# Which stance side (if any) causes a trajectory component to be reversed,
# mapped onto the core TrajectoryComponent constants.
ReverseOnStance = enum( { 'LEFT' : Core.TrajectoryComponent.ROS_LEFT ,
                          'RIGHT' : Core.TrajectoryComponent.ROS_RIGHT,
                          'DONT_REVERSE' : Core.TrajectoryComponent.ROS_DONT_REVERSE } )
def getTrajectoryComponentName(trajectoryComponent):
    """Human-readable name for a trajectory component, derived from its
    rotation axis.  Called whenever the trajectory component is updated."""
    axis = trajectoryComponent.getRotationAxis()
    pattern = (axis.x != 0, axis.y != 0, axis.z != 0)
    if pattern == (True, False, False):
        return "X axis"
    if pattern == (False, True, False):
        return "Y axis"
    if pattern == (False, False, True):
        return "Z axis"
    # Not aligned with a single coordinate axis: spell the axis out.
    return "Axis (%f,%f,%f)" % ( axis.x, axis.y, axis.z )
# Proxy for one rotational component of a joint trajectory; its display name
# is derived from the rotation axis by getTrajectoryComponentName.
cls = Core.TrajectoryComponent
TrajectoryComponent = Proxy.create( cls, caster = Core.castToTrajectoryComponent,
                                    nameGetter = getTrajectoryComponentName,
                                    icon = "../data/ui/trajectoryComponentIcon.png",
                                    members = [
    Member.Vector3d( 'rotationAxis', None, cls.getRotationAxis, cls.setRotationAxis ),
    Member.Enum( ReverseOnStance, 'reverseOnStance', Core.TrajectoryComponent.ROS_DONT_REVERSE, cls.getReverseOnStance, cls.setReverseOnStance),
    Member.Object( 'feedback', None, cls.getFeedback, cls.setFeedback ),
    Member.Trajectory1d( 'baseTrajectory', None, cls.getBaseTrajectory, cls.setBaseTrajectory ),
    Member.Trajectory1d( 'dScaledTrajectory', None, cls.getDTrajScale, cls.setDTrajScale ),
    Member.Trajectory1d( 'vScaledTrajectory', None, cls.getVTrajScale, cls.setVTrajScale )
] )
def getDLimits(object):
    """Return the feedback's d-limits as a (dMin, dMax) 2-tuple."""
    lo = object.getDMin()
    hi = object.getDMax()
    return (lo, hi)
def setDLimits(object, value):
    """Unpack a (dMin, dMax) pair into the feedback's d-limits."""
    dMin, dMax = value[0], value[1]
    return object.setDLimits(dMin, dMax)
def getVLimits(object):
    """Return the feedback's v-limits as a (vMin, vMax) 2-tuple."""
    lo = object.getVMin()
    hi = object.getVMax()
    return (lo, hi)
def setVLimits(object, value):
    """Unpack a (vMin, vMax) pair into the feedback's v-limits."""
    vMin, vMax = value[0], value[1]
    return object.setVLimits(vMin, vMax)
# Proxy for the linear balance feedback law: projection axis, cd/cv gains and
# clamping limits.  The limit pairs go through the tuple adapters defined in
# this module.
cls = Core.LinearBalanceFeedback
LinearBalanceFeedback = Proxy.create( cls, caster = Core.castToLinearBalanceFeedback,
                                      icon = "../data/ui/feedbackIcon.png",
                                      members = [
    Member.Vector3d( 'axis', None, cls.getProjectionAxis, cls.setProjectionAxis ),
    Member.Basic( float, 'cd', 0, cls.getCd, cls.setCd ),
    Member.Basic( float, 'cv', 0, cls.getCv, cls.setCv ),
    Member.Basic( tuple, 'dLimits', (-1000,1000), getDLimits, setDLimits ),
    Member.Basic( tuple, 'vLimits', (-1000,1000), getVLimits, setVLimits )
] )
# IKVMC controller proxy: inherits every SimBiController member (via parent)
# and adds the swing-foot delta trajectories.
cls = Core.IKVMCController
IKVMCController = Proxy.create( cls, parent = SimBiController, caster = Core.castToIKVMCController, loader = wx.GetApp().addController,
                                members = [
    Member.Trajectory1d( 'sagittalTrajectory', None, cls.getSwingFootTrajectoryDeltaSagittal, cls.setSwingFootTrajectoryDeltaSagittal ),
    Member.Trajectory1d( 'coronalTrajectory', None, cls.getSwingFootTrajectoryDeltaCoronal, cls.setSwingFootTrajectoryDeltaCoronal ),
    Member.Trajectory1d( 'heightTrajectory', None, cls.getSwingFootTrajectoryDeltaHeight, cls.setSwingFootTrajectoryDeltaHeight )
] )
# Script-defined controllers: they declare no members of their own and reuse
# everything inherited from the IKVMC proxy.
cls = Controllers.DanceController
DanceController = Proxy.create( cls, parent = IKVMCController, loader = wx.GetApp().addController )
cls = Controllers.WalkController
WalkController = Proxy.create( cls, parent = IKVMCController, loader = wx.GetApp().addController )
| Python |
'''
Created on 2009-09-22
@author: beaudoin
'''
import PyUtils
def create( wrappedClass = None, members = None, parent = None, caster = None, loader = None, verbose = True, icon = None, nameGetter = None ):
    """
    Creates a new proxy class (NOT an instance).
    This proxy class is meant as a wrapper to the specified wrappedClass.
    It contains a specific list of members; these members should be instances of objects in App.Proxys.Member.
    The order of the members is important, and will stay as specified.
    If it has a parent class, then it inherits all the members from the parent class, as well as the newly specified ones;
    the order is: parent's members first, then child's members.
    If a member exists both in the parent and the child, then the child's takes precedence, but it appears at the position of the parent
    (this allows changing a member's default value, for example).
    If a caster is specified, then it will be used to cast down the wrapped object whenever it's needed.
    If a loader is specified, then the wrapped object can be loaded directly using the load method.
    If the wrapper is verbose (default), then its representation will use the long format:
        memberName = value
    Also, fields having default values will not be represented.
    If the wrapper is not verbose, then its representation will be short; all the values will be listed in their correct order.
    In icon, specify the filename of a png to use as an icon to represent the object. Pass None to use parent's icon.
    In nameGetter, specify the method to call to obtain a string representing the name of this object.
    """
    # FIX: the previous version used a mutable default ('members = []') and
    # rebuilt the forwarded kwargs by scanning locals(), which would silently
    # break if any local variable were introduced before the scan.  Forward
    # each argument explicitly instead.
    if members is None:
        members = []
    return _create( _wrappedClass = wrappedClass,
                    _members = members,
                    _parent = parent,
                    _caster = caster,
                    _loader = loader,
                    _verbose = verbose,
                    _icon = icon,
                    _nameGetter = nameGetter )
def _create( _wrappedClass, _members, _parent, _caster, _loader, _verbose, _icon, _nameGetter ):
    """Private, forwarded from create for convenience in member name.

    NOTE: everything assigned in this function's local scope -- the
    underscore-prefixed parameters, the computed _memberDict, and the
    functions defined below -- becomes the class dictionary of the returned
    proxy class through the locals() call at the very end.
    """
    @classmethod
    def wrap( cls, object, recursive = True ):
        """
        This class method creates an instance of this proxy class wrapping the specified object.
        To make sure you use the right proxy class, consider calling App.PyUtils.wrap instead.
        Pass recursive = False to only wrap non-object members. The members containing object references or list of objects will not be wrapped and will be None.
        """
        if cls._caster is not None : object = cls._caster(object)
        kwargs = {}
        for member in cls._members:
            if not recursive and member.isObject: continue
            kwargs[ member.name ] = member.get(object)
        return cls( **kwargs )
    @classmethod
    def getIcon( cls ):
        """Return the icon filename associated with this proxy class."""
        return cls._icon
    @classmethod
    def getName( cls, object ):
        """Return a display name for the wrapped object, falling back to the
        proxy class name when no usable name getter was supplied."""
        if cls._caster is not None : object = cls._caster(object)
        # When no nameGetter was given, _nameGetter wraps None and calling it
        # raises TypeError, which triggers the fallback below.
        try: return cls._nameGetter( object )
        except TypeError: return cls.__name__
    @classmethod
    def getObjectMembers(cls):
        """Return only the members holding object references or object lists."""
        return filter( lambda m : m.isObject, cls._members )
    def __init__(self, *args, **kwargs):
        """
        Initialize a proxy object of this proxy class.
        All the members must be initialized in the order in which they appear or using keyword arguments.
        For a specific list of members, look at:
            self.members
        """
        if len(args) > len( self._members ):
            raise TypeError( "Too many arguments when creating proxy '%s'." % type(self).__name__ )
        for i, member in enumerate( self._members ):
            if i < len(args):
                value = args[i]
            else :
                # Consume the matching keyword argument, or fall back to the
                # member's declared default.
                try:
                    value = kwargs[ member.name ]
                    del kwargs[ member.name ]
                except KeyError:
                    value = member.default
            self.__setattr__( member.name, member.interpret( value ) )
        # Anything left in kwargs did not match any member name.
        if len(kwargs) > 0 :
            raise AttributeError( "Attribute '%s' not a member of proxy '%s'." % (kwargs.keys()[0], type(self).__name__) )
    def __repr__(self) :
        """
        Creates a representation of this proxy object. This can be evaluated provided the following includes:
            from App.Proxys.All import *
        """
        outMembers = []
        for member in self._members:
            value = self.__getattribute__(member.name)
            # In verbose mode, members still at their default value are omitted.
            if self._verbose and PyUtils.safeEqual(value, member.default) : continue
            outMember = repr( member.format(value) )
            if self._verbose: outMember = member.name + " = " + outMember
            outMembers.append( outMember )
        return type(self).__name__ + "(" + ','.join(outMembers) + ")"
    def setValueAndUpdateObject(self, memberName, value, object, container = None):
        """
        Sets the specified value of the specified member and updates the object.
        As a container, specify the object (not proxy object) to which this object is attached with an HAS_A or HAS_MANY relationship.
        """
        info = self._memberDict[memberName]
        newValue = info.interpret(value)
        # Skip the update entirely when the value did not actually change.
        if PyUtils.safeEqual( newValue, self.__getattribute__(memberName) ) : return
        self.__setattr__(memberName, newValue)
        info.set(object, newValue, container )
    def getValueAndInfo(self, index):
        """
        Returns a 2-tuple (value, info) for the ith member of this wrapped object.
        'value' is the member's value
        'info' is a class from module App.Proxys.Member that describes this member
        """
        info = self._members[index]
        value = self.__getattribute__(info.name)
        return ( value, info )
    def getMemberCount(self):
        """
        Returns the number of members of this proxy class.
        """
        return len( self._members )
    def createAndFillObject(self, container = None, *args):
        """Same as calling createObject followed by fillObject."""
        return self.fillObject( self.createObject(*args), container )
    def createObject(self, *args):
        """
        Creates a new instance of the object wrapped by this class.
        The object will not be filled, you have to call fillObject.
        If any extra parameters are specified, they are passed to the object constructor.
        Returns the newly created object.
        """
        return self._wrappedClass(*args)
    def fillObject(self, object, container = None):
        """
        Fills every wrapped field in the specified object with the content of this proxy object.
        All members are iterated over in the order they were specified.
        If the object is an Observable, then the changes are batched.
        As a container, specify the object (not proxy object) to which this object is (or will be) attached with an HAS_A or HAS_MANY relationship.
        Returns the filled object, the exact same object that was passed in parameter.
        """
        # Batching is duck-typed: objects without begin/endBatchChanges are
        # simply updated unbatched.
        try: object.beginBatchChanges()
        except AttributeError: pass
        try:
            for member in self._members:
                member.set(object, self.__getattribute__(member.name), container )
        finally:
            try: object.endBatchChanges()
            except AttributeError: pass
        return object
    def extractFromObject( self, object, recursive = True ):
        """
        This method extract the content of the passed object and places it in this wrapper.
        Pass recursive = False to only extract non-object members. The members containing object references or list of objects will not be extracted and will remain unchanged.
        """
        if self._caster is not None : object = self._caster(object)
        for member in self._members:
            if not recursive and member.isObject: continue
            self.__setattr__( member.name, member.get(object) )
    def load(self, *args):
        """
        Creates a new instance of the object wrapped by this class,
        Fill it with its content,
        Then, add it to the framework or to the container object using
        the specified loader method.
        Returns the newly loaded object.
        """
        object = self.createObject(*args)
        self.fillObject(object)
        if self._loader is not None :
            self._loader( object )
        return object
    # Inherits the parent icon
    if _icon is None :
        if _parent is not None:
            _icon = _parent._icon
    # Merge the parent and child's member list
    if _parent is not None :
        # Merge parent and child members
        _members = _parent._members + _members
        # Move the child's member to the parent if they have the same name
        for i in range( len(_parent._members) ):
            for j in range( len(_parent._members), len(_members) ):
                if _members[i].name == _members[j].name :
                    _members[i] = _members[j]
                    del _members[j]
                    break
    # Loop variables must not leak into the class dict built from locals();
    # the except clause covers the case where the loops above never ran.
    try:
        del i
        del j
    except UnboundLocalError: pass
    # Build a dictionnary of members for efficient look-up
    _memberDict = {}
    for member in _members:
        _memberDict[member.name] = member
    try: del member
    except UnboundLocalError: pass
    # Wrapped as a staticmethod so the class does not bind it as an instance
    # method when it lands in the class dict.
    _nameGetter = staticmethod(_nameGetter)
    # The class dict is everything currently in this scope: the _xxx
    # configuration values plus the methods defined above.
    return type( _wrappedClass.__name__.split('.')[-1], (object,), locals() )
| Python |
'''
Created on 2009-09-02
@author: beaudoin
'''
import Proxy
import Member
import Physics
# Proxies for the collision-detection primitives (CDPs).  All use the short,
# non-verbose repr format.
cls = Physics.SphereCDP
SphereCDP = Proxy.create( cls, verbose = False, caster = Physics.castToSphereCDP,
                          members = [
    Member.Point3d( 'center', (0.0,0.0,0.0), cls.getCenter, cls.setCenter ),
    Member.Basic( float, 'radius', 1.0, cls.getRadius, cls.setRadius),
] )
cls = Physics.BoxCDP
BoxCDP = Proxy.create( cls, verbose = False, caster = Physics.castToBoxCDP,
                       members = [
    # Two opposite corners of the box.
    Member.Point3d( 'point1', (-1.0,-1.0,-1.0), cls.getPoint1, cls.setPoint1 ),
    Member.Point3d( 'point2', (1.0,1.0,1.0), cls.getPoint2, cls.setPoint2),
] )
cls = Physics.PlaneCDP
PlaneCDP = Proxy.create( cls, verbose = False, caster = Physics.castToPlaneCDP,
                         members = [
    Member.Vector3d( 'normal', (0.0,1.0,0.0), cls.getNormal, cls.setNormal ),
    Member.Point3d( 'origin', (0.0,0.0,0.0), cls.getOrigin, cls.setOrigin),
] )
cls = Physics.CapsuleCDP
CapsuleCDP = Proxy.create( cls, verbose = False, caster = Physics.castToCapsuleCDP,
                           members = [
    # End points of the capsule's axis, plus its radius.
    Member.Point3d( 'point1', (-1.0,0.0,0.0), cls.getPoint1, cls.setPoint1 ),
    Member.Point3d( 'point2', (1.0,0.0,0.0), cls.getPoint2, cls.setPoint2 ),
    Member.Basic( float, 'radius', 1.0, cls.getRadius, cls.setRadius),
] )
| Python |
from SNMApp import ObservableList, SnapshotBranch, Snapshot
from CDPs import SphereCDP, BoxCDP, PlaneCDP, CapsuleCDP
from RigidBody import RigidBody, ArticulatedRigidBody
from ArticulatedFigure import ArticulatedFigure, Character
from Joints import BallInSocketJoint, UniversalJoint, HingeJoint, StiffJoint
from SimBiController import SimBiController, ControlParams, SimBiConState, ExternalForce, Trajectory, TrajectoryComponent, LinearBalanceFeedback, IKVMCController, DanceController, WalkController | Python |
'''
Created on 2009-09-03
@author: beaudoin
'''
import Proxy
import Member
import Physics, Core, wx
# Proxy for an articulated figure; loading registers it with the physics world.
cls = Physics.ArticulatedFigure
ArticulatedFigure = Proxy.create( cls, loader = Physics.world().addArticulatedFigure,
                                  members = [
    Member.Basic( str, 'name', 'UnnamedArticulatedFigure', cls.getName, cls.setName ),
    Member.Object( 'root', None, cls.getRoot, cls.setRoot ),
    Member.ObjectList( 'arbs', None, cls.getArticulatedRigidBody, cls.getArticulatedRigidBodyCount, cls.addArticulatedRigidBody ),
    Member.ObjectList( 'joints', None, cls.getJoint, cls.getJointCount, cls.addJoint )
] )
# A Character shares all ArticulatedFigure members but is registered with the
# wx application instead of directly with the world.
cls = Core.Character
Character = Proxy.create( cls, parent = ArticulatedFigure, loader = wx.GetApp().addCharacter )
| Python |
from SNMApp import SNMApp
from SnapshotTree import SnapshotBranch, Snapshot
from ObservableList import ObservableList
import Scenario
import InstantChar
import KeyframeEditor | Python |
'''
Created on 2009-10-06
@author: beaudoin
'''
import Scenario, PyUtils, Physics, MathLib, math
class Staircase(Scenario.Scenario):
    """A scenario that sets up a staircase (with optional hand ramps) in the
    physics world."""

    def __init__(self, name = "Staircase scenario", staircaseWidth = 0.9, threadDepth = 0.2 , riserHeight = 0.223, stepCount = 10, position = (0,0,0), angle = 0, leftRampHeight = None, rightRampHeight = None ):
        """Setup a staircase.

        name            -- display name of the scenario
        staircaseWidth  -- lateral width of each step
        threadDepth     -- depth (tread) of each step
        riserHeight     -- height of each step
        stepCount       -- number of steps
        position        -- ground point in the middle of the first "virtual
                           step", i.e. right in front of the first real step
        angle           -- rotation about the y axis, in radians
        leftRampHeight  -- height of the left ramp, or None for no ramp
        rightRampHeight -- height of the right ramp, or None for no ramp
        """
        super( Staircase, self ).__init__(name)
        self._staircaseWidth = staircaseWidth
        self._threadDepth = threadDepth
        self._riserHeight = riserHeight
        self._stepCount = stepCount
        self._position = position
        self._angle = angle
        self._leftRampHeight = leftRampHeight
        self._rightRampHeight = rightRampHeight
        self._loaded = False

    #
    # Accessors
    #

    def getStairWidth(self):
        """Access the staircase width"""
        return self._staircaseWidth

    def setStairWidth(self, staircaseWidth):
        """Sets the staircase width"""
        self._staircaseWidth = staircaseWidth

    def getThreadDepth(self):
        """Access the thread depth"""
        # BUGFIX: previously returned self._staircaseWidth by mistake.
        return self._threadDepth

    def setThreadDepth(self, threadDepth):
        """Sets the thread depth"""
        self._threadDepth = threadDepth

    def getRiserHeight(self):
        """Access the riser height"""
        return self._riserHeight

    def setRiserHeight(self, riserHeight):
        """Sets the riser height"""
        self._riserHeight = riserHeight

    def getStepCount(self):
        """Access the number of steps"""
        return self._stepCount

    def setStepCount(self, stepCount):
        """Sets the number of steps"""
        self._stepCount = stepCount

    def getPosition(self):
        """
        Access the position.
        The position is the location on the ground in the middle of the first "virtual step", that
        is, right in front of the first real step.
        """
        return self._position

    def setPosition(self, position):
        """
        Sets the position.
        The position is the location on the ground in the middle of the first "virtual step", that
        is, right in front of the first real step.
        """
        self._position = position

    def getAngle(self):
        """Access the angle of rotation along the y axis (in radians)"""
        # BUGFIX: previously returned the non-existent attribute
        # self._oritentation, which raised AttributeError on every call.
        return self._angle

    def setAngle(self, angle):
        """Sets the angle of rotation along the y axis (in radians)"""
        self._angle = angle

    def getLeftRampHeight(self):
        """Access the height of the left ramp, or None if no left ramp is desired"""
        return self._leftRampHeight

    def setLeftRampHeight(self, leftRampHeight):
        """Sets the height of the left ramp, or None if no left ramp is desired"""
        self._leftRampHeight = leftRampHeight

    def getRightRampHeight(self):
        """Access the height of the right ramp, or None if no right ramp is desired"""
        return self._rightRampHeight

    def setRightRampHeight(self, rightRampHeight):
        """Sets the height of the right ramp, or None if no right ramp is desired"""
        self._rightRampHeight = rightRampHeight

    #
    # Public methods
    #

    def load(self):
        """Create the rigid bodies for the steps and (optionally) the hand
        ramps, and add them to the physics world.  May only be called once."""
        assert not self._loaded, "Cannot load scenario twice!"
        self._loaded = True
        # Create the rigid bodies for the main staircase
        orientation = PyUtils.angleAxisToQuaternion( (self._angle,(0,1,0)) )
        size = MathLib.Vector3d( self._staircaseWidth, self._riserHeight, self._threadDepth )
        # Center of the "virtual step", half a riser below ground level.
        pos = PyUtils.toPoint3d( self._position ) + MathLib.Vector3d( 0, -self._riserHeight/2.0, 0 )
        # Per-step offset: one riser up and one tread forward, rotated by the
        # staircase orientation (the lateral component is zeroed out).
        delta = MathLib.Vector3d(size)
        delta.x = 0
        delta = orientation.rotate( delta )
        for i in range(self._stepCount):
            box = PyUtils.RigidBody.createBox( size, pos = pos + delta * (i+1), locked = True, orientation=orientation )
            Physics.world().addRigidBody(box)
        # Create the rigid bodies for both ramps
        rampHeights = ( self._leftRampHeight, self._rightRampHeight )
        deltaRamp = MathLib.Vector3d(self._staircaseWidth/2.0,0,0)
        deltaRamp = orientation.rotate( deltaRamp )
        deltaRamps = (deltaRamp, deltaRamp * -1)
        for deltaRamp, rampHeight in zip( deltaRamps, rampHeights ):
            if rampHeight is None: continue
            # Two vertical posts, at the bottom and top steps.
            deltaRamp.y = rampHeight/2.0
            box = PyUtils.RigidBody.createBox( (0.02,rampHeight,0.02), pos = pos + deltaRamp + delta , locked = True, orientation=orientation )
            Physics.world().addRigidBody(box)
            box = PyUtils.RigidBody.createBox( (0.02,rampHeight,0.02), pos = pos + deltaRamp + (delta * self._stepCount) , locked = True, orientation=orientation )
            Physics.world().addRigidBody(box)
            # The sloped hand rail, pitched to match the staircase slope.
            deltaRamp.y = rampHeight
            rampOrientation = orientation * PyUtils.angleAxisToQuaternion( (math.atan2(self._riserHeight, self._threadDepth), (-1,0,0)) )
            rampLen = self._stepCount * math.sqrt( self._riserHeight*self._riserHeight + self._threadDepth*self._threadDepth )
            box = PyUtils.RigidBody.createBox( (0.04,0.02,rampLen), pos = pos + deltaRamp + (delta * ((self._stepCount+1) * 0.5)) , locked = True, orientation=rampOrientation )
            Physics.world().addRigidBody(box)
| Python |
'''
Created on 2009-10-06
@author: beaudoin
'''
import PyUtils
class Scenario(PyUtils.Observable):
    """Base class for scenarios.  Concrete scenarios are expected to provide:

    load()
        Called once the scenario has been filled, so that its resources
        (rigid bodies, etc.) can be created.
    update()
        Called whenever a character has moved and the scenario must refresh
        its character-related information."""

    def __init__(self, name = "Unnamed scenario"):
        """Create a scenario carrying the given display name."""
        super(Scenario, self).__init__()
        self._name = name

    def getName(self):
        """Return the scenario's display name."""
        return self._name

    def setName(self, name):
        """Replace the scenario's display name."""
        self._name = name
| Python |
from Staircase import Staircase | Python |
'''
Created on 2009-12-02
@author: beaudoin
'''
import math
import sys
import traceback

from OpenGL.GL import *

import UI, Core, GLUtils
from MathLib import Point3d, Vector3d, Quaternion
class CheckBoxCallback(GLUtils.GLUICallback):
    """GLUI callback that forwards checkbox toggles to an EditorWindow."""

    def __init__(self, editorWindow):
        """Remember which editor window to notify on execution."""
        super(CheckBoxCallback, self).__init__()
        self._editorWindow = editorWindow

    def execute(self):
        """Invoked by the GLUI framework; relay the change to the editor."""
        self._editorWindow._checkBoxChanged()
class EditorWindow(GLUtils.GLUIContainer):
    """Compound keyframe editor: an optional checkbox stacked above a
    side-view and a front-view CharacterEditorWindow sharing the same
    posable character and handles."""
    def __init__( self, parent, posableCharacter, handlesSide, handlesFront, stanceKneeHandle, swingFootHandleSagittal, swingFootHandleCoronal, swingFootHandleHeight,
                  time, controller, stanceFootToSwingFootTrajectory, minWidth=-1, minHeight=-1, checkBoxVisible=True):
        super(EditorWindow,self).__init__(parent)
        self._sizer = GLUtils.GLUIBoxSizer(GLUtils.GLUI_VERTICAL)
        self.setSizer(self._sizer)
        # The checkbox starts checked iff a keyframe exists at this time
        # (only the first side handle is consulted).
        handleVisible = len(handlesSide) != 0 and handlesSide[0].hasKeyframeAtTime(time)
        if checkBoxVisible:
            self._checkBox = GLUtils.GLUICheckBox(self,0,0,0,0,-1,-1,handleVisible)
            self._sizer.add(self._checkBox, 0, GLUtils.GLUI_EXPAND )
            self._callback = CheckBoxCallback(self)
            self._checkBox.setCheckBoxCallback(self._callback)
        else:
            # No checkbox: reserve the space with a plain spacer window.
            self._spacer = GLUtils.GLUIWindow(self)
            self._sizer.add(self._spacer, 1, GLUtils.GLUI_EXPAND )
        # Side (sagittal) view; ConverterZY is defined elsewhere and
        # presumably projects world points onto the (z, y) plane -- confirm.
        self._editorSide = CharacterEditorWindow(self,
                                                 posableCharacter,
                                                 handlesSide,
                                                 stanceKneeHandle,
                                                 swingFootHandleSagittal,
                                                 swingFootHandleHeight,
                                                 time,
                                                 controller,
                                                 stanceFootToSwingFootTrajectory,
                                                 ConverterZY(),
                                                 0, 0, 0, 0, minWidth, minHeight )
        self._sizer.add(self._editorSide)
        self._editorSide.addHandlesVisibilityChangedCallback( self._setCheckBox )
        # Front (coronal) view; no stance-knee handle in this projection.
        self._editorFront = CharacterEditorWindow(self,
                                                  posableCharacter,
                                                  handlesFront,
                                                  None,
                                                  swingFootHandleCoronal,
                                                  swingFootHandleHeight,
                                                  time,
                                                  controller,
                                                  stanceFootToSwingFootTrajectory,
                                                  ConverterXY(),
                                                  0, 0, 0, 0, minWidth, minHeight )
        self._sizer.add(self._editorFront)
        self._editorFront.addHandlesVisibilityChangedCallback( self._setCheckBox )
        self._checkBoxCallbacks = []
    def addCheckBoxCallback(self, callback):
        """Adds a function that will be called back whenever the checkbox state is changed."""
        self._checkBoxCallbacks.append(callback)
    def _setCheckBox(self, checked):
        # Keep the checkbox in sync with handle visibility.  When the window
        # was built with checkBoxVisible=False there is no _checkBox
        # attribute, hence the AttributeError guard.
        try:
            if checked != self._checkBox.isChecked():
                self._checkBox.setChecked(checked)
        except AttributeError:
            pass
    def _checkBoxChanged(self):
        """Called whenever the checkbox is changed."""
        for callback in self._checkBoxCallbacks:
            callback(self._checkBox.isChecked())
class CharacterEditorWindow(UI.GLUITools.WindowWithControlPoints):
    """Single-view keyframe edition window.  Characters are always forced to
    left stance.  The converter maps 3D world points into this view's 2D
    plane (sagittal or coronal)."""

    def __init__( self, parent, posableCharacter, handles, stanceKneeHandle, swingFootHandleX, swingFootHandleY,
                  time, controller, stanceFootToSwingFootTrajectory, converter, x=0, y=0, width=0, height=0, minWidth=-1, minHeight=-1 ):
        """A keyframe edition window. Character are always forced to left stance."""
        super(CharacterEditorWindow,self).__init__(parent,x,y,width,height, minWidth, minHeight, boundsY=(-0.1,1.9), forceAspectRatio='x')
        self._converter = converter
        self._posableCharacter = posableCharacter
        self._character = posableCharacter.getCharacter()
        self._handles = handles
        self._stanceKneeHandle = stanceKneeHandle
        self._swingFootHandleX = swingFootHandleX
        self._swingFootHandleY = swingFootHandleY
        self._time = time
        self._controller = controller
        self._stanceFootToSwingFootTrajectory = stanceFootToSwingFootTrajectory
        # Polylines used to sketch the character: entries starting with '_'
        # name an ARB (drawn at its center of mass), the rest name joints.
        self._lineTorso = [ '_pelvis', 'pelvis_lowerback', 'lowerback_torso', 'torso_head', '_head' ]
        self._lArm = [ '_torso', 'lShoulder', 'lElbow', '_lLowerArm' ]
        self._rArm = [ '_torso', 'rShoulder', 'rElbow', '_rLowerArm' ]
        self._lLeg = [ '_pelvis', 'lHip', 'lKnee', 'lAnkle', 'lToeJoint' ]
        self._rLeg = [ '_pelvis', 'rHip', 'rKnee', 'rAnkle', 'rToeJoint' ]
        self._handlesVisibilityChangedCallbacks = []
        self._handlesVisible = None
        self._updateHandlesVisibility()

    def _setHandlesVisible(self, visible):
        """Make sure the handles are visible or not."""
        if self._handlesVisible == visible: return
        self._handlesVisible = visible
        self.deleteAllControlPoints()
        if visible:
            for handle in self._handles:
                self.addControlPoint( HandleControlPoint(self._posableCharacter, self._time, self._converter, handle) )
            if self._stanceKneeHandle is not None:
                self.addControlPoint( HandleStanceKneeControlPoint(self._posableCharacter, self._time, self._converter, self._stanceKneeHandle) )
            # The swing-foot point only exists strictly inside the step interval.
            if self._time > 0 and self._time < 1 and self._swingFootHandleX is not None and self._swingFootHandleY is not None:
                self.addControlPoint( SwingFootControlPoint(self._posableCharacter, self._time, self._converter, self._swingFootHandleX, self._swingFootHandleY) )
        for callback in self._handlesVisibilityChangedCallbacks:
            callback(visible)

    def _updateHandlesVisibility(self):
        """Check if the control points should be visible.
        Only check the first handle. Assume they all have keyframes at the same time."""
        if len(self._handles) == 0 :
            return
        else:
            self._setHandlesVisible( self._handles[0].hasKeyframeAtTime(self._time) )

    def addHandlesVisibilityChangedCallback(self, callback):
        """Adds a callback that will be called whenever the visibility of the handles change.
        The callback should take one boolean parameter, true if they are visible, false otherwise."""
        self._handlesVisibilityChangedCallbacks.append(callback)

    def areHandlesVisible(self):
        """Return true if the handles are visible, false otherwise."""
        return self._handlesVisible

    def drawContent(self):
        """Pose the character at the current time and draw its stick figure."""
        self._updateHandlesVisibility()
        self._posableCharacter.updatePose( self._time, self._stanceFootToSwingFootTrajectory.evaluate_catmull_rom(self._time) )
        try:
            glColor3d(0.4,0.5,0.0)
            self._drawLine( self._lArm )
            self._drawLine( self._lLeg )
            glColor3d(1,1,1)
            self._drawLine( self._rArm )
            self._drawLine( self._rLeg )
            self._drawLine( self._lineTorso )
        except Exception as e:
            # Close the (possibly still open) GL primitive before reporting.
            glEnd()
            # BUGFIX: 'traceback' and 'sys' were used here without being
            # imported, so this handler raised a NameError that masked the
            # original error; both are now imported at the top of the file.
            # The parenthesized print form works in both Python 2 and 3.
            print("Exception while drawing scaled character interface: " + str(e))
            traceback.print_exc(file=sys.stdout)

    def _drawLine(self, line ):
        """Draw one polyline: names starting with '_' are ARB centers of
        mass, other names are joint positions, all projected to 2D."""
        glBegin( GL_LINE_STRIP )
        for name in line:
            if name[0] == '_' :
                pos = self._character.getARBByName(name[1:]).getCMPosition()
            else:
                joint = self._character.getJointByName(name)
                arb = joint.getParent()
                pos = arb.getWorldCoordinates( joint.getParentJointPosition() )
            glVertex2d( *self._converter.to2d(pos) )
        glEnd()
class _Type(object):
circular = 0
perpendicular = 1
class BaseControlPoint(UI.GLUITools.ControlPoint):
    """Common state shared by every control point in the keyframe editor."""

    def __init__( self, posableCharacter, time, converter ):
        """Cache the posable character (and its underlying character), the
        keyframe time, and the 3D-to-2D converter for this view."""
        super(BaseControlPoint,self).__init__()
        self._time = time
        self._converter = converter
        self._posableCharacter = posableCharacter
        self._character = posableCharacter.getCharacter()
class BaseHandleControlPoint(BaseControlPoint):
    """Control point bound to a single keyframe handle."""

    def __init__( self, posableCharacter, time, converter, handle ):
        """Forward the shared state to BaseControlPoint and keep the handle."""
        super(BaseHandleControlPoint,self).__init__(posableCharacter, time, converter)
        self._handle = handle
# Tuning gains converting mouse displacement into rotation.
perpendicularSpeed = 3 # Increase this to make perpendicular handle rotate more quickly
stanceKneeSpeed = 4.2 # Increase this to make stance knee handle rotate more quickly
class HandleControlPoint(BaseHandleControlPoint):
    """Draggable control point for a generic rotation handle.  'circular'
    handles follow the mouse angularly around the joint pivot;
    'perpendicular' handles convert mouse motion perpendicular to the limb
    into a rotation.  The 'reverse*' variants flip the sign."""
    def __init__( self, posableCharacter, time, converter, handle ):
        super(HandleControlPoint,self).__init__(posableCharacter, time, converter, handle)
        # Handles name joints generically; the editor always shows left
        # stance, so STANCE_ maps to the left side and SWING_ to the right.
        characterJointName = handle.getJointName().replace("STANCE_","l").replace("SWING_","r")
        joint = self._character.getJointByName( characterJointName )
        self._jointChild = joint.getChild()
        self._pivotPosInChild = joint.getChildJointPosition()
        self._handlePosInChild = handle.getPosInChild()
        # NOTE: 'type' shadows the builtin within this method.
        type = handle.getType()
        if type == 'circular':
            self._type = _Type.circular
            self._sign = 1
        elif type == 'reverseCircular':
            self._type = _Type.circular
            self._sign = -1
        elif type == 'perpendicular':
            self._type = _Type.perpendicular
            self._sign = 1
        elif type == 'reversePerpendicular':
            self._type = _Type.perpendicular
            self._sign = -1
        else:
            raise ValueError( 'Handle, supported type = "circular", "reverseCircular", "perpendicular", or "reversePerpendicular"' )
    def getPos(self):
        # 2D view position of the handle point attached to the joint's child.
        posHandle = self._jointChild.getWorldCoordinates( self._handlePosInChild )
        return self._converter.to2d( posHandle )
    def setPos(self, pos):
        # self._previousMousePos is presumably maintained by the ControlPoint
        # framework as the last 2D mouse position -- confirm in the base class.
        if self._type == _Type.circular :
            # Signed angle swept by the mouse around the projected pivot, in
            # the plane of this view.
            posPivot = self._jointChild.getWorldCoordinates( self._pivotPosInChild )
            self._converter.project( posPivot )
            v1 = Vector3d( posPivot, Point3d(*self._converter.to3d(self._previousMousePos) ) ).unit()
            v2 = Vector3d( posPivot, Point3d(*self._converter.to3d(pos) ) ).unit()
            cos = v1.dotProductWith(v2)
            sin = v1.crossProductWith(v2).dotProductWith(Vector3d(*self._converter.normal())) * self._sign
            theta = math.atan2(sin, cos)
        else: # _Type.perpendicular
            # Project the mouse displacement onto the direction perpendicular
            # to the pivot->handle segment and scale it into an angle.
            posPivot = self._jointChild.getWorldCoordinates( self._pivotPosInChild )
            handlePos = self._jointChild.getWorldCoordinates( self._handlePosInChild )
            vector = Vector3d( posPivot, handlePos ).unit().crossProductWith( Vector3d(*self._converter.normal()) )
            vector2d = self._converter.to2d( vector )
            theta = (vector2d[0]*(pos[0]-self._previousMousePos[0]) + vector2d[1]*(pos[1]-self._previousMousePos[1])) * self._sign * perpendicularSpeed
        # Apply the delta to the keyframe at the current time, if any.
        index = self._handle.getIndexForTime(self._time)
        if index is None:
            return
        value = self._handle.getKeyframeValue(index)
        self._handle.setKeyframeValue(index, value+theta)
class HandleStanceKneeControlPoint(BaseHandleControlPoint):
    """Control point attached to the stance knee handle.

    The handle is drawn at the stance (left) hip joint; dragging the mouse
    vertically adjusts the stance-knee keyframe value.
    """

    def __init__( self, posableCharacter, time, converter, handle ):
        super(HandleStanceKneeControlPoint,self).__init__(posableCharacter, time, converter, handle)
        hipJoint = self._character.getJointByName( 'lHip' )
        self._jointChild = hipJoint.getChild()
        self._handlePosInChild = hipJoint.getChildJointPosition()

    def getPos(self):
        """Return the 2D screen position of the handle."""
        worldPos = self._jointChild.getWorldCoordinates( self._handlePosInChild )
        return self._converter.to2d( worldPos )

    def setPos(self, pos):
        """Apply a vertical mouse drag to the stance-knee keyframe."""
        keyframeIndex = self._handle.getIndexForTime(self._time)
        if keyframeIndex is None:
            # No keyframe at the current edit time; nothing to change.
            return
        delta = (self._previousMousePos[1] - pos[1]) * stanceKneeSpeed
        currentValue = self._handle.getKeyframeValue(keyframeIndex)
        self._handle.setKeyframeValue(keyframeIndex, currentValue + delta)
class SwingFootControlPoint(BaseControlPoint):
    """Control point attached to the swing (right) ankle.

    Dragging the mouse moves the swing-foot X and Y keyframes together by
    the 2D displacement of the drag.
    """

    def __init__( self, posableCharacter, time, converter, handleX, handleY ):
        super(SwingFootControlPoint,self).__init__(posableCharacter, time, converter)
        ankleJoint = self._character.getJointByName( 'rAnkle' )
        self._jointChild = ankleJoint.getChild()
        self._handlePosInChild = ankleJoint.getChildJointPosition()
        self._handleX = handleX
        self._handleY = handleY

    def getPos(self):
        """Return the 2D screen position of the swing ankle."""
        worldPos = self._jointChild.getWorldCoordinates( self._handlePosInChild )
        return self._converter.to2d( worldPos )

    def setPos(self, pos):
        """Shift the X and Y keyframes by the mouse displacement."""
        keyframeIndex = self._handleX.getIndexForTime(self._time)
        if keyframeIndex is None:
            # No keyframe at the current edit time; nothing to change.
            return
        assert keyframeIndex == self._handleY.getIndexForTime(self._time), 'Unexpected error: handle X and Y keyframes are out-of-sync.'
        deltaX = pos[0] - self._previousMousePos[0]
        deltaY = pos[1] - self._previousMousePos[1]
        self._handleX.setKeyframeValue(keyframeIndex, self._handleX.getKeyframeValue(keyframeIndex) + deltaX)
        self._handleY.setKeyframeValue(keyframeIndex, self._handleY.getKeyframeValue(keyframeIndex) + deltaY)
class ConverterZY(object):
    """Maps between 3D world coordinates and the 2D ZY viewing plane."""

    def to2d(self, vec):
        """Project a 3D point/vector onto the (z, y) screen plane."""
        return vec.z, vec.y

    def to3d(self, vec):
        """Lift a 2D (z, y) pair back to 3D, with x fixed at zero."""
        screenX, screenY = vec[0], vec[1]
        return (0, screenY, screenX)

    def normal(self):
        """Unit normal of the viewing plane (the world x axis)."""
        return (1, 0, 0)

    def project(self, vec):
        """Flatten a 3D point onto the plane in place by zeroing its x."""
        vec.x = 0
class ConverterXY(object):
    """Maps between 3D world coordinates and the 2D XY viewing plane."""

    def to2d(self, vec):
        """Project a 3D point/vector onto the (x, y) screen plane."""
        return vec.x, vec.y

    def to3d(self, vec):
        """Lift a 2D (x, y) pair back to 3D, with z fixed at zero."""
        screenX, screenY = vec[0], vec[1]
        return (screenX, screenY, 0)

    def normal(self):
        """Unit normal of the viewing plane (the world z axis)."""
        return (0, 0, 1)

    def project(self, vec):
        """Flatten a 3D point onto the plane in place by zeroing its z."""
        vec.z = 0
| Python |
'''
Created on 2009-12-08
@author: beaudoin
'''
import Core
from MathLib import Vector3d, Point3d, Quaternion
import math
class PosableCharacter(object):
    """A character that can be posed and edited in the keyframe editor.

    Wraps a Core.Character and its Core.SimBiController and caches the leg
    joints, bodies and constant geometry needed to re-pose the legs with IK.
    """

    def __init__(self, character, controller):
        """Initializes and attach to a standard Core.Character and Core.SimBiController."""
        self._character = character
        self._controller = controller
        # Articulated rigid bodies used when posing the legs.
        self._leftLowerLeg = self._character.getARBByName('lLowerLeg')
        self._rightLowerLeg = self._character.getARBByName('rLowerLeg')
        self._leftFoot = self._character.getARBByName('lFoot')
        self._rightFoot = self._character.getARBByName('rFoot')
        # Joint indices for both legs.  (The original code looked several of
        # these up twice; the duplicates were removed.)
        self._leftHipJointIndex = self._character.getJointIndex( "lHip" )
        self._rightHipJointIndex = self._character.getJointIndex( "rHip" )
        self._leftKneeJointIndex = self._character.getJointIndex( "lKnee" )
        self._rightKneeJointIndex = self._character.getJointIndex( "rKnee" )
        self._leftAnkleJointIndex = self._character.getJointIndex( "lAnkle" )
        self._rightAnkleJointIndex = self._character.getJointIndex( "rAnkle" )
        # Joint objects for both legs.
        self._leftHipJoint = self._character.getJoint( self._leftHipJointIndex )
        self._rightHipJoint = self._character.getJoint( self._rightHipJointIndex )
        self._leftKneeJoint = self._character.getJoint( self._leftKneeJointIndex )
        self._rightKneeJoint = self._character.getJoint( self._rightKneeJointIndex )
        self._leftAnkle = self._character.getJoint( self._leftAnkleJointIndex )
        self._rightAnkle = self._character.getJoint( self._rightAnkleJointIndex )
        self._pelvis = self._leftHipJoint.getParent()
        self._leftUpperLeg = self._leftHipJoint.getChild()
        self._rightUpperLeg = self._rightHipJoint.getChild()
        # Fixed joint positions expressed in their local body frames.
        self._leftAnkleInStanceLowerLeg = self._leftAnkle.getParentJointPosition()
        self._rightAnkleInStanceLowerLeg = self._rightAnkle.getParentJointPosition()
        self._leftHipJointInPelvis = self._leftHipJoint.getParentJointPosition()
        self._rightHipJointInPelvis = self._rightHipJoint.getParentJointPosition()
        # Constant leg-segment vectors used by the two-link IK solver.
        self._leftHipToKneeVectorInUpperLeg = Vector3d( self._leftHipJoint.getChildJointPosition(),
                                                        self._leftKneeJoint.getParentJointPosition() )
        self._rightHipToKneeVectorInUpperLeg = Vector3d( self._rightHipJoint.getChildJointPosition(),
                                                         self._rightKneeJoint.getParentJointPosition() )
        self._leftKneeToAnkleVectorInLowerLeg = Vector3d( self._leftKneeJoint.getChildJointPosition(),
                                                          self._leftAnkle.getParentJointPosition() )
        self._rightKneeToAnkleVectorInLowerLeg = Vector3d( self._rightKneeJoint.getChildJointPosition(),
                                                           self._rightAnkle.getParentJointPosition() )

    def getCharacter(self):
        """Access the character."""
        return self._character

    def updatePose(self, time, stanceFootToSwingFoot, stance = Core.LEFT_STANCE, dontMoveStanceAnkle = False):
        """Updates the pose of the character to match the one at the specified time.

        time                  -- phase time at which the controller's tracking pose is sampled
        stanceFootToSwingFoot -- Vector3d for the vector linking the stance foot to the swing foot
        stance                -- which leg is the stance leg (Core.LEFT_STANCE by default)
        dontMoveStanceAnkle   -- if True, translate the final pose so the stance ankle
                                 keeps its current world position
        """
        # Select per-leg cached data according to the stance side.
        if stance == Core.LEFT_STANCE:
            stanceLowerLeg = self._leftLowerLeg
            stanceAnkleInStanceLowerLeg = self._leftAnkleInStanceLowerLeg
            stanceHipJointInPelvis = self._leftHipJointInPelvis
            stanceHipJointIndex = self._leftHipJointIndex
            swingHipJointInPelvis = self._rightHipJointInPelvis
            swingHipToKneeVectorInUpperLeg = self._rightHipToKneeVectorInUpperLeg
            swingKneeToAnkleVectorInLowerLeg = self._rightKneeToAnkleVectorInLowerLeg
            swingKneeJointIndex = self._rightKneeJointIndex
            swingHipJointIndex = self._rightHipJointIndex
        else:
            stanceLowerLeg = self._rightLowerLeg
            stanceAnkleInStanceLowerLeg = self._rightAnkleInStanceLowerLeg
            stanceHipJointInPelvis = self._rightHipJointInPelvis
            stanceHipJointIndex = self._rightHipJointIndex
            swingHipJointInPelvis = self._leftHipJointInPelvis
            swingHipToKneeVectorInUpperLeg = self._leftHipToKneeVectorInUpperLeg
            swingKneeToAnkleVectorInLowerLeg = self._leftKneeToAnkleVectorInLowerLeg
            swingKneeJointIndex = self._leftKneeJointIndex
            swingHipJointIndex = self._leftHipJointIndex
        if dontMoveStanceAnkle :
            # Remember where the stance ankle currently is so the final pose
            # can be translated back at the end.
            finalStanceAnkleInWorld = stanceLowerLeg.getWorldCoordinates( stanceAnkleInStanceLowerLeg )
        # Sample the controller's tracking pose at the requested time.
        pose = Core.ReducedCharacterStateArray()
        self._controller.updateTrackingPose(pose, time, stance)
        reducedCharacter = Core.ReducedCharacterState(pose)
        # Update stanceFootToSwingFoot, adding in the delta
        stanceFootToSwingFoot += self._controller.computeSwingFootDelta(time, stance)
        # Recenter and reorient the character
        pos = reducedCharacter.getPosition()
        pos.x = pos.z = 0
        reducedCharacter.setPosition(pos)
        reducedCharacter.setOrientation(Quaternion())
        self._character.setState(pose, 0, False)
        # Remember the feet's world orientations so the ankles can be
        # restored after the legs are re-posed.
        leftFootWorldOrientation = self._leftFoot.getOrientation()
        rightFootWorldOrientation = self._rightFoot.getOrientation()
        # Setup the stance leg: rotate the stance hip so the hip-to-ankle
        # direction points at the desired stance ankle position.
        currentStanceAnkleInWorld = stanceLowerLeg.getWorldCoordinates( stanceAnkleInStanceLowerLeg )
        currentStanceAnkleInPelvis = self._pelvis.getLocalCoordinates( currentStanceAnkleInWorld )
        currentStanceHipToAnkleInPelvis = Vector3d( stanceHipJointInPelvis, currentStanceAnkleInPelvis )
        lengthStanceHipToAnkle = currentStanceHipToAnkleInPelvis.length()
        stanceHipJointInWorld = self._pelvis.getWorldCoordinates(stanceHipJointInPelvis)
        # The stance ankle is placed halfway behind the stance-to-swing vector.
        desiredStanceAnkleInWorld = Point3d(stanceFootToSwingFoot*-0.5)
        # Solve for the ankle height that keeps the hip-to-ankle length
        # constant (renamed from the original misspelled 'yLentgh2').
        yLength2 = lengthStanceHipToAnkle*lengthStanceHipToAnkle - \
                   desiredStanceAnkleInWorld.x*desiredStanceAnkleInWorld.x - \
                   desiredStanceAnkleInWorld.z*desiredStanceAnkleInWorld.z
        if yLength2 <= 0:
            # Target is horizontally out of reach: put the ankle at hip height.
            desiredStanceAnkleInWorld.y = stanceHipJointInWorld.y
        else:
            desiredStanceAnkleInWorld.y = stanceHipJointInWorld.y - math.sqrt( yLength2 )
        desiredStanceAnkleInPelvis = self._pelvis.getLocalCoordinates( desiredStanceAnkleInWorld )
        desiredStanceHipToAnkleInPelvis = Vector3d( stanceHipJointInPelvis, desiredStanceAnkleInPelvis )
        currentStanceHipToAnkleInPelvis.toUnit()
        desiredStanceHipToAnkleInPelvis.toUnit()
        rot = Quaternion( currentStanceHipToAnkleInPelvis, desiredStanceHipToAnkleInPelvis )
        currQuat = reducedCharacter.getJointRelativeOrientation(stanceHipJointIndex)
        currQuat *= rot
        reducedCharacter.setJointRelativeOrientation(currQuat, stanceHipJointIndex)
        # Drop the root so the stance ankle lands at height zero.
        pos = reducedCharacter.getPosition()
        pos.y -= desiredStanceAnkleInWorld.y
        reducedCharacter.setPosition(pos)
        self._character.setState(pose, 0, False)
        # Setup the swing leg
        currentStanceAnkleInWorld = stanceLowerLeg.getWorldCoordinates( stanceAnkleInStanceLowerLeg )
        desiredSwingAnkleInWorld = currentStanceAnkleInWorld + stanceFootToSwingFoot
        targetInPelvis = self._pelvis.getLocalCoordinates( desiredSwingAnkleInWorld )
        qParent = Quaternion()
        qChild = Quaternion()
        Core.TwoLinkIK_getIKOrientations(
                    swingHipJointInPelvis,
                    targetInPelvis,
                    Vector3d(-1,0,0), swingHipToKneeVectorInUpperLeg,
                    Vector3d(-1,0,0), swingKneeToAnkleVectorInLowerLeg, qParent, qChild)
        reducedCharacter.setJointRelativeOrientation(qChild, swingKneeJointIndex)
        reducedCharacter.setJointRelativeOrientation(qParent, swingHipJointIndex)
        self._character.setState(pose,0,False)
        # Restore the ankle joints so the feet keep their original world orientation.
        leftAnkleLocalOrientation = self._leftLowerLeg.getOrientation().getInverse() * leftFootWorldOrientation
        rightAnkleLocalOrientation = self._rightLowerLeg.getOrientation().getInverse() * rightFootWorldOrientation
        reducedCharacter.setJointRelativeOrientation(leftAnkleLocalOrientation, self._leftAnkleJointIndex)
        reducedCharacter.setJointRelativeOrientation(rightAnkleLocalOrientation, self._rightAnkleJointIndex)
        if dontMoveStanceAnkle :
            # Translate the whole pose so the stance ankle returns to where it
            # was before this update.
            delta = finalStanceAnkleInWorld - stanceLowerLeg.getWorldCoordinates( stanceAnkleInStanceLowerLeg )
            pos = reducedCharacter.getPosition() + delta
            reducedCharacter.setPosition(pos)
            self._character.setState(pose,0,False)
| Python |
'''
Created on 2009-12-02
@author: beaudoin
'''
import math, MathLib
class BaseHandle(object):
    """Wraps a keyframe trajectory so it can be edited through handles.

    Optionally keeps an "opposite" trajectory (the same joint on the other
    stance) in sync: changing the first or last keyframe mirrors the value,
    multiplied by ``_oppositeSign``, onto the matching keyframe of the
    opposite trajectory -- or of this trajectory itself when no opposite is
    given.
    """

    def __init__( self, trajectory, oppositeTrajectory = None, reverseOppositeJoint = False, minValue = -1000, maxValue = 1000 ):
        """
        Creates a handle that can be manipulated and is linked to a trajectory

        trajectory           -- the main editable trajectory
        oppositeTrajectory   -- optional mirror-joint trajectory kept in sync
        reverseOppositeJoint -- True if the mirrored value must be negated
        minValue, maxValue   -- clamping range for keyframe values
        """
        self._trajectory = trajectory
        self._oppositeTrajectory = oppositeTrajectory
        # Sign applied when mirroring a value onto the opposite keyframe.
        self._oppositeSign = -1 if reverseOppositeJoint else 1
        self._minValue = minValue
        self._maxValue = maxValue

    def forceKeyframesAt(self,forcedTimeArray,allowedTimeArray):
        """Make sure keyframes are found only at specific times.

        The function first create keyframes at every time found in forcedTimeArray.
        Then it deletes any keyframe that does not fall in allowedTimeArray.
        In general all the times in forcedTimeArray should also be found in allowedTimeArray.
        The resulting curve will be an approximation of the current curve."""
        values = []
        # Sample the current curve at every forced time.
        for time in forcedTimeArray:
            values.append( (time, self._trajectory.evaluate_catmull_rom(time)) )
        # Keep existing keyframes that are allowed but not already forced.
        for time in allowedTimeArray:
            if time not in forcedTimeArray:
                index = self.getIndexForTime(time)
                if index is not None:
                    values.append( (time, self._trajectory.getKnotValue(index)) )
        # Rebuild the trajectory from the collected (time, value) pairs.
        self._trajectory.clear()
        for time, value in values:
            self._trajectory.addKnot(time, value)

    def addKeyframeAt(self,time):
        """Adds a single keyframe at the specified time. The curve might be slightly modified as a result."""
        self._trajectory.addKnot(time, self._trajectory.evaluate_catmull_rom(time))

    def getIndexForTime(self, time):
        """Gets the index for the keyframe at the specified time. None if no keyframe found at that time."""
        for i in range( self._trajectory.getKnotCount() ):
            # Tolerate tiny floating-point differences in the knot time.
            if math.fabs( time - self._trajectory.getKnotPosition(i) ) < 0.00001 :
                return i
        return None

    def removeKeyframe(self, index):
        """Removes the keyframe at the specified index."""
        self._trajectory.removeKnot(index)

    def hasKeyframeAtTime(self, time):
        """Checks if this handle has a keyframe at the specified time."""
        return self.getIndexForTime(time) is not None

    def getKeyframeValue(self, index):
        """Returns the keyframe value at the specified index"""
        return self._trajectory.getKnotValue(index)

    def enforceSymmetry(self):
        """Make sure the beginning of the trajectory matches the end (of the other stance)."""
        # Re-setting the first keyframe triggers the mirroring logic below.
        self.setKeyframeValue(0, self.getKeyframeValue(0))

    def setKeyframeValue(self, index, value):
        """Changes the value of the keyframe at the specified index.

        Editing the first (or last) keyframe also mirrors the signed value
        onto the last (or first) keyframe of the opposite trajectory, or of
        this trajectory itself when no opposite trajectory exists.
        """
        value = self.clampValue(value)
        self._trajectory.setKnotValue(index, value)
        # This forces the symmetry
        lastIndex = self._trajectory.getKnotCount()-1
        # Explicit None check instead of the previous broad `except
        # AttributeError`, which could mask an AttributeError raised by the
        # trajectory calls themselves and then wrongly self-mirror.
        if self._oppositeTrajectory is None:
            if index == 0 :
                self._trajectory.setKnotValue(lastIndex, value * self._oppositeSign )
            elif index == lastIndex :
                self._trajectory.setKnotValue(0, value * self._oppositeSign )
            return
        otherLastIndex = self._oppositeTrajectory.getKnotCount()-1
        # Only mirror when both trajectories have matching keyframe layouts.
        if lastIndex != otherLastIndex or self._trajectory.getKnotPosition(lastIndex) != self._oppositeTrajectory.getKnotPosition(lastIndex) :
            return
        if index == 0 :
            self._oppositeTrajectory.setKnotValue(lastIndex, value * self._oppositeSign )
        elif index == lastIndex :
            self._oppositeTrajectory.setKnotValue(0, value * self._oppositeSign )

    def clampValue(self, value):
        """Return the value clamped to lie between supported extreme values."""
        if value < self._minValue: return self._minValue
        if value > self._maxValue: return self._maxValue
        return value
class Handle(BaseHandle):
    """A handle bound to one component of a joint trajectory in a controller."""

    def __init__( self, controller, jointName, componentIndex, type='unknown', posInChild=None, oppositeJointName = None, reverseOppositeJoint = False, minValue = -1000, maxValue = 1000 ):
        """
        Creates a handle that can be used to access a specific component in a controller in a standard (direct) way.

        type should be 'circular', 'reverseCircular', 'perpendicular', 'reversePerpendicular' or 'unknown' to indicate how the handle behaves
        posInChild should be of type MathLib.Point3d (defaults to a fresh Point3d at the origin)
        reverse should be True if the handle works in a reverse way (i.e. going clockwise increase the angle (?))
        oppositeJointName should be the name of the corresponding joint on the other stance.
        reverseOppositeJoint should be True if the opposite joint sign is different
        """
        # None sentinel instead of a mutable Point3d() default: a shared
        # default instance would be silently mutated across all handles.
        if posInChild is None:
            posInChild = MathLib.Point3d()
        self._controller = controller
        # NOTE: the 'type' parameter name shadows the builtin but is kept for
        # backward compatibility with keyword-argument callers.
        self._jointName = jointName
        self._oppositeJointName = oppositeJointName
        self._posInChild = posInChild
        self._type = type
        trajectory = controller.getState(0).getTrajectory(jointName).getTrajectoryComponent(componentIndex).getBaseTrajectory()
        if oppositeJointName is not None:
            oppositeTrajectory = controller.getState(0).getTrajectory(oppositeJointName).getTrajectoryComponent(componentIndex).getBaseTrajectory()
        else:
            oppositeTrajectory = None
        super(Handle,self).__init__(trajectory, oppositeTrajectory, reverseOppositeJoint, minValue, maxValue)

    def getController(self):
        """Gets the controller associated with that handle."""
        return self._controller

    def getJointName(self):
        """Return the joint name for this handle."""
        return self._jointName

    def getType(self):
        """Return the type desired for this handle."""
        return self._type

    def getPosInChild(self):
        """Return the position of the handle in child coordinate."""
        return self._posInChild
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.