code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides HTTP functions for gdata.service to use on Google App Engine
AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
urlfetch API. Set the http_client member of a GDataService object to an
instance of an AppEngineHttpClient to allow the gdata library to run on
Google App Engine.
run_on_appengine: Function which will modify an existing GDataService object
to allow it to run on App Engine. It works by creating a new instance of
the AppEngineHttpClient and replacing the GDataService object's
http_client.
HttpRequest: Function that wraps google.appengine.api.urlfetch.Fetch in a
common interface which is used by gdata.service.GDataService. In other
words, this module can be used as the gdata service request handler so
that all HTTP requests will be performed by the hosting Google App Engine
server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
import atom.service
import atom.http_interface
from google.appengine.api import urlfetch
def run_on_appengine(gdata_service):
  """Configures a GDataService-style object to run on Google App Engine.

  Replaces the object's http_client with a fresh AppEngineHttpClient so
  that all of its HTTP traffic goes through App Engine's urlfetch API.

  Args:
    gdata_service: An instance of AtomService, GDataService, or any of
        their subclasses which has an http_client member.
  """
  gdata_service.http_client = AppEngineHttpClient()
class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
  """HTTP client which routes requests through App Engine's urlfetch API."""

  def __init__(self, headers=None):
    self.debug = False
    # Headers sent with every request; per-request headers are merged in.
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server; supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed, usually one of
          'GET', 'POST', 'PUT', or 'DELETE'.
      url: The full URL to which the request should be sent. Can be a
          string or atom.url.Url.
      data: filestream, list of parts, or other object which can be
          converted to a string. Should be set to None when performing a
          GET or DELETE. A file-like object is read in full; a list of
          parts has each part converted and concatenated.
      headers: dict of strings. HTTP headers which should be sent in the
          request, merged over this client's default headers.

    Returns:
      An HttpResponse wrapping the urlfetch response.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)
    # Flatten the request body into a single string payload.
    # BUG FIX: the original code called the module-level __ConvertDataPart
    # from inside this class body. Python's private-name mangling rewrote
    # that reference to _AppEngineHttpClient__ConvertDataPart, which does
    # not exist, so any request with a body raised NameError. The
    # converter now lives on the class itself.
    data_str = data
    if data:
      if isinstance(data, list):
        data_str = ''.join([self._ConvertDataPart(part) for part in data])
      else:
        data_str = self._ConvertDataPart(data)
    # Supply Content-Length from the payload if the caller did not.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = len(data_str)
    # Default the content type when the caller did not set one.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'
    # Map the HTTP verb onto the corresponding urlfetch constant;
    # unrecognized verbs yield None, matching the original fallthrough.
    method = {'GET': urlfetch.GET, 'POST': urlfetch.POST,
              'PUT': urlfetch.PUT, 'DELETE': urlfetch.DELETE}.get(operation)
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers))

  def _ConvertDataPart(self, data):
    """Converts one payload part to a string.

    Strings and falsy values pass through unchanged; file-like objects
    are read in full; anything else is converted with str().
    """
    if not data or isinstance(data, str):
      return data
    if hasattr(data, 'read'):
      return data.read()
    return str(data)
def HttpRequest(service, operation, data, uri, extra_headers=None,
    url_params=None, escape_params=True, content_type='application/atom+xml'):
  """Performs an HTTP call to the server; supports GET, POST, PUT, and DELETE.

  This function is deprecated; use AppEngineHttpClient.request instead.

  To use this module with gdata.service, set this module as the
  http_request_handler so that HTTP requests use Google App Engine's
  urlfetch:
    import gdata.service
    import gdata.urlfetch
    gdata.service.http_request_handler = gdata.urlfetch

  Args:
    service: atom.AtomService object which contains some of the parameters
        needed to make the request. The following members are used to
        construct the HTTP call: server (str), additional_headers (dict),
        port (int), and ssl (bool).
    operation: str The HTTP operation to be performed, usually one of
        'GET', 'POST', 'PUT', or 'DELETE'.
    data: filestream, list of parts, or other object which can be
        converted to a string. Should be set to None when performing a
        GET or DELETE. A file-like object is read in full; a list of
        parts has each part converted and concatenated.
    uri: The beginning of the URL to which the request should be sent.
        Examples: '/', '/base/feeds/snippets',
        '/m8/feeds/contacts/default/base'
    extra_headers: dict of strings. HTTP headers sent in the request in
        addition to those stored in service.additional_headers.
    url_params: dict of strings. Key-value pairs to be added to the URL
        as URL parameters. For example {'foo':'bar', 'test':'param'}
        becomes ?foo=bar&test=param.
    escape_params: bool default True. If true, the keys and values in
        url_params are URL-escaped when the form is constructed
        (special characters converted to %XX form).
    content_type: str The MIME type for the data being sent. Defaults to
        'application/atom+xml'; only used when data is set.

  Returns:
    An HttpResponse wrapping the urlfetch response.
  """
  full_uri = atom.service.BuildUri(uri, url_params, escape_params)
  (server, port, ssl, partial_uri) = atom.service.ProcessUrl(service, full_uri)
  # Construct the full URL for the request.
  if ssl:
    full_url = 'https://%s%s' % (server, partial_uri)
  else:
    full_url = 'http://%s%s' % (server, partial_uri)
  # Flatten the request body; data is expected to be None, a string,
  # a file-like object, or a list of such parts.
  data_str = data
  if data:
    if isinstance(data, list):
      data_str = ''.join([__ConvertDataPart(part) for part in data])
    else:
      data_str = __ConvertDataPart(data)
  # Construct the dictionary of HTTP headers.
  headers = {}
  if isinstance(service.additional_headers, dict):
    headers = service.additional_headers.copy()
  if isinstance(extra_headers, dict):
    # dict.update replaces the iteritems() loop; same result.
    headers.update(extra_headers)
  # Add the content type header (no need to calculate content length,
  # since urlfetch.Fetch calculates it for us).
  if content_type:
    headers['Content-Type'] = content_type
  # Map the HTTP verb onto the corresponding urlfetch constant;
  # unrecognized verbs yield None, matching the original fallthrough.
  method = {'GET': urlfetch.GET, 'POST': urlfetch.POST,
            'PUT': urlfetch.PUT, 'DELETE': urlfetch.DELETE}.get(operation)
  return HttpResponse(urlfetch.Fetch(url=full_url, payload=data_str,
      method=method, headers=headers))
def __ConvertDataPart(data):
  """Coerces one request-body part into a string.

  Strings and falsy values pass through unchanged; file-like objects
  are read in full; anything else is converted with str().
  """
  if not data or isinstance(data, str):
    return data
  if hasattr(data, 'read'):
    # data is a file-like object; consume it completely.
    return data.read()
  # Not a file; fall back to the string conversion.
  return str(data)
class HttpResponse(object):
  """Translates a urlfetch response to look like an httplib response.

  Used to allow the response from HttpRequest to be usable by
  gdata.service methods.  (Fixes the original docstring's typos:
  "resoinse"/"hhtplib".)
  """

  def __init__(self, urlfetch_response):
    # Buffer the body so callers can stream it with read().
    self.body = StringIO.StringIO(urlfetch_response.content)
    self.headers = urlfetch_response.headers
    self.status = urlfetch_response.status_code
    self.reason = ''

  def read(self, length=None):
    """Reads the remaining body, or up to length bytes of it.

    NOTE(review): a length of 0 is treated like None and returns the
    whole remaining body -- kept for backward compatibility.
    """
    if not length:
      return self.body.read()
    else:
      return self.body.read(length)

  def getheader(self, name):
    """Returns the value of the named header.

    Falls back to the lower-cased name when the exact name is absent;
    raises KeyError if neither is present (original behavior).
    """
    # 'in' replaces the deprecated dict.has_key().
    if name not in self.headers:
      return self.headers[name.lower()]
    return self.headers[name]
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to interact with the Blogger server."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import gdata.service
import gdata.blogger
class BloggerService(gdata.service.GDataService):
  """Client for the Blogger GData service: blogs, posts, and comments."""

  def __init__(self, email=None, password=None, source=None,
      server=None, api_key=None,
      additional_headers=None):
    """Creates a Blogger service client.

    NOTE(review): api_key is accepted but never stored or used here.
    """
    gdata.service.GDataService.__init__(self, email=email, password=password,
        service='blogger', source=source, server=server,
        additional_headers=additional_headers)
    self.accountType = 'GOOGLE'

  def GetBlogFeed(self, uri=None):
    """Retrieves the feed of blogs the current user may manage."""
    target = uri or 'http://www.blogger.com/feeds/default/blogs'
    return self.Get(target, converter=gdata.blogger.BlogFeedFromString)

  def GetBlogCommentFeed(self, blog_id=None, uri=None):
    """Retrieves the feed of comments for an entire blog."""
    target = uri
    if blog_id:
      target = 'http://www.blogger.com/feeds/%s/comments/default' % blog_id
    return self.Get(target, converter=gdata.blogger.CommentFeedFromString)

  def GetBlogPostFeed(self, blog_id=None, uri=None):
    """Retrieves the feed of posts for a blog."""
    target = uri
    if blog_id:
      target = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id
    return self.Get(target, converter=gdata.blogger.BlogPostFeedFromString)

  def GetPostCommentFeed(self, blog_id=None, post_id=None, uri=None):
    """Retrieves the feed of comments on one particular blog post."""
    target = uri
    if blog_id and post_id:
      target = ('http://www.blogger.com/feeds/%s/%s/comments/default'
          % (blog_id, post_id))
    return self.Get(target, converter=gdata.blogger.CommentFeedFromString)

  def AddPost(self, entry, blog_id=None, uri=None):
    """Publishes entry as a new post on the given blog."""
    target = uri
    if blog_id:
      target = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id
    return self.Post(entry, target,
        converter=gdata.blogger.BlogPostEntryFromString)

  def UpdatePost(self, entry, uri=None):
    """Saves changes to an existing post, using the entry's edit link
    when no uri is given."""
    target = uri or entry.GetEditLink().href
    return self.Put(entry, target,
        converter=gdata.blogger.BlogPostEntryFromString)

  def DeletePost(self, entry=None, uri=None):
    """Deletes a post identified by uri, or by the entry's edit link."""
    target = uri or entry.GetEditLink().href
    return self.Delete(target)

  def AddComment(self, comment_entry, blog_id=None, post_id=None, uri=None):
    """Adds a new comment to the specified blog post."""
    target = uri
    if blog_id and post_id:
      target = ('http://www.blogger.com/feeds/%s/%s/comments/default'
          % (blog_id, post_id))
    return self.Post(comment_entry, target,
        converter=gdata.blogger.CommentEntryFromString)

  def DeleteComment(self, entry=None, uri=None):
    """Deletes a comment identified by uri, or by the entry's edit link."""
    target = uri or entry.GetEditLink().href
    return self.Delete(target)
class BlogQuery(gdata.service.Query):
  """Query over the list of a user's Blogger blogs."""

  def __init__(self, feed=None, params=None, categories=None, blog_id=None):
    """Constructs a query object for the list of a user's Blogger blogs.

    Args:
      feed: str (optional) The beginning of the URL to be queried. When
          feed is not set and no blog_id is passed in, the default value
          'http://www.blogger.com/feeds/default/blogs' is used.
      params: dict (optional)
      categories: list (optional)
      blog_id: str (optional)
    """
    if not feed:
      if blog_id:
        feed = 'http://www.blogger.com/feeds/default/blogs/%s' % blog_id
      else:
        feed = 'http://www.blogger.com/feeds/default/blogs'
    gdata.service.Query.__init__(self, feed=feed, params=params,
        categories=categories)
class BlogPostQuery(gdata.service.Query):
  """Query over a blog's posts feed, or over a single post."""

  def __init__(self, feed=None, params=None, categories=None, blog_id=None,
      post_id=None):
    """Builds the feed URL from blog_id (and optionally post_id) when no
    explicit feed is given."""
    if not feed and blog_id:
      if post_id:
        feed = 'http://www.blogger.com/feeds/%s/posts/default/%s' % (
            blog_id, post_id)
      else:
        feed = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id
    gdata.service.Query.__init__(self, feed=feed, params=params,
        categories=categories)
class BlogCommentQuery(gdata.service.Query):
  """Query over comment feeds: a blog's comments, one post's comments,
  or a single comment."""

  def __init__(self, feed=None, params=None, categories=None, blog_id=None,
      post_id=None, comment_id=None):
    """Builds the feed URL from the given ids when no explicit feed is
    given; comment_id takes precedence over post_id, as in the original."""
    if not feed and blog_id:
      if comment_id:
        feed = 'http://www.blogger.com/feeds/%s/comments/default/%s' % (
            blog_id, comment_id)
      elif post_id:
        feed = 'http://www.blogger.com/feeds/%s/%s/comments/default' % (
            blog_id, post_id)
      else:
        feed = 'http://www.blogger.com/feeds/%s/comments/default' % blog_id
    gdata.service.Query.__init__(self, feed=feed, params=params,
        categories=categories)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Blogger."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import atom
import gdata
import re
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_NAMESPACE = 'http://purl.org/syndication/thread/1.0'
class BloggerEntry(gdata.GDataEntry):
  """Adds convenience methods inherited by all Blogger entries."""

  # Raw strings so the \w escapes reach the regex engine verbatim
  # (non-raw '\w' is an invalid escape sequence in modern Python).
  # NOTE(review): only http:// alternate links match; an https link
  # would not -- confirm Blogger still serves http alternates.
  blog_name_pattern = re.compile(r'(http://)(\w*)')
  blog_id_pattern = re.compile(r'(tag:blogger.com,1999:blog-)(\w*)')

  def GetBlogId(self):
    """Extracts the Blogger id of this blog.

    This method is useful when constructing URLs by hand. The blog id is
    often used in Blogger operation URLs. This should not be confused
    with the id member of a BloggerBlog; the id element is the Atom id
    XML element, and the blog id returned here is one part of that id.

    Returns:
      The blog's unique id as a string, or None if the entry has no id
      text.
    """
    if self.id.text:
      # An id that does not match the expected tag format raises
      # AttributeError (match() returns None) -- original behavior kept.
      return self.blog_id_pattern.match(self.id.text).group(2)
    return None

  def GetBlogName(self):
    """Finds the name of this blog as used in the 'alternate' URL.

    An alternate URL is in the form 'http://blogName.blogspot.com/'. For
    an entry representing the above example, this method would return
    'blogName'.

    Returns:
      The blog's URL name component as a string, or None when the entry
      has no alternate link.
    """
    for link in self.link:
      if link.rel == 'alternate':
        return self.blog_name_pattern.match(link.href).group(2)
    return None
class BlogEntry(BloggerEntry):
  """Describes one blog entry in the feed listing a user's blogs.

  No extra fields beyond BloggerEntry; exists so feed parsing can attach
  the Blogger convenience methods to each entry.
  """
def BlogEntryFromString(xml_string):
  """Parses an Atom XML string into a BlogEntry instance."""
  return atom.CreateClassFromXMLString(BlogEntry, xml_string)
class BlogFeed(gdata.GDataFeed):
  """Describes a feed of a user's blogs."""
  # Copy the parent's child-element map and override the Atom entry
  # handler so entries parse as BlogEntry instances, not plain entries.
  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogEntry])
def BlogFeedFromString(xml_string):
  """Parses an Atom XML string into a BlogFeed instance."""
  return atom.CreateClassFromXMLString(BlogFeed, xml_string)
class BlogPostEntry(BloggerEntry):
  """Describes a blog post entry in the feed of a blog's posts."""

  # Raw string so the \w escapes reach the regex engine verbatim.
  # NOTE(review): the unescaped '.' before 'post-' matches any character;
  # '\.' would be stricter, but the looser form is kept to preserve the
  # original matching behavior.
  post_id_pattern = re.compile(r'(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')

  def AddLabel(self, label):
    """Adds a label to the blog post.

    The label is represented by an Atom category element, so this method
    is shorthand for appending a new atom.Category object.

    Args:
      label: str The label text.
    """
    self.category.append(atom.Category(scheme=LABEL_SCHEME, term=label))

  def GetPostId(self):
    """Extracts the postID string from the entry's Atom id.

    Returns:
      A string of digits which identify this post within the blog, or
      None if the entry has no id text.
    """
    if self.id.text:
      return self.post_id_pattern.match(self.id.text).group(4)
    return None
def BlogPostEntryFromString(xml_string):
  """Parses an Atom XML string into a BlogPostEntry instance."""
  return atom.CreateClassFromXMLString(BlogPostEntry, xml_string)
class BlogPostFeed(gdata.GDataFeed):
  """Describes a feed of a blog's posts."""
  # Copy the parent's child-element map and override the Atom entry
  # handler so entries parse as BlogPostEntry instances.
  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogPostEntry])
def BlogPostFeedFromString(xml_string):
  """Parses an Atom XML string into a BlogPostFeed instance."""
  return atom.CreateClassFromXMLString(BlogPostFeed, xml_string)
class InReplyTo(atom.AtomBase):
  """Represents a thr:in-reply-to element (thread namespace)."""
  _tag = 'in-reply-to'
  _namespace = THR_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes.update({
      'href': 'href',
      'ref': 'ref',
      'source': 'source',
      'type': 'type',
  })

  def __init__(self, href=None, ref=None, source=None, type=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Stores the element's attribute values; every argument is optional."""
    self.text = text
    self.href = href
    self.ref = ref
    self.source = source
    self.type = type
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def InReplyToFromString(xml_string):
  """Parses an XML string into an InReplyTo instance."""
  return atom.CreateClassFromXMLString(InReplyTo, xml_string)
class CommentEntry(BloggerEntry):
  """Describes a comment entry in the feed of a blog post's comments."""

  _children = BloggerEntry._children.copy()
  _children['{%s}in-reply-to' % THR_NAMESPACE] = ('in_reply_to', InReplyTo)
  # Raw string so the \w escape reaches the regex engine verbatim;
  # captures the trailing token after the final '-' in the Atom id.
  comment_id_pattern = re.compile(r'.*-(\w*)$')

  def __init__(self, author=None, category=None, content=None,
      contributor=None, atom_id=None, link=None, published=None, rights=None,
      source=None, summary=None, control=None, title=None, updated=None,
      in_reply_to=None, extension_elements=None, extension_attributes=None,
      text=None):
    BloggerEntry.__init__(self, author=author, category=category,
        content=content, contributor=contributor, atom_id=atom_id, link=link,
        published=published, rights=rights, source=source, summary=summary,
        control=control, title=title, updated=updated,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
    self.in_reply_to = in_reply_to

  def GetCommentId(self):
    """Extracts the commentID string from the entry's Atom id.

    Returns:
      A string of digits which identify this comment, or None if the
      entry has no id text.
    """
    if self.id.text:
      return self.comment_id_pattern.match(self.id.text).group(1)
    return None
def CommentEntryFromString(xml_string):
  """Parses an Atom XML string into a CommentEntry instance."""
  return atom.CreateClassFromXMLString(CommentEntry, xml_string)
class CommentFeed(gdata.GDataFeed):
  """Describes a feed of a blog post's comments."""
  # Copy the parent's child-element map and override the Atom entry
  # handler so entries parse as CommentEntry instances.
  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CommentEntry])
def CommentFeedFromString(xml_string):
  """Parses an Atom XML string into a CommentFeed instance."""
  return atom.CreateClassFromXMLString(CommentFeed, xml_string)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Blogger."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import atom
import gdata
import re
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_NAMESPACE = 'http://purl.org/syndication/thread/1.0'
class BloggerEntry(gdata.GDataEntry):
  """Adds convenience methods inherited by all Blogger entries."""

  # Raw strings so the \w escapes reach the regex engine verbatim
  # (non-raw '\w' is an invalid escape sequence in modern Python).
  # NOTE(review): only http:// alternate links match; an https link
  # would not -- confirm Blogger still serves http alternates.
  blog_name_pattern = re.compile(r'(http://)(\w*)')
  blog_id_pattern = re.compile(r'(tag:blogger.com,1999:blog-)(\w*)')

  def GetBlogId(self):
    """Extracts the Blogger id of this blog.

    This method is useful when constructing URLs by hand. The blog id is
    often used in Blogger operation URLs. This should not be confused
    with the id member of a BloggerBlog; the id element is the Atom id
    XML element, and the blog id returned here is one part of that id.

    Returns:
      The blog's unique id as a string, or None if the entry has no id
      text.
    """
    if self.id.text:
      # An id that does not match the expected tag format raises
      # AttributeError (match() returns None) -- original behavior kept.
      return self.blog_id_pattern.match(self.id.text).group(2)
    return None

  def GetBlogName(self):
    """Finds the name of this blog as used in the 'alternate' URL.

    An alternate URL is in the form 'http://blogName.blogspot.com/'. For
    an entry representing the above example, this method would return
    'blogName'.

    Returns:
      The blog's URL name component as a string, or None when the entry
      has no alternate link.
    """
    for link in self.link:
      if link.rel == 'alternate':
        return self.blog_name_pattern.match(link.href).group(2)
    return None
class BlogEntry(BloggerEntry):
  """Describes one blog entry in the feed listing a user's blogs.

  No extra fields beyond BloggerEntry; exists so feed parsing can attach
  the Blogger convenience methods to each entry.
  """
def BlogEntryFromString(xml_string):
  """Parses an Atom XML string into a BlogEntry instance."""
  return atom.CreateClassFromXMLString(BlogEntry, xml_string)
class BlogFeed(gdata.GDataFeed):
  """Describes a feed of a user's blogs."""
  # Copy the parent's child-element map and override the Atom entry
  # handler so entries parse as BlogEntry instances, not plain entries.
  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogEntry])
def BlogFeedFromString(xml_string):
  """Parses an Atom XML string into a BlogFeed instance."""
  return atom.CreateClassFromXMLString(BlogFeed, xml_string)
class BlogPostEntry(BloggerEntry):
  """Describes a blog post entry in the feed of a blog's posts."""

  # Raw string so the \w escapes reach the regex engine verbatim.
  # NOTE(review): the unescaped '.' before 'post-' matches any character;
  # '\.' would be stricter, but the looser form is kept to preserve the
  # original matching behavior.
  post_id_pattern = re.compile(r'(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')

  def AddLabel(self, label):
    """Adds a label to the blog post.

    The label is represented by an Atom category element, so this method
    is shorthand for appending a new atom.Category object.

    Args:
      label: str The label text.
    """
    self.category.append(atom.Category(scheme=LABEL_SCHEME, term=label))

  def GetPostId(self):
    """Extracts the postID string from the entry's Atom id.

    Returns:
      A string of digits which identify this post within the blog, or
      None if the entry has no id text.
    """
    if self.id.text:
      return self.post_id_pattern.match(self.id.text).group(4)
    return None
def BlogPostEntryFromString(xml_string):
  """Parses an Atom XML string into a BlogPostEntry instance."""
  return atom.CreateClassFromXMLString(BlogPostEntry, xml_string)
class BlogPostFeed(gdata.GDataFeed):
  """Describes a feed of a blog's posts."""
  # Copy the parent's child-element map and override the Atom entry
  # handler so entries parse as BlogPostEntry instances.
  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogPostEntry])
def BlogPostFeedFromString(xml_string):
  """Parses an Atom XML string into a BlogPostFeed instance."""
  return atom.CreateClassFromXMLString(BlogPostFeed, xml_string)
class InReplyTo(atom.AtomBase):
  """Represents a thr:in-reply-to element (thread namespace)."""
  _tag = 'in-reply-to'
  _namespace = THR_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes.update({
      'href': 'href',
      'ref': 'ref',
      'source': 'source',
      'type': 'type',
  })

  def __init__(self, href=None, ref=None, source=None, type=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Stores the element's attribute values; every argument is optional."""
    self.text = text
    self.href = href
    self.ref = ref
    self.source = source
    self.type = type
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def InReplyToFromString(xml_string):
  """Parses an XML string into an InReplyTo instance."""
  return atom.CreateClassFromXMLString(InReplyTo, xml_string)
class CommentEntry(BloggerEntry):
  """Describes a comment entry in the feed of a blog post's comments."""

  _children = BloggerEntry._children.copy()
  _children['{%s}in-reply-to' % THR_NAMESPACE] = ('in_reply_to', InReplyTo)
  # Raw string so the \w escape reaches the regex engine verbatim;
  # captures the trailing token after the final '-' in the Atom id.
  comment_id_pattern = re.compile(r'.*-(\w*)$')

  def __init__(self, author=None, category=None, content=None,
      contributor=None, atom_id=None, link=None, published=None, rights=None,
      source=None, summary=None, control=None, title=None, updated=None,
      in_reply_to=None, extension_elements=None, extension_attributes=None,
      text=None):
    BloggerEntry.__init__(self, author=author, category=category,
        content=content, contributor=contributor, atom_id=atom_id, link=link,
        published=published, rights=rights, source=source, summary=summary,
        control=control, title=title, updated=updated,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
    self.in_reply_to = in_reply_to

  def GetCommentId(self):
    """Extracts the commentID string from the entry's Atom id.

    Returns:
      A string of digits which identify this comment, or None if the
      entry has no id text.
    """
    if self.id.text:
      return self.comment_id_pattern.match(self.id.text).group(1)
    return None
def CommentEntryFromString(xml_string):
  """Parses an Atom XML string into a CommentEntry instance."""
  return atom.CreateClassFromXMLString(CommentEntry, xml_string)
class CommentFeed(gdata.GDataFeed):
  """Describes a feed of a blog post's comments."""
  # Copy the parent's child-element map and override the Atom entry
  # handler so entries parse as CommentEntry instances.
  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CommentEntry])
def CommentFeedFromString(xml_string):
  """Parses an Atom XML string into a CommentFeed instance."""
  return atom.CreateClassFromXMLString(CommentFeed, xml_string)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to interact with the Blogger server."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import gdata.service
import gdata.blogger
class BloggerService(gdata.service.GDataService):
  """Client for the Blogger GData service: blogs, posts, and comments."""

  def __init__(self, email=None, password=None, source=None,
      server=None, api_key=None,
      additional_headers=None):
    """Creates a Blogger service client.

    NOTE(review): api_key is accepted but never stored or used here.
    """
    gdata.service.GDataService.__init__(self, email=email, password=password,
        service='blogger', source=source, server=server,
        additional_headers=additional_headers)
    self.accountType = 'GOOGLE'

  def GetBlogFeed(self, uri=None):
    """Retrieves the feed of blogs the current user may manage."""
    target = uri or 'http://www.blogger.com/feeds/default/blogs'
    return self.Get(target, converter=gdata.blogger.BlogFeedFromString)

  def GetBlogCommentFeed(self, blog_id=None, uri=None):
    """Retrieves the feed of comments for an entire blog."""
    target = uri
    if blog_id:
      target = 'http://www.blogger.com/feeds/%s/comments/default' % blog_id
    return self.Get(target, converter=gdata.blogger.CommentFeedFromString)

  def GetBlogPostFeed(self, blog_id=None, uri=None):
    """Retrieves the feed of posts for a blog."""
    target = uri
    if blog_id:
      target = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id
    return self.Get(target, converter=gdata.blogger.BlogPostFeedFromString)

  def GetPostCommentFeed(self, blog_id=None, post_id=None, uri=None):
    """Retrieves the feed of comments on one particular blog post."""
    target = uri
    if blog_id and post_id:
      target = ('http://www.blogger.com/feeds/%s/%s/comments/default'
          % (blog_id, post_id))
    return self.Get(target, converter=gdata.blogger.CommentFeedFromString)

  def AddPost(self, entry, blog_id=None, uri=None):
    """Publishes entry as a new post on the given blog."""
    target = uri
    if blog_id:
      target = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id
    return self.Post(entry, target,
        converter=gdata.blogger.BlogPostEntryFromString)

  def UpdatePost(self, entry, uri=None):
    """Saves changes to an existing post, using the entry's edit link
    when no uri is given."""
    target = uri or entry.GetEditLink().href
    return self.Put(entry, target,
        converter=gdata.blogger.BlogPostEntryFromString)

  def DeletePost(self, entry=None, uri=None):
    """Deletes a post identified by uri, or by the entry's edit link."""
    target = uri or entry.GetEditLink().href
    return self.Delete(target)

  def AddComment(self, comment_entry, blog_id=None, post_id=None, uri=None):
    """Adds a new comment to the specified blog post."""
    target = uri
    if blog_id and post_id:
      target = ('http://www.blogger.com/feeds/%s/%s/comments/default'
          % (blog_id, post_id))
    return self.Post(comment_entry, target,
        converter=gdata.blogger.CommentEntryFromString)

  def DeleteComment(self, entry=None, uri=None):
    """Deletes a comment identified by uri, or by the entry's edit link."""
    target = uri or entry.GetEditLink().href
    return self.Delete(target)
class BlogQuery(gdata.service.Query):
  """Query over the list of a user's Blogger blogs."""

  def __init__(self, feed=None, params=None, categories=None, blog_id=None):
    """Constructs a query object for the list of a user's Blogger blogs.

    Args:
      feed: str (optional) The beginning of the URL to be queried. When
          feed is not set and no blog_id is passed in, the default value
          'http://www.blogger.com/feeds/default/blogs' is used.
      params: dict (optional)
      categories: list (optional)
      blog_id: str (optional)
    """
    if not feed:
      if blog_id:
        feed = 'http://www.blogger.com/feeds/default/blogs/%s' % blog_id
      else:
        feed = 'http://www.blogger.com/feeds/default/blogs'
    gdata.service.Query.__init__(self, feed=feed, params=params,
        categories=categories)
class BlogPostQuery(gdata.service.Query):
  """Query over a blog's posts feed, or over a single post."""

  def __init__(self, feed=None, params=None, categories=None, blog_id=None,
      post_id=None):
    """Builds the feed URL from blog_id (and optionally post_id) when no
    explicit feed is given."""
    if not feed and blog_id:
      if post_id:
        feed = 'http://www.blogger.com/feeds/%s/posts/default/%s' % (
            blog_id, post_id)
      else:
        feed = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id
    gdata.service.Query.__init__(self, feed=feed, params=params,
        categories=categories)
class BlogCommentQuery(gdata.service.Query):
  """Query over comment feeds: a blog's comments, one post's comments,
  or a single comment."""

  def __init__(self, feed=None, params=None, categories=None, blog_id=None,
      post_id=None, comment_id=None):
    """Builds the feed URL from the given ids when no explicit feed is
    given; comment_id takes precedence over post_id, as in the original."""
    if not feed and blog_id:
      if comment_id:
        feed = 'http://www.blogger.com/feeds/%s/comments/default/%s' % (
            blog_id, comment_id)
      elif post_id:
        feed = 'http://www.blogger.com/feeds/%s/%s/comments/default' % (
            blog_id, post_id)
      else:
        feed = 'http://www.blogger.com/feeds/%s/comments/default' % blog_id
    gdata.service.Query.__init__(self, feed=feed, params=params,
        categories=categories)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import urllib
import gdata
import atom.service
import gdata.service
import gdata.apps
import atom
# Provisioning API version and the HTTP success status code.
API_VER = "2.0"
HTTP_OK = 200

# Error codes reported by the Provisioning API; surfaced through
# AppsForYourDomainException.error_code.
UNKOWN_ERROR = 1000  # sic: misspelled name kept for backward compatibility
UNKNOWN_ERROR = UNKOWN_ERROR  # correctly spelled alias
USER_DELETED_RECENTLY = 1100
USER_SUSPENDED = 1101
DOMAIN_USER_LIMIT_EXCEEDED = 1200
DOMAIN_ALIAS_LIMIT_EXCEEDED = 1201
DOMAIN_SUSPENDED = 1202
DOMAIN_FEATURE_UNAVAILABLE = 1203
ENTITY_EXISTS = 1300
ENTITY_DOES_NOT_EXIST = 1301
ENTITY_NAME_IS_RESERVED = 1302
ENTITY_NAME_NOT_VALID = 1303
INVALID_GIVEN_NAME = 1400
INVALID_FAMILY_NAME = 1401
INVALID_PASSWORD = 1402
INVALID_USERNAME = 1403
INVALID_HASH_FUNCTION_NAME = 1404
INVALID_HASH_DIGGEST_LENGTH = 1405  # sic: kept for backward compatibility
INVALID_HASH_DIGEST_LENGTH = INVALID_HASH_DIGGEST_LENGTH  # spelled alias
INVALID_EMAIL_ADDRESS = 1406
INVALID_QUERY_PARAMETER_VALUE = 1407
TOO_MANY_RECIPIENTS_ON_EMAIL_LIST = 1500

# Default quota limit, kept as a string as the original defined it.
# NOTE(review): units are not shown in this file -- confirm before use.
DEFAULT_QUOTA_LIMIT = '2048'
class Error(Exception):
  """Base exception for the gdata.apps.service module."""
  pass
class AppsForYourDomainException(Error):
  """Raised when the Provisioning API returns an error response.

  Parses the XML error body to expose error_code, reason, and
  invalidInput; when the body cannot be parsed, error_code falls back
  to UNKOWN_ERROR and the other attributes are left unset.
  """

  def __init__(self, response):
    Error.__init__(self, response)
    try:
      self.element_tree = ElementTree.fromstring(response['body'])
      self.error_code = int(self.element_tree[0].attrib['errorCode'])
      self.reason = self.element_tree[0].attrib['reason']
      self.invalidInput = self.element_tree[0].attrib['invalidInput']
    except Exception:
      # The original bare 'except:' also swallowed SystemExit and
      # KeyboardInterrupt; narrowing to Exception keeps the parse-failure
      # fallback without hiding interpreter-exit signals.
      self.error_code = UNKOWN_ERROR
class AppsService(gdata.service.GDataService):
"""Client for the Google Apps Provisioning service."""
def __init__(self, email=None, password=None, domain=None, source=None,
    server='www.google.com', additional_headers=None):
  """Creates a client for the Google Apps Provisioning service.

  Args:
    email: str (optional) Account email address.
    password: str (optional) Account password.
    domain: str (optional) The Google Apps domain to administer.
    source: str (optional) Client source identifier.
    server: str (optional) Server to contact; defaults to www.google.com.
    additional_headers: dict (optional) Extra HTTP headers.
  """
  gdata.service.GDataService.__init__(self, email=email, password=password,
      service='apps', source=source, server=server,
      additional_headers=additional_headers)
  # All Provisioning requests go over HTTPS on the standard TLS port.
  self.ssl = True
  self.port = 443
  self.domain = domain
def _baseURL(self):
  """Returns the feed URL prefix for this client's domain."""
  return '/a/feeds/%s' % self.domain
def GetGeneratorFromLinkFinder(self, link_finder, func):
"""returns a generator for pagination"""
yield link_finder
next = link_finder.GetNextLink()
while next is not None:
next_feed = func(str(self.Get(next.href)))
yield next_feed
next = next_feed.GetNextLink()
def AddAllElementsFromAllPages(self, link_finder, func):
"""retrieve all pages and add all elements"""
next = link_finder.GetNextLink()
while next is not None:
next_feed = func(str(self.Get(next.href)))
for a_entry in next_feed.entry:
link_finder.entry.append(a_entry)
next = next_feed.GetNextLink()
return link_finder
def RetrievePageOfEmailLists(self, start_email_list_name=None):
"""Retrieve one page of email list"""
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
if start_email_list_name is not None:
uri += "?startEmailListName=%s" % start_email_list_name
try:
return gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveAllEmailLists(self):
"""Retrieve all email list of a domain."""
ret = self.RetrievePageOfEmailLists()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListFeedFromString)
def RetrieveEmailList(self, list_name):
"""Retreive a single email list by the list's name."""
uri = "%s/emailList/%s/%s" % (
self._baseURL(), API_VER, list_name)
try:
return self.Get(uri, converter=gdata.apps.EmailListEntryFromString)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveEmailLists(self, recipient):
"""Retrieve All Email List Subscriptions for an Email Address."""
uri = "%s/emailList/%s?recipient=%s" % (
self._baseURL(), API_VER, recipient)
try:
ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListFeedFromString)
def RemoveRecipientFromEmailList(self, recipient, list_name):
"""Remove recipient from email list."""
uri = "%s/emailList/%s/%s/recipient/%s" % (
self._baseURL(), API_VER, list_name, recipient)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfRecipients(self, list_name, start_recipient=None):
"""Retrieve one page of recipient of an email list. """
uri = "%s/emailList/%s/%s/recipient" % (
self._baseURL(), API_VER, list_name)
if start_recipient is not None:
uri += "?startRecipient=%s" % start_recipient
try:
return gdata.apps.EmailListRecipientFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveAllRecipients(self, list_name):
"""Retrieve all recipient of an email list."""
ret = self.RetrievePageOfRecipients(list_name)
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListRecipientFeedFromString)
def AddRecipientToEmailList(self, recipient, list_name):
"""Add a recipient to a email list."""
uri = "%s/emailList/%s/%s/recipient" % (
self._baseURL(), API_VER, list_name)
recipient_entry = gdata.apps.EmailListRecipientEntry()
recipient_entry.who = gdata.apps.Who(email=recipient)
try:
return gdata.apps.EmailListRecipientEntryFromString(
str(self.Post(recipient_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteEmailList(self, list_name):
"""Delete a email list"""
uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateEmailList(self, list_name):
"""Create a email list. """
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
email_list_entry = gdata.apps.EmailListEntry()
email_list_entry.email_list = gdata.apps.EmailList(name=list_name)
try:
return gdata.apps.EmailListEntryFromString(
str(self.Post(email_list_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteNickname(self, nickname):
"""Delete a nickname"""
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfNicknames(self, start_nickname=None):
"""Retrieve one page of nicknames in the domain"""
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
if start_nickname is not None:
uri += "?startNickname=%s" % start_nickname
try:
return gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveAllNicknames(self):
"""Retrieve all nicknames in the domain"""
ret = self.RetrievePageOfNicknames()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.NicknameFeedFromString)
def RetrieveNicknames(self, user_name):
"""Retrieve nicknames of the user"""
uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
try:
ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.NicknameFeedFromString)
def RetrieveNickname(self, nickname):
"""Retrieve a nickname.
Args:
nickname: string The nickname to retrieve
Returns:
gdata.apps.NicknameEntry
"""
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
try:
return gdata.apps.NicknameEntryFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateNickname(self, user_name, nickname):
"""Create a nickname"""
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
nickname_entry = gdata.apps.NicknameEntry()
nickname_entry.login = gdata.apps.Login(user_name=user_name)
nickname_entry.nickname = gdata.apps.Nickname(name=nickname)
try:
return gdata.apps.NicknameEntryFromString(
str(self.Post(nickname_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteUser(self, user_name):
"""Delete a user account"""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def UpdateUser(self, user_name, user_entry):
"""Update a user account."""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateUser(self, user_name, family_name, given_name, password,
suspended='false', quota_limit=None,
password_hash_function=None):
"""Create a user account. """
uri = "%s/user/%s" % (self._baseURL(), API_VER)
user_entry = gdata.apps.UserEntry()
user_entry.login = gdata.apps.Login(
user_name=user_name, password=password, suspended=suspended,
hash_function_name=password_hash_function)
user_entry.name = gdata.apps.Name(family_name=family_name,
given_name=given_name)
if quota_limit is not None:
user_entry.quota = gdata.apps.Quota(limit=str(quota_limit))
try:
return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def SuspendUser(self, user_name):
user_entry = self.RetrieveUser(user_name)
if user_entry.login.suspended != 'true':
user_entry.login.suspended = 'true'
user_entry = self.UpdateUser(user_name, user_entry)
return user_entry
def RestoreUser(self, user_name):
user_entry = self.RetrieveUser(user_name)
if user_entry.login.suspended != 'false':
user_entry.login.suspended = 'false'
user_entry = self.UpdateUser(user_name, user_entry)
return user_entry
def RetrieveUser(self, user_name):
"""Retrieve an user account.
Args:
user_name: string The user name to retrieve
Returns:
gdata.apps.UserEntry
"""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return gdata.apps.UserEntryFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfUsers(self, start_username=None):
"""Retrieve one page of users in this domain."""
uri = "%s/user/%s" % (self._baseURL(), API_VER)
if start_username is not None:
uri += "?startUsername=%s" % start_username
try:
return gdata.apps.UserFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllUsers(self):
"""Retrieve a generator for all users in this domain."""
first_page = self.RetrievePageOfUsers()
return self.GetGeneratorFromLinkFinder(first_page,
gdata.apps.UserFeedFromString)
def RetrieveAllUsers(self):
"""Retrieve all users in this domain. OBSOLETE"""
ret = self.RetrievePageOfUsers()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.UserFeedFromString)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains objects used with Google Apps."""
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
import atom
import gdata
# XML namespace (and element-name template) used by Google Apps entities.
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
class EmailList(atom.AtomBase):
  """The Google Apps emailList element.

  Carries the list's name in the 'name' XML attribute.
  """
  _tag = 'emailList'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'

  def __init__(self, name=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs an EmailList element with the given list name."""
    self.text = text
    self.name = name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def EmailListFromString(xml_string):
  """Converts an XML string into an EmailList instance."""
  parsed = atom.CreateClassFromXMLString(EmailList, xml_string)
  return parsed
class Who(atom.AtomBase):
  """The gd:who element, identifying a person by e-mail address."""
  _tag = 'who'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['rel'] = 'rel'
  _attributes['email'] = 'email'

  def __init__(self, rel=None, email=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs a Who element with the given rel and email attributes."""
    self.text = text
    self.email = email
    self.rel = rel
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def WhoFromString(xml_string):
  """Converts an XML string into a Who instance."""
  parsed = atom.CreateClassFromXMLString(Who, xml_string)
  return parsed
class Login(atom.AtomBase):
  """The Google Apps login element.

  Holds account credentials and status flags.  Boolean-like fields
  (suspended, admin, etc.) are carried as strings, mirroring the XML.
  """
  _tag = 'login'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['userName'] = 'user_name'
  _attributes['password'] = 'password'
  _attributes['suspended'] = 'suspended'
  _attributes['admin'] = 'admin'
  _attributes['changePasswordAtNextLogin'] = 'change_password'
  _attributes['agreedToTerms'] = 'agreed_to_terms'
  _attributes['ipWhitelisted'] = 'ip_whitelisted'
  _attributes['hashFunctionName'] = 'hash_function_name'

  def __init__(self, user_name=None, password=None, suspended=None,
               ip_whitelisted=None, hash_function_name=None,
               admin=None, change_password=None, agreed_to_terms=None,
               extension_elements=None, extension_attributes=None,
               text=None):
    """Constructs a Login element from the given account fields."""
    self.text = text
    self.hash_function_name = hash_function_name
    self.ip_whitelisted = ip_whitelisted
    self.agreed_to_terms = agreed_to_terms
    self.change_password = change_password
    self.admin = admin
    self.suspended = suspended
    self.password = password
    self.user_name = user_name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def LoginFromString(xml_string):
  """Converts an XML string into a Login instance."""
  parsed = atom.CreateClassFromXMLString(Login, xml_string)
  return parsed
class Quota(atom.AtomBase):
  """The Google Apps quota element.

  Carries the storage quota limit in the 'limit' XML attribute.
  """
  _tag = 'quota'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['limit'] = 'limit'

  def __init__(self, limit=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs a Quota element with the given limit value."""
    self.text = text
    self.limit = limit
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def QuotaFromString(xml_string):
  """Converts an XML string into a Quota instance."""
  parsed = atom.CreateClassFromXMLString(Quota, xml_string)
  return parsed
class Name(atom.AtomBase):
  """The Google Apps name element, holding family and given names."""
  _tag = 'name'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['familyName'] = 'family_name'
  _attributes['givenName'] = 'given_name'

  def __init__(self, family_name=None, given_name=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs a Name element from the given name parts."""
    self.text = text
    self.given_name = given_name
    self.family_name = family_name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def NameFromString(xml_string):
  """Converts an XML string into a Name instance."""
  parsed = atom.CreateClassFromXMLString(Name, xml_string)
  return parsed
class Nickname(atom.AtomBase):
  """The Google Apps nickname element, carrying the alias name."""
  _tag = 'nickname'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'

  def __init__(self, name=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs a Nickname element with the given alias."""
    self.text = text
    self.name = name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def NicknameFromString(xml_string):
  """Converts an XML string into a Nickname instance."""
  parsed = atom.CreateClassFromXMLString(Nickname, xml_string)
  return parsed
class NicknameEntry(gdata.GDataEntry):
  """An Atom entry flavored for Google Apps nicknames."""
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}login' % APPS_NAMESPACE] = ('login', Login)
  _children['{%s}nickname' % APPS_NAMESPACE] = ('nickname', Nickname)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               login=None, nickname=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds a NicknameEntry; Atom arguments are forwarded to GDataEntry."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated)
    self.nickname = nickname
    self.login = login
    self.extended_property = extended_property or []
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def NicknameEntryFromString(xml_string):
  """Converts an XML string into a NicknameEntry instance."""
  parsed = atom.CreateClassFromXMLString(NicknameEntry, xml_string)
  return parsed
class NicknameFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of NicknameEntry objects."""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [NicknameEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Builds a NicknameFeed; every argument is forwarded to GDataFeed."""
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link, logo=logo,
        rights=rights, subtitle=subtitle, title=title, updated=updated,
        entry=entry, total_results=total_results, start_index=start_index,
        items_per_page=items_per_page, extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def NicknameFeedFromString(xml_string):
  """Converts an XML string into a NicknameFeed instance."""
  parsed = atom.CreateClassFromXMLString(NicknameFeed, xml_string)
  return parsed
class UserEntry(gdata.GDataEntry):
  """An Atom entry flavored for Google Apps user accounts."""
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}login' % APPS_NAMESPACE] = ('login', Login)
  _children['{%s}name' % APPS_NAMESPACE] = ('name', Name)
  _children['{%s}quota' % APPS_NAMESPACE] = ('quota', Quota)
  # This child may already be defined in GDataEntry, confirm before removing.
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       [gdata.FeedLink])
  _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               login=None, name=None, quota=None, who=None, feed_link=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds a UserEntry; Atom arguments are forwarded to GDataEntry."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated)
    self.who = who
    self.quota = quota
    self.name = name
    self.login = login
    self.feed_link = feed_link or []
    self.extended_property = extended_property or []
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def UserEntryFromString(xml_string):
  """Converts an XML string into a UserEntry instance."""
  parsed = atom.CreateClassFromXMLString(UserEntry, xml_string)
  return parsed
class UserFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of UserEntry objects."""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [UserEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Builds a UserFeed; every argument is forwarded to GDataFeed."""
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link, logo=logo,
        rights=rights, subtitle=subtitle, title=title, updated=updated,
        entry=entry, total_results=total_results, start_index=start_index,
        items_per_page=items_per_page, extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def UserFeedFromString(xml_string):
  """Converts an XML string into a UserFeed instance."""
  parsed = atom.CreateClassFromXMLString(UserFeed, xml_string)
  return parsed
class EmailListEntry(gdata.GDataEntry):
  """An Atom entry flavored for Google Apps email lists."""
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}emailList' % APPS_NAMESPACE] = ('email_list', EmailList)
  # Might be able to remove this _children entry.
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       [gdata.FeedLink])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               email_list=None, feed_link=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds an EmailListEntry; Atom arguments are forwarded to GDataEntry."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated)
    self.email_list = email_list
    self.feed_link = feed_link or []
    self.extended_property = extended_property or []
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def EmailListEntryFromString(xml_string):
  """Converts an XML string into an EmailListEntry instance."""
  parsed = atom.CreateClassFromXMLString(EmailListEntry, xml_string)
  return parsed
class EmailListFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of EmailListEntry objects."""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [EmailListEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Builds an EmailListFeed; every argument is forwarded to GDataFeed."""
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link, logo=logo,
        rights=rights, subtitle=subtitle, title=title, updated=updated,
        entry=entry, total_results=total_results, start_index=start_index,
        items_per_page=items_per_page, extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def EmailListFeedFromString(xml_string):
  """Converts an XML string into an EmailListFeed instance."""
  parsed = atom.CreateClassFromXMLString(EmailListFeed, xml_string)
  return parsed
class EmailListRecipientEntry(gdata.GDataEntry):
  """An Atom entry flavored for email-list recipients."""
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               who=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds an EmailListRecipientEntry; Atom arguments go to GDataEntry."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated)
    self.who = who
    self.extended_property = extended_property or []
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def EmailListRecipientEntryFromString(xml_string):
  """Converts an XML string into an EmailListRecipientEntry instance."""
  parsed = atom.CreateClassFromXMLString(EmailListRecipientEntry, xml_string)
  return parsed
class EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of EmailListRecipientEntry objects."""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [EmailListRecipientEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Builds an EmailListRecipientFeed; arguments are forwarded verbatim."""
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link, logo=logo,
        rights=rights, subtitle=subtitle, title=title, updated=updated,
        entry=entry, total_results=total_results, start_index=start_index,
        items_per_page=items_per_page, extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def EmailListRecipientFeedFromString(xml_string):
  """Converts an XML string into an EmailListRecipientFeed instance."""
  parsed = atom.CreateClassFromXMLString(EmailListRecipientFeed, xml_string)
  return parsed
| Python |
#!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains objects used with Google Apps."""
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
import atom
import gdata
# XML namespace (and element-name template) used by Google Apps entities.
APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
class EmailList(atom.AtomBase):
  """The Google Apps emailList element.

  Carries the list's name in the 'name' XML attribute.
  """
  _tag = 'emailList'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'

  def __init__(self, name=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs an EmailList element with the given list name."""
    self.text = text
    self.name = name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def EmailListFromString(xml_string):
  """Converts an XML string into an EmailList instance."""
  parsed = atom.CreateClassFromXMLString(EmailList, xml_string)
  return parsed
class Who(atom.AtomBase):
  """The gd:who element, identifying a person by e-mail address."""
  _tag = 'who'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['rel'] = 'rel'
  _attributes['email'] = 'email'

  def __init__(self, rel=None, email=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs a Who element with the given rel and email attributes."""
    self.text = text
    self.email = email
    self.rel = rel
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def WhoFromString(xml_string):
  """Converts an XML string into a Who instance."""
  parsed = atom.CreateClassFromXMLString(Who, xml_string)
  return parsed
class Login(atom.AtomBase):
  """The Google Apps login element.

  Holds account credentials and status flags.  Boolean-like fields
  (suspended, admin, etc.) are carried as strings, mirroring the XML.
  """
  _tag = 'login'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['userName'] = 'user_name'
  _attributes['password'] = 'password'
  _attributes['suspended'] = 'suspended'
  _attributes['admin'] = 'admin'
  _attributes['changePasswordAtNextLogin'] = 'change_password'
  _attributes['agreedToTerms'] = 'agreed_to_terms'
  _attributes['ipWhitelisted'] = 'ip_whitelisted'
  _attributes['hashFunctionName'] = 'hash_function_name'

  def __init__(self, user_name=None, password=None, suspended=None,
               ip_whitelisted=None, hash_function_name=None,
               admin=None, change_password=None, agreed_to_terms=None,
               extension_elements=None, extension_attributes=None,
               text=None):
    """Constructs a Login element from the given account fields."""
    self.text = text
    self.hash_function_name = hash_function_name
    self.ip_whitelisted = ip_whitelisted
    self.agreed_to_terms = agreed_to_terms
    self.change_password = change_password
    self.admin = admin
    self.suspended = suspended
    self.password = password
    self.user_name = user_name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def LoginFromString(xml_string):
  """Converts an XML string into a Login instance."""
  parsed = atom.CreateClassFromXMLString(Login, xml_string)
  return parsed
class Quota(atom.AtomBase):
  """The Google Apps quota element.

  Carries the storage quota limit in the 'limit' XML attribute.
  """
  _tag = 'quota'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['limit'] = 'limit'

  def __init__(self, limit=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs a Quota element with the given limit value."""
    self.text = text
    self.limit = limit
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def QuotaFromString(xml_string):
  """Converts an XML string into a Quota instance."""
  parsed = atom.CreateClassFromXMLString(Quota, xml_string)
  return parsed
class Name(atom.AtomBase):
  """The Google Apps name element, holding family and given names."""
  _tag = 'name'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['familyName'] = 'family_name'
  _attributes['givenName'] = 'given_name'

  def __init__(self, family_name=None, given_name=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs a Name element from the given name parts."""
    self.text = text
    self.given_name = given_name
    self.family_name = family_name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def NameFromString(xml_string):
  """Converts an XML string into a Name instance."""
  parsed = atom.CreateClassFromXMLString(Name, xml_string)
  return parsed
class Nickname(atom.AtomBase):
  """The Google Apps nickname element, carrying the alias name."""
  _tag = 'nickname'
  _namespace = APPS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'

  def __init__(self, name=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs a Nickname element with the given alias."""
    self.text = text
    self.name = name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def NicknameFromString(xml_string):
  """Converts an XML string into a Nickname instance."""
  parsed = atom.CreateClassFromXMLString(Nickname, xml_string)
  return parsed
class NicknameEntry(gdata.GDataEntry):
  """An Atom entry flavored for Google Apps nicknames."""
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}login' % APPS_NAMESPACE] = ('login', Login)
  _children['{%s}nickname' % APPS_NAMESPACE] = ('nickname', Nickname)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               login=None, nickname=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds a NicknameEntry; Atom arguments are forwarded to GDataEntry."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated)
    self.nickname = nickname
    self.login = login
    self.extended_property = extended_property or []
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def NicknameEntryFromString(xml_string):
  """Converts an XML string into a NicknameEntry instance."""
  parsed = atom.CreateClassFromXMLString(NicknameEntry, xml_string)
  return parsed
class NicknameFeed(gdata.GDataFeed, gdata.LinkFinder):
  """An Atom feed of NicknameEntry objects."""
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [NicknameEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Builds a NicknameFeed; every argument is forwarded to GDataFeed."""
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link, logo=logo,
        rights=rights, subtitle=subtitle, title=title, updated=updated,
        entry=entry, total_results=total_results, start_index=start_index,
        items_per_page=items_per_page, extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def NicknameFeedFromString(xml_string):
  """Converts an XML string into a NicknameFeed instance."""
  parsed = atom.CreateClassFromXMLString(NicknameFeed, xml_string)
  return parsed
class UserEntry(gdata.GDataEntry):
  """An Atom entry flavored for Google Apps user accounts."""
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}login' % APPS_NAMESPACE] = ('login', Login)
  _children['{%s}name' % APPS_NAMESPACE] = ('name', Name)
  _children['{%s}quota' % APPS_NAMESPACE] = ('quota', Quota)
  # This child may already be defined in GDataEntry, confirm before removing.
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       [gdata.FeedLink])
  _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               login=None, name=None, quota=None, who=None, feed_link=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Builds a UserEntry; Atom arguments are forwarded to GDataEntry."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated)
    self.who = who
    self.quota = quota
    self.name = name
    self.login = login
    self.feed_link = feed_link or []
    self.extended_property = extended_property or []
    self.text = text
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
def UserEntryFromString(xml_string):
return atom.CreateClassFromXMLString(UserEntry, xml_string)
class UserFeed(gdata.GDataFeed, gdata.LinkFinder):
  """A Google Apps User feed flavor of an Atom Feed"""

  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [UserEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs a UserFeed; every field is forwarded to GDataFeed."""
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link,
        logo=logo, rights=rights, subtitle=subtitle, title=title,
        updated=updated, entry=entry, total_results=total_results,
        start_index=start_index, items_per_page=items_per_page,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def UserFeedFromString(xml_string):
  """Parses a UserFeed from its XML string representation."""
  return atom.CreateClassFromXMLString(UserFeed, xml_string)
class EmailListEntry(gdata.GDataEntry):
  """A Google Apps EmailList flavor of an Atom Entry"""

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}emailList' % APPS_NAMESPACE] = ('email_list', EmailList)
  # Might be able to remove this _children entry.
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       [gdata.FeedLink])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               email_list=None, feed_link=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs an EmailListEntry, forwarding Atom fields to GDataEntry."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated)
    # Apps-specific child elements.
    self.email_list = email_list
    self.feed_link = feed_link or []
    self.extended_property = extended_property or []
    # Generic XML extension holders.
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def EmailListEntryFromString(xml_string):
  """Parses an EmailListEntry from its XML string representation."""
  return atom.CreateClassFromXMLString(EmailListEntry, xml_string)
class EmailListFeed(gdata.GDataFeed, gdata.LinkFinder):
  """A Google Apps EmailList feed flavor of an Atom Feed"""

  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [EmailListEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs an EmailListFeed; every field is forwarded to GDataFeed."""
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link,
        logo=logo, rights=rights, subtitle=subtitle, title=title,
        updated=updated, entry=entry, total_results=total_results,
        start_index=start_index, items_per_page=items_per_page,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def EmailListFeedFromString(xml_string):
  """Parses an EmailListFeed from its XML string representation."""
  return atom.CreateClassFromXMLString(EmailListFeed, xml_string)
class EmailListRecipientEntry(gdata.GDataEntry):
  """A Google Apps EmailListRecipient flavor of an Atom Entry"""

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               who=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    """Constructs a recipient entry; the recipient lives in the who child."""
    gdata.GDataEntry.__init__(
        self, author=author, category=category, content=content,
        atom_id=atom_id, link=link, published=published, title=title,
        updated=updated)
    self.who = who
    self.extended_property = extended_property or []
    # Generic XML extension holders.
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def EmailListRecipientEntryFromString(xml_string):
  """Parses an EmailListRecipientEntry from its XML string representation."""
  return atom.CreateClassFromXMLString(EmailListRecipientEntry, xml_string)
class EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder):
  """A Google Apps EmailListRecipient feed flavor of an Atom Feed"""

  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [EmailListRecipientEntry])

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    """Constructs the recipient feed; every field goes to GDataFeed."""
    gdata.GDataFeed.__init__(
        self, author=author, category=category, contributor=contributor,
        generator=generator, icon=icon, atom_id=atom_id, link=link,
        logo=logo, rights=rights, subtitle=subtitle, title=title,
        updated=updated, entry=entry, total_results=total_results,
        start_index=start_index, items_per_page=items_per_page,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def EmailListRecipientFeedFromString(xml_string):
  """Parses an EmailListRecipientFeed from its XML string representation."""
  return atom.CreateClassFromXMLString(EmailListRecipientFeed, xml_string)
# ===== module boundary: gdata/apps/service.py follows =====
#!/usr/bin/python
#
# Copyright (C) 2007 SIOS Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import urllib
import gdata
import atom.service
import gdata.service
import gdata.apps
import atom
# Provisioning API protocol version and transport constants.
API_VER = "2.0"
HTTP_OK = 200

# Error codes returned in an AppsForYourDomainErrors response body.
# Two names below are historical misspellings; they are kept for backward
# compatibility and correctly spelled aliases are provided alongside.
UNKOWN_ERROR = 1000  # misspelled, kept for existing callers
UNKNOWN_ERROR = UNKOWN_ERROR  # correctly spelled alias
USER_DELETED_RECENTLY = 1100
USER_SUSPENDED = 1101
DOMAIN_USER_LIMIT_EXCEEDED = 1200
DOMAIN_ALIAS_LIMIT_EXCEEDED = 1201
DOMAIN_SUSPENDED = 1202
DOMAIN_FEATURE_UNAVAILABLE = 1203
ENTITY_EXISTS = 1300
ENTITY_DOES_NOT_EXIST = 1301
ENTITY_NAME_IS_RESERVED = 1302
ENTITY_NAME_NOT_VALID = 1303
INVALID_GIVEN_NAME = 1400
INVALID_FAMILY_NAME = 1401
INVALID_PASSWORD = 1402
INVALID_USERNAME = 1403
INVALID_HASH_FUNCTION_NAME = 1404
INVALID_HASH_DIGGEST_LENGTH = 1405  # misspelled, kept for existing callers
INVALID_HASH_DIGEST_LENGTH = INVALID_HASH_DIGGEST_LENGTH  # correct alias
INVALID_EMAIL_ADDRESS = 1406
INVALID_QUERY_PARAMETER_VALUE = 1407
TOO_MANY_RECIPIENTS_ON_EMAIL_LIST = 1500

# Default mailbox quota (in megabytes) used by the service.
DEFAULT_QUOTA_LIMIT = '2048'
class Error(Exception):
  """Base exception for errors raised by the gdata.apps.service module."""
class AppsForYourDomainException(Error):
  """Raised when the Provisioning API returns an error response.

  Parses the AppsForYourDomainErrors XML in response['body'] into the
  error_code, reason and invalidInput attributes.  If the body cannot be
  parsed, error_code falls back to UNKOWN_ERROR and the other attributes
  are left as None (the original code left them unset, so readers got an
  AttributeError on the failure path).
  """

  def __init__(self, response):
    Error.__init__(self, response)
    # Defaults so the attributes always exist, even when parsing fails.
    self.reason = None
    self.invalidInput = None
    try:
      self.element_tree = ElementTree.fromstring(response['body'])
      self.error_code = int(self.element_tree[0].attrib['errorCode'])
      self.reason = self.element_tree[0].attrib['reason']
      self.invalidInput = self.element_tree[0].attrib['invalidInput']
    except Exception:
      # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
      # not swallowed while constructing this exception object.
      self.error_code = UNKOWN_ERROR
class AppsService(gdata.service.GDataService):
"""Client for the Google Apps Provisioning service."""
def __init__(self, email=None, password=None, domain=None, source=None,
server='www.google.com', additional_headers=None):
gdata.service.GDataService.__init__(self, email=email, password=password,
service='apps', source=source,
server=server,
additional_headers=additional_headers)
self.ssl = True
self.port = 443
self.domain = domain
def _baseURL(self):
return "/a/feeds/%s" % self.domain
def GetGeneratorFromLinkFinder(self, link_finder, func):
"""returns a generator for pagination"""
yield link_finder
next = link_finder.GetNextLink()
while next is not None:
next_feed = func(str(self.Get(next.href)))
yield next_feed
next = next_feed.GetNextLink()
def AddAllElementsFromAllPages(self, link_finder, func):
"""retrieve all pages and add all elements"""
next = link_finder.GetNextLink()
while next is not None:
next_feed = func(str(self.Get(next.href)))
for a_entry in next_feed.entry:
link_finder.entry.append(a_entry)
next = next_feed.GetNextLink()
return link_finder
def RetrievePageOfEmailLists(self, start_email_list_name=None):
"""Retrieve one page of email list"""
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
if start_email_list_name is not None:
uri += "?startEmailListName=%s" % start_email_list_name
try:
return gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveAllEmailLists(self):
"""Retrieve all email list of a domain."""
ret = self.RetrievePageOfEmailLists()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListFeedFromString)
def RetrieveEmailList(self, list_name):
"""Retreive a single email list by the list's name."""
uri = "%s/emailList/%s/%s" % (
self._baseURL(), API_VER, list_name)
try:
return self.Get(uri, converter=gdata.apps.EmailListEntryFromString)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveEmailLists(self, recipient):
"""Retrieve All Email List Subscriptions for an Email Address."""
uri = "%s/emailList/%s?recipient=%s" % (
self._baseURL(), API_VER, recipient)
try:
ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListFeedFromString)
def RemoveRecipientFromEmailList(self, recipient, list_name):
"""Remove recipient from email list."""
uri = "%s/emailList/%s/%s/recipient/%s" % (
self._baseURL(), API_VER, list_name, recipient)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfRecipients(self, list_name, start_recipient=None):
"""Retrieve one page of recipient of an email list. """
uri = "%s/emailList/%s/%s/recipient" % (
self._baseURL(), API_VER, list_name)
if start_recipient is not None:
uri += "?startRecipient=%s" % start_recipient
try:
return gdata.apps.EmailListRecipientFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveAllRecipients(self, list_name):
"""Retrieve all recipient of an email list."""
ret = self.RetrievePageOfRecipients(list_name)
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.EmailListRecipientFeedFromString)
def AddRecipientToEmailList(self, recipient, list_name):
"""Add a recipient to a email list."""
uri = "%s/emailList/%s/%s/recipient" % (
self._baseURL(), API_VER, list_name)
recipient_entry = gdata.apps.EmailListRecipientEntry()
recipient_entry.who = gdata.apps.Who(email=recipient)
try:
return gdata.apps.EmailListRecipientEntryFromString(
str(self.Post(recipient_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteEmailList(self, list_name):
"""Delete a email list"""
uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateEmailList(self, list_name):
"""Create a email list. """
uri = "%s/emailList/%s" % (self._baseURL(), API_VER)
email_list_entry = gdata.apps.EmailListEntry()
email_list_entry.email_list = gdata.apps.EmailList(name=list_name)
try:
return gdata.apps.EmailListEntryFromString(
str(self.Post(email_list_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteNickname(self, nickname):
"""Delete a nickname"""
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
try:
self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfNicknames(self, start_nickname=None):
"""Retrieve one page of nicknames in the domain"""
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
if start_nickname is not None:
uri += "?startNickname=%s" % start_nickname
try:
return gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrieveAllNicknames(self):
"""Retrieve all nicknames in the domain"""
ret = self.RetrievePageOfNicknames()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.NicknameFeedFromString)
def RetrieveNicknames(self, user_name):
"""Retrieve nicknames of the user"""
uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name)
try:
ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.NicknameFeedFromString)
def RetrieveNickname(self, nickname):
"""Retrieve a nickname.
Args:
nickname: string The nickname to retrieve
Returns:
gdata.apps.NicknameEntry
"""
uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname)
try:
return gdata.apps.NicknameEntryFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateNickname(self, user_name, nickname):
"""Create a nickname"""
uri = "%s/nickname/%s" % (self._baseURL(), API_VER)
nickname_entry = gdata.apps.NicknameEntry()
nickname_entry.login = gdata.apps.Login(user_name=user_name)
nickname_entry.nickname = gdata.apps.Nickname(name=nickname)
try:
return gdata.apps.NicknameEntryFromString(
str(self.Post(nickname_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def DeleteUser(self, user_name):
"""Delete a user account"""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return self.Delete(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def UpdateUser(self, user_name, user_entry):
"""Update a user account."""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def CreateUser(self, user_name, family_name, given_name, password,
suspended='false', quota_limit=None,
password_hash_function=None):
"""Create a user account. """
uri = "%s/user/%s" % (self._baseURL(), API_VER)
user_entry = gdata.apps.UserEntry()
user_entry.login = gdata.apps.Login(
user_name=user_name, password=password, suspended=suspended,
hash_function_name=password_hash_function)
user_entry.name = gdata.apps.Name(family_name=family_name,
given_name=given_name)
if quota_limit is not None:
user_entry.quota = gdata.apps.Quota(limit=str(quota_limit))
try:
return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def SuspendUser(self, user_name):
user_entry = self.RetrieveUser(user_name)
if user_entry.login.suspended != 'true':
user_entry.login.suspended = 'true'
user_entry = self.UpdateUser(user_name, user_entry)
return user_entry
def RestoreUser(self, user_name):
user_entry = self.RetrieveUser(user_name)
if user_entry.login.suspended != 'false':
user_entry.login.suspended = 'false'
user_entry = self.UpdateUser(user_name, user_entry)
return user_entry
def RetrieveUser(self, user_name):
"""Retrieve an user account.
Args:
user_name: string The user name to retrieve
Returns:
gdata.apps.UserEntry
"""
uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name)
try:
return gdata.apps.UserEntryFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def RetrievePageOfUsers(self, start_username=None):
"""Retrieve one page of users in this domain."""
uri = "%s/user/%s" % (self._baseURL(), API_VER)
if start_username is not None:
uri += "?startUsername=%s" % start_username
try:
return gdata.apps.UserFeedFromString(str(self.Get(uri)))
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def GetGeneratorForAllUsers(self):
"""Retrieve a generator for all users in this domain."""
first_page = self.RetrievePageOfUsers()
return self.GetGeneratorFromLinkFinder(first_page,
gdata.apps.UserFeedFromString)
def RetrieveAllUsers(self):
"""Retrieve all users in this domain. OBSOLETE"""
ret = self.RetrievePageOfUsers()
# pagination
return self.AddAllElementsFromAllPages(
ret, gdata.apps.UserFeedFromString)
# ===== module boundary: gdata/urlfetch.py follows =====
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides HTTP functions for gdata.service to use on Google App Engine
AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
urlfetch API. Set the http_client member of a GDataService object to an
instance of an AppEngineHttpClient to allow the gdata library to run on
Google App Engine.
run_on_appengine: Function which will modify an existing GDataService object
to allow it to run on App Engine. It works by creating a new instance of
the AppEngineHttpClient and replacing the GDataService object's
http_client.
HttpRequest: Function that wraps google.appengine.api.urlfetch.Fetch in a
common interface which is used by gdata.service.GDataService. In other
words, this module can be used as the gdata service request handler so
that all HTTP requests will be performed by the hosting Google App Engine
server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
import atom.service
import atom.http_interface
from google.appengine.api import urlfetch
def run_on_appengine(gdata_service):
  """Configures gdata_service to issue its HTTP requests via urlfetch.

  Swaps the service's http_client for an AppEngineHttpClient so that every
  request made through the gdata library uses App Engine's urlfetch API.

  Args:
    gdata_service: An instance of AtomService, GDataService, or any of
        their subclasses which has an http_client member.
  """
  gdata_service.http_client = AppEngineHttpClient()
class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
  """HTTP client which performs requests using App Engine's urlfetch API."""

  def __init__(self, headers=None):
    # debug is part of the GenericHttpClient interface; urlfetch has no
    # debug mode, so the flag is stored but otherwise unused here.
    self.debug = False
    # Default headers sent with every request (merged with per-call headers).
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform and HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.

    Returns:
      An HttpResponse wrapping the urlfetch result.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)
    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [self._convert_data_part(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = self._convert_data_part(data)
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = len(data_str)
    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'
    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers))

  def _convert_data_part(self, data):
    """Returns data as a string, reading file-like objects to exhaustion.

    BUG FIX: the original code called the module-level __ConvertDataPart
    from inside this class.  Identifiers with two leading underscores are
    name-mangled inside a class body (to _AppEngineHttpClient__ConvertDataPart),
    so those calls raised NameError whenever a request carried a payload.
    The conversion logic now lives on the class under a single underscore.
    """
    if not data or isinstance(data, str):
      return data
    elif hasattr(data, 'read'):
      # data is a file like object, so read it completely.
      return data.read()
    # The data object was not a file.
    # Try to convert to a string and send the data.
    return str(data)
def HttpRequest(service, operation, data, uri, extra_headers=None,
    url_params=None, escape_params=True, content_type='application/atom+xml'):
  """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.

  This function is deprecated, use AppEngineHttpClient.request instead.

  To use this module with gdata.service, you can set this module to be the
  http_request_handler so that HTTP requests use Google App Engine's urlfetch.
    import gdata.service
    import gdata.urlfetch
    gdata.service.http_request_handler = gdata.urlfetch

  Args:
    service: atom.AtomService object which contains some of the parameters
        needed to make the request. The following members are used to
        construct the HTTP call: server (str), additional_headers (dict),
        port (int), and ssl (bool).
    operation: str The HTTP operation to be performed. This is usually one of
        'GET', 'POST', 'PUT', or 'DELETE'
    data: filestream, list of parts, or other object which can be
        converted to a string.
        Should be set to None when performing a GET or PUT.
        If data is a file-like object which can be read, this method will read
        a chunk of 100K bytes at a time and send them.
        If the data is a list of parts to be sent, each part will be evaluated
        and sent.
    uri: The beginning of the URL to which the request should be sent.
        Examples: '/', '/base/feeds/snippets',
        '/m8/feeds/contacts/default/base'
    extra_headers: dict of strings. HTTP headers which should be sent
        in the request. These headers are in addition to those stored in
        service.additional_headers.
    url_params: dict of strings. Key value pairs to be added to the URL as
        URL parameters. For example {'foo':'bar', 'test':'param'} will
        become ?foo=bar&test=param.
    escape_params: bool default True. If true, the keys and values in
        url_params will be URL escaped when the form is constructed
        (Special characters converted to %XX form.)
    content_type: str The MIME type for the data being sent. Defaults to
        'application/atom+xml', this is only used if data is set.
  """
  full_uri = atom.service.BuildUri(uri, url_params, escape_params)
  (server, port, ssl, partial_uri) = atom.service.ProcessUrl(service, full_uri)
  # Assemble the absolute URL for the request.
  if ssl:
    full_url = 'https://%s%s' % (server, partial_uri)
  else:
    full_url = 'http://%s%s' % (server, partial_uri)
  # Flatten the payload into a single string (data may be None, a string,
  # a file-like object, or a list of such parts).
  payload = data
  if data:
    if isinstance(data, list):
      payload = ''.join([__ConvertDataPart(part) for part in data])
    else:
      payload = __ConvertDataPart(data)
  # Merge the service-level and per-call headers.
  headers = {}
  if isinstance(service.additional_headers, dict):
    headers = service.additional_headers.copy()
  if isinstance(extra_headers, dict):
    headers.update(extra_headers)
  # Add the content type header (we don't need to calculate content length,
  # since urlfetch.Fetch will calculate for us).
  if content_type:
    headers['Content-Type'] = content_type
  # Map the HTTP verb onto the urlfetch method constant (None if unknown).
  method = {'GET': urlfetch.GET, 'POST': urlfetch.POST,
            'PUT': urlfetch.PUT, 'DELETE': urlfetch.DELETE}.get(operation)
  return HttpResponse(urlfetch.Fetch(url=full_url, payload=payload,
      method=method, headers=headers))
def __ConvertDataPart(data):
  """Returns data as a string.

  Strings (and falsy values) pass through unchanged, file-like objects are
  read to exhaustion, and anything else falls back to str().
  """
  if not data or isinstance(data, str):
    return data
  if hasattr(data, 'read'):
    # File-like object: send its full contents.
    return data.read()
  # Not a file; rely on the object's string conversion.
  return str(data)
class HttpResponse(object):
  """Translates a urlfetch response to look like an httplib response.

  Used to allow the response from HttpRequest to be usable by gdata.service
  methods.  (Fixes the original docstring typos 'resoinse'/'hhtplib'.)
  """

  def __init__(self, urlfetch_response):
    # Buffer the body so callers can read() it incrementally like an
    # httplib response object.
    self.body = StringIO.StringIO(urlfetch_response.content)
    self.headers = urlfetch_response.headers
    self.status = urlfetch_response.status_code
    # urlfetch does not expose an HTTP reason phrase.
    self.reason = ''

  def read(self, length=None):
    """Reads length bytes of the body, or the whole remainder if None.

    BUG FIX: the original test was `if not length`, so read(0) returned the
    entire remaining body instead of an empty string.
    """
    if length is None:
      return self.body.read()
    return self.body.read(length)

  def getheader(self, name, default=None):
    """Returns the value of the named response header.

    Falls back to a lower-cased lookup when the exact name is absent and,
    unlike the original (which raised KeyError), returns `default` when
    neither form is present -- matching httplib.HTTPResponse.getheader.
    Also replaces the Python-2-only dict.has_key with `in`.
    """
    if name in self.headers:
      return self.headers[name]
    return self.headers.get(name.lower(), default)
# ===== module boundary: gdata/photos/exif/__init__.py follows =====
# -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.exif, implementing the exif namespace in gdata
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module maps elements from the {EXIF} namespace[1] to GData objects.
These elements describe image data, using exif attributes[2].
Picasa Web Albums uses the exif namespace to represent Exif data encoded
in a photo [3].
Picasa Web Albums uses the following exif elements:
exif:distance
exif:exposure
exif:flash
exif:focallength
exif:fstop
exif:imageUniqueID
exif:iso
exif:make
exif:model
exif:tags
exif:time
[1]: http://schemas.google.com/photos/exif/2007.
[2]: http://en.wikipedia.org/wiki/Exif
[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference
"""
__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
# XML namespace URI for the Picasa Web Albums exif elements.
EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
class ExifBaseElement(atom.AtomBase):
  """Common base for all elements in the EXIF namespace.

  Subclasses normally only need to override _tag with their element's tag
  name; the namespace, child and attribute maps are inherited from here.
  (The original class "docstring" used a %-expression, which Python does
  not treat as a docstring at all.)
  """
  _tag = ''
  _namespace = EXIF_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, name=None, extension_elements=None,
               extension_attributes=None, text=None):
    # Attributes are set directly, mirroring atom.AtomBase's layout.
    self.text = text
    self.name = name
    self.extension_attributes = extension_attributes or {}
    self.extension_elements = extension_elements or []
class Distance(ExifBaseElement):
  "(float) The distance to the subject, e.g. 0.0"
  _tag = 'distance'
def DistanceFromString(xml_string):
  """Creates a Distance element from its XML string representation."""
  return atom.CreateClassFromXMLString(Distance, xml_string)
class Exposure(ExifBaseElement):
  "(float) The exposure time used, e.g. 0.025 or 8.0E4"
  _tag = 'exposure'
def ExposureFromString(xml_string):
  """Creates an Exposure element from its XML string representation."""
  return atom.CreateClassFromXMLString(Exposure, xml_string)
class Flash(ExifBaseElement):
  """(string) Boolean value indicating whether the flash was used.

  The .text attribute will either be `true' or `false'.
  As a convenience, truth-testing the object works as expected:
    flash_used = bool(entry.exif.flash)
  """
  _tag = 'flash'

  def __bool__(self):
    # BUG FIX: the original returned None (not a bool) for any text other
    # than 'true'/'false', which makes bool() raise TypeError; treat
    # anything that is not 'true' -- including a missing value -- as False.
    if self.text is None:
      return False
    return self.text.lower() == 'true'

  # BUG FIX: Python 2 truth testing calls __nonzero__, not __bool__, so
  # without this alias bool(flash) silently always returned True.
  __nonzero__ = __bool__

def FlashFromString(xml_string):
  """Creates a Flash element from its XML string representation."""
  return atom.CreateClassFromXMLString(Flash, xml_string)
class Focallength(ExifBaseElement):
  "(float) The focal length used, e.g. 23.7"
  _tag = 'focallength'
def FocallengthFromString(xml_string):
  """Creates a Focallength element from its XML string representation."""
  return atom.CreateClassFromXMLString(Focallength, xml_string)
class Fstop(ExifBaseElement):
  "(float) The fstop value used, e.g. 5.0"
  _tag = 'fstop'
def FstopFromString(xml_string):
  """Creates an Fstop element from its XML string representation."""
  return atom.CreateClassFromXMLString(Fstop, xml_string)
class ImageUniqueID(ExifBaseElement):
  "(string) The unique image ID for the photo. Generated by Google Photo servers"
  _tag = 'imageUniqueID'
def ImageUniqueIDFromString(xml_string):
  """Creates an ImageUniqueID element from its XML string representation."""
  return atom.CreateClassFromXMLString(ImageUniqueID, xml_string)
class Iso(ExifBaseElement):
  "(int) The iso equivalent value used, e.g. 200"
  _tag = 'iso'
def IsoFromString(xml_string):
  """Creates an Iso element from its XML string representation."""
  return atom.CreateClassFromXMLString(Iso, xml_string)
class Make(ExifBaseElement):
  "(string) The make of the camera used, e.g. Fictitious Camera Company"
  _tag = 'make'
def MakeFromString(xml_string):
  """Creates a Make element from its XML string representation."""
  return atom.CreateClassFromXMLString(Make, xml_string)
class Model(ExifBaseElement):
  "(string) The model of the camera used,e.g AMAZING-100D"
  _tag = 'model'
def ModelFromString(xml_string):
  """Creates a Model element from its XML string representation."""
  return atom.CreateClassFromXMLString(Model, xml_string)
class Time(ExifBaseElement):
  """(int) The date/time the photo was taken, e.g. 1180294337000.
  Represented as the number of milliseconds since January 1st, 1970.
  The value of this element will always be identical to the value
  of the <gphoto:timestamp>.
  Look at this object's .isoformat() for a human friendly datetime string:
  photo_epoch = Time.text # 1180294337000
  photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'
  Alternatively:
  photo_datetime = Time.datetime() # (requires python >= 2.3)
  """
  _tag = 'time'
  def isoformat(self):
    """(string) Return the timestamp as a ISO 8601 formatted string,
    e.g. '2007-05-27T19:32:17.000Z'
    """
    import time
    # .text holds milliseconds since the epoch; convert to seconds.
    epoch = float(self.text)/1000
    return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch))
  def datetime(self):
    """(datetime.datetime) Return the timestamp as a datetime.datetime object
    Requires python 2.3
    """
    import datetime
    epoch = float(self.text)/1000
    # NOTE(review): fromtimestamp() converts to *local* time while
    # isoformat() above uses gmtime (UTC), so the two methods can disagree
    # by the local UTC offset -- confirm which behavior callers expect.
    return datetime.datetime.fromtimestamp(epoch)
def TimeFromString(xml_string):
  """Creates a Time element from its XML string representation."""
  return atom.CreateClassFromXMLString(Time, xml_string)
class Tags(ExifBaseElement):
"""The container for all exif elements.
The <exif:tags> element can appear as a child of a photo entry.
"""
_tag = 'tags'
_children = atom.AtomBase._children.copy()
_children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop)
_children['{%s}make' % EXIF_NAMESPACE] = ('make', Make)
_children['{%s}model' % EXIF_NAMESPACE] = ('model', Model)
_children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance)
_children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure)
_children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash)
_children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength)
_children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso)
_children['{%s}time' % EXIF_NAMESPACE] = ('time', Time)
_children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID)
def __init__(self, extension_elements=None, extension_attributes=None, text=None):
ExifBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.fstop=None
self.make=None
self.model=None
self.distance=None
self.exposure=None
self.flash=None
self.focallength=None
self.iso=None
self.time=None
self.imageUniqueID=None
def TagsFromString(xml_string):
  """Deserialize an <exif:tags> XML string into a Tags element."""
  return atom.CreateClassFromXMLString(Tags, xml_string)
| Python |
#!/usr/bin/env python
# -*-*- encoding: utf-8 -*-*-
#
# This is the service file for the Google Photo python client.
# It is used for higher level operations.
#
# $Id: service.py 144 2007-10-25 21:03:34Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google PhotoService provides a human-friendly interface to
Google Photo (a.k.a. Picasa Web) services[1].
It extends gdata.service.GDataService and as such hides all the
nasty details about authenticating, parsing and communicating with
Google Photos.
[1]: http://code.google.com/apis/picasaweb/gdata.html
Example:
import gdata.photos, gdata.photos.service
pws = gdata.photos.service.PhotosService()
pws.ClientLogin(username, password)
#Get all albums
albums = pws.GetUserFeed().entry
# Get all photos in second album
photos = pws.GetFeed(albums[1].GetPhotosUri()).entry
# Get all tags for photos in second album and print them
tags = pws.GetFeed(albums[1].GetTagsUri()).entry
print [ tag.summary.text for tag in tags ]
# Get all comments for the first photos in list and print them
comments = pws.GetCommentFeed(photos[0].GetCommentsUri()).entry
print [ c.summary.text for c in comments ]
# Get a photo to work with
photo = photos[0]
# Update metadata
# Attributes from the <gphoto:*> namespace
photo.summary.text = u'A nice view from my veranda'
photo.title.text = u'Verandaview.jpg'
# Attributes from the <media:*> namespace
photo.media.keywords.text = u'Home, Long-exposure, Sunset' # Comma-separated
# Adding attributes to media object
# Rotate 90 degrees clockwise
photo.rotation = gdata.photos.Rotation(text='90')
# Submit modified photo object
photo = pws.UpdatePhotoMetadata(photo)
# Make sure you only modify the newly returned object, else you'll get
# versioning errors. See Optimistic-concurrency
# Add comment to a picture
comment = pws.InsertComment(photo, u'I wish the water always was this warm')
# Remove comment because it was silly
print "*blush*"
pws.Delete(comment.GetEditLink().href)
"""
__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
__version__ = '$Revision: 176 $'[11:-2]
import sys, os.path, StringIO
import time
import gdata.service
import gdata
import atom.service
import atom
import gdata.photos
# Media types Google Photos accepts for upload (everything is converted
# to jpeg server side, per the InsertPhoto docstring below).
SUPPORTED_UPLOAD_TYPES = ('bmp', 'jpeg', 'jpg', 'gif', 'png')
# Error codes surfaced through GooglePhotosException.error_code.
UNKOWN_ERROR=1000
# The original name above is misspelled; keep it for backward
# compatibility and expose a correctly spelled alias.
UNKNOWN_ERROR=UNKOWN_ERROR
GPHOTOS_BAD_REQUEST=400
GPHOTOS_CONFLICT=409
GPHOTOS_INTERNAL_SERVER_ERROR=500
GPHOTOS_INVALID_ARGUMENT=601
GPHOTOS_INVALID_CONTENT_TYPE=602
GPHOTOS_NOT_AN_IMAGE=603
GPHOTOS_INVALID_KIND=604
class GooglePhotosException(Exception):
  """Error raised when the Google Photos service rejects a request.

  `response' is the dict found in gdata.service.RequestError.args[0],
  with `status', `reason' and `body' keys.  Well-known error message
  bodies are translated into the explicit GPHOTOS_* error codes.
  """
  def __init__(self, response):
    self.error_code = response['status']
    self.reason = response['reason'].strip()
    if '<html>' in str(response['body']): #general html message, discard it
      response['body'] = ""
    self.body = response['body'].strip()
    self.message = "(%(status)s) %(body)s -- %(reason)s" % response
    #return explicit error codes
    error_map = { '(12) Not an image':GPHOTOS_NOT_AN_IMAGE,
                  'kind: That is not one of the acceptable values':
                      GPHOTOS_INVALID_KIND,
                }
    # .items() iterates the same pairs as the Python-2-only .iteritems()
    # and keeps this class importable under Python 3 as well.
    for msg, code in error_map.items():
      if self.body == msg:
        self.error_code = code
        break
    self.args = [self.error_code, self.reason, self.body]
class PhotosService(gdata.service.GDataService):
userUri = '/data/feed/api/user/%s'
def __init__(self, email=None, password=None,
source=None, server='picasaweb.google.com', additional_headers=None):
""" GooglePhotosService constructor.
Arguments:
email: string (optional) The e-mail address of the account to use for
authentication.
password: string (optional) The password of the account to use for
authentication.
source: string (optional) The name of the user's application.
server: string (optional) The server the feed is hosted on.
additional_headers: dict (optional) Any additional HTTP headers to be
transmitted to the service in the form of key-value
pairs.
Returns:
A PhotosService object used to communicate with the Google Photos
service.
"""
self.email = email
self.client = source
gdata.service.GDataService.__init__(self, email=self.email, password=password,
service='lh2', source=source,
server=server,
additional_headers=additional_headers)
def GetFeed(self, uri, limit=None, start_index=None):
"""Get a feed.
The results are ordered by the values of their `updated' elements,
with the most recently updated entry appearing first in the feed.
Arguments:
uri: the uri to fetch
limit (optional): the maximum number of entries to return. Defaults to what
the server returns.
Returns:
one of gdata.photos.AlbumFeed,
gdata.photos.UserFeed,
gdata.photos.PhotoFeed,
gdata.photos.CommentFeed,
gdata.photos.TagFeed,
depending on the results of the query.
Raises:
GooglePhotosException
See:
http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
"""
if limit is not None:
uri += '&max-results=%s' % limit
if start_index is not None:
uri += '&start-index=%s' % start_index
try:
return self.Get(uri, converter=gdata.photos.AnyFeedFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def GetEntry(self, uri, limit=None, start_index=None):
"""Get an Entry.
Arguments:
uri: the uri to the entry
limit (optional): the maximum number of entries to return. Defaults to what
the server returns.
Returns:
one of gdata.photos.AlbumEntry,
gdata.photos.UserEntry,
gdata.photos.PhotoEntry,
gdata.photos.CommentEntry,
gdata.photos.TagEntry,
depending on the results of the query.
Raises:
GooglePhotosException
"""
if limit is not None:
uri += '&max-results=%s' % limit
if start_index is not None:
uri += '&start-index=%s' % start_index
try:
return self.Get(uri, converter=gdata.photos.AnyEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def GetUserFeed(self, kind='album', user='default', limit=None):
"""Get user-based feed, containing albums, photos, comments or tags;
defaults to albums.
The entries are ordered by the values of their `updated' elements,
with the most recently updated entry appearing first in the feed.
Arguments:
kind: the kind of entries to get, either `album', `photo',
`comment' or `tag', or a python list of these. Defaults to `album'.
user (optional): whose albums we're querying. Defaults to current user.
limit (optional): the maximum number of entries to return.
Defaults to everything the server returns.
Returns:
gdata.photos.UserFeed, containing appropriate Entry elements
See:
http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
http://googledataapis.blogspot.com/2007/07/picasa-web-albums-adds-new-api-features.html
"""
if isinstance(kind, (list, tuple) ):
kind = ",".join(kind)
uri = '/data/feed/api/user/%s?kind=%s' % (user, kind)
return self.GetFeed(uri, limit=limit)
def GetTaggedPhotos(self, tag, user='default', limit=None):
"""Get all photos belonging to a specific user, tagged by the given keyword
Arguments:
tag: The tag you're looking for, e.g. `dog'
user (optional): Whose images/videos you want to search, defaults
to current user
limit (optional): the maximum number of entries to return.
Defaults to everything the server returns.
Returns:
gdata.photos.UserFeed containing PhotoEntry elements
"""
# Lower-casing because of
# http://code.google.com/p/gdata-issues/issues/detail?id=194
uri = '/data/feed/api/user/%s?kind=photo&tag=%s' % (user, tag.lower())
return self.GetFeed(uri, limit)
def SearchUserPhotos(self, query, user='default', limit=100):
"""Search through all photos for a specific user and return a feed.
This will look for matches in file names and image tags (a.k.a. keywords)
Arguments:
query: The string you're looking for, e.g. `vacation'
user (optional): The username of whose photos you want to search, defaults
to current user.
limit (optional): Don't return more than `limit' hits, defaults to 100
Only public photos are searched, unless you are authenticated and
searching through your own photos.
Returns:
gdata.photos.UserFeed with PhotoEntry elements
"""
uri = '/data/feed/api/user/%s?kind=photo&q=%s' % (user, query)
return self.GetFeed(uri, limit=limit)
def SearchCommunityPhotos(self, query, limit=100):
"""Search through all public photos and return a feed.
This will look for matches in file names and image tags (a.k.a. keywords)
Arguments:
query: The string you're looking for, e.g. `vacation'
limit (optional): Don't return more than `limit' hits, defaults to 100
Returns:
gdata.GDataFeed with PhotoEntry elements
"""
uri='/data/feed/api/all?q=%s' % query
return self.GetFeed(uri, limit=limit)
def GetContacts(self, user='default', limit=None):
"""Retrieve a feed that contains a list of your contacts
Arguments:
user: Username of the user whose contacts you want
Returns
gdata.photos.UserFeed, with UserEntry entries
See:
http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
"""
uri = '/data/feed/api/user/%s/contacts?kind=user' % user
return self.GetFeed(uri, limit=limit)
def SearchContactsPhotos(self, user='default', search=None, limit=None):
"""Search over your contacts' photos and return a feed
Arguments:
user: Username of the user whose contacts you want
search (optional): What to search for (photo title, description and keywords)
Returns
gdata.photos.UserFeed, with PhotoEntry elements
See:
http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
"""
uri = '/data/feed/api/user/%s/contacts?kind=photo&q=%s' % (user, search)
return self.GetFeed(uri, limit=limit)
def InsertAlbum(self, title, summary, location=None, access='public',
commenting_enabled='true', timestamp=None):
"""Add an album.
Needs authentication, see self.ClientLogin()
Arguments:
title: Album title
summary: Album summary / description
access (optional): `private' or `public'. Public albums are searchable
by everyone on the internet. Defaults to `public'
commenting_enabled (optional): `true' or `false'. Defaults to `true'.
timestamp (optional): A date and time for the album, in milliseconds since
Unix epoch[1] UTC. Defaults to now.
Returns:
The newly created gdata.photos.AlbumEntry
See:
http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed
[1]: http://en.wikipedia.org/wiki/Unix_epoch
"""
album = gdata.photos.AlbumEntry()
album.title = atom.Title(text=title, title_type='text')
album.summary = atom.Summary(text=summary, summary_type='text')
if location is not None:
album.location = gdata.photos.Location(text=location)
album.access = gdata.photos.Access(text=access)
if commenting_enabled in ('true', 'false'):
album.commentingEnabled = gdata.photos.CommentingEnabled(text=commenting_enabled)
if timestamp is None:
timestamp = '%i' % int(time.time() * 1000)
album.timestamp = gdata.photos.Timestamp(text=timestamp)
try:
return self.Post(album, uri=self.userUri % self.email,
converter=gdata.photos.AlbumEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def InsertPhoto(self, album_or_uri, photo, filename_or_handle,
content_type='image/jpeg'):
"""Add a PhotoEntry
Needs authentication, see self.ClientLogin()
Arguments:
album_or_uri: AlbumFeed or uri of the album where the photo should go
photo: PhotoEntry to add
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
"""
try:
assert(isinstance(photo, gdata.photos.PhotoEntry))
except AssertionError:
raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
'body':'`photo` must be a gdata.photos.PhotoEntry instance',
'reason':'Found %s, not PhotoEntry' % type(photo)
})
try:
majtype, mintype = content_type.split('/')
assert(mintype in SUPPORTED_UPLOAD_TYPES)
except (ValueError, AssertionError):
raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE,
'body':'This is not a valid content type: %s' % content_type,
'reason':'Accepted content types: %s' % \
['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
})
if isinstance(filename_or_handle, (str, unicode)) and \
os.path.exists(filename_or_handle): # it's a file name
mediasource = gdata.MediaSource()
mediasource.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):# it's a file-like resource
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0) # rewind pointer to the start of the file
# gdata.MediaSource needs the content length, so read the whole image
file_handle = StringIO.StringIO(filename_or_handle.read())
name = 'image'
if hasattr(filename_or_handle, 'name'):
name = filename_or_handle.name
mediasource = gdata.MediaSource(file_handle, content_type,
content_length=file_handle.len, file_name=name)
else: #filename_or_handle is not valid
raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
'body':'`filename_or_handle` must be a path name or a file-like object',
'reason':'Found %s, not path name or object with a .read() method' % \
type(filename_or_handle)
})
if isinstance(album_or_uri, (str, unicode)): # it's a uri
feed_uri = album_or_uri
elif hasattr(album_or_uri, 'GetFeedLink'): # it's a AlbumFeed object
feed_uri = album_or_uri.GetFeedLink().href
try:
return self.Post(photo, uri=feed_uri, media_source=mediasource,
converter=gdata.photos.PhotoEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def InsertPhotoSimple(self, album_or_uri, title, summary, filename_or_handle,
content_type='image/jpeg', keywords=None):
"""Add a photo without constructing a PhotoEntry.
Needs authentication, see self.ClientLogin()
Arguments:
album_or_uri: AlbumFeed or uri of the album where the photo should go
title: Photo title
summary: Photo summary / description
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
keywords (optional): a 1) comma separated string or 2) a python list() of
keywords (a.k.a. tags) to add to the image.
E.g. 1) `dog, vacation, happy' 2) ['dog', 'happy', 'vacation']
Returns:
The newly created gdata.photos.PhotoEntry or GooglePhotosException on errors
See:
http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed
[1]: http://en.wikipedia.org/wiki/Unix_epoch
"""
metadata = gdata.photos.PhotoEntry()
metadata.title=atom.Title(text=title)
metadata.summary = atom.Summary(text=summary, summary_type='text')
if keywords is not None:
if isinstance(keywords, list):
keywords = ','.join(keywords)
metadata.media.keywords = gdata.media.Keywords(text=keywords)
return self.InsertPhoto(album_or_uri, metadata, filename_or_handle,
content_type)
def UpdatePhotoMetadata(self, photo):
"""Update a photo's metadata.
Needs authentication, see self.ClientLogin()
You can update any or all of the following metadata properties:
* <title>
* <media:description>
* <gphoto:checksum>
* <gphoto:client>
* <gphoto:rotation>
* <gphoto:timestamp>
* <gphoto:commentingEnabled>
Arguments:
photo: a gdata.photos.PhotoEntry object with updated elements
Returns:
The modified gdata.photos.PhotoEntry
Example:
p = GetFeed(uri).entry[0]
p.title.text = u'My new text'
p.commentingEnabled.text = 'false'
p = UpdatePhotoMetadata(p)
It is important that you don't keep the old object around, once
it has been updated. See
http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
"""
try:
return self.Put(data=photo, uri=photo.GetEditLink().href,
converter=gdata.photos.PhotoEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def UpdatePhotoBlob(self, photo_or_uri, filename_or_handle,
content_type = 'image/jpeg'):
"""Update a photo's binary data.
Needs authentication, see self.ClientLogin()
Arguments:
photo_or_uri: a gdata.photos.PhotoEntry that will be updated, or a
`edit-media' uri pointing to it
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
Returns:
The modified gdata.photos.PhotoEntry
Example:
p = GetFeed(PhotoUri)
p = UpdatePhotoBlob(p, '/tmp/newPic.jpg')
It is important that you don't keep the old object around, once
it has been updated. See
http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
"""
try:
majtype, mintype = content_type.split('/')
assert(mintype in SUPPORTED_UPLOAD_TYPES)
except (ValueError, AssertionError):
raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE,
'body':'This is not a valid content type: %s' % content_type,
'reason':'Accepted content types: %s' % \
['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
})
if isinstance(filename_or_handle, (str, unicode)) and \
os.path.exists(filename_or_handle): # it's a file name
photoblob = gdata.MediaSource()
photoblob.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):# it's a file-like resource
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0) # rewind pointer to the start of the file
# gdata.MediaSource needs the content length, so read the whole image
file_handle = StringIO.StringIO(filename_or_handle.read())
name = 'image'
if hasattr(filename_or_handle, 'name'):
name = filename_or_handle.name
mediasource = gdata.MediaSource(file_handle, content_type,
content_length=file_handle.len, file_name=name)
else: #filename_or_handle is not valid
raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
'body':'`filename_or_handle` must be a path name or a file-like object',
'reason':'Found %s, not path name or an object with .read() method' % \
type(filename_or_handle)
})
if isinstance(photo_or_uri, (str, unicode)):
entry_uri = photo_or_uri # it's a uri
elif hasattr(photo_or_uri, 'GetEditMediaLink'):
entry_uri = photo_or_uri.GetEditMediaLink().href
try:
return self.Put(photoblob, entry_uri,
converter=gdata.photos.PhotoEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def InsertTag(self, photo_or_uri, tag):
"""Add a tag (a.k.a. keyword) to a photo.
Needs authentication, see self.ClientLogin()
Arguments:
photo_or_uri: a gdata.photos.PhotoEntry that will be tagged, or a
`post' uri pointing to it
(string) tag: The tag/keyword
Returns:
The new gdata.photos.TagEntry
Example:
p = GetFeed(PhotoUri)
tag = InsertTag(p, 'Beautiful sunsets')
"""
tag = gdata.photos.TagEntry(title=atom.Title(text=tag))
if isinstance(photo_or_uri, (str, unicode)):
post_uri = photo_or_uri # it's a uri
elif hasattr(photo_or_uri, 'GetEditMediaLink'):
post_uri = photo_or_uri.GetPostLink().href
try:
return self.Post(data=tag, uri=post_uri,
converter=gdata.photos.TagEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def InsertComment(self, photo_or_uri, comment):
"""Add a comment to a photo.
Needs authentication, see self.ClientLogin()
Arguments:
photo_or_uri: a gdata.photos.PhotoEntry that is about to be commented
, or a `post' uri pointing to it
(string) comment: The actual comment
Returns:
The new gdata.photos.CommentEntry
Example:
p = GetFeed(PhotoUri)
tag = InsertComment(p, 'OOOH! I would have loved to be there.
Who's that in the back?')
"""
comment = gdata.photos.CommentEntry(content=atom.Content(text=comment))
if isinstance(photo_or_uri, (str, unicode)):
post_uri = photo_or_uri # it's a uri
elif hasattr(photo_or_uri, 'GetEditMediaLink'):
post_uri = photo_or_uri.GetPostLink().href
try:
return self.Post(data=comment, uri=post_uri,
converter=gdata.photos.CommentEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def Delete(self, object_or_uri, *args, **kwargs):
"""Delete an object.
Re-implementing the GDataService.Delete method, to add some
convenience.
Arguments:
object_or_uri: Any object that has a GetEditLink() method that
returns a link, or a uri to that object.
Returns:
? or GooglePhotosException on errors
"""
try:
uri = object_or_uri.GetEditLink().href
except AttributeError:
uri = object_or_uri
try:
return gdata.service.GDataService.Delete(self, uri, *args, **kwargs)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def GetSmallestThumbnail(media_thumbnail_list):
  """Helper function to get the smallest thumbnail of a list of
  gdata.media.Thumbnail.

  "Smallest" means fewest pixels, i.e. smallest width * height product.

  Returns gdata.media.Thumbnail """
  area_to_thumb = {}
  for thumb in media_thumbnail_list:
    area_to_thumb[int(thumb.width)*int(thumb.height)] = thumb
  # min() over the keys replaces the Python-2-only keys()/sort() dance
  # (dict.keys() has no .sort() in Python 3) with identical behavior.
  return area_to_thumb[min(area_to_thumb)]
def ConvertAtomTimestampToEpoch(timestamp):
  """Helper function to convert a timestamp string, for instance
  from atom:updated or atom:published, to seconds since Unix epoch
  (a.k.a. POSIX time).

  `2007-07-22T00:45:10.000Z' -> 1185065110.0

  The trailing `Z' means the input is UTC, so the conversion uses
  calendar.timegm (UTC) rather than time.mktime, which interprets the
  struct_time in *local* time and shifted the result by the local
  utc-offset (this resolves the old `TODO: Timezone aware' note).
  """
  import calendar
  # float() preserves the numeric type time.mktime used to return.
  return float(calendar.timegm(time.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.000Z')))
| Python |
# -*-*- encoding: utf-8 -*-*-
#
# This is the base file for the PicasaWeb python client.
# It is used for lower level operations.
#
# $Id: __init__.py 148 2007-10-28 15:09:19Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a pythonic, gdata-centric interface to Google Photos
(a.k.a. Picasa Web Services).
It is modelled after the gdata/* interfaces from the gdata-python-client
project[1] by Google.
You'll find the user-friendly api in photos.service. Please see the
documentation or live help() system for available methods.
[1]: http://gdata-python-client.googlecode.com/
"""
__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
__version__ = '$Revision: 164 $'[11:-2]
import re
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
# importing google photo submodules
import gdata.media as Media, gdata.exif as Exif, gdata.geo as Geo
# XML namespaces which are often used in Google Photo elements
# (PHOTOS_NAMESPACE backs the <gphoto:*> elements declared below;
# EXIF_NAMESPACE backs the <exif:*> elements; the rest are the standard
# media/opensearch/geo/gml/georss/pheed/batch namespace URIs used by
# the feed parsers in this package).
PHOTOS_NAMESPACE = 'http://schemas.google.com/photos/2007'
MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/'
EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/'
GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
GML_NAMESPACE = 'http://www.opengis.net/gml'
GEORSS_NAMESPACE = 'http://www.georss.org/georss'
PHEED_NAMESPACE = 'http://www.pheed.com/pheed/'
BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch'
class PhotosBaseElement(atom.AtomBase):
  """Base class for elements in the PHOTO_NAMESPACE. To add new elements,
  you only need to add the element tag name to self._tag
  """
  _tag = ''
  _namespace = PHOTOS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  def __init__(self, name=None, extension_elements=None,
      extension_attributes=None, text=None):
    # Sets the AtomBase fields directly instead of calling
    # atom.AtomBase.__init__; `or []` / `or {}` means a falsy argument
    # (including an explicitly passed empty container) is replaced with
    # a fresh one.
    self.name = name
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
  #def __str__(self):
    #return str(self.text)
  #def __unicode__(self):
    #return unicode(self.text)
  def __int__(self):
    # Allows int(element) for numeric gphoto elements.
    return int(self.text)
  def bool(self):
    # Element text is the literal string 'true' for truth.
    return self.text == 'true'
class GPhotosBaseFeed(gdata.GDataFeed, gdata.LinkFinder):
  """Base class for all Feeds in gdata.photos.

  Entry elements are parsed by _ConvertElementTreeToMember below so
  that each entry becomes the gdata.photos.*Entry class matching its
  gphoto kind category.
  """
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _attributes = gdata.GDataFeed._attributes.copy()
  _children = gdata.GDataFeed._children.copy()
  # We deal with Entry elements ourselves
  del _children['{%s}entry' % atom.ATOM_NAMESPACE]
  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None,
      entry=None, total_results=None, start_index=None,
      items_per_page=None, extension_elements=None,
      extension_attributes=None, text=None):
    gdata.GDataFeed.__init__(self, author=author, category=category,
                             contributor=contributor, generator=generator,
                             icon=icon,  atom_id=atom_id, link=link,
                             logo=logo, rights=rights, subtitle=subtitle,
                             title=title, updated=updated, entry=entry,
                             total_results=total_results,
                             start_index=start_index,
                             items_per_page=items_per_page,
                             extension_elements=extension_elements,
                             extension_attributes=extension_attributes,
                             text=text)
  def kind(self):
    "(string) Returns the kind"
    try:
      return self.category[0].term.split('#')[1]
    except IndexError:
      return None
  def _feedUri(self, kind):
    "Convenience method to return a uri to a feed of a special kind"
    assert(kind in ('album', 'tag', 'photo', 'comment', 'user'))
    here_href = self.GetSelfLink().href
    if 'kind=%s' % kind in here_href:
      return here_href
    if not 'kind=' in here_href:
      sep = '?'
      if '?' in here_href: sep = '&'
      return here_href + "%skind=%s" % (sep, kind)
    # BUGFIX: `user' was missing from the alternation although the assert
    # above allows it; a self-href carrying kind=user made re.match return
    # None and the line below fail with an AttributeError.
    rx = re.match('.*(kind=)(album|tag|photo|comment|user)', here_href)
    return here_href[:rx.end(1)] + kind + here_href[rx.end(2):]
  def _ConvertElementTreeToMember(self, child_tree):
    """Re-implementing the method from AtomBase, since we deal with
    Entry elements specially"""
    category = child_tree.find('{%s}category' % atom.ATOM_NAMESPACE)
    if category is None:
      return atom.AtomBase._ConvertElementTreeToMember(self, child_tree)
    namespace, kind = category.get('term').split('#')
    if namespace != PHOTOS_NAMESPACE:
      return atom.AtomBase._ConvertElementTreeToMember(self, child_tree)
    ## TODO: is it safe to use getattr on gdata.photos?
    entry_class = getattr(gdata.photos, '%sEntry' % kind.title())
    if not hasattr(self, 'entry') or self.entry is None:
      self.entry = []
    self.entry.append(atom._CreateClassFromElementTree(
        entry_class, child_tree))
class GPhotosBaseEntry(gdata.GDataEntry, gdata.LinkFinder):
  "Base class for all Entry elements in gdata.photos"
  _tag = 'entry'
  # Subclasses set _kind (e.g. 'photo', 'album') to tag new entries.
  _kind = ''
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  def __init__(self, author=None, category=None, content=None,
      atom_id=None, link=None, published=None,
      title=None, updated=None,
      extended_property=None,
      extension_elements=None, extension_attributes=None, text=None):
    # NOTE(review): extended_property is accepted but not forwarded to
    # gdata.GDataEntry.__init__ -- confirm whether that is intentional.
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
    # Every new entry is stamped with its gphoto kind category
    # (e.g. ...photos/2007#photo) so the server knows its type.
    self.category.append(
        atom.Category(scheme='http://schemas.google.com/g/2005#kind',
            term = 'http://schemas.google.com/photos/2007#%s' % self._kind))
  def kind(self):
    "(string) Returns the kind"
    try:
      return self.category[0].term.split('#')[1]
    except IndexError:
      return None
  def _feedUri(self, kind):
    "Convenience method to get the uri to this entry's feed of the some kind"
    try:
      href = self.GetFeedLink().href
    except AttributeError:
      # Entry carries no feed link.
      return None
    sep = '?'
    if '?' in href: sep = '&'
    return '%s%skind=%s' % (href, sep, kind)
class PhotosBaseEntry(GPhotosBaseEntry):
  """Alias of GPhotosBaseEntry (NOTE(review): presumably kept for
  backward compatibility -- adds nothing of its own)."""
  pass
class PhotosBaseFeed(GPhotosBaseFeed):
  """Alias of GPhotosBaseFeed (NOTE(review): presumably kept for
  backward compatibility -- adds nothing of its own)."""
  pass
class GPhotosBaseData(object):
  """Empty mix-in base for the gdata.photos data classes."""
  pass
class Access(PhotosBaseElement):
  """The Google Photo `Access' element.
  The album's access level. Valid values are `public' or `private'.
  In documentation, access level is also referred to as `visibility.'"""
  _tag = 'access'
def AccessFromString(xml_string):
  """Deserialize a <gphoto:access> XML string into an Access element."""
  return atom.CreateClassFromXMLString(Access, xml_string)
class Albumid(PhotosBaseElement):
  "The Google Photo `Albumid' element"
  _tag = 'albumid'
def AlbumidFromString(xml_string):
  """Deserialize a <gphoto:albumid> XML string into an Albumid element."""
  return atom.CreateClassFromXMLString(Albumid, xml_string)
class BytesUsed(PhotosBaseElement):
  "The Google Photo `BytesUsed' element"
  _tag = 'bytesUsed'
def BytesUsedFromString(xml_string):
  """Deserialize a <gphoto:bytesUsed> XML string into a BytesUsed element."""
  return atom.CreateClassFromXMLString(BytesUsed, xml_string)
class Client(PhotosBaseElement):
  "The Google Photo `Client' element"
  _tag = 'client'
def ClientFromString(xml_string):
  """Deserialize a <gphoto:client> XML string into a Client element."""
  return atom.CreateClassFromXMLString(Client, xml_string)
class Checksum(PhotosBaseElement):
  "The Google Photo `Checksum' element"
  _tag = 'checksum'
def ChecksumFromString(xml_string):
  """Deserialize a <gphoto:checksum> XML string into a Checksum element."""
  return atom.CreateClassFromXMLString(Checksum, xml_string)
class CommentCount(PhotosBaseElement):
  "The Google Photo `CommentCount' element"
  _tag = 'commentCount'
def CommentCountFromString(xml_string):
  """Deserialize a <gphoto:commentCount> XML string into a CommentCount element."""
  return atom.CreateClassFromXMLString(CommentCount, xml_string)
class CommentingEnabled(PhotosBaseElement):
  "The Google Photo `CommentingEnabled' element"
  _tag = 'commentingEnabled'
def CommentingEnabledFromString(xml_string):
  """Deserialize a <gphoto:commentingEnabled> XML string into a
  CommentingEnabled element."""
  return atom.CreateClassFromXMLString(CommentingEnabled, xml_string)
class Height(PhotosBaseElement):
  "The Google Photo `Height' element"
  _tag = 'height'
def HeightFromString(xml_string):
  """Deserialize a <gphoto:height> XML string into a Height element."""
  return atom.CreateClassFromXMLString(Height, xml_string)
class Id(PhotosBaseElement):
  "The Google Photo `Id' element"
  _tag = 'id'
def IdFromString(xml_string):
  """Deserialize a <gphoto:id> XML string into an Id element."""
  return atom.CreateClassFromXMLString(Id, xml_string)
class Location(PhotosBaseElement):
  "The Google Photo `Location' element"
  _tag = 'location'
def LocationFromString(xml_string):
  """Deserialize a <gphoto:location> XML string into a Location element."""
  return atom.CreateClassFromXMLString(Location, xml_string)
class MaxPhotosPerAlbum(PhotosBaseElement):
  "The Google Photo `MaxPhotosPerAlbum' element"
  _tag = 'maxPhotosPerAlbum'
def MaxPhotosPerAlbumFromString(xml_string):
  """Deserialize a <gphoto:maxPhotosPerAlbum> XML string into a
  MaxPhotosPerAlbum element."""
  return atom.CreateClassFromXMLString(MaxPhotosPerAlbum, xml_string)
class Name(PhotosBaseElement):
  "The Google Photo `Name' element"
  _tag = 'name'
def NameFromString(xml_string):
  """Deserialize a <gphoto:name> XML string into a Name element."""
  return atom.CreateClassFromXMLString(Name, xml_string)
class Nickname(PhotosBaseElement):
  "The Google Photo `Nickname' element"
  _tag = 'nickname'
def NicknameFromString(xml_string):
  """Deserialize a <gphoto:nickname> XML string into a Nickname element."""
  return atom.CreateClassFromXMLString(Nickname, xml_string)
class Numphotos(PhotosBaseElement):
  "The Google Photo `Numphotos' element"
  _tag = 'numphotos'
def NumphotosFromString(xml_string):
  """Deserialize a <gphoto:numphotos> XML string into a Numphotos element."""
  return atom.CreateClassFromXMLString(Numphotos, xml_string)
class Numphotosremaining(PhotosBaseElement):
  "The Google Photo `Numphotosremaining' element"
  _tag = 'numphotosremaining'
def NumphotosremainingFromString(xml_string):
  """Deserialize a <gphoto:numphotosremaining> XML string into a
  Numphotosremaining element."""
  return atom.CreateClassFromXMLString(Numphotosremaining, xml_string)
class Position(PhotosBaseElement):
  "The Google Photo `Position' element"
  _tag = 'position'
def PositionFromString(xml_string):
  """Deserialize a <gphoto:position> XML string into a Position element."""
  return atom.CreateClassFromXMLString(Position, xml_string)
class Photoid(PhotosBaseElement):
  "The Google Photo `Photoid' element"
  _tag = 'photoid'
def PhotoidFromString(xml_string):
  """Deserialize a <gphoto:photoid> XML string into a Photoid element."""
  return atom.CreateClassFromXMLString(Photoid, xml_string)
class Quotacurrent(PhotosBaseElement):
  "The Google Photo `Quotacurrent' element"
  _tag = 'quotacurrent'
def QuotacurrentFromString(xml_string):
  """Deserialize a <gphoto:quotacurrent> XML string into a Quotacurrent element."""
  return atom.CreateClassFromXMLString(Quotacurrent, xml_string)
class Quotalimit(PhotosBaseElement):
"The Google Photo `Quotalimit' element"
_tag = 'quotalimit'
def QuotalimitFromString(xml_string):
return atom.CreateClassFromXMLString(Quotalimit, xml_string)
class Rotation(PhotosBaseElement):
"The Google Photo `Rotation' element"
_tag = 'rotation'
def RotationFromString(xml_string):
return atom.CreateClassFromXMLString(Rotation, xml_string)
class Size(PhotosBaseElement):
"The Google Photo `Size' element"
_tag = 'size'
def SizeFromString(xml_string):
return atom.CreateClassFromXMLString(Size, xml_string)
class Snippet(PhotosBaseElement):
  """The Google Photo `snippet' element.

  When searching, the snippet element will contain a
  string with the word you're looking for, highlighted in html markup
  E.g. when your query is `hafjell', this element may contain:
  `... here at <b>Hafjell</b>.'

  You'll find this element in searches -- that is, feeds that combine the
  `kind=photo' and `q=yoursearch' parameters in the request.

  See also gphoto:truncated and gphoto:snippettype.
  """
  _tag = 'snippet'
def SnippetFromString(xml_string):
  """Parse a gphoto:snippet element from an XML string."""
  return atom.CreateClassFromXMLString(Snippet, xml_string)
class Snippettype(PhotosBaseElement):
  """The Google Photo `Snippettype' element

  When searching, this element will tell you the type of element that matches.

  You'll find this element in searches -- that is, feeds that combine the
  `kind=photo' and `q=yoursearch' parameters in the request.

  See also gphoto:snippet and gphoto:truncated.

  Possible values and their interpretation:
  o ALBUM_TITLE       - The album title matches
  o PHOTO_TAGS        - The match is a tag/keyword
  o PHOTO_DESCRIPTION - The match is in the photo's description

  If you discover a value not listed here, please submit a patch to update
  this docstring.
  """
  _tag = 'snippettype'
def SnippettypeFromString(xml_string):
  """Parse a gphoto:snippettype element from an XML string."""
  return atom.CreateClassFromXMLString(Snippettype, xml_string)
class Thumbnail(PhotosBaseElement):
  """The Google Photo `Thumbnail' element

  Used to display user's photo thumbnail (hackergotchi).
  (Not to be confused with the <media:thumbnail> element, which gives you
  small versions of the photo object.)
  """
  _tag = 'thumbnail'
def ThumbnailFromString(xml_string):
  """Parse a gphoto:thumbnail element from an XML string."""
  return atom.CreateClassFromXMLString(Thumbnail, xml_string)
class Timestamp(PhotosBaseElement):
  """The Google Photo `Timestamp' element

  Represented as the number of milliseconds since January 1st, 1970.

  Take a look at the convenience methods .isoformat() and .datetime():

  photo_epoch     = Time.text        # 1180294337000
  photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'

  Alternatively:
  photo_datetime  = Time.datetime()  # (requires python >= 2.3)
  """
  _tag = 'timestamp'
  def isoformat(self):
    """(string) Return the timestamp as a ISO 8601 formatted string,
    e.g. '2007-05-27T19:32:17.000Z'
    """
    import time
    # .text holds milliseconds since the epoch; convert to float seconds.
    epoch = float(self.text)/1000
    # time.gmtime renders in UTC, which matches the hard-coded trailing 'Z'.
    return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch))
  def datetime(self):
    """(datetime.datetime) Return the timestamp as a datetime.datetime object

    Requires python 2.3

    NOTE(review): datetime.fromtimestamp() converts to *local* time, while
    isoformat() above formats in UTC -- confirm this asymmetry is intended.
    """
    import datetime
    epoch = float(self.text)/1000
    return datetime.datetime.fromtimestamp(epoch)
def TimestampFromString(xml_string):
  """Parse a gphoto:timestamp element from an XML string."""
  return atom.CreateClassFromXMLString(Timestamp, xml_string)
class Truncated(PhotosBaseElement):
  """The Google Photo `Truncated' element

  You'll find this element in searches -- that is, feeds that combine the
  `kind=photo' and `q=yoursearch' parameters in the request.

  See also gphoto:snippet and gphoto:snippettype.

  Possible values and their interpretation:
  0 -- unknown
  """
  # Fixed: was 'Truncated' (capitalized), which did not match the lowercase
  # <gphoto:truncated> element name registered in PhotoData._children and
  # used by every other element class in this module.
  _tag = 'truncated'
def TruncatedFromString(xml_string):
  """Parse a gphoto:truncated element from an XML string."""
  return atom.CreateClassFromXMLString(Truncated, xml_string)
class User(PhotosBaseElement):
  "The Google Photo `User' element"
  _tag = 'user'
def UserFromString(xml_string):
  """Parse a gphoto:user element from an XML string."""
  return atom.CreateClassFromXMLString(User, xml_string)
class Version(PhotosBaseElement):
  "The Google Photo `Version' element"
  _tag = 'version'
def VersionFromString(xml_string):
  """Parse a gphoto:version element from an XML string."""
  return atom.CreateClassFromXMLString(Version, xml_string)
class Width(PhotosBaseElement):
  "The Google Photo `Width' element"
  _tag = 'width'
def WidthFromString(xml_string):
  """Parse a gphoto:width element from an XML string."""
  return atom.CreateClassFromXMLString(Width, xml_string)
class Weight(PhotosBaseElement):
  """The Google Photo `Weight' element.

  The weight of the tag is the number of times the tag
  appears in the collection of tags currently being viewed.
  The default weight is 1, in which case this tags is omitted.
  """
  _tag = 'weight'
def WeightFromString(xml_string):
  """Parse a gphoto:weight element from an XML string."""
  return atom.CreateClassFromXMLString(Weight, xml_string)
class CommentAuthor(atom.Author):
  """The Atom `Author' element in CommentEntry entries is augmented to
  contain elements from the PHOTOS_NAMESPACE

  http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
  """
  # Extend the inherited atom:author child map with the gphoto:* elements
  # Picasa Web adds to comment authors.
  _children = atom.Author._children.copy()
  _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User)
  _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname)
  _children['{%s}thumbnail' % PHOTOS_NAMESPACE] = ('thumbnail', Thumbnail)
def CommentAuthorFromString(xml_string):
  """Parse a CommentAuthor (augmented atom:author) from an XML string."""
  return atom.CreateClassFromXMLString(CommentAuthor, xml_string)
########################## Compound data objects ################################
class AlbumData(object):
  """Mixin mapping album-level gphoto:* child elements onto attributes.

  Shared by AlbumEntry and AlbumFeed.  Each parsed child is exposed as an
  attribute of the same name, except gphoto:id which is stored as
  .gphoto_id to avoid a clash with atom:id.
  """
  _children = {}
  ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id
  _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id)
  _children['{%s}name' % PHOTOS_NAMESPACE] = ('name', Name)
  _children['{%s}location' % PHOTOS_NAMESPACE] = ('location', Location)
  _children['{%s}access' % PHOTOS_NAMESPACE] = ('access', Access)
  _children['{%s}bytesUsed' % PHOTOS_NAMESPACE] = ('bytesUsed', BytesUsed)
  _children['{%s}timestamp' % PHOTOS_NAMESPACE] = ('timestamp', Timestamp)
  _children['{%s}numphotos' % PHOTOS_NAMESPACE] = ('numphotos', Numphotos)
  _children['{%s}numphotosremaining' % PHOTOS_NAMESPACE] = \
    ('numphotosremaining', Numphotosremaining)
  _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User)
  _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname)
  _children['{%s}commentingEnabled' % PHOTOS_NAMESPACE] = \
    ('commentingEnabled', CommentingEnabled)
  _children['{%s}commentCount' % PHOTOS_NAMESPACE] = \
    ('commentCount', CommentCount)
  ## NOTE: storing media:group as self.media, to create a self-explaining api
  # Class-level defaults: each attribute is None until set from parsed XML
  # (or by a subclass __init__).
  gphoto_id = None
  name = None
  location = None
  access = None
  bytesUsed = None
  timestamp = None
  numphotos = None
  numphotosremaining = None
  user = None
  nickname = None
  commentingEnabled = None
  commentCount = None
class AlbumEntry(GPhotosBaseEntry, AlbumData):
  """All metadata for a Google Photos Album

  Take a look at AlbumData for metadata accessible as attributes to this
  object.

  Notes:
    To avoid name clashes, and to create a more sensible api, some
    objects have names that differ from the original elements:

    o media:group -> self.media,
    o geo:where   -> self.geo,
    o photo:id    -> self.gphoto_id
  """
  _kind = 'album'
  _children = GPhotosBaseEntry._children.copy()
  _children.update(AlbumData._children.copy())
  # child tags only for Album entries, not feeds
  _children['{%s}where' % GEORSS_NAMESPACE] = ('geo', Geo.Where)
  _children['{%s}group' % MEDIA_NAMESPACE] = ('media', Media.Group)
  # NOTE(review): class-level defaults are shared mutable instances; __init__
  # below always rebinds per-instance copies -- confirm other construction
  # paths (XML parsing) do the same.
  media = Media.Group()
  geo = Geo.Where()
  def __init__(self, author=None, category=None, content=None,
      atom_id=None, link=None, published=None,
      title=None, updated=None,
      # GPHOTO NAMESPACE:
      gphoto_id=None, name=None, location=None, access=None,
      timestamp=None, numphotos=None, user=None, nickname=None,
      commentingEnabled=None, commentCount=None, thumbnail=None,
      # MEDIA NAMESPACE:
      media=None,
      # GEORSS NAMESPACE:
      geo=None,
      extended_property=None,
      extension_elements=None, extension_attributes=None, text=None):
    GPhotosBaseEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
    ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id
    self.gphoto_id = gphoto_id
    self.name = name
    self.location = location
    self.access = access
    self.timestamp = timestamp
    self.numphotos = numphotos
    self.user = user
    self.nickname = nickname
    self.commentingEnabled = commentingEnabled
    self.commentCount = commentCount
    # NOTE(review): thumbnail is stored, but AlbumData registers no
    # gphoto:thumbnail child element -- confirm it round-trips through XML.
    self.thumbnail = thumbnail
    self.extended_property = extended_property or []
    self.text = text
    ## NOTE: storing media:group as self.media, and geo:where as geo,
    ## to create a self-explaining api
    self.media = media or Media.Group()
    self.geo = geo or Geo.Where()
  def GetAlbumId(self):
    "Return the id of this album"
    return self.GetFeedLink().href.split('/')[-1]
  def GetPhotosUri(self):
    "(string) Return the uri to this albums feed of the PhotoEntry kind"
    return self._feedUri('photo')
  def GetCommentsUri(self):
    "(string) Return the uri to this albums feed of the CommentEntry kind"
    return self._feedUri('comment')
  def GetTagsUri(self):
    "(string) Return the uri to this albums feed of the TagEntry kind"
    return self._feedUri('tag')
def AlbumEntryFromString(xml_string):
  """Parse an AlbumEntry from an XML string."""
  return atom.CreateClassFromXMLString(AlbumEntry, xml_string)
class AlbumFeed(GPhotosBaseFeed, AlbumData):
  """A feed of everything contained in a single Google Photos album.

  The album acts as a container: depending on the `kind' parameter of the
  original query, the feed's entries are PhotoEntry, CommentEntry or
  TagEntry objects.  All album-level metadata described by AlbumData is
  available as attributes on the feed itself.
  """
  _children = GPhotosBaseFeed._children.copy()
  _children.update(AlbumData._children)

  def GetCommentsUri(self):
    """(string) Uri of this same feed, with entries of the CommentEntry kind."""
    return self._feedUri('comment')

  def GetPhotosUri(self):
    """(string) Uri of this same feed, with entries of the PhotoEntry kind."""
    return self._feedUri('photo')

  def GetTagsUri(self):
    """(string) Uri of this same feed, with entries of the TagEntry kind."""
    return self._feedUri('tag')

def AlbumFeedFromString(xml_string):
  """Parse an AlbumFeed from an XML string."""
  return atom.CreateClassFromXMLString(AlbumFeed, xml_string)
class PhotoData(object):
  """Mixin mapping photo-level child elements onto attributes.

  Shared by PhotoEntry and PhotoFeed.  Each parsed child is exposed as an
  attribute of the same name, with the renamings noted inline below.
  """
  _children = {}
  ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id
  _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id)
  _children['{%s}albumid' % PHOTOS_NAMESPACE] = ('albumid', Albumid)
  _children['{%s}checksum' % PHOTOS_NAMESPACE] = ('checksum', Checksum)
  _children['{%s}client' % PHOTOS_NAMESPACE] = ('client', Client)
  _children['{%s}height' % PHOTOS_NAMESPACE] = ('height', Height)
  _children['{%s}position' % PHOTOS_NAMESPACE] = ('position', Position)
  _children['{%s}rotation' % PHOTOS_NAMESPACE] = ('rotation', Rotation)
  _children['{%s}size' % PHOTOS_NAMESPACE] = ('size', Size)
  _children['{%s}timestamp' % PHOTOS_NAMESPACE] = ('timestamp', Timestamp)
  _children['{%s}version' % PHOTOS_NAMESPACE] = ('version', Version)
  _children['{%s}width' % PHOTOS_NAMESPACE] = ('width', Width)
  _children['{%s}commentingEnabled' % PHOTOS_NAMESPACE] = \
    ('commentingEnabled', CommentingEnabled)
  _children['{%s}commentCount' % PHOTOS_NAMESPACE] = \
    ('commentCount', CommentCount)
  ## NOTE: storing media:group as self.media, exif:tags as self.exif, and
  ## geo:where as self.geo, to create a self-explaining api
  _children['{%s}tags' % EXIF_NAMESPACE] = ('exif', Exif.Tags)
  _children['{%s}where' % GEORSS_NAMESPACE] = ('geo', Geo.Where)
  _children['{%s}group' % MEDIA_NAMESPACE] = ('media', Media.Group)
  # These elements show up in search feeds
  _children['{%s}snippet' % PHOTOS_NAMESPACE] = ('snippet', Snippet)
  _children['{%s}snippettype' % PHOTOS_NAMESPACE] = ('snippettype', Snippettype)
  _children['{%s}truncated' % PHOTOS_NAMESPACE] = ('truncated', Truncated)
  # Class-level defaults: None until set from parsed XML (or __init__).
  gphoto_id = None
  albumid = None
  checksum = None
  client = None
  height = None
  position = None
  rotation = None
  size = None
  timestamp = None
  version = None
  width = None
  commentingEnabled = None
  commentCount = None
  snippet=None
  snippettype=None
  truncated=None
  # NOTE(review): shared mutable class-level defaults; PhotoEntry.__init__
  # rebinds media/exif/geo per instance -- confirm parse paths do too.
  media = Media.Group()
  geo = Geo.Where()
  tags = Exif.Tags()
class PhotoEntry(GPhotosBaseEntry, PhotoData):
  """All metadata for a Google Photos Photo

  Take a look at PhotoData for metadata accessible as attributes to this
  object.

  Notes:
    To avoid name clashes, and to create a more sensible api, some
    objects have names that differ from the original elements:

    o media:group -> self.media,
    o exif:tags   -> self.exif,
    o geo:where   -> self.geo,
    o photo:id    -> self.gphoto_id
  """
  _kind = 'photo'
  _children = GPhotosBaseEntry._children.copy()
  _children.update(PhotoData._children.copy())
  def __init__(self, author=None, category=None, content=None,
      atom_id=None, link=None, published=None,
      title=None, updated=None, text=None,
      # GPHOTO NAMESPACE:
      gphoto_id=None, albumid=None, checksum=None, client=None, height=None,
      position=None, rotation=None, size=None, timestamp=None, version=None,
      width=None, commentCount=None, commentingEnabled=None,
      # MEDIARSS NAMESPACE:
      media=None,
      # EXIF_NAMESPACE:
      exif=None,
      # GEORSS NAMESPACE:
      geo=None,
      extension_elements=None, extension_attributes=None):
    GPhotosBaseEntry.__init__(self, author=author, category=category,
                              content=content,
                              atom_id=atom_id, link=link, published=published,
                              title=title, updated=updated, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
    ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id
    self.gphoto_id = gphoto_id
    self.albumid = albumid
    self.checksum = checksum
    self.client = client
    self.height = height
    self.position = position
    self.rotation = rotation
    self.size = size
    self.timestamp = timestamp
    self.version = version
    self.width = width
    self.commentingEnabled = commentingEnabled
    self.commentCount = commentCount
    ## NOTE: storing media:group as self.media, to create a self-explaining api
    self.media = media or Media.Group()
    self.exif = exif or Exif.Tags()
    self.geo = geo or Geo.Where()
  def GetPostLink(self):
    "Return the uri to this photo's `POST' link (use it for updates of the object)"
    return self.GetFeedLink()
  def GetCommentsUri(self):
    "Return the uri to this photo's feed of CommentEntry comments"
    return self._feedUri('comment')
  def GetTagsUri(self):
    "Return the uri to this photo's feed of TagEntry tags"
    return self._feedUri('tag')
  def GetAlbumUri(self):
    """Return the uri to the AlbumEntry containing this photo"""
    href = self.GetSelfLink().href
    # NOTE(review): if '/photoid' is absent, find() returns -1 and this
    # silently drops the last character of href; TagEntry.GetAlbumUri guards
    # this case -- confirm whether the same guard belongs here.
    return href[:href.find('/photoid')]
def PhotoEntryFromString(xml_string):
  """Parse a PhotoEntry from an XML string."""
  return atom.CreateClassFromXMLString(PhotoEntry, xml_string)
class PhotoFeed(GPhotosBaseFeed, PhotoData):
  """A feed of everything contained in a single Google Photos photo.

  The photo acts as a container: depending on the `kind' parameter of the
  original query, the feed's entries are CommentEntry or TagEntry objects.
  All photo metadata described by PhotoData is available as attributes on
  the feed itself.
  """
  _children = GPhotosBaseFeed._children.copy()
  _children.update(PhotoData._children)

  def GetCommentsUri(self):
    """(string) Uri of this same feed, with entries of the CommentEntry kind."""
    return self._feedUri('comment')

  def GetTagsUri(self):
    """(string) Uri of this same feed, with entries of the TagEntry kind."""
    return self._feedUri('tag')

def PhotoFeedFromString(xml_string):
  """Parse a PhotoFeed from an XML string."""
  return atom.CreateClassFromXMLString(PhotoFeed, xml_string)
class TagData(GPhotosBaseData):
  """Mixin for tag metadata, shared by TagEntry and TagFeed."""
  _children = {}
  # gphoto:weight -- how many times the tag occurs in the current view.
  _children['{%s}weight' % PHOTOS_NAMESPACE] = ('weight', Weight)
  weight=None
class TagEntry(GPhotosBaseEntry, TagData):
  """All metadata for a Google Photos Tag

  The actual tag is stored in the .title.text attribute
  """
  _kind = 'tag'
  _children = GPhotosBaseEntry._children.copy()
  _children.update(TagData._children.copy())
  def __init__(self, author=None, category=None, content=None,
      atom_id=None, link=None, published=None,
      title=None, updated=None,
      # GPHOTO NAMESPACE:
      weight=None,
      extended_property=None,
      extension_elements=None, extension_attributes=None, text=None):
    GPhotosBaseEntry.__init__(self, author=author, category=category,
                              content=content,
                              atom_id=atom_id, link=link, published=published,
                              title=title, updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
    self.weight = weight
  def GetAlbumUri(self):
    """Return the uri to the AlbumEntry containing this tag,
    or None if the self link has no '/photoid' segment."""
    href = self.GetSelfLink().href
    pos = href.find('/photoid')
    if pos == -1:
      # Guard: without this, href[:-1] would silently drop a character.
      return None
    return href[:pos]
  def GetPhotoUri(self):
    """Return the uri to the PhotoEntry containing this tag,
    or None if the self link has no '/tag' segment."""
    href = self.GetSelfLink().href
    pos = href.find('/tag')
    if pos == -1:
      return None
    return href[:pos]
def TagEntryFromString(xml_string):
  """Parse a TagEntry from an XML string."""
  return atom.CreateClassFromXMLString(TagEntry, xml_string)
class TagFeed(GPhotosBaseFeed, TagData):
  """All metadata for a Google Photos Tag, including its sub-elements"""
  _children = GPhotosBaseFeed._children.copy()
  _children.update(TagData._children.copy())
def TagFeedFromString(xml_string):
  """Parse a TagFeed from an XML string."""
  return atom.CreateClassFromXMLString(TagFeed, xml_string)
class CommentData(GPhotosBaseData):
  """Mixin for comment metadata, shared by CommentEntry and CommentFeed."""
  _children = {}
  ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id
  _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id)
  _children['{%s}albumid' % PHOTOS_NAMESPACE] = ('albumid', Albumid)
  _children['{%s}photoid' % PHOTOS_NAMESPACE] = ('photoid', Photoid)
  # atom:author is overridden to parse the gphoto-augmented CommentAuthor;
  # the list wrapper presumably marks it as a repeated element -- see the
  # atom module's child-handling conventions.
  _children['{%s}author' % atom.ATOM_NAMESPACE] = ('author', [CommentAuthor,])
  gphoto_id=None
  albumid=None
  photoid=None
  author=None
class CommentEntry(GPhotosBaseEntry, CommentData):
  """All metadata for a Google Photos Comment

  The comment is stored in the .content.text attribute,
  with a content type in .content.type.
  """
  _kind = 'comment'
  _children = GPhotosBaseEntry._children.copy()
  _children.update(CommentData._children.copy())
  def __init__(self, author=None, category=None, content=None,
      atom_id=None, link=None, published=None,
      title=None, updated=None,
      # GPHOTO NAMESPACE:
      gphoto_id=None, albumid=None, photoid=None,
      extended_property=None,
      extension_elements=None, extension_attributes=None, text=None):
    GPhotosBaseEntry.__init__(self, author=author, category=category,
                              content=content,
                              atom_id=atom_id, link=link, published=published,
                              title=title, updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes,
                              text=text)
    ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id
    self.gphoto_id = gphoto_id
    self.albumid = albumid
    self.photoid = photoid
  def GetCommentId(self):
    """Return the globally unique id of this comment"""
    return self.GetSelfLink().href.split('/')[-1]
  def GetAlbumUri(self):
    """Return the uri to the AlbumEntry containing this comment,
    or None if the self link has no '/photoid' segment."""
    href = self.GetSelfLink().href
    pos = href.find('/photoid')
    if pos == -1:
      # Guard against str.find() returning -1, which previously made the
      # slice href[:-1] silently drop the last character of the href.
      # This matches the guarded pattern already used by TagEntry.
      return None
    return href[:pos]
  def GetPhotoUri(self):
    """Return the uri to the PhotoEntry containing this comment,
    or None if the self link has no '/commentid' segment."""
    href = self.GetSelfLink().href
    pos = href.find('/commentid')
    if pos == -1:
      # Same guard as GetAlbumUri above.
      return None
    return href[:pos]
def CommentEntryFromString(xml_string):
  """Parse a CommentEntry from an XML string."""
  return atom.CreateClassFromXMLString(CommentEntry, xml_string)
class CommentFeed(GPhotosBaseFeed, CommentData):
  """All metadata for a Google Photos Comment, including its sub-elements"""
  _children = GPhotosBaseFeed._children.copy()
  _children.update(CommentData._children.copy())
def CommentFeedFromString(xml_string):
  """Parse a CommentFeed from an XML string."""
  return atom.CreateClassFromXMLString(CommentFeed, xml_string)
class UserData(GPhotosBaseData):
  """Mixin for user (album owner) metadata, shared by UserEntry/UserFeed."""
  _children = {}
  _children['{%s}maxPhotosPerAlbum' % PHOTOS_NAMESPACE] = ('maxPhotosPerAlbum', MaxPhotosPerAlbum)
  _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname)
  _children['{%s}quotalimit' % PHOTOS_NAMESPACE] = ('quotalimit', Quotalimit)
  _children['{%s}quotacurrent' % PHOTOS_NAMESPACE] = ('quotacurrent', Quotacurrent)
  _children['{%s}thumbnail' % PHOTOS_NAMESPACE] = ('thumbnail', Thumbnail)
  _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User)
  # gphoto:id stored as .gphoto_id to avoid a clash with atom:id.
  _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id)
  maxPhotosPerAlbum=None
  nickname=None
  quotalimit=None
  quotacurrent=None
  thumbnail=None
  user=None
  gphoto_id=None
class UserEntry(GPhotosBaseEntry, UserData):
  """All metadata for a Google Photos User

  This entry represents an album owner and all appropriate metadata.

  Take a look at the attributes of the UserData for metadata available.
  """
  _children = GPhotosBaseEntry._children.copy()
  _children.update(UserData._children.copy())
  _kind = 'user'
  def __init__(self, author=None, category=None, content=None,
      atom_id=None, link=None, published=None,
      title=None, updated=None,
      # GPHOTO NAMESPACE:
      gphoto_id=None, maxPhotosPerAlbum=None, nickname=None, quotalimit=None,
      quotacurrent=None, thumbnail=None, user=None,
      extended_property=None,
      extension_elements=None, extension_attributes=None, text=None):
    GPhotosBaseEntry.__init__(self, author=author, category=category,
                              content=content,
                              atom_id=atom_id, link=link, published=published,
                              title=title, updated=updated,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes,
                              text=text)
    # gphoto:id stored as .gphoto_id to avoid a clash with atom:id.
    self.gphoto_id=gphoto_id
    self.maxPhotosPerAlbum=maxPhotosPerAlbum
    self.nickname=nickname
    self.quotalimit=quotalimit
    self.quotacurrent=quotacurrent
    self.thumbnail=thumbnail
    self.user=user
  def GetAlbumsUri(self):
    "(string) Return the uri to this user's feed of the AlbumEntry kind"
    return self._feedUri('album')
  def GetPhotosUri(self):
    "(string) Return the uri to this user's feed of the PhotoEntry kind"
    return self._feedUri('photo')
  def GetCommentsUri(self):
    "(string) Return the uri to this user's feed of the CommentEntry kind"
    return self._feedUri('comment')
  def GetTagsUri(self):
    "(string) Return the uri to this user's feed of the TagEntry kind"
    return self._feedUri('tag')
def UserEntryFromString(xml_string):
  """Parse a UserEntry from an XML string."""
  return atom.CreateClassFromXMLString(UserEntry, xml_string)
class UserFeed(GPhotosBaseFeed, UserData):
  """Feed for a User in the google photos api.

  The user acts as a container: depending on the `kind' parameter of the
  original query, the feed's entries are AlbumEntry, PhotoEntry,
  CommentEntry, UserEntry or TagEntry objects.  The feed itself also
  carries all of the metadata available as part of a UserData object.
  """
  _children = GPhotosBaseFeed._children.copy()
  _children.update(UserData._children)

  def GetAlbumsUri(self):
    """Uri of this same feed, with entries of the AlbumEntry kind."""
    return self._feedUri('album')

  def GetCommentsUri(self):
    """Uri of this same feed, with entries of the CommentEntry kind."""
    return self._feedUri('comment')

  def GetPhotosUri(self):
    """Uri of this same feed, with entries of the PhotoEntry kind."""
    return self._feedUri('photo')

  def GetTagsUri(self):
    """Uri of this same feed, with entries of the TagEntry kind."""
    return self._feedUri('tag')

def UserFeedFromString(xml_string):
  """Parse a UserFeed from an XML string."""
  return atom.CreateClassFromXMLString(UserFeed, xml_string)
def AnyFeedFromString(xml_string):
  """Creates an instance of the appropriate feed class from the
  xml string contents.

  Args:
    xml_string: str A string which contains valid XML. The root element
        of the XML string should match the tag and namespace of the desired
        class.

  Returns:
    An instance of the target class with members assigned according to the
    contents of the XML - or a basic gdata.GDataFeed instance if it is
    impossible to determine the appropriate class (look for extra elements
    in GDataFeed's .FindExtensions() and extension_elements[] ).
  """
  tree = ElementTree.fromstring(xml_string)
  category = tree.find('{%s}category' % atom.ATOM_NAMESPACE)
  if category is None:
    # No kind category present; fall back to the generic base feed.
    return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree)
  term = category.get('term')
  if term is None or '#' not in term:
    # Malformed or missing category term; previously this raised
    # AttributeError/ValueError -- fall back to the generic base feed.
    return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree)
  namespace, kind = term.split('#', 1)
  if namespace != PHOTOS_NAMESPACE:
    return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree)
  # Resolve e.g. 'album' -> gdata.photos.AlbumFeed.  Unknown kinds fall
  # back to the generic base feed instead of raising AttributeError.
  feed_class = getattr(gdata.photos, '%sFeed' % kind.title(), GPhotosBaseFeed)
  return atom._CreateClassFromElementTree(feed_class, tree)
def AnyEntryFromString(xml_string):
  """Creates an instance of the appropriate entry class from the
  xml string contents.

  Args:
    xml_string: str A string which contains valid XML. The root element
        of the XML string should match the tag and namespace of the desired
        class.

  Returns:
    An instance of the target class with members assigned according to the
    contents of the XML - or a basic gdata.GDataEntry instance if it is
    impossible to determine the appropriate class (look for extra elements
    in GDataEntry's .FindExtensions() and extension_elements[] ).
  """
  tree = ElementTree.fromstring(xml_string)
  category = tree.find('{%s}category' % atom.ATOM_NAMESPACE)
  if category is None:
    # No kind category present; fall back to the generic base entry.
    return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree)
  term = category.get('term')
  if term is None or '#' not in term:
    # Malformed or missing category term; previously this raised
    # AttributeError/ValueError -- fall back to the generic base entry.
    return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree)
  namespace, kind = term.split('#', 1)
  if namespace != PHOTOS_NAMESPACE:
    return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree)
  # Resolve e.g. 'photo' -> gdata.photos.PhotoEntry.  Unknown kinds fall
  # back to the generic base entry instead of raising AttributeError.
  entry_class = getattr(gdata.photos, '%sEntry' % kind.title(),
                        GPhotosBaseEntry)
  return atom._CreateClassFromElementTree(entry_class, tree)
# ---- file boundary: the following is gdata/photos/service.py (see $Id header below) ----
#!/usr/bin/env python
# -*-*- encoding: utf-8 -*-*-
#
# This is the service file for the Google Photo python client.
# It is used for higher level operations.
#
# $Id: service.py 144 2007-10-25 21:03:34Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google PhotoService provides a human-friendly interface to
Google Photo (a.k.a Picasa Web) services[1].
It extends gdata.service.GDataService and as such hides all the
nasty details about authenticating, parsing and communicating with
Google Photos.
[1]: http://code.google.com/apis/picasaweb/gdata.html
Example:
import gdata.photos, gdata.photos.service
pws = gdata.photos.service.PhotosService()
pws.ClientLogin(username, password)
#Get all albums
albums = pws.GetUserFeed().entry
# Get all photos in second album
photos = pws.GetFeed(albums[1].GetPhotosUri()).entry
# Get all tags for photos in second album and print them
tags = pws.GetFeed(albums[1].GetTagsUri()).entry
print [ tag.summary.text for tag in tags ]
# Get all comments for the first photos in list and print them
comments = pws.GetCommentFeed(photos[0].GetCommentsUri()).entry
print [ c.summary.text for c in comments ]
# Get a photo to work with
photo = photos[0]
# Update metadata
# Attributes from the <gphoto:*> namespace
photo.summary.text = u'A nice view from my veranda'
photo.title.text = u'Verandaview.jpg'
# Attributes from the <media:*> namespace
photo.media.keywords.text = u'Home, Long-exposure, Sunset' # Comma-separated
# Adding attributes to media object
# Rotate 90 degrees clockwise
photo.rotation = gdata.photos.Rotation(text='90')
# Submit modified photo object
photo = pws.UpdatePhotoMetadata(photo)
# Make sure you only modify the newly returned object, else you'll get
# versioning errors. See Optimistic-concurrency
# Add comment to a picture
comment = pws.InsertComment(photo, u'I wish the water always was this warm')
# Remove comment because it was silly
print "*blush*"
pws.Delete(comment.GetEditLink().href)
"""
__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
__version__ = '$Revision: 176 $'[11:-2]
import sys, os.path, StringIO
import time
import gdata.service
import gdata
import atom.service
import atom
import gdata.photos
# Image formats accepted for upload.
SUPPORTED_UPLOAD_TYPES = ('bmp', 'jpeg', 'jpg', 'gif', 'png')
# Error codes used by GooglePhotosException.
# NOTE(review): 'UNKOWN' is a typo for 'UNKNOWN', kept as-is because the
# name is part of the module's public interface.
UNKOWN_ERROR=1000
# 4xx/5xx codes mirror the HTTP status returned by the service.
GPHOTOS_BAD_REQUEST=400
GPHOTOS_CONFLICT=409
GPHOTOS_INTERNAL_SERVER_ERROR=500
# 6xx codes are module-specific, decoded from known error message bodies.
GPHOTOS_INVALID_ARGUMENT=601
GPHOTOS_INVALID_CONTENT_TYPE=602
GPHOTOS_NOT_AN_IMAGE=603
GPHOTOS_INVALID_KIND=604
class GooglePhotosException(Exception):
  """Error raised for failed requests against the Google Photos service.

  Attributes (set in __init__):
    error_code: http status, possibly remapped to a GPHOTOS_* code when
        the response body matches a known error message
    reason: stripped reason string from the http response
    body: stripped response body, with generic html pages discarded
    message: human-readable summary of status/body/reason
  """
  def __init__(self, response):
    """Extract error details from an http response dict.

    Args:
      response: dict with `status', `reason' and `body' keys (the payload
          of a gdata.service.RequestError).
    """
    self.error_code = response['status']
    self.reason = response['reason'].strip()
    if '<html>' in str(response['body']): #general html message, discard it
      response['body'] = ""
    self.body = response['body'].strip()
    self.message = "(%(status)s) %(body)s -- %(reason)s" % response
    #return explicit error codes
    # Remap known service error messages onto the module's GPHOTOS_* codes.
    error_map = { '(12) Not an image':GPHOTOS_NOT_AN_IMAGE,
                  'kind: That is not one of the acceptable values':
                      GPHOTOS_INVALID_KIND,
                }
    for msg, code in error_map.iteritems():
      if self.body == msg:
        self.error_code = code
        break
    # Populate Exception.args so str()/unpacking expose the details.
    self.args = [self.error_code, self.reason, self.body]
class PhotosService(gdata.service.GDataService):
userUri = '/data/feed/api/user/%s'
  def __init__(self, email=None, password=None,
               source=None, server='picasaweb.google.com', additional_headers=None):
    """ GooglePhotosService constructor.

    Arguments:
      email: string (optional) The e-mail address of the account to use for
             authentication.
      password: string (optional) The password of the account to use for
                authentication.
      source: string (optional) The name of the user's application.
      server: string (optional) The server the feed is hosted on.
      additional_headers: dict (optional) Any additional HTTP headers to be
                          transmitted to the service in the form of key-value
                          pairs.

    Returns:
      A PhotosService object used to communicate with the Google Photos
      service.
    """
    self.email = email
    # Keep the application name around; presumably used as the client
    # identifier on requests -- confirm against upload code paths.
    self.client = source
    # 'lh2' is the ClientLogin service id for Picasa Web Albums.
    gdata.service.GDataService.__init__(self, email=self.email, password=password,
                                        service='lh2', source=source,
                                        server=server,
                                        additional_headers=additional_headers)
def GetFeed(self, uri, limit=None, start_index=None):
"""Get a feed.
The results are ordered by the values of their `updated' elements,
with the most recently updated entry appearing first in the feed.
Arguments:
uri: the uri to fetch
limit (optional): the maximum number of entries to return. Defaults to what
the server returns.
Returns:
one of gdata.photos.AlbumFeed,
gdata.photos.UserFeed,
gdata.photos.PhotoFeed,
gdata.photos.CommentFeed,
gdata.photos.TagFeed,
depending on the results of the query.
Raises:
GooglePhotosException
See:
http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
"""
if limit is not None:
uri += '&max-results=%s' % limit
if start_index is not None:
uri += '&start-index=%s' % start_index
try:
return self.Get(uri, converter=gdata.photos.AnyFeedFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def GetEntry(self, uri, limit=None, start_index=None):
"""Get an Entry.
Arguments:
uri: the uri to the entry
limit (optional): the maximum number of entries to return. Defaults to what
the server returns.
Returns:
one of gdata.photos.AlbumEntry,
gdata.photos.UserEntry,
gdata.photos.PhotoEntry,
gdata.photos.CommentEntry,
gdata.photos.TagEntry,
depending on the results of the query.
Raises:
GooglePhotosException
"""
if limit is not None:
uri += '&max-results=%s' % limit
if start_index is not None:
uri += '&start-index=%s' % start_index
try:
return self.Get(uri, converter=gdata.photos.AnyEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def GetUserFeed(self, kind='album', user='default', limit=None):
"""Get user-based feed, containing albums, photos, comments or tags;
defaults to albums.
The entries are ordered by the values of their `updated' elements,
with the most recently updated entry appearing first in the feed.
Arguments:
kind: the kind of entries to get, either `album', `photo',
`comment' or `tag', or a python list of these. Defaults to `album'.
user (optional): whose albums we're querying. Defaults to current user.
limit (optional): the maximum number of entries to return.
Defaults to everything the server returns.
Returns:
gdata.photos.UserFeed, containing appropriate Entry elements
See:
http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
http://googledataapis.blogspot.com/2007/07/picasa-web-albums-adds-new-api-features.html
"""
if isinstance(kind, (list, tuple) ):
kind = ",".join(kind)
uri = '/data/feed/api/user/%s?kind=%s' % (user, kind)
return self.GetFeed(uri, limit=limit)
def GetTaggedPhotos(self, tag, user='default', limit=None):
"""Get all photos belonging to a specific user, tagged by the given keyword
Arguments:
tag: The tag you're looking for, e.g. `dog'
user (optional): Whose images/videos you want to search, defaults
to current user
limit (optional): the maximum number of entries to return.
Defaults to everything the server returns.
Returns:
gdata.photos.UserFeed containing PhotoEntry elements
"""
# Lower-casing because of
# http://code.google.com/p/gdata-issues/issues/detail?id=194
uri = '/data/feed/api/user/%s?kind=photo&tag=%s' % (user, tag.lower())
return self.GetFeed(uri, limit)
def SearchUserPhotos(self, query, user='default', limit=100):
"""Search through all photos for a specific user and return a feed.
This will look for matches in file names and image tags (a.k.a. keywords)
Arguments:
query: The string you're looking for, e.g. `vacation'
user (optional): The username of whose photos you want to search, defaults
to current user.
limit (optional): Don't return more than `limit' hits, defaults to 100
Only public photos are searched, unless you are authenticated and
searching through your own photos.
Returns:
gdata.photos.UserFeed with PhotoEntry elements
"""
uri = '/data/feed/api/user/%s?kind=photo&q=%s' % (user, query)
return self.GetFeed(uri, limit=limit)
def SearchCommunityPhotos(self, query, limit=100):
"""Search through all public photos and return a feed.
This will look for matches in file names and image tags (a.k.a. keywords)
Arguments:
query: The string you're looking for, e.g. `vacation'
limit (optional): Don't return more than `limit' hits, defaults to 100
Returns:
gdata.GDataFeed with PhotoEntry elements
"""
uri='/data/feed/api/all?q=%s' % query
return self.GetFeed(uri, limit=limit)
def GetContacts(self, user='default', limit=None):
"""Retrieve a feed that contains a list of your contacts
Arguments:
user: Username of the user whose contacts you want
Returns
gdata.photos.UserFeed, with UserEntry entries
See:
http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
"""
uri = '/data/feed/api/user/%s/contacts?kind=user' % user
return self.GetFeed(uri, limit=limit)
def SearchContactsPhotos(self, user='default', search=None, limit=None):
"""Search over your contacts' photos and return a feed
Arguments:
user: Username of the user whose contacts you want
search (optional): What to search for (photo title, description and keywords)
Returns
gdata.photos.UserFeed, with PhotoEntry elements
See:
http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
"""
uri = '/data/feed/api/user/%s/contacts?kind=photo&q=%s' % (user, search)
return self.GetFeed(uri, limit=limit)
  def InsertAlbum(self, title, summary, location=None, access='public',
      commenting_enabled='true', timestamp=None):
    """Add an album.

    Needs authentication, see self.ClientLogin()

    Arguments:
      title: Album title
      summary: Album summary / description
      location (optional): Location of the album contents, e.g. `Oslo'.
      access (optional): `private' or `public'. Public albums are searchable
        by everyone on the internet. Defaults to `public'
      commenting_enabled (optional): `true' or `false'. Defaults to `true'.
        Any other value is silently dropped, leaving the server default.
      timestamp (optional): A date and time for the album, in milliseconds
        since Unix epoch[1] UTC. Defaults to now.

    Returns:
      The newly created gdata.photos.AlbumEntry

    Raises:
      GooglePhotosException on request errors.

    See:
      http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed

    [1]: http://en.wikipedia.org/wiki/Unix_epoch
    """
    album = gdata.photos.AlbumEntry()
    album.title = atom.Title(text=title, title_type='text')
    album.summary = atom.Summary(text=summary, summary_type='text')
    if location is not None:
      album.location = gdata.photos.Location(text=location)
    album.access = gdata.photos.Access(text=access)
    # Only the two well-formed values are forwarded to the server.
    if commenting_enabled in ('true', 'false'):
      album.commentingEnabled = gdata.photos.CommentingEnabled(text=commenting_enabled)
    if timestamp is None:
      # Current time, converted to whole milliseconds since the epoch.
      timestamp = '%i' % int(time.time() * 1000)
    album.timestamp = gdata.photos.Timestamp(text=timestamp)
    try:
      return self.Post(album, uri=self.userUri % self.email,
          converter=gdata.photos.AlbumEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])
  def InsertPhoto(self, album_or_uri, photo, filename_or_handle,
      content_type='image/jpeg'):
    """Add a PhotoEntry.

    Needs authentication, see self.ClientLogin()

    Arguments:
      album_or_uri: AlbumFeed or uri of the album where the photo should go
      photo: PhotoEntry to add
      filename_or_handle: A file-like object or file name where the
        image/video will be read from
      content_type (optional): Internet media type (a.k.a. mime type) of
        media object. Currently Google Photos supports these types:
          o image/bmp
          o image/gif
          o image/jpeg
          o image/png

        Images will be converted to jpeg on upload. Defaults to `image/jpeg'

    Returns:
      The newly created gdata.photos.PhotoEntry

    Raises:
      GooglePhotosException if `photo` is not a PhotoEntry, the content
      type is unsupported, `filename_or_handle` is invalid, or the
      request fails.
    """
    try:
      assert(isinstance(photo, gdata.photos.PhotoEntry))
    except AssertionError:
      raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
        'body':'`photo` must be a gdata.photos.PhotoEntry instance',
        'reason':'Found %s, not PhotoEntry' % type(photo)
        })
    try:
      # Validate the minor type against the supported upload formats.
      majtype, mintype = content_type.split('/')
      assert(mintype in SUPPORTED_UPLOAD_TYPES)
    except (ValueError, AssertionError):
      raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE,
        'body':'This is not a valid content type: %s' % content_type,
        'reason':'Accepted content types: %s' % \
          ['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
        })
    if isinstance(filename_or_handle, (str, unicode)) and \
      os.path.exists(filename_or_handle): # it's a file name
      mediasource = gdata.MediaSource()
      mediasource.setFile(filename_or_handle, content_type)
    elif hasattr(filename_or_handle, 'read'):# it's a file-like resource
      if hasattr(filename_or_handle, 'seek'):
        filename_or_handle.seek(0) # rewind pointer to the start of the file
      # gdata.MediaSource needs the content length, so read the whole image
      file_handle = StringIO.StringIO(filename_or_handle.read())
      name = 'image'
      if hasattr(filename_or_handle, 'name'):
        name = filename_or_handle.name
      mediasource = gdata.MediaSource(file_handle, content_type,
          content_length=file_handle.len, file_name=name)
    else: #filename_or_handle is not valid
      raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
        'body':'`filename_or_handle` must be a path name or a file-like object',
        'reason':'Found %s, not path name or object with a .read() method' % \
          type(filename_or_handle)
        })
    # NOTE(review): if album_or_uri is neither a string nor has
    # GetFeedLink(), `feed_uri` is left unbound and the Post below raises
    # NameError -- confirm callers always pass one of the supported forms.
    if isinstance(album_or_uri, (str, unicode)): # it's a uri
      feed_uri = album_or_uri
    elif hasattr(album_or_uri, 'GetFeedLink'): # it's a AlbumFeed object
      feed_uri = album_or_uri.GetFeedLink().href
    try:
      return self.Post(photo, uri=feed_uri, media_source=mediasource,
          converter=gdata.photos.PhotoEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])
def InsertPhotoSimple(self, album_or_uri, title, summary, filename_or_handle,
content_type='image/jpeg', keywords=None):
"""Add a photo without constructing a PhotoEntry.
Needs authentication, see self.ClientLogin()
Arguments:
album_or_uri: AlbumFeed or uri of the album where the photo should go
title: Photo title
summary: Photo summary / description
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
keywords (optional): a 1) comma separated string or 2) a python list() of
keywords (a.k.a. tags) to add to the image.
E.g. 1) `dog, vacation, happy' 2) ['dog', 'happy', 'vacation']
Returns:
The newly created gdata.photos.PhotoEntry or GooglePhotosException on errors
See:
http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed
[1]: http://en.wikipedia.org/wiki/Unix_epoch
"""
metadata = gdata.photos.PhotoEntry()
metadata.title=atom.Title(text=title)
metadata.summary = atom.Summary(text=summary, summary_type='text')
if keywords is not None:
if isinstance(keywords, list):
keywords = ','.join(keywords)
metadata.media.keywords = gdata.media.Keywords(text=keywords)
return self.InsertPhoto(album_or_uri, metadata, filename_or_handle,
content_type)
  def UpdatePhotoMetadata(self, photo):
    """Update a photo's metadata.

    Needs authentication, see self.ClientLogin()

    You can update any or all of the following metadata properties:
     * <title>
     * <media:description>
     * <gphoto:checksum>
     * <gphoto:client>
     * <gphoto:rotation>
     * <gphoto:timestamp>
     * <gphoto:commentingEnabled>

    Arguments:
      photo: a gdata.photos.PhotoEntry object with updated elements

    Returns:
      The modified gdata.photos.PhotoEntry

    Raises:
      GooglePhotosException on request errors.

    Example:
      p = GetFeed(uri).entry[0]
      p.title.text = u'My new text'
      p.commentingEnabled.text = 'false'
      p = UpdatePhotoMetadata(p)

    It is important that you don't keep the old object around, once
    it has been updated. See
    http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
    """
    try:
      # PUT against the entry's edit link performs the metadata update.
      return self.Put(data=photo, uri=photo.GetEditLink().href,
          converter=gdata.photos.PhotoEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])
def UpdatePhotoBlob(self, photo_or_uri, filename_or_handle,
content_type = 'image/jpeg'):
"""Update a photo's binary data.
Needs authentication, see self.ClientLogin()
Arguments:
photo_or_uri: a gdata.photos.PhotoEntry that will be updated, or a
`edit-media' uri pointing to it
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
Returns:
The modified gdata.photos.PhotoEntry
Example:
p = GetFeed(PhotoUri)
p = UpdatePhotoBlob(p, '/tmp/newPic.jpg')
It is important that you don't keep the old object around, once
it has been updated. See
http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
"""
try:
majtype, mintype = content_type.split('/')
assert(mintype in SUPPORTED_UPLOAD_TYPES)
except (ValueError, AssertionError):
raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE,
'body':'This is not a valid content type: %s' % content_type,
'reason':'Accepted content types: %s' % \
['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
})
if isinstance(filename_or_handle, (str, unicode)) and \
os.path.exists(filename_or_handle): # it's a file name
photoblob = gdata.MediaSource()
photoblob.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):# it's a file-like resource
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0) # rewind pointer to the start of the file
# gdata.MediaSource needs the content length, so read the whole image
file_handle = StringIO.StringIO(filename_or_handle.read())
name = 'image'
if hasattr(filename_or_handle, 'name'):
name = filename_or_handle.name
mediasource = gdata.MediaSource(file_handle, content_type,
content_length=file_handle.len, file_name=name)
else: #filename_or_handle is not valid
raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
'body':'`filename_or_handle` must be a path name or a file-like object',
'reason':'Found %s, not path name or an object with .read() method' % \
type(filename_or_handle)
})
if isinstance(photo_or_uri, (str, unicode)):
entry_uri = photo_or_uri # it's a uri
elif hasattr(photo_or_uri, 'GetEditMediaLink'):
entry_uri = photo_or_uri.GetEditMediaLink().href
try:
return self.Put(photoblob, entry_uri,
converter=gdata.photos.PhotoEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
  def InsertTag(self, photo_or_uri, tag):
    """Add a tag (a.k.a. keyword) to a photo.

    Needs authentication, see self.ClientLogin()

    Arguments:
      photo_or_uri: a gdata.photos.PhotoEntry that will be tagged, or a
        `post' uri pointing to it
      (string) tag: The tag/keyword

    Returns:
      The new gdata.photos.TagEntry

    Raises:
      GooglePhotosException on request errors.

    Example:
      p = GetFeed(PhotoUri)
      tag = InsertTag(p, 'Beautiful sunsets')
    """
    tag = gdata.photos.TagEntry(title=atom.Title(text=tag))
    if isinstance(photo_or_uri, (str, unicode)):
      post_uri = photo_or_uri # it's a uri
    elif hasattr(photo_or_uri, 'GetEditMediaLink'):
      # NOTE(review): the hasattr probe checks GetEditMediaLink but the post
      # uri is taken from GetPostLink() -- looks unintentional; confirm it
      # holds for every entry type passed here.
      post_uri = photo_or_uri.GetPostLink().href
    try:
      return self.Post(data=tag, uri=post_uri,
          converter=gdata.photos.TagEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])
  def InsertComment(self, photo_or_uri, comment):
    """Add a comment to a photo.

    Needs authentication, see self.ClientLogin()

    Arguments:
      photo_or_uri: a gdata.photos.PhotoEntry that is about to be commented,
        or a `post' uri pointing to it
      (string) comment: The actual comment

    Returns:
      The new gdata.photos.CommentEntry

    Raises:
      GooglePhotosException on request errors.

    Example:
      p = GetFeed(PhotoUri)
      comment = InsertComment(p, "I would have loved to be there!")
    """
    comment = gdata.photos.CommentEntry(content=atom.Content(text=comment))
    if isinstance(photo_or_uri, (str, unicode)):
      post_uri = photo_or_uri # it's a uri
    elif hasattr(photo_or_uri, 'GetEditMediaLink'):
      # NOTE(review): probes GetEditMediaLink but reads GetPostLink() --
      # same pattern as InsertTag; confirm intentional.
      post_uri = photo_or_uri.GetPostLink().href
    try:
      return self.Post(data=comment, uri=post_uri,
          converter=gdata.photos.CommentEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])
  def Delete(self, object_or_uri, *args, **kwargs):
    """Delete an object.

    Re-implementing the GDataService.Delete method, to add some
    convenience.

    Arguments:
      object_or_uri: Any object that has a GetEditLink() method that
        returns a link, or a uri to that object.

    Returns:
      The result of gdata.service.GDataService.Delete.

    Raises:
      GooglePhotosException on request errors.
    """
    try:
      uri = object_or_uri.GetEditLink().href
    except AttributeError:
      # Not an entry-like object; assume it is already a uri string.
      uri = object_or_uri
    try:
      return gdata.service.GDataService.Delete(self, uri, *args, **kwargs)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])
def GetSmallestThumbnail(media_thumbnail_list):
  """Return the thumbnail with the smallest area from a list of
  gdata.media.Thumbnail.

  Arguments:
    media_thumbnail_list: non-empty list of thumbnail objects whose
      `width` and `height` attributes hold numeric strings.

  Returns:
    gdata.media.Thumbnail
  """
  thumbs_by_area = {}
  for thumb in media_thumbnail_list:
    thumbs_by_area[int(thumb.width) * int(thumb.height)] = thumb
  # min() over the keys replaces the old sort-then-index idiom (which also
  # broke on Python 3, where dict.keys() has no .sort()). On equal areas
  # the last thumbnail still wins, as before.
  return thumbs_by_area[min(thumbs_by_area)]
def ConvertAtomTimestampToEpoch(timestamp):
  """Helper function to convert a timestamp string, for instance
  from atom:updated or atom:published, to seconds since Unix epoch
  (a.k.a. POSIX time).

  Note: time.mktime() returns *seconds* (as a float), interpreted in the
  local timezone -- not milliseconds as older docs claimed.

  `2007-07-22T00:45:10.000Z' -> a float of seconds since the epoch.
  """
  return time.mktime(time.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.000Z'))
  ## TODO: Timezone aware
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes representing Google Data elements.
Extends Atom classes to add Google Data specific elements.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import os
import atom
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
# XML namespaces which are often used in GData entities.
GDATA_NAMESPACE = 'http://schemas.google.com/g/2005'
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/'
OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
# Namespace for GData batch request/response elements.
BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch'
# Namespace for Google Access Control List (ACL) elements.
GACL_NAMESPACE = 'http://schemas.google.com/acl/2007'
GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s'

# Labels used in batch request entries to specify the desired CRUD operation.
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'
class Error(Exception):
  """Base exception class for errors raised by this module."""
  pass
class MissingRequiredParameters(Error):
  """Raised when an operation is attempted without a required parameter."""
  pass
class MediaSource(object):
  """Holds a reference to a media file plus the metadata needed to send it.

  GData Entries can refer to media sources, so this class provides a
  place to store references to these objects along with some metadata.
  """

  def __init__(self, file_handle=None, content_type=None, content_length=None,
      file_path=None, file_name=None):
    """Creates an object of type MediaSource.

    Args:
      file_handle: A file handle pointing to the file to be encapsulated
          in the MediaSource.
      content_type: string The MIME type of the file. Required if a
          file_handle is given.
      content_length: int The size of the file. Required if a file_handle
          is given.
      file_path: string (optional) A full path name to the file. Used in
          place of a file_handle.
      file_name: string The name of the file without any path information.
          Required if a file_handle is given.
    """
    self.file_handle = file_handle
    self.content_type = content_type
    self.content_length = content_length
    self.file_name = file_name
    # When no open handle was supplied but a path and MIME type were,
    # open the file and derive length and name from the path.
    no_handle_but_path = (file_handle is None and content_type is not None
        and file_path is not None)
    if no_handle_but_path:
      self.setFile(file_path, content_type)

  def setFile(self, file_name, content_type):
    """A helper function which can create a file handle from a given filename
    and set the content type and length all at once.

    Args:
      file_name: string The path and file name to the file containing the
          media.
      content_type: string A MIME type representing the type of the media.
    """
    self.file_handle = open(file_name, 'rb')
    self.content_type = content_type
    self.content_length = os.path.getsize(file_name)
    self.file_name = os.path.basename(file_name)
class LinkFinder(atom.LinkFinder):
  """An "interface" providing methods to find link elements.

  GData Entry elements often contain multiple links which differ in the
  rel attribute or content type. Often, developers are interested in a
  specific type of link so this class provides methods to find specific
  classes of links.

  This class is used as a mixin in GData entries.
  """

  def _FindFirstLinkWithRel(self, rel_values):
    """Return the first link whose rel is in rel_values, or None."""
    for candidate in self.link:
      if candidate.rel in rel_values:
        return candidate
    return None

  def GetSelfLink(self):
    """Find the first link with rel set to 'self'.

    Returns:
      An atom.Link or None if none of the links had rel equal to 'self'.
    """
    return self._FindFirstLinkWithRel(('self',))

  def GetEditLink(self):
    """Return the first link with rel 'edit', or None."""
    return self._FindFirstLinkWithRel(('edit',))

  def GetEditMediaLink(self):
    """The Picasa API mistakenly returns media-edit rather than edit-media,
    but this may change soon. Both rel values are accepted.
    """
    return self._FindFirstLinkWithRel(('edit-media', 'media-edit'))

  def GetHtmlLink(self):
    """Find the first link with rel of alternate and type of text/html.

    Returns:
      An atom.Link or None if no links matched.
    """
    for candidate in self.link:
      if candidate.rel == 'alternate' and candidate.type == 'text/html':
        return candidate
    return None

  def GetPostLink(self):
    """Get a link containing the POST target URL.

    The POST target URL is used to insert new entries.

    Returns:
      A link object with a rel matching the POST type, or None.
    """
    return self._FindFirstLinkWithRel(
        ('http://schemas.google.com/g/2005#post',))

  def GetAclLink(self):
    """Return the link to the entry's access control list, or None."""
    return self._FindFirstLinkWithRel(
        ('http://schemas.google.com/acl/2007#accessControlList',))

  def GetFeedLink(self):
    """Return the link to the related feed, or None."""
    return self._FindFirstLinkWithRel(
        ('http://schemas.google.com/g/2005#feed',))

  def GetNextLink(self):
    """Return the pagination link to the next page of results, or None."""
    return self._FindFirstLinkWithRel(('next',))

  def GetPrevLink(self):
    """Return the pagination link to the previous page of results, or None."""
    return self._FindFirstLinkWithRel(('previous',))
class TotalResults(atom.AtomBase):
  """opensearch:TotalResults for a GData feed."""

  _tag = 'totalResults'
  _namespace = OPENSEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, extension_elements=None,
      extension_attributes=None, text=None):
    """Stores the element text plus any extension elements/attributes."""
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
def TotalResultsFromString(xml_string):
  """Creates a TotalResults element from a string of XML."""
  return atom.CreateClassFromXMLString(TotalResults, xml_string)
class StartIndex(atom.AtomBase):
  """The opensearch:startIndex element in a GData feed."""

  _tag = 'startIndex'
  _namespace = OPENSEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, extension_elements=None,
      extension_attributes=None, text=None):
    """Stores the element text plus any extension elements/attributes."""
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
def StartIndexFromString(xml_string):
  """Creates a StartIndex element from a string of XML."""
  return atom.CreateClassFromXMLString(StartIndex, xml_string)
class ItemsPerPage(atom.AtomBase):
  """The opensearch:itemsPerPage element in a GData feed."""

  _tag = 'itemsPerPage'
  _namespace = OPENSEARCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, extension_elements=None,
      extension_attributes=None, text=None):
    """Stores the element text plus any extension elements/attributes."""
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text
def ItemsPerPageFromString(xml_string):
  """Creates an ItemsPerPage element from a string of XML."""
  return atom.CreateClassFromXMLString(ItemsPerPage, xml_string)
class ExtendedProperty(atom.AtomBase):
  """The Google Data extendedProperty element.

  Used to store arbitrary key-value information specific to your
  application. The value can either be a text string stored as an XML
  attribute (.value), or an XML node (XmlBlob) as a child element.

  This element is used in the Google Calendar data API and the Google
  Contacts data API.
  """

  _tag = 'extendedProperty'
  _namespace = GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'
  _attributes['value'] = 'value'

  def __init__(self, name=None, value=None, extension_elements=None,
      extension_attributes=None, text=None):
    """Stores the property's name/value pair and any extension data."""
    self.name = name
    self.value = value
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
    self.text = text

  def GetXmlBlobExtensionElement(self):
    """Returns the XML blob as an atom.ExtensionElement.

    Returns:
      An atom.ExtensionElement representing the blob's XML, or None if no
      blob was set.
    """
    if self.extension_elements:
      return self.extension_elements[0]
    return None

  def GetXmlBlobString(self):
    """Returns the XML blob as a string.

    Returns:
      A string containing the blob's XML, or None if no blob was set.
    """
    blob_element = self.GetXmlBlobExtensionElement()
    if not blob_element:
      return None
    return blob_element.ToString()

  def SetXmlBlob(self, blob):
    """Sets the contents of the extendedProperty to XML as a child node.

    Since the extendedProperty is only allowed one child element as an XML
    blob, setting the XML blob will erase any preexisting extension elements
    in this object.

    Args:
      blob: str, ElementTree Element or atom.ExtensionElement representing
          the XML blob stored in the extendedProperty.
    """
    # Clear existing child nodes first: only one blob is allowed.
    self.extension_elements = []
    if isinstance(blob, atom.ExtensionElement):
      converted = blob
    elif ElementTree.iselement(blob):
      converted = atom._ExtensionElementFromElementTree(blob)
    else:
      converted = atom.ExtensionElementFromString(blob)
    self.extension_elements.append(converted)
def ExtendedPropertyFromString(xml_string):
  """Creates an ExtendedProperty element from a string of XML."""
  return atom.CreateClassFromXMLString(ExtendedProperty, xml_string)
class GDataEntry(atom.Entry, LinkFinder):
  """Extends Atom Entry to provide data processing."""

  _tag = atom.Entry._tag
  _namespace = atom.Entry._namespace
  _children = atom.Entry._children.copy()
  _attributes = atom.Entry._attributes.copy()

  def __GetId(self):
    return self.__id

  def __SetId(self, id):
    # Strip unwanted whitespace from the id's text node when setting it.
    self.__id = id
    if id is not None and id.text is not None:
      self.__id.text = id.text.strip()

  id = property(__GetId, __SetId)

  def IsMedia(self):
    """Determines whether or not an entry is a GData Media entry."""
    # A media entry is recognized by the presence of an edit-media link.
    return bool(self.GetEditMediaLink())

  def GetMediaURL(self):
    """Returns the URL to the media content, if the entry is a media entry.
    Otherwise returns None.
    """
    if self.IsMedia():
      return self.content.src
    return None
def GDataEntryFromString(xml_string):
  """Creates a new GDataEntry instance given a string of XML.

  Args:
    xml_string: string The XML text of a single atom:entry element.
  """
  return atom.CreateClassFromXMLString(GDataEntry, xml_string)
class GDataFeed(atom.Feed, LinkFinder):
  """A Feed from a GData service."""

  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = atom.Feed._children.copy()
  _attributes = atom.Feed._attributes.copy()
  _children['{%s}totalResults' % OPENSEARCH_NAMESPACE] = ('total_results',
      TotalResults)
  _children['{%s}startIndex' % OPENSEARCH_NAMESPACE] = ('start_index',
      StartIndex)
  _children['{%s}itemsPerPage' % OPENSEARCH_NAMESPACE] = ('items_per_page',
      ItemsPerPage)
  # Add a conversion rule for atom:entry to make it into a GData
  # Entry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GDataEntry])

  def __GetId(self):
    return self.__id

  def __SetId(self, id):
    # Strip unwanted whitespace from the id's text node.
    self.__id = id
    if id is not None and id.text is not None:
      self.__id.text = id.text.strip()

  id = property(__GetId, __SetId)

  def __GetGenerator(self):
    return self.__generator

  def __SetGenerator(self, generator):
    self.__generator = generator
    # Bug fix: also require a non-None text node before stripping; the old
    # code assumed generator.text was always set and raised AttributeError
    # for a generator element with no text (sibling __SetId already guards
    # against this).
    if generator is not None and generator.text is not None:
      self.__generator.text = generator.text.strip()

  generator = property(__GetGenerator, __SetGenerator)

  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, entry=None,
      total_results=None, start_index=None, items_per_page=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructor for Source

    Args:
      author: list (optional) A list of Author instances which belong to this
          class.
      category: list (optional) A list of Category instances
      contributor: list (optional) A list on Contributor instances
      generator: Generator (optional)
      icon: Icon (optional)
      atom_id: Id (optional) The entry's Id element
      link: list (optional) A list of Link instances
      logo: Logo (optional)
      rights: Rights (optional) The entry's Rights element
      subtitle: Subtitle (optional) The entry's subtitle element
      title: Title (optional) the entry's title element
      updated: Updated (optional) the entry's updated element
      entry: list (optional) A list of the Entry instances contained in the
          feed.
      total_results: TotalResults (optional) opensearch element.
      start_index: StartIndex (optional) opensearch element.
      items_per_page: ItemsPerPage (optional) opensearch element.
      text: String (optional) The text contents of the element. This is the
          contents of the Entry's XML text node.
          (Example: <foo>This is the text</foo>)
      extension_elements: list (optional) A list of ExtensionElement instances
          which are children of this element.
      extension_attributes: dict (optional) A dictionary of strings which are
          the values for additional XML attributes of this element.
    """
    self.author = author or []
    self.category = category or []
    self.contributor = contributor or []
    self.generator = generator
    self.icon = icon
    self.id = atom_id
    self.link = link or []
    self.logo = logo
    self.rights = rights
    self.subtitle = subtitle
    self.title = title
    self.updated = updated
    self.entry = entry or []
    self.total_results = total_results
    self.start_index = start_index
    self.items_per_page = items_per_page
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def GDataFeedFromString(xml_string):
  """Creates a GDataFeed from a string of XML."""
  return atom.CreateClassFromXMLString(GDataFeed, xml_string)
class BatchId(atom.AtomBase):
  """The batch:id element, used to match batch response entries to the
  operations that produced them."""
  _tag = 'id'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
def BatchIdFromString(xml_string):
  """Creates a BatchId element from a string of XML."""
  return atom.CreateClassFromXMLString(BatchId, xml_string)
class BatchOperation(atom.AtomBase):
  """The batch:operation element: which CRUD operation to perform.

  The type attribute is expected to be one of the BATCH_* labels defined
  in this module (insert, update, delete, query).
  """

  _tag = 'operation'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['type'] = 'type'

  def __init__(self, op_type=None, extension_elements=None,
      extension_attributes=None, text=None):
    """Stores op_type as the element's type attribute."""
    self.type = op_type
    atom.AtomBase.__init__(self,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes,
        text=text)
def BatchOperationFromString(xml_string):
  """Creates a BatchOperation element from a string of XML."""
  return atom.CreateClassFromXMLString(BatchOperation, xml_string)
class BatchStatus(atom.AtomBase):
  """The batch:status element present in a batch response entry.

  A status element contains the code (HTTP response code) and
  reason as elements. In a single request these fields would
  be part of the HTTP response, but in a batch request each
  Entry operation has a corresponding Entry in the response
  feed which includes status information.

  See http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """

  _tag = 'status'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['code'] = 'code'
  _attributes['reason'] = 'reason'
  _attributes['content-type'] = 'content_type'

  def __init__(self, code=None, reason=None, content_type=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Stores the per-operation status code, reason and content type."""
    self.content_type = content_type
    self.reason = reason
    self.code = code
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
        extension_attributes=extension_attributes,
        text=text)
def BatchStatusFromString(xml_string):
  """Creates a BatchStatus element from a string of XML."""
  return atom.CreateClassFromXMLString(BatchStatus, xml_string)
class BatchEntry(GDataEntry):
  """An atom:entry for use in batch requests.

  The BatchEntry contains additional members to specify the operation to be
  performed on this entry and a batch ID so that the server can reference
  individual operations in the response feed. For more information, see:
  http://code.google.com/apis/gdata/batch.html
  """
  _tag = GDataEntry._tag
  _namespace = GDataEntry._namespace
  _children = GDataEntry._children.copy()
  # Parse the batch-specific child elements into dedicated members.
  _children['{%s}operation' % BATCH_NAMESPACE] = ('batch_operation', BatchOperation)
  _children['{%s}id' % BATCH_NAMESPACE] = ('batch_id', BatchId)
  _children['{%s}status' % BATCH_NAMESPACE] = ('batch_status', BatchStatus)
  _attributes = GDataEntry._attributes.copy()

  def __init__(self, author=None, category=None, content=None,
      contributor=None, atom_id=None, link=None, published=None, rights=None,
      source=None, summary=None, control=None, title=None, updated=None,
      batch_operation=None, batch_id=None, batch_status=None,
      extension_elements=None, extension_attributes=None, text=None):
    # batch_* members carry the batch metadata; everything else is
    # delegated to the GDataEntry constructor unchanged.
    self.batch_operation = batch_operation
    self.batch_id = batch_id
    self.batch_status = batch_status
    GDataEntry.__init__(self, author=author, category=category,
        content=content, contributor=contributor, atom_id=atom_id, link=link,
        published=published, rights=rights, source=source, summary=summary,
        control=control, title=title, updated=updated,
        extension_elements=extension_elements,
        extension_attributes=extension_attributes, text=text)
def BatchEntryFromString(xml_string):
  """Creates a BatchEntry from a string of XML."""
  return atom.CreateClassFromXMLString(BatchEntry, xml_string)
class BatchInterrupted(atom.AtomBase):
  """The batch:interrupted element sent if batch request was interrupted.

  Only appears in a feed if some of the batch entries could not be
  processed.

  See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """

  _tag = 'interrupted'
  _namespace = BATCH_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['reason'] = 'reason'
  _attributes['success'] = 'success'
  _attributes['failures'] = 'failures'
  _attributes['parsed'] = 'parsed'

  def __init__(self, reason=None, success=None, failures=None, parsed=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Stores the interruption reason plus success/failure/parsed counts."""
    self.parsed = parsed
    self.failures = failures
    self.success = success
    self.reason = reason
    atom.AtomBase.__init__(self, extension_elements=extension_elements,
        extension_attributes=extension_attributes,
        text=text)
def BatchInterruptedFromString(xml_string):
  """Creates a BatchInterrupted element from a string of XML."""
  return atom.CreateClassFromXMLString(BatchInterrupted, xml_string)
class BatchFeed(GDataFeed):
  """A feed containing a list of batch request entries.

  Also carries the batch:interrupted element which the server sends when
  some of the entries in a batch request could not be processed.
  """
  _tag = GDataFeed._tag
  _namespace = GDataFeed._namespace
  _children = GDataFeed._children.copy()
  _attributes = GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchEntry])
  _children['{%s}interrupted' % BATCH_NAMESPACE] = ('interrupted',
      BatchInterrupted)

  def __init__(self, author=None, category=None, contributor=None,
      generator=None, icon=None, atom_id=None, link=None, logo=None,
      rights=None, subtitle=None, title=None, updated=None, entry=None,
      total_results=None, start_index=None, items_per_page=None,
      interrupted=None,
      extension_elements=None, extension_attributes=None, text=None):
    """Constructor; stores interrupted and delegates the rest to GDataFeed.

    Args:
      interrupted: BatchInterrupted (optional) Present in response feeds
          when the server could not process some batch entries.
      (All remaining arguments are passed through to GDataFeed unchanged.)
    """
    self.interrupted = interrupted
    GDataFeed.__init__(self, author=author, category=category,
                       contributor=contributor, generator=generator,
                       icon=icon, atom_id=atom_id, link=link,
                       logo=logo, rights=rights, subtitle=subtitle,
                       title=title, updated=updated, entry=entry,
                       total_results=total_results, start_index=start_index,
                       items_per_page=items_per_page,
                       extension_elements=extension_elements,
                       extension_attributes=extension_attributes,
                       text=text)

  def AddBatchEntry(self, entry=None, id_url_string=None,
      batch_id_string=None, operation_string=None):
    """Logic for populating members of a BatchEntry and adding to the feed.

    The id_url_string can be used in place of an entry if the batch
    operation applies to a URL. For example query and delete operations
    require just the URL of an entry, no body is sent in the HTTP request.
    If an id_url_string is sent instead of an entry, a BatchEntry is
    created and added to the feed.

    This method also assigns the desired batch id to the entry so that it
    can be referenced in the server's response. If the batch_id_string is
    None, this method will assign a batch_id to be the index at which this
    entry will be in the feed's entry list.

    Args:
      entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
          entry which will be sent to the server as part of the batch
          request. The item must have a valid atom id so that the server
          knows which entry this request references.
      id_url_string: str (optional) The URL of the entry to be acted on.
          You can find this URL in the text member of the atom id for an
          entry. If an entry is not sent, this id will be used to construct
          a new BatchEntry which will be added to the request feed.
      batch_id_string: str (optional) The batch ID to be used to reference
          this batch operation in the results feed. If this parameter is
          None, the current length of the feed's entry array will be used
          as a count. Note that batch_ids should either always be specified
          or never, mixing could potentially result in duplicate batch ids.
      operation_string: str (optional) The desired batch operation which
          will set the batch_operation.type member of the entry. Options
          are 'insert', 'update', 'delete', and 'query'.

    Raises:
      MissingRequiredParameters: Raised if neither an id_url_string nor an
          entry are provided in the request.

    Returns:
      The added entry.
    """
    if entry is None and id_url_string is None:
      raise MissingRequiredParameters('supply either an entry or URL string')
    if entry is None and id_url_string is not None:
      entry = BatchEntry(atom_id=atom.Id(text=id_url_string))
    # TODO: handle cases in which the entry lacks batch_... members by
    # converting it to a BatchEntry.
    if batch_id_string is not None:
      entry.batch_id = BatchId(text=batch_id_string)
    elif entry.batch_id is None or entry.batch_id.text is None:
      # Default the batch id to the entry's index in the feed.
      entry.batch_id = BatchId(text=str(len(self.entry)))
    if operation_string is not None:
      entry.batch_operation = BatchOperation(op_type=operation_string)
    self.entry.append(entry)
    return entry

  def AddInsert(self, entry, batch_id_string=None):
    """Add an insert request to the operations in this batch request feed.

    If the entry doesn't yet have an operation or a batch id, these will
    be set to the insert operation and a batch_id specified as a parameter.

    Args:
      entry: BatchEntry The entry which will be sent in the batch feed as
          an insert request.
      batch_id_string: str (optional) The batch ID to be used to reference
          this batch operation in the results feed. If this parameter is
          None, the current length of the feed's entry array will be used
          as a count. Note that batch_ids should either always be specified
          or never, mixing could potentially result in duplicate batch ids.

    Returns:
      The added entry.
    """
    # FIX: return the entry (previously assigned to a discarded local) so
    # callers can inspect the assigned batch id, consistent with
    # AddBatchEntry's documented return value.
    return self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
        operation_string=BATCH_INSERT)

  def AddUpdate(self, entry, batch_id_string=None):
    """Add an update request to the list of batch operations in this feed.

    Sets the operation type of the entry to update if it is not already
    set and assigns the desired batch id to the entry so that it can be
    referenced in the server's response.

    Args:
      entry: BatchEntry The entry which will be sent to the server as an
          update (HTTP PUT) request. The item must have a valid atom id
          so that the server knows which entry to replace.
      batch_id_string: str (optional) The batch ID to be used to reference
          this batch operation in the results feed. If this parameter is
          None, the current length of the feed's entry array will be used
          as a count. See also comments for AddInsert.

    Returns:
      The added entry.
    """
    return self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string,
        operation_string=BATCH_UPDATE)

  def AddDelete(self, url_string=None, entry=None, batch_id_string=None):
    """Adds a delete request to the batch request feed.

    This method takes either the url_string which is the atom id of the
    item to be deleted, or the entry itself. The atom id of the entry must
    be present so that the server knows which entry should be deleted.

    Args:
      url_string: str (optional) The URL of the entry to be deleted. You
          can find this URL in the text member of the atom id for an entry.
      entry: BatchEntry (optional) The entry to be deleted.
      batch_id_string: str (optional)

    Raises:
      MissingRequiredParameters: Raised if neither a url_string nor an
          entry are provided in the request.

    Returns:
      The added entry.
    """
    return self.AddBatchEntry(entry=entry, id_url_string=url_string,
        batch_id_string=batch_id_string,
        operation_string=BATCH_DELETE)

  def AddQuery(self, url_string=None, entry=None, batch_id_string=None):
    """Adds a query request to the batch request feed.

    This method takes either the url_string which is the query URL
    whose results will be added to the result feed. The query URL will
    be encapsulated in a BatchEntry, and you may pass in the BatchEntry
    with a query URL instead of sending a url_string.

    Args:
      url_string: str (optional)
      entry: BatchEntry (optional)
      batch_id_string: str (optional)

    Raises:
      MissingRequiredParameters

    Returns:
      The added entry.
    """
    return self.AddBatchEntry(entry=entry, id_url_string=url_string,
        batch_id_string=batch_id_string,
        operation_string=BATCH_QUERY)

  def GetBatchLink(self):
    """Returns the link with rel ...#batch, or None if none is present."""
    for link in self.link:
      if link.rel == 'http://schemas.google.com/g/2005#batch':
        return link
    return None
def BatchFeedFromString(xml_string):
  """Deserializes an XML string into a BatchFeed instance."""
  parsed_feed = atom.CreateClassFromXMLString(BatchFeed, xml_string)
  return parsed_feed
class EntryLink(atom.AtomBase):
  """The gd:entryLink element.

  Wraps a nested entry (a GDataEntry) along with the rel, readOnly, and
  href XML attributes.
  """
  _tag = 'entryLink'
  _namespace = GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # The entry used to be an atom.Entry, now it is a GDataEntry.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', GDataEntry)
  # BUG FIX: a stray trailing comma previously made this value the tuple
  # ('rel',) instead of the string 'rel', breaking the attribute mapping.
  _attributes['rel'] = 'rel'
  _attributes['readOnly'] = 'read_only'
  _attributes['href'] = 'href'

  def __init__(self, href=None, read_only=None, rel=None,
      entry=None, extension_elements=None,
      extension_attributes=None, text=None):
    """Constructor; stores the attribute values and the nested entry."""
    self.href = href
    self.read_only = read_only
    self.rel = rel
    self.entry = entry
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def EntryLinkFromString(xml_string):
  """Deserializes an XML string into an EntryLink instance."""
  parsed_link = atom.CreateClassFromXMLString(EntryLink, xml_string)
  return parsed_link
class FeedLink(atom.AtomBase):
  """The gd:feedLink element.

  Wraps a nested feed (a GDataFeed) along with the rel, readOnly,
  countHint, and href XML attributes.
  """
  _tag = 'feedLink'
  _namespace = GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _children['{%s}feed' % atom.ATOM_NAMESPACE] = ('feed', GDataFeed)
  # XML attribute name -> Python member name.
  _attributes.update({'rel': 'rel', 'readOnly': 'read_only',
                      'countHint': 'count_hint', 'href': 'href'})

  def __init__(self, count_hint=None, href=None, read_only=None, rel=None,
      feed=None, extension_elements=None, extension_attributes=None,
      text=None):
    """Constructor; stores the attribute values and the nested feed."""
    self.count_hint = count_hint
    self.href = href
    self.read_only = read_only
    self.rel = rel
    self.feed = feed
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
def FeedLinkFromString(xml_string):
  """Deserializes an XML string into a FeedLink instance.

  BUG FIX: this previously parsed the string into an EntryLink (copy-paste
  error); it now parses into a FeedLink as the name promises.
  """
  return atom.CreateClassFromXMLString(FeedLink, xml_string)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GDataService provides CRUD ops. and programmatic login for GData services.
Error: A base exception class for all exceptions in the gdata_client
module.
CaptchaRequired: This exception is thrown when a login attempt results in a
captcha challenge from the ClientLogin service. When this
exception is thrown, the captcha_token and captcha_url are
set to the values provided in the server's response.
BadAuthentication: Raised when a login attempt is made with an incorrect
username or password.
NotAuthenticated: Raised if an operation requiring authentication is called
before a user has authenticated.
NonAuthSubToken: Raised if a method to modify an AuthSub token is used when
the user is either not authenticated or is authenticated
through programmatic login.
RequestError: Raised if a CRUD request returned a non-success code.
UnexpectedReturnType: Raised if the response from the server was not of the
desired type. For example, this would be raised if the
server sent a feed when the client requested an entry.
GDataService: Encapsulates user credentials needed to perform insert, update
and delete operations with the GData API. An instance can
perform user authentication, query, insertion, deletion, and
update.
Query: Eases query URI creation by allowing URI parameters to be set as
dictionary attributes. For example a query with a feed of
'/base/feeds/snippets' and ['bq'] set to 'digital camera' will
produce '/base/feeds/snippets?bq=digital+camera' when .ToUri() is
called on it.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import re
import urllib
import urlparse
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom.service
import gdata
import atom
import atom.http_interface
import atom.token_store
import gdata.auth
# Base URL for Google's account endpoints (ClientLogin, AuthSub).
AUTH_SERVER_HOST = 'https://www.google.com'
# When requesting an AuthSub token, it is often helpful to track the scope
# which is being requested. One way to accomplish this is to add a URL
# parameter to the 'next' URL which contains the requested scope. This
# constant is the default name (AKA key) for the URL parameter.
SCOPE_URL_PARAM_NAME = 'authsub_token_scope'
# Maps the service names used in ClientLogin to scope URLs.
CLIENT_LOGIN_SCOPES = {
    'cl': [  # Google Calendar
        'https://www.google.com/calendar/feeds/',
        'http://www.google.com/calendar/feeds/'],
    'gbase': [  # Google Base
        'http://base.google.com/base/feeds/',
        'http://www.google.com/base/feeds/'],
    'blogger': [  # Blogger
        'http://www.blogger.com/feeds/'],
    'codesearch': [  # Google Code Search
        'http://www.google.com/codesearch/feeds/'],
    'cp': [  # Contacts API
        'https://www.google.com/m8/feeds/',
        'http://www.google.com/m8/feeds/'],
    'finance': [  # Google Finance
        'http://finance.google.com/finance/feeds/'],
    'health': [  # Google Health
        'https://www.google.com/health/feeds/'],
    'writely': [  # Documents List API
        'https://docs.google.com/feeds/',
        'http://docs.google.com/feeds/'],
    'lh2': [  # Picasa Web Albums
        'http://picasaweb.google.com/data/'],
    'apps': [  # Google Apps Provisioning API
        'http://www.google.com/a/feeds/',
        'https://www.google.com/a/feeds/'],
    'wise': [  # Spreadsheets Data API
        'https://spreadsheets.google.com/feeds/',
        'http://spreadsheets.google.com/feeds/'],
    'sitemaps': [  # Google Webmaster Tools
        'https://www.google.com/webmasters/tools/feeds/'],
    'youtube': [  # YouTube
        'http://gdata.youtube.com/feeds/api/']}


def lookup_scopes(service_name):
  """Finds the scope URLs for the desired service.

  Returns the list of scope URLs registered for service_name, or None when
  the service is unknown.
  """
  return CLIENT_LOGIN_SCOPES.get(service_name)
# Module level variable specifies which module should be used by GDataService
# objects to make HttpRequests. This setting can be overridden on each
# instance of GDataService.
# DEPRECATED: this module level variable is deprecated. Reassign the
# http_client member of a GDataService object instead.
http_request_handler = atom.service
class Error(Exception):
  """Base exception for all errors raised by this module."""
  pass


class CaptchaRequired(Error):
  """Login failed because the service issued a captcha challenge."""
  pass


class BadAuthentication(Error):
  """Login was attempted with an incorrect username or password."""
  pass


class NotAuthenticated(Error):
  """An operation requiring authentication was called before login."""
  pass


class NonAuthSubToken(Error):
  """An AuthSub-only operation was used with a non-AuthSub token."""
  pass


class RequestError(Error):
  """A CRUD request returned a non-success HTTP status code."""
  pass


class UnexpectedReturnType(Error):
  """The server's response was not of the requested type (feed vs entry)."""
  pass


class BadAuthenticationServiceURL(Error):
  """The authentication service URL responded with a redirect."""
  pass


class TokenUpgradeFailed(Error):
  """The server refused to upgrade a single-use token to a session token."""
  pass


class AuthorizationRequired(Error):
  """The requested operation requires authorization."""
  pass


class TokenHadNoScope(Error):
  """A token was found without any associated scope."""
  pass
class GDataService(atom.service.AtomService):
  """Contains elements needed for GData login and CRUD request headers.

  Maintains additional headers (tokens for example) needed for the GData
  services to allow a user to perform inserts, updates, and deletes.
  """
  # The handler member is deprecated, use http_client instead.
  handler = None
  # The auth_token member is deprecated, use the token_store instead.
  auth_token = None
  # The tokens dict is deprecated in favor of the token_store.
  tokens = None
  def __init__(self, email=None, password=None, account_type='HOSTED_OR_GOOGLE',
      service=None, auth_service_url=None, source=None, server=None,
      additional_headers=None, handler=None, tokens=None,
      http_client=None, token_store=None):
    """Creates an object of type GDataService.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      account_type: string (optional) The type of account to use. Use
          'GOOGLE' for regular Google accounts or 'HOSTED' for Google
          Apps accounts, or 'HOSTED_OR_GOOGLE' to try finding a HOSTED
          account first and, if it doesn't exist, try finding a regular
          GOOGLE account. Default value: 'HOSTED_OR_GOOGLE'.
      service: string (optional) The desired service for which credentials
          will be obtained.
      auth_service_url: string (optional) User-defined auth token request URL
          allows users to explicitly specify where to send auth token requests.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'base.google.com'.
      additional_headers: dictionary (optional) Any additional headers which
          should be included with CRUD operations.
      handler: module (optional) This parameter is deprecated and has been
          replaced by http_client.
      tokens: This parameter is deprecated, calls should be made to
          token_store instead.
      http_client: An object responsible for making HTTP requests using a
          request method. If none is provided, a new instance of
          atom.http.ProxiedHttpClient will be used.
      token_store: Keeps a collection of authorization tokens which can be
          applied to requests for a specific URLs. Critical methods are
          find_token based on a URL (atom.url.Url or a string), add_token,
          and remove_token.
    """
    atom.service.AtomService.__init__(self, http_client=http_client,
        token_store=token_store)
    self.email = email
    self.password = password
    self.account_type = account_type
    self.service = service
    self.auth_service_url = auth_service_url
    self.server = server
    self.additional_headers = additional_headers or {}
    # Assign through the property so the User-Agent header is updated.
    self.__SetSource(source)
    # Captcha state from the most recent failed login attempt, if any.
    self.__captcha_token = None
    self.__captcha_url = None
    # Session id extracted from gsessionid redirects; appended to URIs.
    self.__gsessionid = None
  # Define properties for GDataService
  def _SetAuthSubToken(self, auth_token, scopes=None):
    """Deprecated, use SetAuthSubToken instead."""
    self.SetAuthSubToken(auth_token, scopes=scopes)

  def __SetAuthSubToken(self, auth_token, scopes=None):
    """Deprecated, use SetAuthSubToken instead."""
    # Name-mangled shim kept for backwards compatibility with old callers.
    self._SetAuthSubToken(auth_token, scopes=scopes)
def _GetAuthToken(self):
"""Returns the auth token used for authenticating requests.
Returns:
string
"""
current_scopes = lookup_scopes(self.service, use_default=False)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if hasattr(token, 'auth_header'):
return token.auth_header
return None
  def _GetCaptchaToken(self):
    """Returns a captcha token if the most recent login attempt generated one.

    The captcha token is only set if the Programmatic Login attempt failed
    because the Google service issued a captcha challenge.

    Returns:
      string
    """
    return self.__captcha_token

  def __GetCaptchaToken(self):
    # Name-mangled alias used as the property getter below.
    return self._GetCaptchaToken()

  captcha_token = property(__GetCaptchaToken,
      doc="""Get the captcha token for a login request.""")
  def _GetCaptchaURL(self):
    """Returns the URL of the captcha image if a login attempt generated one.

    The captcha URL is only set if the Programmatic Login attempt failed
    because the Google service issued a captcha challenge.

    Returns:
      string
    """
    return self.__captcha_url

  def __GetCaptchaURL(self):
    # Name-mangled alias used as the property getter below.
    return self._GetCaptchaURL()

  captcha_url = property(__GetCaptchaURL,
      doc="""Get the captcha URL for a login request.""")
  def GetAuthSubToken(self):
    """Returns the AuthSub Token after removing the AuthSub Authorization
    Label.

    The AuthSub Authorization Label reads: "AuthSub token"

    Returns:
      If the AuthSub Token is set AND it begins with the AuthSub
      Authorization Label, the AuthSub Token is returned minus the AuthSub
      Label. If the AuthSub Token does not start with the AuthSub
      Authorization Label or it is not set, None is returned.
    """
    current_scopes = lookup_scopes(self.service)
    if current_scopes:
      token = self.token_store.find_token(current_scopes[0])
      if isinstance(token, gdata.auth.AuthSubToken):
        return token.get_token_string()
    else:
      # No scopes for the current service: fall back to the catch-all token.
      token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
      if isinstance(token, gdata.auth.ClientLoginToken):
        return token.get_token_string()
    return None
  def SetAuthSubToken(self, token, scopes=None):
    """Sets the token sent in requests to an AuthSub token.

    Only use this method if you have received a token from the AuthSub
    service. The auth token is set automatically when
    UpgradeToSessionToken() is used. See documentation for Google AuthSub
    here:
    http://code.google.com/apis/accounts/AuthForWebApps.html

    Args:
      token: gdata.auth.AuthSubToken or string The token returned by the
          AuthSub service. If the token is an AuthSubToken, the scope
          information stored in the AuthSubToken is used. If the token
          is a string, the scopes parameter is used to determine the
          valid scopes.
      scopes: list of URLs for which the token is valid. This is only used
          if the token parameter is a string.
    """
    if not isinstance(token, gdata.auth.AuthSubToken):
      token_string = token
      token = gdata.auth.AuthSubToken()
      token.set_token_string(token_string)
    # If no scopes were set for the token, use the scopes passed in, or
    # try to determine the scopes based on the current service name. If
    # all else fails, set the token to match all requests.
    if not token.scopes:
      if scopes is None:
        scopes = lookup_scopes(self.service)
        if scopes is None:
          scopes = [atom.token_store.SCOPE_ALL]
      token.scopes = scopes
    self.token_store.add_token(token)
  def GetClientLoginToken(self):
    """Returns the token string for the current request scope.

    The current scope is determined by the service name string member.
    The token string is the end of the Authorization header, it does not
    include the ClientLogin label.
    """
    current_scopes = lookup_scopes(self.service)
    if current_scopes:
      token = self.token_store.find_token(current_scopes[0])
      if isinstance(token, gdata.auth.ClientLoginToken):
        return token.get_token_string()
    else:
      # No scopes for the current service: fall back to the catch-all token.
      token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
      if isinstance(token, gdata.auth.ClientLoginToken):
        return token.get_token_string()
    return None
  def SetClientLoginToken(self, token, scopes=None):
    """Sets the token sent in requests to a ClientLogin token.

    Only use this method if you have received a token from the ClientLogin
    service. The auth_token is set automatically when ProgrammaticLogin()
    is used. See documentation for Google ClientLogin here:
    http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

    Args:
      token: string or gdata.auth.ClientLoginToken The token returned by
          the ClientLogin service.
      scopes: list of URLs for which the token is valid. This is only used
          if the token parameter is a string.
    """
    if not isinstance(token, gdata.auth.ClientLoginToken):
      token_string = token
      token = gdata.auth.ClientLoginToken()
      token.set_token_string(token_string)
    # If the token carries no scopes, use the scopes passed in, then the
    # scopes of the current service, then finally match all requests.
    if not token.scopes:
      if scopes is None:
        scopes = lookup_scopes(self.service)
        if scopes is None:
          scopes = [atom.token_store.SCOPE_ALL]
      token.scopes = scopes
    self.token_store.add_token(token)
  # Private methods to create the source property.
  def __GetSource(self):
    return self.__source

  def __SetSource(self, new_source):
    self.__source = new_source
    # Update the UserAgent header to include the new application name.
    self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
        self.__source,)

  source = property(__GetSource, __SetSource,
      doc="""The source is the name of the application making the request.
      It should be in the form company_id-app_name-app_version""")
  # Authentication operations

  def ProgrammaticLogin(self, captcha_token=None, captcha_response=None):
    """Authenticates the user and sets the GData Auth token.

    Login retrieves a temporary auth token which must be used with all
    requests to GData services. The auth token is stored in the GData
    client object.

    Login is also used to respond to a captcha challenge. If the user's
    login attempt failed with a CaptchaRequired error, the user can respond
    by calling Login with the captcha token and the answer to the
    challenge.

    Args:
      captcha_token: string (optional) The identifier for the captcha
          challenge which was presented to the user.
      captcha_response: string (optional) The user's answer to the captcha
          challenge.

    Raises:
      CaptchaRequired if the login service will require a captcha response
      BadAuthentication if the login service rejected the username or
          password
      Error if the login service responded with a 403 different from the
          above
      BadAuthenticationServiceURL if the (user-supplied) auth service URL
          responded with a redirect
    """
    request_body = gdata.auth.generate_client_login_request_body(self.email,
        self.password, self.service, self.source, self.account_type,
        captcha_token, captcha_response)

    # If the user has defined their own authentication service URL,
    # send the ClientLogin requests to this URL:
    if not self.auth_service_url:
      auth_request_url = AUTH_SERVER_HOST + '/accounts/ClientLogin'
    else:
      auth_request_url = self.auth_service_url

    auth_response = self.http_client.request('POST', auth_request_url,
        data=request_body,
        headers={'Content-Type':'application/x-www-form-urlencoded'})
    response_body = auth_response.read()

    if auth_response.status == 200:
      # TODO: insert the token into the token_store directly.
      self.SetClientLoginToken(
          gdata.auth.get_client_login_token(response_body))
      self.__captcha_token = None
      self.__captcha_url = None

    elif auth_response.status == 403:
      # Examine each line to find the error type and the captcha token and
      # captcha URL if they are present.
      captcha_parameters = gdata.auth.get_captcha_challenge(response_body,
          captcha_base_url='%s/accounts/' % AUTH_SERVER_HOST)
      if captcha_parameters:
        self.__captcha_token = captcha_parameters['token']
        self.__captcha_url = captcha_parameters['url']
        raise CaptchaRequired, 'Captcha Required'
      elif response_body.splitlines()[0] == 'Error=BadAuthentication':
        self.__captcha_token = None
        self.__captcha_url = None
        raise BadAuthentication, 'Incorrect username or password'
      else:
        self.__captcha_token = None
        self.__captcha_url = None
        raise Error, 'Server responded with a 403 code'

    elif auth_response.status == 302:
      self.__captcha_token = None
      self.__captcha_url = None
      # Google tries to redirect all bad URLs back to
      # http://www.google.<locale>. If a redirect
      # attempt is made, assume the user has supplied an incorrect
      # authentication URL
      raise BadAuthenticationServiceURL, 'Server responded with a 302 code.'
def ClientLogin(self, username, password, account_type=None, service=None,
auth_service_url=None, source=None, captcha_token=None,
captcha_response=None):
"""Convenience method for authenticating using ProgrammaticLogin.
Sets values for email, password, and other optional members.
Args:
username:
password:
account_type: string (optional)
service: string (optional)
auth_service_url: string (optional)
captcha_token: string (optional)
captcha_response: string (optional)
"""
self.email = username
self.password = password
if account_type:
self.account_type = account_type
if service:
self.service = service
if source:
self.source = source
if auth_service_url:
self.auth_service_url = auth_service_url
self.ProgrammaticLogin(captcha_token, captcha_response)
  def GenerateAuthSubURL(self, next, scope, secure=False, session=True,
      domain='default'):
    """Generate a URL at which the user will login and be redirected back.

    Users enter their credentials on a Google login page and a token is
    sent to the URL specified in next. See documentation for AuthSub login
    at:
    http://code.google.com/apis/accounts/AuthForWebApps.html

    Args:
      next: string The URL user will be sent to after logging in.
      scope: string or list of strings. The URLs of the services to be
          accessed.
      secure: boolean (optional) Determines whether or not the issued
          token is a secure token.
      session: boolean (optional) Determines whether or not the issued
          token can be upgraded to a session token.
      domain: string (optional) The Google Apps domain for the account, or
          'default' for a regular Google account.
    """
    # A single scope URL may be passed; normalize it to a tuple.
    if not isinstance(scope, (list, tuple)):
      scope = (scope,)
    return gdata.auth.generate_auth_sub_url(next, scope, secure=secure,
        session=session,
        request_url='%s/accounts/AuthSubRequest' % AUTH_SERVER_HOST,
        domain=domain)
  def UpgradeToSessionToken(self, token=None):
    """Upgrades a single use AuthSub token to a session token.

    Args:
      token: A gdata.auth.AuthSubToken (optional) which is good for a
          single use but can be upgraded to a session token. If no token
          is passed in, the AuthSubToken is found by looking in the
          token_store by looking for a token for the current scope.

    Raises:
      NonAuthSubToken if the user's auth token is not an AuthSub token
      TokenUpgradeFailed if the server responded to the request with an
          error.
    """
    if token is None:
      # Locate the stored token for the current service's scope, falling
      # back to the catch-all scope when the service is unknown.
      scopes = lookup_scopes(self.service)
      if scopes:
        token = self.token_store.find_token(scopes[0])
      else:
        token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
    if not isinstance(token, gdata.auth.AuthSubToken):
      raise NonAuthSubToken

    self.SetAuthSubToken(self.upgrade_to_session_token(token))
  def upgrade_to_session_token(self, token):
    """Upgrades a single use AuthSub token to a session token.

    Unlike UpgradeToSessionToken, this method does not modify the
    token_store; it returns the upgraded token to the caller.

    Args:
      token: A gdata.auth.AuthSubToken which is good for a single use but
          can be upgraded to a session token.

    Returns:
      The upgraded token as a gdata.auth.AuthSubToken object.

    Raises:
      TokenUpgradeFailed if the server responded to the request with an
          error.
    """
    response = token.perform_request(self.http_client, 'GET',
        AUTH_SERVER_HOST + '/accounts/AuthSubSessionToken',
        headers={'Content-Type':'application/x-www-form-urlencoded'})
    response_body = response.read()
    if response.status == 200:
      token.set_token_string(
          gdata.auth.token_from_http_body(response_body))
      return token
    else:
      raise TokenUpgradeFailed({'status': response.status,
                                'reason': 'Non 200 response on upgrade',
                                'body': response_body})
  def RevokeAuthSubToken(self):
    """Revokes an existing AuthSub token.

    Raises:
      NonAuthSubToken if the user's auth token is not an AuthSub token
    """
    scopes = lookup_scopes(self.service)
    # NOTE(review): the whole scopes list (possibly None for an unknown
    # service) is passed to find_token here, while sibling methods such as
    # UpgradeToSessionToken pass scopes[0]. Confirm find_token tolerates a
    # list/None argument before relying on this path.
    token = self.token_store.find_token(scopes)
    if not isinstance(token, gdata.auth.AuthSubToken):
      raise NonAuthSubToken

    response = token.perform_request(self.http_client, 'GET',
        AUTH_SERVER_HOST + '/accounts/AuthSubRevokeToken',
        headers={'Content-Type':'application/x-www-form-urlencoded'})
    # Only forget the token locally when the server confirms the revocation.
    if response.status == 200:
      self.token_store.remove_token(token)
  # CRUD operations

  def Get(self, uri, extra_headers=None, redirects_remaining=4,
      encoding='UTF-8', converter=None):
    """Query the GData API with the given URI

    The uri is the portion of the URI after the server value
    (ex: www.google.com).

    To perform a query against Google Base, set the server to
    'base.google.com' and set the uri to '/base/feeds/...', where ... is
    your query. For example, to find snippets for all digital cameras uri
    should be set to: '/base/feeds/snippets?bq=digital+camera'

    Args:
      uri: string The query in the form of a URI. Example:
          '/base/feeds/snippets?bq=digital+camera'.
      extra_headers: dictionary (optional) Extra HTTP headers to be
          included in the GET request. These headers are in addition to
          those stored in the client's additional_headers property. The
          client automatically sets the Content-Type and Authorization
          headers.
      redirects_remaining: int (optional) Tracks the number of additional
          redirects this method will allow. If the service object receives
          a redirect and remaining is 0, it will not follow the redirect.
          This was added to avoid infinite redirect loops.
      encoding: string (optional) The character encoding for the server's
          response. Default is UTF-8
      converter: func (optional) A function which will transform
          the server's results before it is returned. Example: use
          GDataFeedFromString to parse the server response as if it
          were a GDataFeed.

    Returns:
      If there is no ResultsTransformer specified in the call, a GDataFeed
      or GDataEntry depending on which is sent from the server. If the
      response is neither a feed or entry and there is no
      ResultsTransformer, return a string. If there is a
      ResultsTransformer, the returned value will be that of the
      ResultsTransformer function.

    Raises:
      RequestError on a non-200, non-302 response, on a 302 without a
      Location header, and when the redirect budget is exhausted.
    """
    if extra_headers is None:
      extra_headers = {}

    # Propagate any session id captured from an earlier redirect.
    if self.__gsessionid is not None:
      if uri.find('gsessionid=') < 0:
        if uri.find('?') > -1:
          uri += '&gsessionid=%s' % (self.__gsessionid,)
        else:
          uri += '?gsessionid=%s' % (self.__gsessionid,)

    server_response = self.request('GET', uri,
        headers=extra_headers)
    result_body = server_response.read()

    if server_response.status == 200:
      if converter:
        return converter(result_body)
      # There was no ResultsTransformer specified, so try to convert the
      # server's response into a GDataFeed.
      feed = gdata.GDataFeedFromString(result_body)
      if not feed:
        # If conversion to a GDataFeed failed, try to convert the server's
        # response to a GDataEntry.
        entry = gdata.GDataEntryFromString(result_body)
        if not entry:
          # The server's response wasn't a feed, or an entry, so return the
          # response body as a string.
          return result_body
        return entry
      return feed
    elif server_response.status == 302:
      if redirects_remaining > 0:
        location = server_response.getheader('Location')
        if location is not None:
          # Remember the gsessionid from the redirect URL so it can be
          # appended to subsequent request URIs.
          m = re.compile('[\?\&]gsessionid=(\w*)').search(location)
          if m is not None:
            self.__gsessionid = m.group(1)
          return GDataService.Get(self, location, extra_headers, redirects_remaining - 1,
              encoding=encoding, converter=converter)
        else:
          raise RequestError, {'status': server_response.status,
              'reason': '302 received without Location header',
              'body': result_body}
      else:
        raise RequestError, {'status': server_response.status,
            'reason': 'Redirect received, but redirects_remaining <= 0',
            'body': result_body}
    else:
      raise RequestError, {'status': server_response.status,
          'reason': server_response.reason, 'body': result_body}
def GetMedia(self, uri, extra_headers=None):
"""Returns a MediaSource containing media and its metadata from the given
URI string.
"""
response_handle = self.request('GET', uri,
headers=extra_headers)
return gdata.MediaSource(response_handle, response_handle.getheader(
'Content-Type'),
response_handle.getheader('Content-Length'))
def GetEntry(self, uri, extra_headers=None):
"""Query the GData API with the given URI and receive an Entry.
See also documentation for gdata.service.Get
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
Returns:
A GDataEntry built from the XML in the server's response.
"""
result = GDataService.Get(self, uri, extra_headers,
converter=atom.EntryFromString)
if isinstance(result, atom.Entry):
return result
else:
raise UnexpectedReturnType, 'Server did not send an entry'
def GetFeed(self, uri, extra_headers=None,
converter=gdata.GDataFeedFromString):
"""Query the GData API with the given URI and receive a Feed.
See also documentation for gdata.service.Get
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
Returns:
A GDataFeed built from the XML in the server's response.
"""
result = GDataService.Get(self, uri, extra_headers, converter=converter)
if isinstance(result, atom.Feed):
return result
else:
raise UnexpectedReturnType, 'Server did not send a feed'
def GetNext(self, feed):
"""Requests the next 'page' of results in the feed.
This method uses the feed's next link to request an additional feed
and uses the class of the feed to convert the results of the GET request.
Args:
feed: atom.Feed or a subclass. The feed should contain a next link and
the type of the feed will be applied to the results from the
server. The new feed which is returned will be of the same class
as this feed which was passed in.
Returns:
A new feed representing the next set of results in the server's feed.
The type of this feed will match that of the feed argument.
"""
next_link = feed.GetNextLink()
# Create a closure which will convert an XML string to the class of
# the feed object passed in.
def ConvertToFeedClass(xml_string):
return atom.CreateClassFromXMLString(feed.__class__, xml_string)
# Make a GET request on the next link and use the above closure for the
# converted which processes the XML string from the server.
if next_link and next_link.href:
return GDataService.Get(self, next_link.href,
converter=ConvertToFeedClass)
else:
return None
def Post(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=4, media_source=None,
converter=None):
"""Insert or update data into a GData service at the given URI.
Args:
data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/base/feeds/items'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
media_source: MediaSource (optional) Container for the media to be sent
along with the entry, if provided.
converter: func (optional) A function which will be executed on the
server's response. Often this is a function like
GDataEntryFromString which will parse the body of the server's
response and return a GDataEntry.
Returns:
If the post succeeded, this method will return a GDataFeed, GDataEntry,
or the results of running converter on the server's result body (if
converter was specified).
"""
return GDataService.PostOrPut(self, 'POST', data, uri,
extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, redirects_remaining=redirects_remaining,
media_source=media_source, converter=converter)
  def PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None,
      escape_params=True, redirects_remaining=4, media_source=None,
      converter=None):
    """Insert data into a GData service at the given URI.

    Args:
      verb: string, either 'POST' or 'PUT'
      data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
            XML to be sent to the uri.
      uri: string The location (feed) to which the data should be inserted.
           Example: '/base/feeds/items'.
      extra_headers: dict (optional) HTTP headers which are to be included.
                     The client automatically sets the Content-Type,
                     Authorization, and Content-Length headers.
      url_params: dict (optional) Additional URL parameters to be included
                  in the URI. These are translated into query arguments
                  in the form '&dict_key=value&...'.
                  Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) If false, the calling code has already
                     ensured that the query will form a valid URL (all
                     reserved characters have been escaped). If true, this
                     method will escape the query and any URL parameters
                     provided.
      media_source: MediaSource (optional) Container for the media to be sent
          along with the entry, if provided.
      converter: func (optional) A function which will be executed on the
          server's response. Often this is a function like
          GDataEntryFromString which will parse the body of the server's
          response and return a GDataEntry.

    Returns:
      If the post succeeded, this method will return a GDataFeed, GDataEntry,
      or the results of running converter on the server's result body (if
      converter was specified).

    Raises:
      RequestError: on any non-2xx status, on a 302 without a Location
          header, or when the redirect budget is exhausted.
    """
    if extra_headers is None:
      extra_headers = {}
    # If a previous redirect supplied a gsessionid, propagate it on this
    # request unless the URI already carries one.
    if self.__gsessionid is not None:
      if uri.find('gsessionid=') < 0:
        if uri.find('?') > -1:
          uri += '&gsessionid=%s' % (self.__gsessionid,)
        else:
          uri += '?gsessionid=%s' % (self.__gsessionid,)
    if data and media_source:
      # Entry XML plus media: build a MIME multipart/related body with the
      # fixed boundary END_OF_PART. Parts are: preamble+atom headers,
      # entry XML, media headers, media bytes, closing boundary.
      if ElementTree.iselement(data):
        data_str = ElementTree.tostring(data)
      else:
        data_str = str(data)
      multipart = []
      multipart.append('Media multipart posting\r\n--END_OF_PART\r\n' + \
          'Content-Type: application/atom+xml\r\n\r\n')
      multipart.append('\r\n--END_OF_PART\r\nContent-Type: ' + \
          media_source.content_type+'\r\n\r\n')
      multipart.append('\r\n--END_OF_PART--\r\n')
      extra_headers['MIME-version'] = '1.0'
      # Content-Length is the sum of all framing parts plus the entry XML
      # and the media payload itself.
      extra_headers['Content-Length'] = str(len(multipart[0]) +
          len(multipart[1]) + len(multipart[2]) +
          len(data_str) + media_source.content_length)
      extra_headers['Content-Type'] = 'multipart/related; boundary=END_OF_PART'
      server_response = self.request(verb, uri,
          data=[multipart[0], data_str, multipart[1], media_source.file_handle,
              multipart[2]], headers=extra_headers)
      result_body = server_response.read()
    elif media_source or isinstance(data, gdata.MediaSource):
      # Media-only upload: the body is the raw media stream.
      if isinstance(data, gdata.MediaSource):
        media_source = data
      extra_headers['Content-Length'] = str(media_source.content_length)
      extra_headers['Content-Type'] = media_source.content_type
      server_response = self.request(verb, uri,
          data=media_source.file_handle, headers=extra_headers)
      result_body = server_response.read()
    else:
      # Plain atom entry upload.
      http_data = data
      content_type = 'application/atom+xml'
      extra_headers['Content-Type'] = content_type
      server_response = self.request(verb, uri, data=http_data,
          headers=extra_headers)
      result_body = server_response.read()
    # Server returns 201 for most post requests, but when performing a batch
    # request the server responds with a 200 on success.
    if server_response.status == 201 or server_response.status == 200:
      if converter:
        return converter(result_body)
      # No converter: try feed first, then entry, else the raw body.
      feed = gdata.GDataFeedFromString(result_body)
      if not feed:
        entry = gdata.GDataEntryFromString(result_body)
        if not entry:
          return result_body
        return entry
      return feed
    elif server_response.status == 302:
      if redirects_remaining > 0:
        location = server_response.getheader('Location')
        if location is not None:
          # Capture the gsessionid from the redirect URL for future requests.
          m = re.compile('[\?\&]gsessionid=(\w*)').search(location)
          if m is not None:
            self.__gsessionid = m.group(1)
          # NOTE(review): the retry always goes through Post (not PostOrPut
          # with the original verb) — a redirected PUT is re-sent as POST;
          # verify this is intended.
          return GDataService.Post(self, data, location, extra_headers,
              url_params, escape_params, redirects_remaining - 1,
              media_source, converter=converter)
        else:
          raise RequestError, {'status': server_response.status,
              'reason': '302 received without Location header',
              'body': result_body}
      else:
        raise RequestError, {'status': server_response.status,
            'reason': 'Redirect received, but redirects_remaining <= 0',
            'body': result_body}
    else:
      raise RequestError, {'status': server_response.status,
          'reason': server_response.reason, 'body': result_body}
def Put(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=3, media_source=None,
converter=None):
"""Updates an entry at the given URI.
Args:
data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
XML containing the updated data.
uri: string A URI indicating entry to which the update will be applied.
Example: '/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
converter: func (optional) A function which will be executed on the
server's response. Often this is a function like
GDataEntryFromString which will parse the body of the server's
response and return a GDataEntry.
Returns:
If the put succeeded, this method will return a GDataFeed, GDataEntry,
or the results of running converter on the server's result body (if
converter was specified).
"""
return GDataService.PostOrPut(self, 'PUT', data, uri,
extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, redirects_remaining=redirects_remaining,
media_source=media_source, converter=converter)
  def Delete(self, uri, extra_headers=None, url_params=None,
      escape_params=True, redirects_remaining=4):
    """Deletes the entry at the given URI.

    Args:
      uri: string The URI of the entry to be deleted. Example:
           '/base/feeds/items/ITEM-ID'
      extra_headers: dict (optional) HTTP headers which are to be included.
                     The client automatically sets the Content-Type and
                     Authorization headers.
      url_params: dict (optional) Additional URL parameters to be included
                  in the URI. These are translated into query arguments
                  in the form '&dict_key=value&...'.
                  Example: {'max-results': '250'} becomes &max-results=250
      escape_params: boolean (optional) If false, the calling code has already
                     ensured that the query will form a valid URL (all
                     reserved characters have been escaped). If true, this
                     method will escape the query and any URL parameters
                     provided.

    Returns:
      True if the entry was deleted.

    Raises:
      RequestError: on any non-200 status, on a 302 without a Location
          header, or when the redirect budget is exhausted.
    """
    if extra_headers is None:
      extra_headers = {}
    # Propagate a previously-captured gsessionid unless the URI already
    # carries one.
    if self.__gsessionid is not None:
      if uri.find('gsessionid=') < 0:
        if uri.find('?') > -1:
          uri += '&gsessionid=%s' % (self.__gsessionid,)
        else:
          uri += '?gsessionid=%s' % (self.__gsessionid,)
    server_response = self.request('DELETE', uri,
        headers=extra_headers)
    result_body = server_response.read()
    if server_response.status == 200:
      return True
    elif server_response.status == 302:
      if redirects_remaining > 0:
        location = server_response.getheader('Location')
        if location is not None:
          # Remember the gsessionid from the redirect URL, then retry the
          # delete against the new location with one fewer redirect allowed.
          m = re.compile('[\?\&]gsessionid=(\w*)').search(location)
          if m is not None:
            self.__gsessionid = m.group(1)
          return GDataService.Delete(self, location, extra_headers,
              url_params, escape_params, redirects_remaining - 1)
        else:
          raise RequestError, {'status': server_response.status,
              'reason': '302 received without Location header',
              'body': result_body}
      else:
        raise RequestError, {'status': server_response.status,
            'reason': 'Redirect received, but redirects_remaining <= 0',
            'body': result_body}
    else:
      raise RequestError, {'status': server_response.status,
          'reason': server_response.reason, 'body': result_body}
def ExtractToken(url, scopes_included_in_next=True):
  """Gets the AuthSub token from the current page's URL.

  Designed to be used on the URL that the browser is sent to after the user
  authorizes this application at the page given by GenerateAuthSubRequestUrl.

  Args:
    url: The current page's URL. It should contain the token as a URL
        parameter. Example: 'http://example.com/?...&token=abcd435'
    scopes_included_in_next: If True, this function looks for a scope value
        associated with the token. The scope is a URL parameter with the
        key set to SCOPE_URL_PARAM_NAME. This parameter should be present
        if the AuthSub request URL was generated using
        GenerateAuthSubRequestUrl with include_scope_in_next set to True.

  Returns:
    A tuple containing the token string and a list of scope strings for which
    this token should be valid. If no scope parameter was found (or
    scopes_included_in_next is False), the scope list is [''] — the result
    of splitting the empty string.
  """
  # parsed[4] is the query component of the URL.
  parsed = urlparse.urlparse(url)
  token = gdata.auth.AuthSubTokenFromUrl(parsed[4])
  scopes = ''
  if scopes_included_in_next:
    # Scan every query parameter; if several scope parameters are present,
    # the last one wins.
    for pair in parsed[4].split('&'):
      if pair.startswith('%s=' % SCOPE_URL_PARAM_NAME):
        scopes = urllib.unquote_plus(pair.split('=')[1])
  # NOTE(review): when no scope is found this returns (token, ['']) rather
  # than (token, None); callers relying on a None scope should verify.
  return (token, scopes.split(' '))
def GenerateAuthSubRequestUrl(next, scopes, hd='default', secure=False,
    session=True, request_url='http://www.google.com/accounts/AuthSubRequest',
    include_scopes_in_next=True):
  """Creates a URL to request an AuthSub token to access Google services.

  For more details on AuthSub, see the documentation here:
  http://code.google.com/apis/accounts/docs/AuthSub.html

  Args:
    next: The URL where the browser should be sent after the user authorizes
        the application. This page is responsible for receiving the token
        which is embedded in the URL as a parameter.
    scopes: The base URL to which access will be granted. Example:
        'http://www.google.com/calendar/feeds' will grant access to all
        URLs in the Google Calendar data API. If you would like a token for
        multiple scopes, pass in a list of URL strings.
    hd: The domain to which the user's account belongs. This is set to the
        domain name if you are using Google Apps. Example: 'example.org'
        Defaults to 'default'
    secure: If set to True, all requests should be signed. The default is
        False.
    session: If set to True, the token received by the 'next' URL can be
        upgraded to a multiuse session token. If session is set to False, the
        token may only be used once and cannot be upgraded. Default is True.
    request_url: The base of the URL to which the user will be sent to
        authorize this application to access their data. The default is
        'http://www.google.com/accounts/AuthSubRequest'.
    include_scopes_in_next: Boolean if set to true, the 'next' parameter will
        be modified to include the requested scope as a URL parameter. The
        key for the next's scope parameter will be SCOPE_URL_PARAM_NAME. The
        benefit of including the scope URL as a parameter to the next URL, is
        that the page which receives the AuthSub token will be able to tell
        which URLs the token grants access to.

  Returns:
    A URL string to which the browser should be sent.
  """
  # Multiple scopes are collapsed into a single space-separated string.
  if isinstance(scopes, list):
    scope = ' '.join(scopes)
  else:
    scope = scopes
  if include_scopes_in_next:
    # Append the scope to the 'next' URL, choosing '?' or '&' depending on
    # whether the URL already has a query string.
    if next.find('?') > -1:
      separator = '&'
    else:
      separator = '?'
    next = '%s%s%s' % (next, separator,
        urllib.urlencode({SCOPE_URL_PARAM_NAME: scope}))
  return gdata.auth.GenerateAuthSubUrl(next=next, scope=scope, secure=secure,
      session=session, request_url=request_url, domain=hd)
class Query(dict):
  """Builds a query URL for use in GET requests.

  URL parameters are plain key-value pairs stored on this object, since it
  subclasses dict. For example, to add &max-results=25 to the URL do
    my_query['max-results'] = 25

  Category restrictions are collected in the categories list; on conversion
  to a URI all items are joined with '/' (category x AND y). To OR two
  categories, append them as a single string joined by '|', e.g.
  query.categories.append('Fritz|Laurie') produces feed/-/Fritz%7CLaurie,
  matching results in either category.
  """

  def __init__(self, feed=None, text_query=None, params=None,
      categories=None):
    """Constructor for Query

    Args:
      feed: str (optional) The path for the feed (Examples:
          '/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full'
      text_query: str (optional) The contents of the q query parameter. The
          contents of the text_query are URL escaped upon conversion to a URI.
      params: dict (optional) Parameter value string pairs which become URL
          params when translated to a URI. These parameters are added to the
          query's items (key-value pairs).
      categories: list (optional) List of category strings which should be
          included as query categories. See
          http://code.google.com/apis/gdata/reference.html#Queries for
          details. If you want to get results from category A or B (both
          categories), specify a single list item 'A|B'.
    """
    self.feed = feed
    self.categories = []
    if text_query:
      self.text_query = text_query
    if isinstance(params, dict):
      self.update(params)
    if isinstance(categories, list):
      self.categories.extend(categories)

  def _GetTextQuery(self):
    return self.get('q')

  def _SetTextQuery(self, query):
    self['q'] = query

  text_query = property(_GetTextQuery, _SetTextQuery,
      doc="""The feed query's q parameter""")

  def _GetAuthor(self):
    return self.get('author')

  def _SetAuthor(self, query):
    self['author'] = query

  author = property(_GetAuthor, _SetAuthor,
      doc="""The feed query's author parameter""")

  def _GetAlt(self):
    return self.get('alt')

  def _SetAlt(self, query):
    self['alt'] = query

  alt = property(_GetAlt, _SetAlt,
      doc="""The feed query's alt parameter""")

  def _GetUpdatedMin(self):
    return self.get('updated-min')

  def _SetUpdatedMin(self, query):
    self['updated-min'] = query

  updated_min = property(_GetUpdatedMin, _SetUpdatedMin,
      doc="""The feed query's updated-min parameter""")

  def _GetUpdatedMax(self):
    return self.get('updated-max')

  def _SetUpdatedMax(self, query):
    self['updated-max'] = query

  updated_max = property(_GetUpdatedMax, _SetUpdatedMax,
      doc="""The feed query's updated-max parameter""")

  def _GetPublishedMin(self):
    return self.get('published-min')

  def _SetPublishedMin(self, query):
    self['published-min'] = query

  published_min = property(_GetPublishedMin, _SetPublishedMin,
      doc="""The feed query's published-min parameter""")

  def _GetPublishedMax(self):
    return self.get('published-max')

  def _SetPublishedMax(self, query):
    self['published-max'] = query

  published_max = property(_GetPublishedMax, _SetPublishedMax,
      doc="""The feed query's published-max parameter""")

  def _GetStartIndex(self):
    return self.get('start-index')

  def _SetStartIndex(self, query):
    # Values are always stored as strings so the URI builder sees text.
    value = query
    if not isinstance(value, str):
      value = str(value)
    self['start-index'] = value

  start_index = property(_GetStartIndex, _SetStartIndex,
      doc="""The feed query's start-index parameter""")

  def _GetMaxResults(self):
    return self.get('max-results')

  def _SetMaxResults(self, query):
    # Values are always stored as strings so the URI builder sees text.
    value = query
    if not isinstance(value, str):
      value = str(value)
    self['max-results'] = value

  max_results = property(_GetMaxResults, _SetMaxResults,
      doc="""The feed query's max-results parameter""")

  def _GetOrderBy(self):
    return self.get('orderby')

  def _SetOrderBy(self, query):
    self['orderby'] = query

  orderby = property(_GetOrderBy, _SetOrderBy,
      doc="""The feed query's orderby parameter""")

  def ToUri(self):
    """Renders this query as a request URI string."""
    target = self.feed or ''
    # Append the category restriction segment if any categories were set.
    if self.categories:
      encoded = [urllib.quote_plus(c) for c in self.categories]
      target = target + '/-/' + '/'.join(encoded)
    return atom.service.BuildUri(target, self)

  def __str__(self):
    return self.ToUri()
| Python |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2008, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.0.7a"
__copyright__ = "Copyright (c) 2004-2008 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
# These hacks make Beautiful Soup able to parse XML with namespaces:
# widen sgmllib's tag-name pattern and markupbase's declaration-name
# matcher so that names may contain '-', '_', '.' and ':' (e.g.
# 'atom:entry'), which the stock parsers reject.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match

# Encoding used when rendering output and no other encoding is specified.
DEFAULT_OUTPUT_ENCODING = "utf-8"
# First, the classes that represent markup elements.
class PageElement:
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
def setup(self, parent=None, previous=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = None
self.previousSibling = None
self.nextSibling = None
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def replaceWith(self, replaceWith):
oldParent = self.parent
myIndex = self.parent.contents.index(self)
if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
# We're replacing this element with one of its siblings.
index = self.parent.contents.index(replaceWith)
if index and index < myIndex:
# Furthermore, it comes before this element. That
# means that when we extract it, the index of this
# element will change.
myIndex = myIndex - 1
self.extract()
oldParent.insert(myIndex, replaceWith)
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent:
try:
self.parent.contents.remove(self)
except ValueError:
pass
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
return self
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild
def insert(self, position, newChild):
if (isinstance(newChild, basestring)
or isinstance(newChild, unicode)) \
and not isinstance(newChild, NavigableString):
newChild = NavigableString(newChild)
position = min(position, len(self.contents))
if hasattr(newChild, 'parent') and newChild.parent != None:
# We're 'inserting' an element that's already one
# of this object's children.
if newChild.parent == self:
index = self.find(newChild)
if index and index < position:
# Furthermore we're moving it further down the
# list of this object's children. That means that
# when we extract this element, our target index
# will jump down one.
position = position - 1
newChild.extract()
newChild.parent = self
previousChild = None
if position == 0:
newChild.previousSibling = None
newChild.previous = self
else:
previousChild = self.contents[position-1]
newChild.previousSibling = previousChild
newChild.previousSibling.nextSibling = newChild
newChild.previous = previousChild._lastRecursiveChild()
if newChild.previous:
newChild.previous.next = newChild
newChildsLastElement = newChild._lastRecursiveChild()
if position >= len(self.contents):
newChild.nextSibling = None
parent = self
parentsNextSibling = None
while not parentsNextSibling:
parentsNextSibling = parent.nextSibling
parent = parent.parent
if not parent: # This is the last element in the document.
break
if parentsNextSibling:
newChildsLastElement.next = parentsNextSibling
else:
newChildsLastElement.next = None
else:
nextChild = self.contents[position]
newChild.nextSibling = nextChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
newChildsLastElement.next = nextChild
if newChildsLastElement.next:
newChildsLastElement.next.previous = newChildsLastElement
self.contents.insert(position, newChild)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator,
**kwargs)
def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
**kwargs)
def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.nextSiblingGenerator, **kwargs)
fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.previousGenerator,
**kwargs)
fetchPrevious = findAllPrevious # Compatibility with pre-3.x
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._findOne(self.findPreviousSiblings, name, attrs, text,
**kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.previousSiblingGenerator, **kwargs)
fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
def findParent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _findOne because findParents takes a different
# set of arguments.
r = None
l = self.findParents(name, attrs, 1)
if l:
r = l[0]
return r
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._findAll(name, attrs, None, limit, self.parentGenerator,
**kwargs)
fetchParents = findParents # Compatibility with pre-3.x
#These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
    """Iterates over a generator looking for things that match.

    name/attrs/text: the matching criteria; a pre-built SoupStrainer
      may be passed as 'name' instead.
    limit: stop after this many matches (falsy means unlimited).
    generator: bound generator method yielding candidate elements.

    Returns a ResultSet of everything the strainer matched.
    """
    if isinstance(name, SoupStrainer):
        strainer = name
    else:
        # Build a SoupStrainer from the loose criteria.
        strainer = SoupStrainer(name, attrs, text, **kwargs)
    results = ResultSet(strainer)
    # The original drove the generator by hand with g.next() and caught
    # StopIteration; a plain for-loop is semantically identical and also
    # works on Python 3, where generators expose __next__ instead.
    for candidate in generator():
        # Generators in this module yield a trailing None; skip falsy items.
        if candidate:
            found = strainer.search(candidate)
            if found:
                results.append(found)
                if limit and len(results) >= limit:
                    break
    return results
#These Generators can be used to navigate starting from both
#NavigableStrings and Tags.
def nextGenerator(self):
    """Yields each element after this one in document order.

    Note: the final value yielded is the terminating None; callers
    such as _findAll filter falsy values out."""
    current = self
    while current:
        current = current.next
        yield current
def nextSiblingGenerator(self):
    """Yields each later sibling of this element, ending with a
    final None (which callers filter out)."""
    current = self
    while current:
        current = current.nextSibling
        yield current
def previousGenerator(self):
    """Yields each element before this one in document order, ending
    with a final None (which callers filter out)."""
    current = self
    while current:
        current = current.previous
        yield current
def previousSiblingGenerator(self):
    """Yields each earlier sibling of this element, ending with a
    final None (which callers filter out)."""
    current = self
    while current:
        current = current.previousSibling
        yield current
def parentGenerator(self):
    """Yields each ancestor of this element, ending with a final
    None (which callers filter out)."""
    current = self
    while current:
        current = current.parent
        yield current
# Utility methods
def substituteEncoding(self, str, encoding=None):
    """Fills the %SOUP-ENCODING% placeholder in 'str' with the given
    encoding name (defaulting to utf-8)."""
    if not encoding:
        encoding = "utf-8"
    return str.replace("%SOUP-ENCODING%", encoding)
def toEncoding(self, s, encoding=None):
    """Coerces an object to a byte string in the given encoding, or
    to a Unicode string when no encoding is given. (Python 2: relies
    on the 'unicode' builtin.)"""
    if isinstance(s, unicode):
        if encoding:
            return s.encode(encoding)
        return s
    if isinstance(s, str):
        if encoding:
            return s.encode(encoding)
        return unicode(s)
    # Not a string at all: stringify first, then convert as above.
    if encoding:
        return self.toEncoding(str(s), encoding)
    return unicode(s)
class NavigableString(unicode, PageElement):
    """A Unicode string that also lives in the parse tree, so it can
    be navigated like a Tag."""

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickle support: reconstruct from the encoded byte-string form,
        # which __new__ decodes with DEFAULT_OUTPUT_ENCODING (see above).
        return (NavigableString.__str__(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)

    def __unicode__(self):
        # Round-trips through the byte encoding so subclasses that add
        # wrappers in __str__ (e.g. CData) render them in Unicode too.
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # With no encoding, return self (already Unicode); otherwise
        # encode to a byte string.
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    """A CDATA section node."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # Wrap the encoded string content in the CDATA markers.
        content = NavigableString.__str__(self, encoding)
        return "<![CDATA[%s]]>" % content
class ProcessingInstruction(NavigableString):
    """A processing instruction node, e.g. an XML declaration."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = self
        # An XML declaration may carry a %SOUP-ENCODING% placeholder
        # that gets filled with the real encoding at rendering time.
        if "%SOUP-ENCODING%" in text:
            text = self.substituteEncoding(text, encoding)
        return "<?%s?>" % self.toEncoding(text, encoding)
class Comment(NavigableString):
    """An HTML/XML comment node."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        content = NavigableString.__str__(self, encoding)
        return "<!--%s-->" % content
class Declaration(NavigableString):
    """An SGML declaration node (e.g. a DOCTYPE)."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        content = NavigableString.__str__(self, encoding)
        return "<!%s>" % content
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs == None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
# Convert any HTML, XML, or numeric entities in the attribute values.
convert = lambda(k, val): (k,
re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities,
val))
self.attrs = map(convert, self.attrs)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isString(val):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
# TODO: replace with apos when
# appropriate.
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def decompose(self):
"""Recursively destroys the contents of this tree."""
contents = [i for i in self.contents]
for i in contents:
if isinstance(i, Tag):
i.decompose()
else:
i.extract()
self.extract()
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
encoding. If encoding is None, returns a Unicode string.."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
for i in range(0, len(self.contents)):
yield self.contents[i]
raise StopIteration
def recursiveChildGenerator(self):
stack = [(self, 0)]
while stack:
tag, start = stack.pop()
if isinstance(tag, Tag):
for i in range(start, len(tag.contents)):
a = tag.contents[i]
yield a
if isinstance(a, Tag) and tag.contents:
if i < len(tag.contents) - 1:
stack.append((tag, i+1))
stack.append((a, 0))
break
raise StopIteration
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        # A bare string 'attrs' argument is shorthand for matching on the
        # CSS class attribute.
        if isString(attrs):
            kwargs['class'] = attrs
            attrs = None
        # Fold any extra keyword criteria into the attrs map (copying so
        # the caller's dict isn't mutated).
        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        """Matches this strainer against a tag, given either a parsed Tag
        (as 'markupName') or a raw (name, attrs) pair. Returns the match
        or None."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        # A callable name criterion gets the raw tag data, unless we were
        # handed an already-built Tag.
        callFunctionWithTagData = callable(self.name) \
                                  and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                # Lazily build a dict view of the attributes; a Tag passed
                # as markupAttrs already supports .get().
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k, v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found

    def _matches(self, markup, matchAgainst):
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        # matchAgainst=True (the boolean, not merely truthy) means "match
        # anything that exists".
        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
            result = markup != None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isString(markup):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif isList(matchAgainst):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): 'markup' is a string (or None) here, which
                # has no has_key method — this branch looks broken and
                # would raise AttributeError; left as-is from upstream.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                # Normalize matchAgainst to the same string type as markup
                # before the final equality comparison below.
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # BUG FIX: the original called list.__init__([]) — initializing a
        # throwaway list instead of self. Harmless for 'list' (its no-arg
        # __init__ is a no-op), but wrong in intent.
        list.__init__(self)
        # The SoupStrainer that produced this set of results.
        self.source = source
# Now, some helper functions.
def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike."""
    if hasattr(l, '__iter__'):
        return True
    return type(l) in (types.ListType, types.TupleType)
def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike."""
    try:
        # Python 2: unicode and str are both subclasses of basestring.
        return isinstance(s, (unicode, basestring))
    except NameError:
        # No 'unicode' builtin: fall back to plain str.
        return isinstance(s, str)
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # It's a map: merge its entries.
            for key, value in portion.items():
                built[key] = value
        elif isList(portion):
            # It's a list: map every item to the default.
            for key in portion:
                built[key] = default
        else:
            # It's a scalar: map it to the default.
            built[portion] = default
    return built
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:

      You can't close a tag without closing all the tags it encloses.
      That is, "<foo><bar></foo>" actually means
      "<foo><bar></bar></foo>".

    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]

    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""

    # Per-dialect tag-behavior tables; subclasses (e.g. BeautifulSoup)
    # override these with HTML-specific knowledge.
    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}
    PRESERVE_WHITESPACE_TAGS = []

    # Regex fixups applied to the markup before parsing; see __init__'s
    # docstring for the two cases these repair.
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]

    ROOT_TAG_NAME = u'[document]'

    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES

    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }

    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.

        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.

        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:

         <br/> (No space between name of closing tag and tag close)
         <! --Comment--> (Extraneous whitespace in declaration)

        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False

        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)

        if hasattr(markup, 'read'):  # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            self._feed(isHTML=isHTML)
        except StopParsing:
            pass
        self.markup = None  # The markup can now be GCed

    def convert_charref(self, name):
        """This method fixes a bug in Python's SGMLParser."""
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
            return
        return self.convert_codepoint(n)

    def _feed(self, inDocumentEncoding=None, isHTML=False):
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            # Let UnicodeDammit sniff the encoding and decode for us.
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                if not isList(self.markupMassage):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()

        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def __getattr__(self, methodName):
        """This method routes method call requests to either the SGMLParser
        superclass or the Tag superclass, depending on the method name."""
        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)
        if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
               or methodName.find('do_') == 0:
            return SGMLParser.__getattr__(self, methodName)
        elif methodName.find('__') != 0:
            return Tag.__getattr__(self, methodName)
        else:
            raise AttributeError

    def isSelfClosingTag(self, name):
        """Returns true iff the given string is the name of a
        self-closing tag according to this parser."""
        return self.SELF_CLOSING_TAGS.has_key(name) \
               or self.instanceSelfClosingTags.has_key(name)

    def reset(self):
        # Reinitialize ourselves as the root tag of the document, then
        # reset all the parser bookkeeping.
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        SGMLParser.reset(self)
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        self.pushTag(self)

    def popTag(self):
        tag = self.tagStack.pop()
        # Tags with just one string-owning child get the child as a
        # 'string' property, so that soup.tag.string is shorthand for
        # soup.tag.contents[0]
        if len(self.currentTag.contents) == 1 and \
           isinstance(self.currentTag.contents[0], NavigableString):
            self.currentTag.string = self.currentTag.contents[0]

        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]

    def endData(self, containerClass=NavigableString):
        """Flushes accumulated character data into the tree as a
        containerClass node (NavigableString by default)."""
        if self.currentData:
            currentData = u''.join(self.currentData)
            # Collapse all-ASCII-whitespace text to a single newline or
            # space, unless we're inside a whitespace-preserving tag.
            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
                not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            # With parseOnlyThese, top-level text that doesn't match the
            # strainer is dropped.
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)

    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            return

        numPops = 0
        mostRecentTag = None
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1

        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag

    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.

        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurrence.
                popTo = name
                break
            if (nestingResetTriggers != None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers == None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):
                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)

    def unknown_starttag(self, name, attrs, selfClosing=0):
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)

        # With parseOnlyThese, top-level tags that don't match the
        # strainer are never added to the tree.
        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            self.quoteStack.append(name)
            self.literal = 1
        return tag

    def unknown_endtag(self, name):
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)

    def handle_data(self, data):
        # Character data is buffered until endData() flushes it.
        self.currentData.append(data)

    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        self.endData()
        self.handle_data(text)
        self.endData(subclass)

    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)

    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)

    def handle_charref(self, ref):
        "Handle character references as data."
        if self.convertEntities:
            data = unichr(int(ref))
        else:
            data = '&#%s;' % ref
        self.handle_data(data)

    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass

        if not data and self.convertXMLEntities:
                data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

        if not data and self.convertHTMLEntities and \
            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
                # TODO: We've got a problem here. We're told this is
                # an entity reference, but it's not an XML entity
                # reference or an HTML entity reference. Nonetheless,
                # the logical thing to do is to pass it through as an
                # unrecognized entity reference.
                #
                # Except: when the input is "&carol;" this function
                # will be called with input "carol". When the input is
                # "AT&T", this function will be called with input
                # "T". We have no way of knowing whether a semicolon
                # was present originally, so we don't know whether
                # this is an unknown entity or just a misplaced
                # ampersand.
                #
                # The more common case is a misplaced ampersand, so I
                # escape the ampersand and omit the trailing semicolon.
                data = "&%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)

    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)

    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            # Everything up to the closing ']]>' (or end of input) is the
            # CDATA payload.
            k = self.rawdata.find(']]>', i)
            if k == -1:
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = SGMLParser.parse_declaration(self, i)
            except SGMLParseError:
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurance of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurance
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
        but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        # Default smart-quote handling to HTML entities (callers may
        # override), and mark the input as HTML for the base class.
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    # Tags that never take a closing tag.
    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ['br' , 'hr', 'input', 'img', 'meta',
                                     'spacer', 'link', 'frame', 'base'])

    # Whitespace inside these tags is significant and must not be collapsed.
    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    # Contents of these tags are treated as literal text, not markup.
    QUOTE_TAGS = {'script' : None, 'textarea' : None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center']

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None      # position of the 'content' attribute
        tagNeedsEncodingSubstitution = False

        # Scan the attributes for http-equiv / content pairs.
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        # Abort this pass; _feed re-parsed everything.
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised to abort the current parse pass after a newly detected
    document encoding triggers a full re-parse (see start_meta)."""
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-co-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # FIX: the original list contained 'strong' and 'big' twice; the
    # duplicates were redundant (the list is fed into buildTagMap, which
    # keys entries by tag name) and have been removed.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
        ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
         'cite', 'code', 'dfn', 'kbd', 'samp', 'var', 'b']

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']

    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # Only 'noscript' resets nesting; no tag is considered nestable.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        # When a tag is closed, copy its single string child onto the
        # parent as an attribute (unless the parent already has one with
        # that name), then let the base class pop it normally.
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }

    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        # NOTE(review): the mutable default for overrideEncodings is never
        # mutated here, so it is harmless, but None + fallback would be safer.
        self.declaredHTMLEncoding = None
        # Strip any BOM / sniff a declared encoding before trying codecs.
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        # Already-Unicode (or empty) input needs no conversion.
        if markup == '' or isinstance(markup, unicode):
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return

        u = None
        # 1) Encodings the caller explicitly asked for.
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        # 2) The document-declared encoding, then the sniffed one.
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # 3) If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # 4) As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break

        self.unicode = u
        if not u: self.originalEncoding = None

    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        # Tuple entries carry (html-entity-name, hex-codepoint); plain
        # strings (e.g. '?') are substituted as-is.
        if type(sub) == types.TupleType:
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub

    def _convertFrom(self, proposed):
        # Try decoding self.markup with *proposed*; returns the Unicode
        # result on success, None on failure. Each codec is tried once.
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda(x): self._subMSChar(x.group(1)),
                      markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''

        # strip Byte Order Mark (if present)
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata

    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding."""
        xml_encoding = sniffed_xml_encoding = None
        try:
            # Sniff the byte pattern of the first few bytes (BOMs and the
            # '<?' of an XML declaration in various encodings).
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            # Decoding failed; fall through with no declared encoding.
            xml_encoding_match = None
        # Look for an explicit encoding in an XML declaration ...
        xml_encoding_match = re.compile(
            '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
        # ... or, for HTML, in a META charset attribute.
        if not xml_encoding_match and isHTML:
            regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
            # A sniffed multi-byte encoding beats a generic declared one.
            if sniffed_xml_encoding and \
               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
                                 'utf16', 'u16')):
                xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding

    def find_codec(self, charset):
        # Resolve *charset* to a known Python codec name, trying aliases
        # and common hyphen/underscore spellings; falls back to the input.
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset

    def _codec(self, charset):
        # Return *charset* if Python has a codec for it, else None.
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec

    # Lazily built translation table; see _ebcdic_to_ascii.
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        # Translate an EBCDIC byte string to its ASCII equivalent.
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
                ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    # windows-1252 "smart" characters mapped to
    # (html entity name, hex codepoint) pairs, or a literal replacement.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    import sys
    # Read an HTML document from stdin and pretty-print its parse tree.
    soup = BeautifulSoup(sys.stdin)
    print soup.prettify()
| Python |
import cgi
import datetime
import wsgiref.handlers
import gdata.service
from src.handlers.welcome import Welcome
from src.handlers.auth import AuthHandler
from src.handlers.test import *
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext import webapp
# URL routing table for the App Engine WSGI application.
application = webapp.WSGIApplication([
    ('/', Welcome),
    ('/test', Test),
    ('/testCrawler', TestCrawler),
    ('/auth', AuthHandler)
], debug=True)
def main():
    # App Engine CGI entry point: serve the routed WSGI application.
    wsgiref.handlers.CGIHandler().run(application)

if __name__ == '__main__':
    main()
| Python |
import cgi
import datetime
import wsgiref.handlers
import gdata.service
from src.Authenticator import Authenticator
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext import webapp
class AuthHandler(webapp.RequestHandler):
    """Receives the AuthSub callback and stores the returned token
    for the signed-in user, then sends the browser back home."""

    def get(self):
        # AuthSub hands the single-use token back as a query parameter.
        token = self.request.get("token")
        account = users.get_current_user()
        if token and account:
            Authenticator(account).Authenticate(token)
        self.redirect("/")
| Python |
import wsgiref.handlers
from src.BeautifulSoup import BeautifulSoup
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
class Test(webapp.RequestHandler):
    """Lists the available manual test pages as an HTML bullet list."""

    def get(self):
        self.response.out.write("<ul>")
        # BUGFIX: the original wrote r"<a href=""/testCrawler"">..." —
        # doubled quotes inside a double-quoted Python string are adjacent
        # string literals (concatenation), not escapes, so the href value
        # was emitted without surrounding quotes.
        self.response.out.write('<li><a href="/testCrawler">Test crawler</a></li>')
        self.response.out.write("</ul>")
class TestCrawler(webapp.RequestHandler):
    """Fetches a fixed page via urlfetch, parses it with BeautifulSoup
    and dumps every anchor tag into the response."""

    _url = "http://www.google.ru"

    def get(self):
        headers = {'Accept-Charset': 'utf-8'}
        page = urlfetch.fetch(self._url, None, urlfetch.GET, headers)
        anchors = BeautifulSoup(page.content)('a')
        self.response.out.write(anchors)
class TestXslt(webapp.RequestHandler):
    """Placeholder for an XSLT test page; does nothing yet."""

    def get(self):
        return
import cgi
import os
import datetime
import wsgiref.handlers
import gdata.service
from src.Authenticator import Authenticator
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
class UserIndex():
    """Renders the index page for a signed-in user, listing the user's
    Google Docs feed (or the error body when the feed request fails)."""

    def renderTemplate(self, response):
        # Write the rendered index template into *response*.
        user = users.get_current_user()
        auth = Authenticator(user)
        try:
            auth.client.SetAuthSubToken(auth.Profile.token)
            results = auth.client.Get("http://docs.google.com/feeds/documents/private/full")
        except gdata.service.RequestError, request_error:
            # Show the raw error body on the page rather than failing.
            results = request_error[0]["body"]
        template_values = {
            'user': users.get_current_user(),
            'profile': auth.Profile,
            'auth_url': auth.Link,
            'logout_url': users.create_logout_url("/"),
            'results': results
        }
        path = os.path.join(os.path.dirname(__file__), '../../templates/index.html')
        response.out.write(template.render(path, template_values))
class GuestIndex():
    """Renders the welcome page shown to anonymous visitors."""

    def renderTemplate(self, response):
        # Only a login link is needed for guests.
        context = {
            'login_url': users.create_login_url("/"),
        }
        tpl = os.path.join(os.path.dirname(__file__), '../../templates/welcome.html')
        response.out.write(template.render(tpl, context))
class Welcome(webapp.RequestHandler):
    """Front page: user index when signed in, welcome page otherwise."""

    def get(self):
        if users.get_current_user():
            renderer = UserIndex()
        else:
            renderer = GuestIndex()
        renderer.renderTemplate(self.response)
| Python |
from google.appengine.ext import db
class Profile(db.Model):
    """Datastore entity linking an App Engine account to its AuthSub token."""
    # the account this profile belongs to
    user = db.UserProperty()
    # AuthSub session token; "" until the user has authenticated
    token = db.StringProperty(required = False)
import urllib
import gdata.service
import gdata.alt.appengine
from src.Profile import Profile
from google.appengine.ext import db
class Authenticator(object):
    """Handles AuthSub authentication for one user: builds the
    authorization URL, upgrades callback tokens to session tokens, and
    persists the token on the user's Profile entity."""

    # constants
    TOKEN_SCOPE = "http://docs.google.com/feeds/"
    HOST_NAME = "usverg-er.appspot.com"
    BACK_HANDLER = "auth"

    # private fields
    _profile = None  # cached Profile entity, loaded lazily by _getProfile

    # constructor
    def __init__(self, user):
        self.user = user
        self.client = gdata.service.GDataService()
        gdata.alt.appengine.run_on_appengine(self.client)
        # BUGFIX: the original passed ``Profile.token`` — the class-level
        # db.StringProperty descriptor — instead of the stored token of
        # this user's profile instance.
        self.client.SetAuthSubToken(self.Profile.token)

    # getters-setters
    def _getLink(self):
        # Build the AuthSub authorization URL that sends the user back
        # to our /auth handler with a single-use token.
        backurl = "http://" + self.HOST_NAME + "/" + self.BACK_HANDLER + "?token_scope=" + self.TOKEN_SCOPE
        return self.client.GenerateAuthSubURL(backurl, self.TOKEN_SCOPE, secure=False, session=True)

    def _getProfile(self):
        # Return (and cache) this user's Profile, creating an empty one
        # if no entity exists in the datastore yet.
        if self._profile:
            return self._profile
        profile = db.GqlQuery('SELECT * FROM Profile WHERE user = :1', self.user).get()
        if not profile:
            profile = Profile(user=self.user, token="")
        self._profile = profile
        return self._profile

    # public methods
    def Authenticate(self, newToken):
        # Upgrade the single-use callback token to a session token and
        # persist it on the profile.
        self.client.SetAuthSubToken(newToken)
        self.client.UpgradeToSessionToken()
        self.Profile.token = self.client.GetAuthSubToken()
        self.Profile.put()

    # properties
    Link = property(fget=_getLink)
    Profile = property(fget=_getProfile)
| Python |
# commonplugs/todo.py
#
#
""" manage todo lists per users .. a time/data string can be provided to set
time on a todo item.
"""
## gozerlib imports
from gozerlib.utils.generic import getwho
from gozerlib.utils.timeutils import strtotime, striptime, today
from gozerlib.utils.locking import lockdec
from gozerlib.utils.exception import handle_exception
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.users import users
from gozerlib.datadir import datadir
from gozerlib.persist import PlugPersist
from gozerlib.persiststate import UserState
from gozerlib.config import cfg as config
from gozerlib.utils.lazydict import LazyDict
## basic imports
import time
import thread
import os
from datetime import datetime, timedelta
from time import localtime
## defines

# module-level lock; ``locked`` decorates functions that must serialise
# access to the shared todo state
todolock = thread.allocate_lock()
locked = lockdec(todolock)
class Todo(LazyDict):
    """A single todo item (txt, time, duration, warnsec, priority)."""
    pass
class TodoList(UserState):
    """Per-user todo list persisted through UserState under "todo"."""

    def __init__(self, name, *args, **kwargs):
        UserState.__init__(self, name, "todo", *args, **kwargs)
        # Re-wrap persisted items as LazyDicts so attribute access works.
        if self.data.list:
            self.data.list = [LazyDict(x) for x in self.data.list]
        else:
            self.data.list = []

    def add(self, txt, ttime=0, duration=0, warnsec=0, priority=0):
        """ add a todo .. returns the new number of items """
        todo = Todo()
        todo.time = ttime
        todo.duration = duration
        todo.warnsec = warnsec
        todo.priority = priority
        todo.txt = txt.strip()
        self.data.list.append(todo)
        self.save()
        return len(self.data.list)

    def delete(self, indexnr):
        """ remove the item at *indexnr* and persist """
        del self.data.list[indexnr]
        self.save()
        return self

    def clear(self):
        """ drop all items and persist """
        self.data.list = []
        self.save()
        return self

    def toolate(self):
        """ items whose time lies in the past """
        now = time.time()
        return [todo for todo in self.data.list if todo.time < now]

    def withintime(self, before, after):
        """ items with before < time < after """
        return [todo for todo in self.data.list
                if before < todo.time < after]

    def timetodo(self):
        """ items that carry a (non-zero) time field """
        # FIX: the original shadowed the builtin ``min`` with a constant 0
        return [todo for todo in self.data.list if todo.time > 0]
def handle_todo(bot, ievent):
    """ todo [<item>] .. show todo's or set todo item .. a time/date can be \
given"""
    # With arguments this is an "add" request; delegate to handle_todo2.
    if len(ievent.args) > 0:
        handle_todo2(bot, ievent)
        return
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    try:
        todoos = TodoList(name).data.list
    except KeyError:
        # BUGFIX: the original formatted the message with ``user.name``,
        # but no ``user`` variable exists here (NameError); use ``name``.
        ievent.reply('i dont have todo info for %s' % name)
        return
    saytodo(bot, ievent, todoos)
def handle_todo2(bot, ievent):
    """ set todo item """
    if not ievent.rest:
        ievent.missing("<what>")
        return
    what = ievent.rest
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    ttime = strtotime(what)
    todo = TodoList(name)
    if ttime is not None:
        ievent.reply('time detected ' + time.ctime(ttime))
        # FIX: strip the recognised time text from the item, consistent
        # with handle_settodo and handle_tomorrow (the original stored
        # the time text as part of the todo text here).
        what = striptime(what)
        nr = todo.add(what, ttime)
    else:
        nr = todo.add(what)
    ievent.reply('todo item %s added' % nr)
# register the command (USER/GUEST may run it) plus usage examples
cmnds.add('todo', handle_todo, ['USER', 'GUEST'])
examples.add('todo', 'todo [<item>] .. show todo items or add a todo item', \
'1) todo 2) todo program the bot 3) todo 22:00 sleep')
def handle_tododone(bot, ievent):
    """ todo-done <listofnrs> .. remove todo items """
    if len(ievent.args) == 0:
        ievent.missing('<list of nrs>')
        return
    try:
        nrs = []
        for i in ievent.args:
            nrs.append(int(i))
        nrs.sort()
    except ValueError:
        # ``i`` still holds the offending argument here
        ievent.reply('%s is not an integer' % i)
        return
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    nrdone = 0
    failed = []
    todo = TodoList(name)
    # delete highest index first so the remaining indices stay valid
    for i in nrs[::-1]:
        try:
            del todo.data.list[i]
            nrdone += 1
        except IndexError:
            # out-of-range numbers are silently skipped
            continue
        except Exception, ex:
            failed.append(str(i))
            handle_exception()
    if failed:
        ievent.reply('failed to delete %s' % ' .. '.join(failed))
    if nrdone == 1:
        todo.save()
        ievent.reply('%s item deleted' % nrdone)
    elif nrdone == 0:
        ievent.reply('no items deleted')
    else:
        todo.save()
        ievent.reply('%s items deleted' % nrdone)
# register todo-done plus usage examples
cmnds.add('todo-done', handle_tododone, ['USER', 'GUEST'])
examples.add('todo-done', 'todo-done <listofnrs> .. remove items from \
todo list', '1) todo-done 1 2) todo-done 3 5 8')
def handle_settodo(bot, ievent):
    """ todo-set <name> <txt> .. add a todo to another user's todo list"""
    try:
        who = ievent.args[0]
        what = ' '.join(ievent.args[1:])
    except IndexError:
        ievent.missing('<nick> <what>')
        return
    if not what:
        ievent.missing('<nick> <what>')
        return
    userhost = getwho(bot, who)
    if not userhost:
        ievent.reply("can't find userhost for %s" % who)
        return
    whouser = bot.users.getname(userhost)
    if not whouser:
        ievent.reply("can't find user for %s" % userhost)
        return
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    # the target user must allow todo sharing with the sender
    if not bot.users.permitted(userhost, name, 'todo'):
        ievent.reply("%s doesn't permit todo sharing for %s " % \
(who, name))
        return
    # prefix the item with the sender's nick
    what = "%s: %s" % (ievent.nick, what)
    ttime = strtotime(what)
    nr = 0
    # BUGFIX: the original added the item to the *sender's* list
    # (TodoList(name)); the command is documented as adding to the
    # target user's list, so use the target (whouser).
    todo = TodoList(whouser)
    if ttime is not None:
        ievent.reply('time detected ' + time.ctime(ttime))
        what = striptime(what)
        nr = todo.add(what, ttime)
    else:
        nr = todo.add(what)
    ievent.reply('todo item %s added' % nr)
# register todo-set (USER only) plus usage examples
cmnds.add('todo-set', handle_settodo, 'USER')
examples.add('todo-set', 'todo-set <nick> <txt> .. set todo item of \
<nick>', 'todo-set dunker bot proggen')
def handle_gettodo(bot, ievent):
    """ todo-get <nick> .. get todo of another user """
    try:
        who = ievent.args[0]
    except IndexError:
        ievent.missing('<nick>')
        return
    userhost = getwho(bot, who)
    if not userhost:
        ievent.reply("can't find userhost for %s" % who)
        return
    whouser = bot.users.getname(userhost)
    if not whouser:
        ievent.reply("can't find user for %s" % userhost)
        return
    name = bot.users.getname(ievent.userhost)
    if not name:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    # the target user must allow todo sharing with the sender
    if not bot.users.permitted(userhost, name, 'todo'):
        ievent.reply("%s doesn't permit todo sharing for %s " % (who, name))
        return
    # BUGFIX: the original did ``TodoList(name).get(whouser)``, which
    # performs a dict-style lookup on the *sender's* state object instead
    # of loading the target user's todo items.
    todoos = TodoList(whouser).data.list
    saytodo(bot, ievent, todoos)
# register todo-get plus usage examples
cmnds.add('todo-get', handle_gettodo, ['USER', 'GUEST'])
examples.add('todo-get', 'todo-get <nick> .. get the todo list of \
<nick>', 'todo-get dunker')
def handle_todotime(bot, ievent):
    """ todo-time .. show time related todoos """
    username = bot.users.getname(ievent.userhost)
    if not username:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    saytodo(bot, ievent, TodoList(username).timetodo())
# register todo-time plus usage examples
cmnds.add('todo-time', handle_todotime, ['USER', 'GUEST'])
examples.add('todo-time', 'todo-time .. show todo items with time fields', \
'todo-time')
def handle_todoweek(bot, ievent):
    """ todo-week .. show time related todo items for this week """
    username = bot.users.getname(ievent.userhost)
    if not username:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    week = 7 * 24 * 60 * 60
    items = TodoList(username).withintime(today(), today() + week)
    saytodo(bot, ievent, items)
# register todo-week plus usage example
cmnds.add('todo-week', handle_todoweek, ['USER', 'GUEST'])
examples.add('todo-week', 'todo-week .. todo items for this week', 'todo-week')
def handle_today(bot, ievent):
    """ todo-today .. show time related todo items for today """
    username = bot.users.getname(ievent.userhost)
    if not username:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    day = 3600 * 24
    now = time.time()
    saytodo(bot, ievent, TodoList(username).withintime(now, now + day))
# register todo-today plus usage example
cmnds.add('todo-today', handle_today, ['USER', 'GUEST'])
examples.add('todo-today', 'todo-today .. todo items for today', 'todo-today')
def handle_tomorrow(bot, ievent):
    """ todo-tomorrow .. show time related todo items for tomorrow """
    username = bot.users.getname(ievent.userhost)
    if not username:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    todo = TodoList(username)
    if ievent.rest:
        # with text this is an "add for tomorrow" request
        what = ievent.rest
        ttime = strtotime(what)
        if ttime != None:
            # a detected time-of-day is interpreted relative to today,
            # then shifted forward one day below
            if ttime < today() or ttime > today() + 24*60*60:
                ievent.reply("%s is not tomorrow" % time.ctime(ttime + 24*60*60))
                return
            ttime += 24*60*60
            ievent.reply('time detected ' + time.ctime(ttime))
            what = striptime(what)
        else:
            # no explicit time given
            # NOTE(review): 42*60*60 puts the default at 18:00 tomorrow
            # (42h after last midnight) — looks deliberate, but confirm it
            # is not a typo for 24*60*60
            ttime = today() + 42*60*60
        todo.add(what, ttime)
        ievent.reply('todo added')
        return
    # no text: list tomorrow's items
    todoos = todo.withintime(today()+24*60*60, today()+2*24*60*60)
    saytodo(bot, ievent, todoos)
# register todo-tomorrow plus usage example
cmnds.add('todo-tomorrow', handle_tomorrow, ['USER', 'GUEST'])
examples.add('todo-tomorrow', 'todo-tomorrow .. todo items for tomorrow', \
'todo-tomorrow')
def handle_setpriority(bot, ievent):
    """ todo-setprio [<channel|name>] <itemnr> <prio> .. set priority \
on todo item """
    # arguments are either (target, itemnr, prio) or (itemnr, prio)
    # with the sender as implicit target
    try:
        (who, itemnr, prio) = ievent.args
    except ValueError:
        try:
            (itemnr, prio) = ievent.args
            who = bot.users.getname(ievent.userhost)
        except ValueError:
            # BUGFIX: usage string was garbled as '[<channe|namel>]'
            ievent.missing('[<channel|name>] <itemnr> <priority>')
            return
    if not who:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    try:
        itemnr = int(itemnr)
        prio = int(prio)
    except ValueError:
        ievent.missing('[<channel|name>] <itemnr> <priority>')
        return
    todo = TodoList(who)
    try:
        todo.data.list[itemnr].priority = prio
        todo.save()
        ievent.reply('priority set')
    except IndexError:
        ievent.reply("no %s item in todolist" % str(itemnr))
# register todo-setprio plus usage examples
cmnds.add('todo-setprio', handle_setpriority, ['USER', 'GUEST'])
examples.add('todo-setprio', 'todo-setprio [<channel|name>] <itemnr> <prio> \
.. set todo priority', '1) todo-setprio #dunkbots 2 5 2) todo-setprio owner \
3 10 3) todo-setprio 2 10')
def handle_todosettime(bot, ievent):
    """ todo-settime [<channel|name>] <itemnr> <timestring> .. set time \
        on todo item """
    # the time is parsed out of the whole rest string first, then stripped
    # so the remaining words can be split into (who, itemnr).
    ttime = strtotime(ievent.rest)
    if ttime == None:
        ievent.reply("can't detect time")
        return
    txt = striptime(ievent.rest)
    try:
        (who, itemnr) = txt.split()
    except ValueError:
        try:
            (itemnr, ) = txt.split()
            who = bot.users.getname(ievent.userhost)
        except ValueError:
            ievent.missing('[<channel|name>] <itemnr> <timestring>')
            return
    if not who:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    try:
        itemnr = int(itemnr)
    except ValueError:
        ievent.missing('[<channel|name>] <itemnr> <timestring>')
        return
    todo = TodoList(who)
    try:
        todo.data.list[itemnr].time = ttime
        todo.save()
        ievent.reply('time of todo %s set to %s' % (itemnr, time.ctime(ttime)))
    except IndexError:
        # fix: message was "%s item in todolist", dropping the "no"
        # (compare the todo-setprio handler)
        ievent.reply("no %s item in todolist" % str(itemnr))
cmnds.add('todo-settime', handle_todosettime, ['USER', 'GUEST'])
examples.add('todo-settime', 'todo-settime [<channel|name>] <itemnr> \
<timestring> .. set todo time', '1) todo-settime #dunkbots 2 13:00 2) \
todo-settime owner 3 2-2-2010 3) todo-settime 2 22:00')
def handle_getpriority(bot, ievent):
    """ todo-getprio <[channel|name]> <itemnr> .. get priority of todo \
        item """
    try:
        (who, itemnr) = ievent.args
    except ValueError:
        # single argument: itemnr only, who defaults to the calling user
        try:
            itemnr = ievent.args[0]
            who = bot.users.getname(ievent.userhost)
        except IndexError:
            ievent.missing('[<channel|name>] <itemnr>')
            return
    if not who:
        ievent.reply("can't find username for %s" % ievent.userhost)
        return
    try:
        itemnr = int(itemnr)
    except ValueError:
        ievent.missing('[<channel|name>] <itemnr>')
        return
    todo = TodoList(who)
    try:
        prio = todo.data.list[itemnr].priority
        ievent.reply('priority is %s' % prio)
    except IndexError:
        # fix: message was "%s item in todolist", dropping the "no"
        # (compare the todo-setprio handler)
        ievent.reply("no %s item in todolist" % str(itemnr))
cmnds.add('todo-getprio', handle_getpriority, ['USER', 'GUEST'])
examples.add('todo-getprio', 'todo-getprio [<channel|name>] <itemnr> .. get \
todo priority', '1) todo-getprio #dunkbots 5 2) todo-getprio 3')
def saytodo(bot, ievent, todoos):
    """ reply with a numbered overview of the given todo items. """
    if not todoos:
        ievent.reply('nothing todo ;]')
        return
    now = time.time()
    lines = []
    for nr, item in enumerate(todoos):
        line = "%s) " % nr
        if item.priority:
            line += "[%s] " % item.priority
        if item.time and not item.time == 0:
            # overdue items get flagged before the timestamp
            if item.time < now:
                line += 'TOO LATE: '
            line += "%s %s " % (time.ctime(float(item.time)), item.txt)
        else:
            line += "%s " % item.txt
        lines.append(line.strip())
    if lines:
        ievent.reply("todo: ", lines, dot=" ")
| Python |
# plugs/gcalc.py
# encoding: utf-8
#
#
""" use google to calculate e.g. !gcalc 1 + 1 """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.examples import examples
## basic imports
import urllib2
## commands
def handle_gcalc(bot, ievent):
    """ gcalc <expression> .. evaluate an expression with the google calculator
        by scraping the search results page.
    """
    if len(ievent.args) > 0:
        # url-encode by hand: literal '+' must become %2B before spaces
        # are turned into '+' separators
        expr = " ".join(ievent.args).replace("+", "%2B").replace(" ", "+")
    else:
        ievent.missing('Missing an expression')
        return
    # a browser user-agent is sent because google serves a different page
    # to unknown clients
    req = urllib2.Request("http://www.google.com/search?q=%s" % expr,
                          None,
                          {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.11) Gecko/20071204 BonEcho/2.0.0.11'})
    data = urllib2.urlopen(req).read()
    # NOTE(review): this screen-scrapes a fixed <img> marker and <b> tags;
    # it breaks silently whenever google changes its result markup.
    if "<img src=/images/calc_img.gif width=40 height=30 alt=\"\">" not in data:
        ievent.reply('Your expression can\'t be evaluated by the google calculator')
    else:
        ans = data.split("<img src=/images/calc_img.gif width=40 height=30 alt=\"\">")[1].split("<b>")[1].split("</b>")[0]
        # strip presentation markup and map html exponent/multiply notation
        # back to python-ish operators
        ievent.reply(ans.replace('<font size=-2> </font>', '').replace('×', '*').replace('<sup>', '**').replace('</sup>', ''))
    return
cmnds.add('gcalc', handle_gcalc, ['USER', 'GUEST'])
examples.add('gcalc', 'calculate an expression using the google calculator', 'gcalc 1 + 1')
| Python |
# waveplugs/hubbub.py
#
#
""" the hubbub mantra is of the following:
use the hb-register <feedname> <url> command to register url and start a feed in in one pass.
"""
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.contrib import feedparser
from gozerlib.persist import Persist, PlugPersist
from gozerlib.utils.pdol import Pdol
from gozerlib.utils.pdod import Pdod
from gozerlib.utils.generic import jsonstring
from gozerlib.utils.lazydict import LazyDict
from gozerlib.utils.url import useragent, geturl2
from gozerlib.utils.statdict import StatDict
from gozerlib.utils.exception import handle_exception
from gozerlib.fleet import fleet
from gozerlib.config import Config, cfg
from gozerlib.channelbase import ChannelBase
from gozerlib.utils.url import posturl
from gozerlib.errors import NoSuchBotType
from gozerlib.gae.wave.waves import Wave
from gozerlib.threads import start_new_thread
## basic imports
import base64
import logging
import urllib
import urlparse
import uuid
import os
import time
## subscribe function
def subscribe(url):
    """ send a PubSubHubbub subscribe request for *url* to the configured hub.

        Returns the posturl() response object (callers read .status).
    """
    subscribe_args = {
        'hub.callback': urlparse.urljoin('https://feedprovider.appspot.com', '/hubbub'),
        'hub.mode': 'subscribe',
        'hub.topic': url,
        'hub.verify': 'async',
        # random verify token; NOTE(review): it is not stored, so the async
        # verification callback cannot be checked against it later
        'hub.verify_token': str(uuid.uuid4()),
    }
    headers = {}
    # imported lazily so the module loads even when credentials are absent
    import config.credentials as credentials
    if credentials.HUB_CREDENTIALS:
        # HTTP basic auth from a (user, password) pair
        auth_string = "Basic " + base64.b64encode("%s:%s" % tuple(credentials.HUB_CREDENTIALS))
        headers['Authorization'] = auth_string
    logging.warn("hubbub - subscribe - trying %s (%s)" % (credentials.HUB_URL, url))
    logging.warn("hubbub - subscribe - %s" % str(headers))
    response = posturl(credentials.HUB_URL, headers, subscribe_args)
    return response
## tinyurl import - fall back to a no-op shortener when commonplugs is absent
try:
    from commonplugs.tinyurl import get_tinyurl
except ImportError:
    def get_tinyurl(url):
        """ fallback: return the url unchanged, in the same list shape the
            real helper uses (callers read element 0).
        """
        return [url, ]
## defines
allowedtokens = ['updated', 'link', 'summary', 'tags', 'author', 'content', 'title', 'subtitle']
savelist = []
possiblemarkup = {'separator': 'set this to desired item separator', \
'all-lines': "set this to 1 if you don't want items to be aggregated", \
'tinyurl': "set this to 1 when you want to use tinyurls", 'skipmerge': \
"set this to 1 if you want to skip merge commits", 'reverse-order': 'set \
this to 1 if you want the items displayed with oldest item first'}
def find_self_url(links):
    """ return the href of the first link whose rel is 'self', or None
        when the feed has no self link.
    """
    matches = [entry.href for entry in links if entry.rel == 'self']
    if matches:
        return matches[0]
    return None
## exceptions
class NoSuchFeed(Exception):
    """ raised when a feed name is not present in the feed database. """
    pass
## classes
class HubbubItem(Persist):
    """ one hubbub feed: persisted core data plus per-channel itemslists
        (which tokens to show) and markup (display options).
    """
    def __init__(self, name, url="", owner="", itemslist=['title', 'link'], watchchannels=[], running=1):
        # NOTE(review): itemslist/watchchannels are mutable defaults; they are
        # only read (watchchannels is copied via list()) so this is harmless,
        # and itemslist is in fact never used in this constructor.
        filebase = 'gozerstore' + os.sep + 'plugs' + os.sep + 'waveplugs.hubbub' + os.sep + name
        Persist.__init__(self, filebase + os.sep + name + '.core')
        if not self.data:
            self.data = {}
        self.data = LazyDict(self.data)
        # keep previously persisted values; fall back to constructor args
        self.data['name'] = self.data.name or str(name)
        self.data['url'] = self.data.url or str(url)
        self.data['owner'] = self.data.owner or str(owner)
        self.data['watchchannels'] = self.data.watchchannels or list(watchchannels)
        self.data['running'] = self.data.running or running
        # per-(feed, channel) token lists and markup options, persisted apart
        self.itemslists = Pdol(filebase + os.sep + name + '.itemslists')
        self.markup = Pdod(filebase + os.sep + name + '.markup')
    def save(self):
        """ save the hubbub item's core data, itemslists and markup. """
        Persist.save(self)
        self.itemslists.save()
        self.markup.save()
    def ownercheck(self, userhost):
        """ check if userhost is the owner of the feed. """
        try:
            return self.data.owner == userhost
        except KeyError:
            pass
        return False
    def fetchdata(self):
        """ fetch the feed over http and return its entries list. """
        url = self.data['url']
        if not url:
            logging.warn("hubbub - %s doesnt have url set" % self.data.name)
            return []
        result = feedparser.parse(url, agent=useragent())
        logging.debug("hubbub - fetch - got result from %s" % url)
        if result and result.has_key('bozo_exception'):
            logging.info('hubbub - %s bozo_exception: %s' % (url, result['bozo_exception']))
        try:
            status = result.status
            logging.info("hubbub - status is %s" % status)
        except AttributeError:
            # no http status available (e.g. parsed from a local source)
            status = 200
        if status != 200 and status != 301 and status != 302:
            # NOTE(review): RssStatus is not defined or imported in this file —
            # raising it here would itself raise NameError. Confirm where it
            # should come from (the rss plugin defines one).
            raise RssStatus(status)
        return result.entries
class HubbubWatcher(PlugPersist):
    """ handle incoming hubbub POSTs and maintain the index of feed names,
        the url -> name mapping and the in-memory HubbubItem cache.
    """
    def __init__(self, filename):
        PlugPersist.__init__(self, filename)
        if not self.data:
            self.data = {}
        # persisted: list of feed names, url -> name map, feeds dict
        if not self.data.has_key('names'):
            self.data['names'] = []
        if not self.data.has_key('urls'):
            self.data['urls'] = {}
        if not self.data.has_key('feeds'):
            self.data['feeds'] = {}
        # in-memory cache of name -> HubbubItem, filled lazily
        self.feeds = {}
    def add(self, name, url, owner):
        """ add a feed to the database; return False when the item could not
            take the given url.
        """
        if not name in self.data['names']:
            self.data['names'].append(name)
        self.data['urls'][url] = name
        self.save()
        item = HubbubItem(name, url, owner)
        # a previously persisted item keeps its old url; treat that as failure
        if item.data.url != url:
            return False
        item.save()
        self.feeds[name] = item
        return True
    def byname(self, name):
        """ retrieve a feed by its name; only cached when it has a url. """
        if name in self.feeds:
            return self.feeds[name]
        item = HubbubItem(name)
        if item.data.url:
            self.feeds[name] = item
        # NOTE(review): an item without a url is still returned (uncached)
        return item
    def byurl(self, url):
        """ retrieve a feed by its url, or None when unknown. """
        try:
            name = self.data['urls'][url]
            return self.byname(name)
        except KeyError:
            return
    def cloneurl(self, url, auth):
        """ add feeds from a remote "name url" per-line listing; return the
            list of names added.
        """
        data = geturl2(url)
        got = []
        for line in data.split('\n'):
            try:
                (name, url) = line.split()
            except ValueError:
                logging.debug("hubbub - cloneurl - can't split %s line" % line)
                continue
            # strip trailing html when the listing was rendered as a page
            if url.endswith('<br>'):
                url = url[:-4]
            self.add(name, url, auth)
            got.append(name)
        return got
    def watch(self, name):
        """ mark a feed running and (re)send the hub subscription. """
        logging.debug('trying %s hubbub feed' % name)
        item = self.byname(name)
        if item == None:
            raise NoSuchFeed(name)
        if not item.data.running:
            item.data.running = 1
            item.data.stoprunning = 0
            item.save()
        subscribe(item.data['url'])
        logging.info('hubbub - started %s watch' % name)
    def work(self, botname, type, channel, entries, url, *args, **kwargs):
        """ deliver feed entries for one (botname, type, channel) target:
            resolve the bot, format the entries and say them.
        """
        try:
            item = self.byurl(url)
            name = item.data.name
            # resolve the bot: by name first, then by type, xmpp as last resort
            try:
                bot = fleet.byname(botname)
                if not bot and type == 'wave' and 'wave' in botname:
                    bot = fleet.makewavebot(botname)
                if not bot and type:
                    bot = fleet.makebot(type=type)
                if not bot:
                    bot = fleet.makebot('xmpp')
            except NoSuchBotType, ex:
                logging.warn("hubbub - %s" % str(ex))
                return
            if not bot:
                logging.error("hubbub - can't find %s bot in fleet" % type)
                return
            res2 = entries
            if not res2:
                logging.info("no updates for %s (%s) feed available" % (item.data.name, channel))
                return
            if item.markup.get(jsonstring([name, channel]), 'reverse-order'):
                res2 = res2[::-1]
            if item.markup.get(jsonstring([name, channel]), 'all-lines'):
                # one message per entry
                for i in res2:
                    response = self.makeresponse(name, [i, ], channel)
                    try:
                        bot.say(channel, response)
                    except Exception, ex:
                        handle_exception()
            else:
                # aggregate all entries into one message
                sep = item.markup.get(jsonstring([name, channel]), 'separator')
                if sep:
                    response = self.makeresponse(name, res2, channel, sep=sep)
                else:
                    response = self.makeresponse(name, res2, channel)
                bot.say(channel, response)
        except Exception, ex:
            handle_exception()
    def incoming(self, data):
        """ process an incoming hubbub POST body: parse it, look the feed up
            by its self url and fan delivery out to all watch channels.
        """
        result = feedparser.parse(data)
        url = find_self_url(result.feed.links)
        logging.info("hubbub - in - %s - %s" % (url, data))
        try:
            item = self.byurl(url)
            if not item:
                logging.warn("hubbub - can't find feed for %s" % url)
                return
            if not item.data.running:
                logging.warn("hubbub - %s is not in running mode" % item.data.url)
                return
            # self-heal items that were added without a url
            if not item.data.url or item.data.url == 'urlnotset':
                item.data.url = url
                item.save()
            if item:
                loopover = item.data.watchchannels
                name = item.data.name
            else:
                logging.warn("hubbub - can't find %s item" % url)
                return
            logging.debug("loopover in %s peek is: %s" % (name, loopover))
            counter = 1
            for i in loopover:
                # entries are (botname, bottype, channel) triples, possibly
                # json-encoded
                if len(i) == 3:
                    try:
                        (botname, type, channel) = i
                    except:
                        try:
                            # NOTE(review): loads() is not imported in this
                            # file — this fallback would raise NameError;
                            # presumably simplejson/json loads was intended.
                            (botname, type, channel) = loads(i)
                        except:
                            logging.info('hubbub - %s is not in the format (botname, bottype, channel)' % str(item))
                            continue
                else:
                    logging.debug('hubbub - %s is not in the format (botname, bottype, channel)' % item.data.url)
                    continue
                if type == 'wave':
                    wave = Wave(channel)
                    # only deliver to waves the bot has actually joined
                    if wave and wave.data.json_data:
                        start_new_thread(work, (botname, type, channel, result.entries, url), {"_countdown": counter})
                    else:
                        logging.warn("hubbub - skipping %s - not joined" % channel)
                else:
                    start_new_thread(work, (botname, type, channel, result.entries, url), {"_countdown": counter})
                # stagger deliveries via the _countdown argument
                counter += 1
        except Exception, ex:
            # NOTE(review): name is unbound here when byurl itself raised —
            # handle_exception(txt=name) would then raise NameError
            handle_exception(txt=name)
        return True
    def getall(self):
        """ reconstruct all feed items into self.feeds. """
        for name in self.data['names']:
            self.feeds[name] = HubbubItem(name)
        return self.feeds
    def ownercheck(self, name, userhost):
        """ check if userhost is the owner of feed. """
        try:
            return self.byname(name).ownercheck(userhost)
        except (KeyError, AttributeError):
            pass
        return False
    def makeresult(self, name, target, data):
        """ make a result (list of token dicts) of a feed depending on its
            itemslist for the target channel.
        """
        item = self.byname(name)
        res = []
        for j in data:
            tmp = {}
            if not item.itemslists.data[jsonstring([name, target])]:
                return []
            for i in item.itemslists.data[jsonstring([name, target])]:
                try:
                    tmp[i] = unicode(j[i])
                except KeyError:
                    continue
            res.append(tmp)
        return res
    def makeresponse(self, name, res, channel, sep=" .. "):
        """ format a list of feed entries into one reply string, applying the
            channel's itemslist and markup options.
        """
        item = self.byname(name)
        result = u"[%s] - " % name
        try:
            # NOTE(review): itemslist is only probed for existence; the value
            # itself is unused
            itemslist = item.itemslists.data[jsonstring([name, channel])]
        except KeyError:
            # no itemslist yet for this channel: install the default
            item = self.byname(name)
            if item == None:
                return "no %s rss item" % name
            else:
                item.itemslists.data[jsonstring([name, channel])] = ['title', 'link']
                item.itemslists.save()
        for j in res:
            if item.markup.get(jsonstring([name, channel]), 'skipmerge') and 'Merge branch' in j['title']:
                continue
            resultstr = u""
            for i in item.itemslists.data[jsonstring([name, channel])]:
                try:
                    ii = getattr(j, i)
                    if not ii:
                        continue
                    ii = unicode(ii)
                    if ii.startswith('http://'):
                        if item.markup.get(jsonstring([name, channel]), 'tinyurl'):
                            try:
                                tinyurl = get_tinyurl(ii)
                                logging.debug('rss - tinyurl is: %s' % str(tinyurl))
                                if not tinyurl:
                                    resultstr += u"%s - " % ii
                                else:
                                    resultstr += u"%s - " % tinyurl[0]
                            except Exception, ex:
                                handle_exception()
                                # NOTE(review): this interpolates the HubbubItem
                                # object itself; presumably "ii" was intended
                                resultstr += u"%s - " % item
                        else:
                            resultstr += u"%s - " % ii
                    else:
                        resultstr += u"%s - " % ii.strip()
                except (KeyError, AttributeError), ex:
                    logging.info('hubbub - %s - %s' % (name, str(ex)))
                    continue
            # drop the trailing " - " separator
            resultstr = resultstr[:-3]
            if resultstr:
                result += u"%s %s " % (resultstr, sep)
        # drop the trailing separator and padding
        return result[:-(len(sep)+2)]
    def stopwatch(self, name):
        """ disable running status of the feed. """
        try:
            feed = self.byname(name)
            feed.data.running = 0
            feed.save()
            return True
        except KeyError:
            return False
    def list(self):
        """ return feed names. """
        feeds = self.data['names']
        return feeds
    def runners(self):
        """ return (name, watchchannels) of feeds that are running. """
        result = []
        for name in self.data['names']:
            z = self.byname(name)
            if z.data.running == 1 and not z.data.stoprunning:
                result.append((z.data.name, z.data.watchchannels))
        return result
    def listfeeds(self, botname, type, channel):
        """ return names of running feeds watched in the given channel. """
        result = []
        for name in self.data['names']:
            z = self.byname(name)
            if not z or not z.data.running:
                continue
            # watchchannels may hold json strings or plain lists
            if jsonstring([botname, type, channel]) in z.data.watchchannels or [botname, type, channel] in z.data.watchchannels:
                result.append(z.data.name)
        return result
    def getfeeds(self, channel, type=None):
        """ get all feeds running in a channel. """
        if type and type == "wave":
            chan = Wave(channel)
        else:
            chan = ChannelBase(channel)
        return chan.data.feeds
    def url(self, name):
        """ return url of a feed. """
        feed = self.byname(name)
        if feed:
            return feed.data.url
    def seturl(self, name, url):
        """ set url of hubbub feed. """
        feed = self.byname(name)
        feed.data.url = url
        feed.save()
        return True
    def fetchdata(self, name):
        """ fetch the feed ourselves instead of receiving push items. """
        return self.byname(name).fetchdata()
    def scan(self, name):
        """ scan a feed for usable tokens, most frequent first. """
        keys = []
        items = self.fetchdata(name)
        for item in items:
            for key in item:
                if key in allowedtokens:
                    keys.append(key)
        statdict = StatDict()
        for key in keys:
            statdict.upitem(key)
        return statdict.top()
    def startwatchers(self):
        """ enable all runnable feeds """
        for name in self.data['names']:
            z = self.byname(name)
            if z.data.running:
                self.watch(z.data.name)
    def start(self, botname, type, name, channel):
        """ start a feed in a channel. """
        item = self.byname(name)
        if not item:
            logging.info("we don't have a %s feed" % name)
            return False
        target = channel
        if not jsonstring([botname, type, target]) in item.data.watchchannels and not [botname, type, target] in item.data.watchchannels:
            item.data.watchchannels.append([botname, type, target])
        # channel defaults: show title+link, shorten urls
        item.itemslists.data[jsonstring([name, target])] = ['title', 'link']
        item.markup.set(jsonstring([name, target]), 'tinyurl', 1)
        item.data.running = 1
        item.data.stoprunning = 0
        item.save()
        # NOTE(review): calls the module-level singleton instead of self.watch —
        # works, but only because watcher is this same instance
        watcher.watch(name)
        logging.debug("putting feedname %s in target: %s" % (name, target))
        if type == "wave":
            chan = Wave(target)
        else:
            chan = ChannelBase(target)
        if not name in chan.data.feeds:
            chan.data.feeds.append(name)
            chan.save()
        logging.debug("hubbub - started %s feed in %s channel" % (name, channel))
        return True
    def stop(self, botname, type, name, channel):
        """ stop watching a feed in a channel. """
        item = self.byname(name)
        if not item:
            return False
        try:
            logging.warn("trying to remove %s from %s feed list" % (name, channel))
            if type == "wave":
                chan = Wave(channel)
            else:
                chan = ChannelBase(channel)
            chan.data.feeds.remove(name)
            chan.save()
        except ValueError:
            logging.warn("can't remove %s from %s feed list" % (name, channel))
        try:
            item.data.watchchannels.remove([botname, type, channel])
            item.save()
            logging.debug("stopped %s feed in %s channel" % (name, channel))
        except ValueError:
            return False
        return True
    def clone(self, botname, type, newchannel, oldchannel):
        """ move the feeds of oldchannel over to newchannel. """
        feeds = self.getfeeds(oldchannel, type)
        logging.debug("hubbub - clone - %s - %s - %s - %s - %s" % (botname, type, newchannel, oldchannel, feeds))
        for feed in feeds:
            self.stop(botname, type, feed, oldchannel)
            self.start(botname, type, feed, newchannel)
        return feeds
## defines
def work(botname, type, channel, result, item, *args, **kwargs):
    """ module-level trampoline so start_new_thread can schedule
        watcher.work without holding a bound method.
    """
    watcher.work(botname, type, channel, result, item, *args, **kwargs)
# the watcher object - module singleton used by all hb-* commands
watcher = HubbubWatcher('hubbub')
## functions
def size():
    """ return number of watched rss entries. """
    # NOTE(review): HubbubWatcher does not define size() in this file —
    # confirm PlugPersist provides it, otherwise this raises AttributeError.
    return watcher.size()
## commands
def handle_hubbubsubscribe(bot, event):
    """ <list of feednames> .. (re)send hub subscription requests for feeds
        that were already added with hb-add.
    """
    for name in event.args:
        item = watcher.byname(name)
        if not item:
            event.reply("%s feed is not yet added .. see hb-add" % name)
            continue
        url = item.data.url
        if not url:
            event.reply('please provide a url for %s feed' % name)
            return
        if not url.startswith('http://'):
            event.reply('%s doesnt start with "http://"' % url)
            # fix: previously fell through and subscribed the invalid url anyway
            continue
        # fix: the urls map is keyed by url (see HubbubWatcher.add), not by
        # name, so the has_key probe always missed and re-added every time
        if not watcher.data['urls'].has_key(url):
            watcher.add(name, url, event.channel)
        if not watcher.byname(name):
            watcher.add(name, url, event.channel)
        response = subscribe(url)
        event.reply("subscription send: %s - %s" % (url, response.status))
cmnds.add('hb-subscribe', handle_hubbubsubscribe, ['USER',])
examples.add('hb-subscribe', 'subscribe to a feed', 'hb-subscribe gozerrepo http://core.gozerbot.org/hg/dev/0.9')
def handle_hubbubclone(bot, event):
    """ <channel> .. clone the feeds running in a channel into this one and
        leave a pointer message in the old channel.
    """
    if not event.rest:
        event.missing('<channel>')
        # fix: 'event.done' was a bare attribute access (no call) and the
        # handler fell through with an empty channel argument
        return
    feeds = watcher.clone(bot.name, bot.type, event.channel, event.rest)
    event.reply('cloned the following feeds: ', feeds)
    bot.say(event.rest, "this wave is continued in %s" % event.url)
cmnds.add('hb-clone', handle_hubbubclone, 'USER')
examples.add('hb-clone', 'clone feeds into new channel', 'hb-clone waveid')
def handle_hubbubcloneurl(bot, event):
""" <url> .. clone urls from http://host/feeds. """
if not event.rest:
event.missing('<url>')
event.done
import urllib2
try:
feeds = watcher.cloneurl(event.rest, event.auth)
event.reply('cloned the following feeds: ', feeds)
except urllib2.HTTPError, ex:
event.reply("hubbub - clone - %s" % str(ex))
cmnds.add('hb-cloneurl', handle_hubbubcloneurl, 'OPER')
examples.add('hb-cloneurl', 'clone feeds from remote url', 'hb-cloneurl http://gozerbot.org/feeds')
def handle_hubbubadd(bot, ievent):
    """ <name> <url> .. subscribe at the hub and, on success, add the feed
        to the watcher.
    """
    try:
        (name, url) = ievent.args
    except ValueError:
        ievent.missing('<name> <url>')
        return
    result = subscribe(url)
    # 2xx (excluding 200) counts as accepted; PubSubHubbub hubs answer
    # 202/204 on subscribe. NOTE(review): a plain 200 is rejected here —
    # confirm that is intended.
    if int(result.status) > 200 and int(result.status) < 300:
        watcher.add(name, url, ievent.userhost)
        ievent.reply('%s feed added' % name)
    else:
        ievent.reply('%s feed NOT added. status code is %s' % (name, result.status))
cmnds.add('hb-add', handle_hubbubadd, 'USER')
examples.add('hb-add', 'hubbub-add <name> <url> to the watcher', 'hb-add gozerbot http://core.gozerbot.org/hg/dev/0.9/rss-log')
def handle_hubbubwatch(bot, ievent):
    """ <feedname> .. enable a feed for watching. """
    if not ievent.channel:
        # NOTE(review): no return here — the handler continues even when no
        # channel is available; confirm whether that is intended
        ievent.reply('no channel provided')
    try:
        name = ievent.args[0]
    except IndexError:
        ievent.missing('<feedname>')
        return
    item = watcher.byname(name)
    if item == None:
        ievent.reply("we don't have a %s hubbub item" % name)
        return
    got = None
    if not item.data.running or item.data.stoprunning:
        item.data.running = 1
        item.data.stoprunning = 0
        got = True
        # NOTE(review): watcher.save(name) — PlugPersist.save may not take an
        # argument; presumably item.save() was intended. Confirm.
        watcher.save(name)
    try:
        # (re)send the hub subscription
        watcher.watch(name)
    except Exception, ex:
        ievent.reply(str(ex))
        return
    if got:
        ievent.reply('watcher started')
    else:
        ievent.reply('already watching %s' % name)
cmnds.add('hb-watch', handle_hubbubwatch, 'USER')
examples.add('hb-watch', 'hubbub-watch <name> [seconds to sleep] .. go watching <name>', 'hb-watch gozerbot')
def handle_hubbubstart(bot, ievent):
    """ <list of feeds> .. start feeds in the current channel; 'all' starts
        every known feed.
    """
    feeds = ievent.args
    if not feeds:
        ievent.missing('<list of feeds>')
        return
    started = []
    cantstart = []
    if feeds[0] == 'all':
        feeds = watcher.list()
    for name in feeds:
        if watcher.start(bot.name, bot.type, name, ievent.channel):
            started.append(name)
        else:
            cantstart.append(name)
    if bot.type == "wave":
        # on wave, reflect the running feeds in the wave title
        wave = Wave(ievent.channel)
        if wave and wave.data:
            logging.debug("feed running in %s: %s" % (ievent.title, wave.data.feeds))
            try:
                ievent.set_title("FEEDPROVIDER - %s - #%s" % (' - '.join(wave.data.feeds), str(wave.data.nrcloned)))
            except Exception, ex:
                handle_exception()
    if started:
        ievent.reply('started: ', started)
    else:
        ievent.reply("sorry can't start: ", cantstart)
cmnds.add('hb-start', handle_hubbubstart, ['USER', 'GUEST'])
examples.add('hb-start', 'hubbub-start <list of feeds> .. start a hubbub feed (per user/channel) ', 'hb-start gozerbot')
def handle_hubbubstop(bot, ievent):
    """ <list of feeds> .. stop feeds in the current channel; 'all' stops
        every feed running here.
    """
    if not ievent.args:
        ievent.missing('<list of feeds>')
        return
    feeds = ievent.args
    stopped = []
    cantstop = []
    if feeds[0] == 'all':
        feeds = watcher.listfeeds(bot.name, bot.type, ievent.channel)
    for name in feeds:
        if watcher.stop(bot.name, bot.type, name, ievent.channel):
            stopped.append(name)
        else:
            cantstop.append(name)
    if stopped:
        # fix: previously replied with the full requested list (feeds)
        # instead of the feeds actually stopped
        ievent.reply('feeds stopped: ', stopped)
    elif cantstop:
        ievent.reply('failed to stop %s feed' % cantstop)
    ievent.done()
cmnds.add('hb-stop', handle_hubbubstop, ['USER', 'GUEST'])
examples.add('hb-stop', 'hubbub-stop <list of names> .. stop a hubbub feed (per user/channel) ', 'hb-stop gozerbot')
def handle_hubbubstopall(bot, ievent):
    """ [<channel>] .. stop all hubbub feeds running in a channel
        (defaults to the current channel).
    """
    if not ievent.rest:
        target = ievent.channel
    else:
        target = ievent.rest
    stopped = []
    feeds = watcher.getfeeds(target)
    if feeds:
        for feed in feeds:
            # fix: stop() takes (botname, type, name, channel) — the bot.type
            # argument was missing, shifting every argument one place
            if watcher.stop(bot.name, bot.type, feed, target):
                stopped.append(feed)
        ievent.reply('stopped feeds: ', stopped)
    else:
        ievent.reply('no feeds running in %s' % target)
cmnds.add('hb-stopall', handle_hubbubstopall, ['HUBBUB', 'OPER'])
examples.add('hb-stopall', 'hubbub-stopall .. stop all hubbub feeds (per user/channel) ', 'hb-stopall')
def handle_hubbubchannels(bot, ievent):
    """ <feedname> .. show the channels a hubbub feed is watched in. """
    if not ievent.args:
        ievent.missing("<feedname>")
        return
    name = ievent.args[0]
    item = watcher.byname(name)
    if item == None:
        ievent.reply("we don't have a %s hubbub object" % name)
        return
    if not item.data.watchchannels:
        ievent.reply('%s is not in watch mode' % name)
        return
    channels = [str(chan) for chan in item.data.watchchannels]
    ievent.reply("channels of %s: " % name, channels)
cmnds.add('hb-channels', handle_hubbubchannels, ['OPER', ])
examples.add('hb-channels', 'hb-channels <name> .. show channels', 'hb-channels gozerbot')
def handle_hubbubaddchannel(bot, ievent):
    """ <feedname> [<botname>] [<bottype>] <channel> .. add a channel to
        a hubbub feed's watchchannels. Missing botname/bottype/channel
        default to the current bot and channel.
    """
    try:
        (name, botname, type, channel) = ievent.args
    except ValueError:
        try:
            botname = bot.name
            (name, type, channel) = ievent.args
        except ValueError:
            try:
                botname = bot.name
                type = bot.type
                (name, channel) = ievent.args
                # fix: 'type = bot.type' was assigned twice in this branch
            except ValueError:
                try:
                    botname = bot.name
                    name = ievent.args[0]
                    type = bot.type
                    channel = ievent.channel
                except IndexError:
                    ievent.missing('<name> [<botname>][<bottype>] <channel>')
                    return
    item = watcher.byname(name)
    if item == None:
        ievent.reply("we don't have a %s hubbub object" % name)
        return
    if not item.data.running:
        ievent.reply('%s watcher is not running' % name)
        return
    # watchchannels may hold json strings or plain lists
    if jsonstring([botname, type, channel]) in item.data.watchchannels or [botname, type, channel] in item.data.watchchannels:
        ievent.reply('we are already monitoring %s on (%s,%s)' % \
(name, type, channel))
        return
    item.data.watchchannels.append([botname, type, channel])
    item.save()
    ievent.reply('%s added to %s hubbub item' % (channel, name))
cmnds.add('hb-addchannel', handle_hubbubaddchannel, ['OPER', ])
examples.add('hb-addchannel', 'hb-addchannel <name> [<bottype>] <channel> \
..add <channel> or <bottype> <channel> to watchchannels of <name>', \
'1) hb-addchannel gozerbot #dunkbots 2) hb-addchannel gozerbot main #dunkbots')
def handle_hubbubsetitems(bot, ievent):
    """ <feedname> <items> .. set the items (tokens) shown for a feed in
        this channel.
    """
    try:
        (name, items) = ievent.args[0], ievent.args[1:]
    except IndexError:
        # fix: ievent.args[0] raises IndexError when no arguments are given;
        # the old 'except ValueError' never triggered
        ievent.missing('<feedname> <tokens>')
        return
    target = ievent.channel
    feed = watcher.byname(name)
    if not feed:
        ievent.reply("we don't have a %s feed" % name)
        return
    feed.itemslists.data[jsonstring([name, target])] = items
    feed.itemslists.save()
    ievent.reply('%s added to (%s,%s) itemslist' % (items, name, target))
cmnds.add('hb-setitems', handle_hubbubsetitems, ['GUEST', 'USER'])
examples.add('hb-setitems', 'set tokens of the itemslist (per user/channel)', 'hb-setitems gozerbot author author link pubDate')
def handle_hubbubadditem(bot, ievent):
    """ <name> <token> .. add an item (token) to a feed's itemslist for
        this channel.
    """
    try:
        (name, item) = ievent.args
    except ValueError:
        ievent.missing('<feedname> <token>')
        return
    target = ievent.channel
    feed = watcher.byname(name)
    if not feed:
        ievent.reply("we don't have a %s feed" % name)
        return
    try:
        feed.itemslists.data[jsonstring([name, target])].append(item)
    except KeyError:
        # fix: when no itemslist existed yet, only the defaults were
        # installed and the requested token was silently dropped even
        # though the reply claimed it was added
        feed.itemslists.data[jsonstring([name, target])] = ['title', 'link', item]
    feed.itemslists.save()
    ievent.reply('%s added to (%s,%s) itemslist' % (item, name, target))
cmnds.add('hb-additem', handle_hubbubadditem, ['GUEST', 'USER'])
examples.add('hb-additem', 'add a token to the itemslist (per user/channel)', 'hb-additem gozerbot link')
def handle_hubbubdelitem(bot, ievent):
    """ <feedname> <token> .. delete an item (token) from a feed's itemslist
        for this channel.
    """
    try:
        (name, item) = ievent.args
    except ValueError:
        ievent.missing('<name> <item>')
        return
    target = ievent.channel
    feed = watcher.byname(name)
    if not feed:
        ievent.reply("we don't have a %s feed" % name)
        return
    try:
        # ValueError when the token is not in the list
        feed.itemslists.data[jsonstring([name, target])].remove(item)
        feed.itemslists.save()
    except (NoSuchFeed, ValueError):
        ievent.reply("we don't have a %s feed" % name)
        return
    ievent.reply('%s removed from (%s,%s) itemslist' % (item, name, target))
cmnds.add('hb-delitem', handle_hubbubdelitem, ['GUEST', 'USER'])
examples.add('hb-delitem', 'remove a token from the itemslist (per user/channel)', 'hb-delitem gozerbot link')
def handle_hubbubmarkuplist(bot, ievent):
    """ show the markup options that can be set on a feed, with their
        descriptions.
    """
    ievent.reply('possible markups ==> ' , possiblemarkup)
cmnds.add('hb-markuplist', handle_hubbubmarkuplist, ['USER', 'GUEST'])
examples.add('hb-markuplist', 'show possible markup entries', 'hb-markuplist')
def handle_hubbubmarkup(bot, ievent):
    """ <feedname> .. display the markup settings of a feed for this
        channel; silent when none are set.
    """
    if not ievent.args:
        ievent.missing('<feedname>')
        return
    name = ievent.args[0]
    feed = watcher.byname(name)
    if not feed:
        ievent.reply("we don't have a %s feed" % name)
        return
    key = jsonstring([name, ievent.channel])
    try:
        markup = feed.markup[key]
    except KeyError:
        return
    ievent.reply(str(markup))
cmnds.add('hb-markup', handle_hubbubmarkup, ['GUEST', 'USER'])
examples.add('hb-markup', 'show markup list for a feed (per user/channel)', 'hb-markup gozerbot')
def handle_hubbubaddmarkup(bot, ievent):
    """ <feedname> <item> <value> .. set a markup option on a feed for
        this channel.
    """
    args = ievent.args
    if len(args) != 3:
        ievent.missing('<feedname> <item> <value>')
        return
    (name, item, value) = args
    target = ievent.channel
    # numeric values are stored as ints, anything else stays a string
    try:
        value = int(value)
    except ValueError:
        pass
    feed = watcher.byname(name)
    if not feed:
        ievent.reply("we don't have a %s feed" % name)
        return
    try:
        feed.markup.set(jsonstring([name, target]), item, value)
        feed.markup.save()
        ievent.reply('%s added to (%s,%s) markuplist' % (item, name, target))
    except KeyError:
        ievent.reply("no (%s,%s) feed available" % (name, target))
cmnds.add('hb-addmarkup', handle_hubbubaddmarkup, ['GUEST', 'USER'])
examples.add('hb-addmarkup', 'add a markup option to the markuplist (per user/channel)', 'hb-addmarkup gozerbot all-lines 1')
def handle_hubbubdelmarkup(bot, ievent):
    """ <feedname> <item> .. remove a markup option from a feed for this
        channel.
    """
    args = ievent.args
    if len(args) != 2:
        ievent.missing('<feedname> <item>')
        return
    (name, item) = args
    target = ievent.channel
    feed = watcher.byname(name)
    if not feed:
        ievent.reply("we don't have a %s feed" % name)
        return
    # KeyError: option or channel unknown; TypeError: markup entry is not
    # a mapping
    try:
        feed.markup[jsonstring([name, target])].pop(item)
    except (KeyError, TypeError):
        ievent.reply("can't remove %s from %s feed's markup" % (item, name))
        return
    feed.markup.save()
    ievent.reply('%s removed from (%s,%s) markuplist' % (item, name, target))
cmnds.add('hb-delmarkup', handle_hubbubdelmarkup, ['GUEST', 'USER'])
examples.add('hb-delmarkup', 'remove a markup option from the markuplist (per user/channel)', 'hb-delmarkup gozerbot all-lines')
def handle_hubbubdelchannel(bot, ievent):
    """ <name> [<botname>] [<bottype>] <channel> .. delete a channel
        from a hubbub feed's watchchannels.
    """
    # fix: the 4-argument form unpacked into 'bottype' while every later use
    # read 'type' — which then resolved to the type() builtin and corrupted
    # the membership checks; unified on one variable
    try:
        (name, botname, type, channel) = ievent.args
    except ValueError:
        try:
            botname = bot.name
            (name, type, channel) = ievent.args
        except ValueError:
            try:
                botname = bot.name
                name = ievent.args[0]
                type = bot.type
                channel = ievent.channel
            except IndexError:
                ievent.missing('<feedname> [<botname>] [<bottype>] [<channel>]')
                return
    item = watcher.byname(name)
    if item == None:
        ievent.reply("we don't have a %s object" % name)
        return
    # watchchannels may hold json strings or plain lists
    if jsonstring([botname, type, channel]) in item.data.watchchannels:
        item.data.watchchannels.remove(jsonstring([botname, type, channel]))
        ievent.reply('%s removed from %s hubbub item' % (channel, name))
    elif [botname, type, channel] in item.data.watchchannels:
        # fix: checked '[type, channel] in' (a 2-list) but removed a 3-list,
        # so this branch could never match its own removal
        item.data.watchchannels.remove([botname, type, channel])
        ievent.reply('%s removed from %s hubbub item' % (channel, name))
    else:
        ievent.reply('we are not monitoring %s on (%s,%s)' % (name, type, \
channel))
        return
    item.save()
cmnds.add('hb-delchannel', handle_hubbubdelchannel, ['OPER', ])
examples.add('hubbub-delchannel', 'hb-delchannel <name> [<bottype>] \
[<channel>] .. delete <channel> or <bottype> <channel> from watchchannels of \
<name>', '1) hb-delchannel gozerbot #dunkbots 2) hb-delchannel gozerbot main #dunkbots')
def handle_hubbubstopwatch(bot, ievent):
    """ <feedname> .. take a feed out of watch mode. """
    if not ievent.args:
        ievent.missing('<feedname>')
        return
    name = ievent.args[0]
    if not watcher.byname(name):
        ievent.reply("there is no %s item" % name)
        return
    if watcher.stopwatch(name):
        ievent.reply('stopped %s hubbub watch' % name)
    else:
        ievent.reply("can't stop %s watcher" % name)
cmnds.add('hb-stopwatch', handle_hubbubstopwatch, ['OPER', ])
examples.add('hb-stopwatch', 'hubbub-stopwatch <name> .. stop polling <name>', 'hb-stopwatch gozerbot')
def handle_hubbubget(bot, ievent):
    """ <feedname> .. fetch feed data. """
    try:
        name = ievent.args[0]
    except IndexError:
        ievent.missing('<feedname>')
        return
    channel = ievent.channel
    item = watcher.byname(name)
    if item == None:
        ievent.reply("we don't have a %s item" % name)
        return
    try:
        result = watcher.fetchdata(name)
    except Exception, ex:
        ievent.reply('%s error: %s' % (name, str(ex)))
        return
    # NOTE(review): the default 'reverse-order' is a truthy string, so the
    # result is reversed unless the markup option is explicitly set to a
    # falsy value -- confirm this is intended
    if item.markup.get(jsonstring([name, channel]), 'reverse-order'):
        result = result[::-1]
    response = watcher.makeresponse(name, result, ievent.channel)
    if response:
        ievent.reply("results of %s: %s" % (name, response))
    else:
        ievent.reply("can't make a reponse out of %s" % name)

cmnds.add('hb-get', handle_hubbubget, ['HUBBUB', 'USER'], threaded=True)
examples.add('hb-get', 'hubbub-get <name> .. get data from <name>', 'hb-get gozerbot')
def handle_hubbubrunning(bot, ievent):
    """ show which feeds are running. """
    # each runner entry is a pair; render it as "first second".
    # (the original built the list with a manual loop and an unused
    # "teller" counter)
    resultlist = ["%s %s" % (i[0], i[1]) for i in watcher.runners()]
    if resultlist:
        ievent.reply("running hubbub watchers: ", resultlist, nr=1)
    else:
        ievent.reply('nothing running yet')

cmnds.add('hb-running', handle_hubbubrunning, ['HUBBUB', 'USER'])
examples.add('hb-running', 'hubbub-running .. get running feeds', 'hb-running')
def handle_hubbublist(bot, ievent):
    """ return list of available feeds. """
    feeds = watcher.list()
    feeds.sort()
    if not feeds:
        ievent.reply('no hubbub items yet')
    else:
        ievent.reply("hubbub items: ", feeds)

cmnds.add('hb-list', handle_hubbublist, ['GUEST', 'USER'])
examples.add('hb-list', 'get list of hubbub items', 'hb-list')
def handle_hubbuburl(bot, ievent):
    """ <feedname> .. return url of feed. """
    try:
        name = ievent.args[0]
    except IndexError:
        ievent.missing('<feedname>')
        return
    # only the feed owner gets to see the url
    if not watcher.ownercheck(name, ievent.userhost):
        ievent.reply("you are not the owner of the %s feed" % name)
        return
    result = watcher.url(name)
    if not result:
        ievent.reply("can't fetch url for %s" % name)
        return
    try:
        # NOTE(review): looks like an attempt to detect urls embedding
        # credentials (user:pass@host) and keep them out of public channels
        # -- confirm split('/')[1] actually catches that case
        if ':' in result.split('/')[1]:
            if not ievent.msg:
                ievent.reply('run this command in a private message')
                return
    except (TypeError, ValueError, IndexError):
        pass
    ievent.reply('url of %s: %s' % (name, result))

cmnds.add('hb-url', handle_hubbuburl, ['OPER', ])
examples.add('hb-url', 'hb-url <name> .. get url from hubbub item', 'hb-url gozerbot')
def handle_hubbubitemslist(bot, ievent):
    """ <feedname> .. show itemslist (tokens) of hubbub item. """
    try:
        name = ievent.args[0]
    except IndexError:
        ievent.missing('<feedname>')
        return
    feed = watcher.byname(name)
    if not feed:
        ievent.reply("we don't have a %s feed" % name)
        return
    key = jsonstring([name, ievent.channel])
    try:
        itemslist = feed.itemslists.data[key]
    except KeyError:
        ievent.reply("no itemslist set for (%s, %s)" % (name, ievent.channel))
        return
    ievent.reply("itemslist of (%s, %s): " % (name, ievent.channel), itemslist)

cmnds.add('hb-itemslist', handle_hubbubitemslist, ['GUEST', 'USER'])
examples.add('hb-itemslist', 'hb-itemslist <name> .. get itemslist of <name> ', 'hb-itemslist gozerbot')
def handle_hubbubscan(bot, ievent):
""" <feedname> .. scan feed for available tokens. """
try:
name = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
if not watcher.byname(name):
ievent.reply('no %s feeds available' % name)
return
try:
result = watcher.scan(name)
except Exception, ex:
ievent.reply(str(ex))
return
if result == None:
ievent.reply("can't get data for %s" % name)
return
res = []
for i in result:
res.append("%s=%s" % i)
ievent.reply("tokens of %s: " % name, res)
cmnds.add('hb-scan', handle_hubbubscan, ['USER', 'GUEST'])
examples.add('hb-scan', 'hb-scan <name> .. get possible items of <name> ', 'hb-scan gozerbot')
def handle_hubbubfeeds(bot, ievent):
""" <channel> .. show what feeds are running in a channel. """
try:
channel = ievent.args[0]
except IndexError:
channel = ievent.channel
try:
result = watcher.getfeeds(channel, type=bot.type)
if result:
ievent.reply("feeds running: ", result)
else:
ievent.reply('no feeds running')
except Exception, ex:
ievent.reply("ERROR: %s" % str(ex))
cmnds.add('hb-feeds', handle_hubbubfeeds, ['USER', 'GUEST'])
examples.add('hb-feeds', 'hb-feeds <name> .. show what feeds are running in a channel', '1) hb-feeds 2) hb-feeds #dunkbots')
def handle_hubbubwelcome(bot, ievent):
    """ show hubbub welcome message, used by the gadget. """
    welcome = "hb-register <feedname> <url>"
    ievent.reply(welcome)

cmnds.add('hb-welcome', handle_hubbubwelcome, ['USER', 'GUEST'])
examples.add('hb-welcome', 'hb-welcome .. show welcome message', 'hb-welcome')
def handle_hubbubregister(bot, ievent):
    """ <name> <url> .. register a url and start the feed in one pass. """
    # reply target is the wave the event came from, or else the plain channel
    if not ievent.waveid:
        target = ievent.channel
    else:
        target = ievent.waveid
    if len(ievent.args) > 2:
        ievent.reply("feed name needs to be 1 word.")
        return
    try:
        (name, url) = ievent.args
    except ValueError:
        # only a name was given .. try to (re)start an already known feed
        try:
            name = ievent.args[0]
        except IndexError:
            ievent.reply("i need a feed name and a feed url to work with")
            return
        item = watcher.byname(name)
        if item:
            if not name in watcher.getfeeds(ievent.channel):
                watcher.start(bot.name, bot.type, name, target)
                ievent.reply('started %s feed. entries will show up when the feed is updated.' % name)
                # on wave bots also advertise the running feeds in the title
                if bot.type == "wave":
                    wave = Wave(ievent.waveid)
                    logging.debug("feed running in %s: %s" % (ievent.title, wave.data.feeds))
                    if name not in ievent.title:
                        ievent.set_title("FEEDPROVIDER - %s - #%s" % (' - '.join(wave.data.feeds), str(wave.data.nrcloned)))
            else:
                ievent.reply("feed %s is already running." % name)
        else:
            ievent.reply("i don't know a %s feed. please enter name and url." % name)
            return
        return
    if not url.startswith("http"):
        ievent.reply("the feedurl needs to start with http(s)://")
        return
    try:
        # subscribe at the hub first, then add and start the feed locally
        result = subscribe(url)
        # NOTE(review): this accepts 201-299 but rejects status 200 itself --
        # confirm that is intended
        if int(result.status) > 200 and int(result.status) < 300:
            if watcher.add(name, url, ievent.userhost):
                watcher.start(bot.name, bot.type, name, target)
                ievent.reply('started %s feed. entries will show up when the feed is updated.' % name)
                if bot.type == "wave":
                    wave = Wave(ievent.waveid)
                    logging.debug("feed running in %s: %s" % (ievent.title, wave.data.feeds))
                    if name not in ievent.title:
                        ievent.set_title("FEEDPROVIDER - %s - #%s" % (' - '.join(wave.data.feeds), str(wave.data.nrcloned)))
                return
            else:
                ievent.reply("there already exists a %s feed. please choose a different name" % name)
                return
        else:
            ievent.reply('feed %s NOT added. Status code is %s. please check if the feed is valid.' % (name, result.status))
    except Exception, ex:
        handle_exception()
        ievent.reply("Oops something went wrong: %s" % str(ex))

cmnds.add('hb-register', handle_hubbubregister, ['USER', 'GUEST'])
examples.add('hb-register', 'hb-register .. register url and start it in one pass', 'hb-register hgrepo http://code.google.com/feeds/p/feedprovider/hgchanges/basic')
| Python |
# IP subnet calculator
# (c) 2007 Wijnand 'tehmaze' Modderman - http://tehmaze.com
# BSD License
from gozerlib.commands import cmnds
from gozerlib.examples import examples
""" IP subnet calculator. this module allows you to perform network
calculations.
"""
"""
# IP subnet calculator
# (C) 2007 Wijnand 'tehmaze' Modderman - http://tehmaze.com
# BSD License
#
# ABOUT
# This module allows you to perform network calculations.
#
# CHANGELOG
# 2007-10-26: Added IPv6 support, as well as a lot of other functions,
# refactored the calculations.
# 2007-10-25: Initial writeup, because I could not find any other workable
# implementation.
#
# TODO
# * add CLI parser
#
# REFERENCES
# * http://www.estoile.com/links/ipv6.pdf
# * http://www.iana.org/assignments/ipv4-address-space
# * http://www.iana.org/assignments/multicast-addresses
# * http://www.iana.org/assignments/ipv6-address-space
# * http://www.iana.org/assignments/ipv6-tla-assignments
# * http://www.iana.org/assignments/ipv6-multicast-addresses
# * http://www.iana.org/assignments/ipv6-anycast-addresses
#
# THANKS (testing, tips)
# * Bastiaan (trbs)
# * Peter van Dijk (Habbie)
# * Hans van Kranenburg (Knorrie)
#
"""
__version__ = '0.2a'
import types, socket
class IP(object):
# Hex-to-Bin conversion masks
_bitmask = {
'0': '0000', '1': '0001', '2': '0010', '3': '0011',
'4': '0100', '5': '0101', '6': '0110', '7': '0111',
'8': '1000', '9': '1001', 'a': '1010', 'b': '1011',
'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'
}
# IP range specific information, see IANA allocations.
_range = {
4: {
'01' : 'CLASS A',
'10' : 'CLASS B',
'110' : 'CLASS C',
'1110' : 'CLASS D MULTICAST',
'11100000' : 'CLASS D LINKLOCAL',
'1111' : 'CLASS E',
'00001010' : 'PRIVATE RFC1918', # 10/8
'101011000001' : 'PRIVATE RFC1918', # 172.16/12
'1100000010101000' : 'PRIVATE RFC1918', # 192.168/16
},
6: {
'00000000' : 'RESERVED', # ::/8
'00000001' : 'UNASSIGNED', # 100::/8
'0000001' : 'NSAP', # 200::/7
'0000010' : 'IPX', # 400::/7
'0000011' : 'UNASSIGNED', # 600::/7
'00001' : 'UNASSIGNED', # 800::/5
'0001' : 'UNASSIGNED', # 1000::/4
'0010000000000000' : 'RESERVED', # 2000::/16 Reserved
'0010000000000001' : 'ASSIGNABLE', # 2001::/16 Sub-TLA Assignments [RFC2450]
'00100000000000010000000': 'ASSIGNABLE IANA', # 2001:0000::/29 - 2001:01F8::/29 IANA
'00100000000000010000001': 'ASSIGNABLE APNIC', # 2001:0200::/29 - 2001:03F8::/29 APNIC
'00100000000000010000010': 'ASSIGNABLE ARIN', # 2001:0400::/29 - 2001:05F8::/29 ARIN
'00100000000000010000011': 'ASSIGNABLE RIPE', # 2001:0600::/29 - 2001:07F8::/29 RIPE NCC
'0010000000000010' : '6TO4', # 2002::/16 "6to4" [RFC3056]
'0011111111111110' : '6BONE TEST', # 3ffe::/16 6bone Testing [RFC2471]
'0011111111111111' : 'RESERVED', # 3fff::/16 Reserved
'010' : 'GLOBAL-UNICAST', # 4000::/3
'011' : 'UNASSIGNED', # 6000::/3
'100' : 'GEO-UNICAST', # 8000::/3
'101' : 'UNASSIGNED', # a000::/3
'110' : 'UNASSIGNED', # c000::/3
'1110' : 'UNASSIGNED', # e000::/4
'11110' : 'UNASSIGNED', # f000::/5
'111110' : 'UNASSIGNED', # f800::/6
'1111110' : 'UNASSIGNED', # fc00::/7
'111111100' : 'UNASSIGNED', # fe00::/9
'1111111010' : 'LINKLOCAL', # fe80::/10
'1111111011' : 'SITELOCAL', # fec0::/10
'11111111' : 'MULTICAST', # ff00::/8
'0' * 96 : 'IPV4COMP', # ::/96
'0' * 80 + '1' * 16 : 'IPV4MAP', # ::ffff:0:0/96
'0' * 128 : 'UNSPECIFIED', # ::/128
'0' * 127 + '1' : 'LOOPBACK' # ::1/128
}
}
def __init__(self, ip, mask=None, version=0):
self.mask = mask
self.v = 0
# Parse input
if isinstance(ip, IP):
self.ip = ip.ip
self.dq = ip.dq
self.v = ip.v
self.mask = ip.mask
elif type(ip) in [types.IntType, types.LongType]:
self.ip = long(ip)
if self.ip <= 0xffffffff:
self.v = version or 4
self.dq = self._itodq(ip)
else:
self.v = version or 4
self.dq = self._itodq(ip)
else:
# If string is in CIDR notation
if '/' in ip:
ip, mask = ip.split('/', 1)
self.mask = int(mask)
self.v = version or 0
self.dq = ip
self.ip = self._dqtoi(ip)
assert self.v != 0, 'Could not parse input'
# Netmask defaults to one ip
if self.mask is None:
self.mask = self.v == 4 and 32 or 128
# Validate subnet size
if self.v == 6:
self.dq = self._itodq(self.ip)
if self.mask < 0 or self.mask > 128:
raise ValueError, "IPv6 subnet size must be between 0 and 128"
elif self.v == 4:
if self.mask < 0 or self.mask > 32:
raise ValueError, "IPv4 subnet size must be between 0 and 32"
def bin(self):
'''
Full-length binary representation of the IP address.
'''
h = hex(self.ip).lower().rstrip('l')
b = ''.join(self._bitmask[x] for x in h[2:])
l = self.v == 4 and 32 or 128
return ''.join('0' for x in xrange(len(b), l)) + b
def hex(self):
'''
Full-length hexadecimal representation of the IP address.
'''
if self.v == 4:
return '%08x' % self.ip
else:
return '%032x' % self.ip
def subnet(self):
return self.mask
def version(self):
return self.v
def info(self):
'''
Show IANA allocation information for the current IP address.
'''
b = self.bin()
l = self.v == 4 and 32 or 128
for i in range(len(b), 0, -1):
if self._range[self.v].has_key(b[:i]):
return self._range[self.v][b[:i]]
return 'UNKNOWN'
def _dqtoi(self, dq):
'''
Convert dotquad or hextet to long.
'''
# hex notation
if dq.startswith('0x'):
ip = long(dq[2:], 16)
if ip > 0xffffffffffffffffffffffffffffffffL:
raise ValueError, "%r: IP address is bigger than 2^128" % dq
if ip <= 0xffffffff:
self.v = 4
else:
self.v = 6
return ip
# IPv6
if ':' in dq:
hx = dq.split(':') # split hextets
if ':::' in dq:
raise ValueError, "%r: IPv6 address can't contain :::" % dq
# Mixed address (or 4-in-6), ::ffff:192.0.2.42
if '.' in dq:
return self._dqtoi(hx[-1])
if len(hx) > 8:
raise ValueError, "%r: IPv6 address with more than 8 hexletts" % dq
elif len(hx) < 8:
# No :: in address
if not '' in hx:
raise ValueError, "%r: IPv6 address invalid: compressed format malformed" % dq
elif not (dq.startswith('::') or dq.endswith('::')) and len([x for x in hx if x == '']) > 1:
raise ValueError, "%r: IPv6 address invalid: compressed format malformed" % dq
ix = hx.index('')
px = len(hx[ix+1:])
for x in xrange(ix+px+1, 8):
hx.insert(ix, '0')
elif dq.endswith('::'):
pass
elif '' in hx:
raise ValueError, "%r: IPv6 address invalid: compressed format detected in full notation" % dq
ip = ''
hx = [x == '' and '0' or x for x in hx]
for h in hx:
if len(h) < 4:
h = '%04x' % int(h, 16)
if 0 > int(h, 16) > 0xffff:
raise ValueError, "%r: IPv6 address invalid: hextets should be between 0x0000 and 0xffff" % dq
ip += h
self.v = 6
return long(ip, 16)
elif len(dq) == 32:
# Assume full heximal notation
self.v = 6
return long(h, 16)
# IPv4
if '.' in dq:
q = dq.split('.')
if len(q) > 4:
raise ValueError, "%r: IPv4 address invalid: more than 4 bytes" % dq
for x in q:
if 0 > int(x) > 255:
raise ValueError, "%r: IPv4 address invalid: bytes should be between 0 and 255" % dq
self.v = 4
return long(q[0])<<24 | long(q[1])<<16 | long(q[2])<<8 | long(q[3])
raise ValueError, "Invalid address input"
def _itodq(self, n):
'''
Convert long to dotquad or hextet.
'''
if self.v == 4:
return '.'.join(map(str, [(n>>24) & 0xff, (n>>16) & 0xff, (n>>8) & 0xff, n & 0xff]))
else:
n = '%032x' % n
return ':'.join(n[4*x:4*x+4] for x in xrange(0, 8))
def __str__(self):
return self.dq
def __int__(self):
return int(self.ip)
def __long__(self):
return self.ip
def size(self):
return 1
def clone(self):
'''
Return a new <IP> object with a copy of this one.
'''
return IP(self)
def to_ipv4(self):
'''
Convert (a IPv6) IP address to an IPv4 address, if possible. Only works
for IPv4-compat (::/96) and 6-to-4 (2002::/16) addresses.
'''
if self.v == 4:
return self
else:
if self.bin().startswith('0' * 96):
return IP(long(self), version=4)
elif long(self) & 0x20020000000000000000000000000000L:
return IP((long(self)-0x20020000000000000000000000000000L)>>80, version=4)
else:
return ValueError, "%r: IPv6 address is not IPv4 compatible, nor a 6-to-4 IP" % self.dq
def to_ipv6(self, type='6-to-4'):
'''
Convert (a IPv4) IP address to an IPv6 address.
'''
assert type in ['6-to-4', 'compat'], 'Conversion type not supported'
if self.v == 4:
if type == '6-to-4':
return IP(0x20020000000000000000000000000000L | long(self)<<80, version=6)
elif type == 'compat':
return IP(long(self), version=6)
else:
return self
def to_tuple(self):
'''
Used for comparisons.
'''
return (self.dq, self.mask)
class Network(IP):
    '''
    Network slice calculations.
    '''
    def netmask(self):
        '''
        Network netmask derived from subnet size.
        '''
        # shift an all-ones word right then left so only the top self.mask
        # bits stay set
        if self.version() == 4:
            return IP((0xffffffffL >> (32-self.mask)) << (32-self.mask), version=self.version())
        else:
            return IP((0xffffffffffffffffffffffffffffffffL >> (128-self.mask)) << (128-self.mask), version=self.version())
    def network(self):
        '''
        Network address.
        '''
        return IP(self.ip & long(self.netmask()), version=self.version())
    def broadcast(self):
        '''
        Broadcast address.
        '''
        # XXX: IPv6 doesn't have a broadcast address, but it's used for other
        # calculations such as <Network.host_last>.
        if self.version() == 4:
            return IP(long(self.network()) | (0xffffffff - long(self.netmask())), version=self.version())
        else:
            return IP(long(self.network()) | (0xffffffffffffffffffffffffffffffffL - long(self.netmask())), version=self.version())
    def host_first(self):
        '''
        First available host in this subnet.
        '''
        # a /32 (or /128) network is a single host
        if (self.version() == 4 and self.mask == 32) or (self.version() == 6 and self.mask == 128):
            return self
        return IP(long(self.network())+1, version=self.version())
    def host_last(self):
        '''
        Last available host in this subnet.
        '''
        if (self.version() == 4 and self.mask == 32) or (self.version() == 6 and self.mask == 128):
            return self
        return IP(long(self.broadcast())-1, version=self.version())
    def in_network(self, other):
        '''
        Check if the given IP address is within this network.
        '''
        # NOTE(review): other is parsed as a Network, so a bare address gets
        # the default host mask -- confirm the size arithmetic is intended
        other = Network(other)
        return long(other) >= long(self) and long(other) < long(self) + self.size() - other.size() + 1
    def __contains__(self, ip):
        '''
        Check if the given ip is part of the network.
        >>> '192.0.2.42' in Network('192.0.2.0/24')
        True
        >>> '192.168.2.42' in Network('192.0.2.0/24')
        False
        '''
        return self.in_network(ip)
    # NOTE(review): the rich comparisons below order networks by size (number
    # of addresses), not by address value; IP(other).size() is always 1
    def __lt__(self, other):
        return self.size() < IP(other).size()
    def __le__(self, other):
        return self.size() <= IP(other).size()
    def __gt__(self, other):
        return self.size() > IP(other).size()
    def __ge__(self, other):
        return self.size() >= IP(other).size()
    def __iter__(self):
        '''
        Generate a range of ip addresses within the network.
        >>> for ip in Network('192.168.114.0/30'): print str(ip)
        ...
        192.168.114.0
        192.168.114.1
        192.168.114.2
        192.168.114.3
        '''
        for ip in [IP(long(self)+x) for x in xrange(0, self.size())]:
            yield ip
    def has_key(self, ip):
        '''
        Check if the given ip is part of the network.
        >>> net = Network('192.0.2.0/24')
        >>> net.has_key('192.168.2.0')
        False
        >>> net.has_key('192.0.2.42')
        True
        '''
        return self.__contains__(ip)
    def size(self):
        '''
        Number of ip's within the network.
        '''
        return 2 ** ((self.version() == 4 and 32 or 128) - self.mask)
def handle_ipcalc(bot, ievent):
""" <ip>[</size>] .. calculate IP subnets. """
if not ievent.args:
ievent.missing('<ip>[/<size>]')
return
try:
net = Network(ievent.args[0])
except ValueError, e:
ievent.reply('error: %s' % e)
return
ievent.reply('version: %d, address: %s, network size: %d, network address: %s, netmask: %s, first host in network: %s, last host in network: %s, network info: %s' % \
(net.version(), str(net), net.mask, net.network(), net.netmask(), net.host_first(), net.host_last(), net.info()))
cmnds.add('ipcalc', handle_ipcalc, ['USER', 'GUEST'])
examples.add('ipcalc', 'ip calculator', 'ipcalc 127.0.0.1/12')
| Python |
# commonplugs/remote.py
#
#
""" events passed as json over xmpp. """
## gozerbot imports
from gozerlib.callbacks import callbacks
from gozerlib.utils.url import posturl, getpostdata
from gozerlib.persist import PlugPersist
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.utils.exception import handle_exception
from gozerlib.remote.bot import RemoteBot
from gozerlib.config import cfg, Config
## simplejson imports
from simplejson import dumps
## basic imports
import socket
import re
## defines
# default remote target we post events to
outurl = "http://jsbbot.appspot.com/remote"
# persistent plugin state: "outs" is the list of urls events are sent to,
# "forward" the list of channels whose events get forwarded
state = PlugPersist('remote')
if not state.data.outs:
    state.data.outs = [outurl, ]
if not state.data.forward:
    state.data.forward = []
## commands
def handle_remote_addout(bot, event):
    """ add a bot (JID) to receive out events. """
    global state
    target = event.rest
    if not target:
        event.missing('<url>')
        return
    # only add the url once
    if target not in state.data['outs']:
        state.data['outs'].append(target)
        state.save()
    event.done()

cmnds.add('remote-addout', handle_remote_addout, 'OPER')
def handle_remote_delout(bot, event):
    """ stop sending events to another bot. """
    global state
    target = event.rest
    if not target:
        event.missing('<url>')
        return
    try:
        # silently ignore urls we were not sending to
        state.data['outs'].remove(target)
        state.save()
    except ValueError:
        pass
    event.done()

cmnds.add('remote-delout', handle_remote_delout, 'OPER')
def handle_remote_outs(bot, event):
    """ show to which other bots we are sending. """
    outs = state.data['outs']
    event.reply(outs)

cmnds.add('remote-outs', handle_remote_outs, 'OPER')
def handle_remoteforward(bot, event):
    """ forward all events occurring on channel (wave) to the remotenet. """
    if not event.args:
        event.missing('<channel>')
        return
    channel = event.args[0]
    state.data['forward'].append(channel)
    state.save()
    event.done()

cmnds.add('remote-forward', handle_remoteforward, 'OPER')
examples.add('remote-forward', 'add a forward item so that channels matching this get send over the remotenet', 'remote-forward #dunkbots')
def handle_remotedelforward(bot, event):
    """ stop forwarding a channel (wave) to the remotenet. """
    if not event.args:
        event.missing('<channel>')
        return
    channel = event.args[0]
    try:
        state.data['forward'].remove(channel)
        state.save()
        event.done()
    except ValueError:
        event.reply("we are not forwarding %s" % channel)

cmnds.add('remote-delforward', handle_remotedelforward, 'OPER')
examples.add('remote-delforward', 'remove a forward item so that channels matching this no longer get send over the remotenet', 'remote-delforward #dunkbots')
def handle_remotelistforward(bot, event):
    """ list all forwarded channels (waves). """
    forwards = state.data['forward']
    event.reply("forwards: ", forwards)

cmnds.add('remote-listforward', handle_remotelistforward, 'OPER')
examples.add('remote-listforward', 'show forwards', 'remote-listforward')
def handle_remotecmnd(bot, event):
    """ do a command on the remotenet. """
    cmndstring = event.rest
    if not cmndstring:
        event.missing("<cmnd>")
        return
    # NOTE(review): outs are given to the constructor and added again via
    # addouts -- looks redundant, confirm addouts deduplicates
    gnbot = RemoteBot(outs=state.data.outs)
    gnbot.addouts(state.data.outs)
    event.reply("sending to: ", gnbot.outs)
    # prefix with ! so the remote side treats it as a command
    gnbot.cmnd(event, "!%s" % cmndstring)

cmnds.add('cmnd', handle_remotecmnd, 'OPER')
examples.add('cmnd', 'execute a command on the remotenet', 'cmnd version')
| Python |
# commonplugs/relay.py
#
#
""" relay to other users/channels/waves. """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.callbacks import callbacks
from gozerlib.persist import PlugPersist
from gozerlib.examples import examples
from gozerlib.fleet import fleet
from gozerlib.utils.exception import handle_exception
from gozerlib.errors import NoSuchWave
## basic imports
import logging
from copy import deepcopy as cpy
## plugin state .. this is where the relay plugin data lives. It's JSON string
## put into the database with memcache in between. The data is accessible
## through object.data. When data changes call object.save()
## see gozerlib/persist/persist.py
# persistent state: "block" holds blocked relay entries, "relay" holds the
# active relays per origin channel/wave
block = PlugPersist('block')
relay = PlugPersist('relay')
## CALLBACKS
## these are the callbacks that do the hard work of the relay plugin takes
## place. The precondition is called to see whether the callback should fire
## or not. It should return True or False.
## see gozerlib/callbacks.py
def relayprecondition(bot, event):
    """ check to see whether the callback needs to be executed. """
    origin = event.origin or event.channel
    # only relay plain, locally generated text on channels with relays
    if not event.txt or event.iscmnd or event.isremote:
        return False
    return origin in relay.data
## CORE BUSINESS
## this callback does all the relaying. It receives the event that triggered
## the callback and checks whether there are relays for the channel the event
## took place (the bot uses the users JID in the jabber and web case (web users
## must be registered)
def relaycallback(bot, event):
    """ relay the event to every (botname, type, target) configured for the
        channel/wave it originated on. """
    # determine where the event came from
    origin = event.origin or event.channel
    try:
        # loop over relays for origin
        for botname, type, target in relay.data[origin]:
            try:
                logging.debug('trying relay of %s to (%s,%s)' % (origin, type, target))
                # tests to prevent looping: already-relayed text carries a
                # "[userhost]" prefix, so bail out on any of those markers
                if event.txt.find('] [') != -1:
                    continue
                if type == bot.type and origin == target:
                    continue
                if "[%s]" % event.userhost in event.txt:
                    continue
                if "[%s]" % target in event.txt:
                    continue
                if "[%s]" % bot.name in event.txt:
                    continue
                if "[%s]" % botname in event.txt:
                    continue
                # check whether relay is blocked
                if block.data.has_key(origin):
                    if [botname, type, target] in block.data[origin]:
                        continue
                # retrieve the bot from fleet (based on type)
                outbot = fleet.makebot(botname, type)
                if outbot:
                    logging.debug('relay - outbot found - %s - %s' % (outbot.name, outbot.type))
                    # we got bot .. use it to send the relayed message
                    txt = "[%s] %s" % (event.userhost, event.txt)
                    if txt.find('] [') != -1:
                        continue
                    outbot.say(target, txt)
                else:
                    logging.error("can't find %s bot" % type)
            except Exception, ex:
                handle_exception()
    except KeyError:
        # no relays configured for this origin
        pass
## MORE CORE BUSINESS
## this is the place where the callbacks get registered. The first argument is
## the string representation of the event type, MESSAGE is for jabber message,
## EXEC is for the gadget handling, WEB for the website, BLIP_SUBMITTED for
## wave and OUTPUT for the outputcache (both used in wave and web).
# wire the relay callback up to every event type we want to mirror
callbacks.add('MESSAGE', relaycallback, relayprecondition)
callbacks.add('EXEC', relaycallback, relayprecondition)
callbacks.add('WEB', relaycallback, relayprecondition)
callbacks.add('BLIP_SUBMITTED', relaycallback, relayprecondition)
callbacks.add('OUTPUT', relaycallback, relayprecondition)
callbacks.add('PRIVMSG', relaycallback, relayprecondition)
## COMMANDS
## this is where the commands for the relay plugin are defined, Arguments to a
## command function are the bot that the event occured on and the event that
## triggered the command. Think the code speaks for itself here ;]
def handle_relayclone(bot, event):
""" clone relays from one channel to the other. """
new = event.origin or event.channel
try:
old = event.args[0]
except IndexError, ex:
event.missing('<old target>')
return
try:
relay.data[new] = list(relay.data[old])
del relay.data[old]
relay.save()
except KeyError:
event.reply("i didn't join the %s wave" % old)
return
except Exception, ex:
handle_exception()
event.done()
cmnds.add('relay-clone', handle_relayclone, 'OPER')
examples.add('relay-clone', 'clone relay of old wave to the new', 'relay-clone googlewave.com!w+Pu4YwndxA')
def handle_relay(bot, event):
    """ [<botname>] <type> <target> .. open a relay to a user. all input from us will be relayed. """
    try:
        (botname, type, target) = event.args
    except ValueError:
        try:
            botname = bot.name
            (type, target) = event.args
        except ValueError:
            event.missing('[<botname>] <bottype> <target>')
            return
    origin = event.origin or event.channel
    if origin == target:
        event.reply("can't relay to yourself")
        return
    if not relay.data.has_key(origin):
        relay.data[origin] = []
    try:
        # entries are [botname, type, target] triples .. compare the full
        # triple so the same relay is not added twice (the original tested
        # a 2-element list, so duplicates piled up)
        if not [botname, type, target] in relay.data[origin]:
            relay.data[origin].append([botname, type, target])
            relay.save()
    except KeyError:
        relay.data[origin] = [[botname, type, target], ]
        relay.save()
    event.done()

cmnds.add('relay', handle_relay, 'USER')
examples.add('relay', 'open a relay to another user', 'relay bthate@gmail.com')
def handle_relaystop(bot, event):
    """ stop a relay to a user. all relaying to target will be ignored. """
    try:
        (type, target) = event.args
    except ValueError:
        try:
            target = event.args[0]
            type = bot.type
        except IndexError:
            type = bot.type
            target = event.channel
    origin = event.origin or event.channel
    try:
        logging.debug('trying to remove relay (%s,%s)' % (type, target))
        # relays are stored as [botname, type, target] triples (see
        # handle_relay) .. drop every triple matching this type/target.
        # (the original removed a 2-element list, which never matched, so
        # relay-stop had no effect)
        relay.data[origin] = [entry for entry in relay.data[origin] \
            if entry[1:] != [type, target]]
        relay.save()
    except (KeyError, ValueError):
        pass
    event.done()

cmnds.add('relay-stop', handle_relaystop, 'USER')
examples.add('relay-stop', 'close a relay to another user', 'relay-stop bthate@gmail.com')
def handle_askrelaylist(bot, event):
    """ show all relay's of a user. """
    origin = event.origin or event.channel
    try:
        relays = relay.data[origin]
    except KeyError:
        event.reply('no relays for %s' % origin)
        return
    event.reply('relays for %s: ' % origin, relays)

cmnds.add('relay-list', handle_askrelaylist, 'OPER')
examples.add('relay-list', 'show all relays of user/channel/wave.', 'relay-list')
def handle_relayblock(bot, event):
    """ <type> <target> .. block a user/channel/wave from relaying to us. """
    try:
        (type, target) = event.args
    except ValueError:
        event.missing('<type> <target>')
        return
    origin = event.origin or event.channel
    if bot.type == type and origin == target:
        event.reply("can't relay to yourself")
        return
    # blocks are kept under the blocked channel (target), listing the
    # destinations its events may not reach. the original initialized
    # block.data[origin] but then read block.data[target], raising KeyError
    # on the first block for a channel.
    if not block.data.has_key(target):
        block.data[target] = []
    # NOTE(review): relaycallback compares [botname, type, target] entries
    # against block.data[origin] -- verify the stored format matches there
    if not [type, origin] in block.data[target]:
        block.data[target].append([type, origin])
        block.save()
    event.done()

cmnds.add('relay-block', handle_relayblock, 'USER')
examples.add('relay-block', 'block a relay from another user', 'relay-block bthate@gmail.com')
def handle_relayunblock(bot, event):
    """ <target> .. remove a relay block of an user. """
    try:
        target = event.args[0]
    except IndexError:
        event.missing('<target>')
        return
    origin = event.origin or event.channel
    try:
        # blocks live under the blocked channel as [type, destination]
        # pairs (see handle_relayblock) .. drop every pair pointing at us.
        # (the original removed [bot.name, target] from block.data[origin],
        # which never matched the stored format)
        block.data[target] = [entry for entry in block.data[target] \
            if entry[1] != origin]
        block.save()
    except (KeyError, ValueError, IndexError):
        pass
    event.done()

# the original registered handle_relaystop here by mistake, making
# relay-unblock behave like relay-stop
cmnds.add('relay-unblock', handle_relayunblock, 'USER')
examples.add('relay-unblock', 'remove a block of another user', 'relay-unblock bthate@gmail.com')
def handle_relayblocklist(bot, event):
    """ show all blocks of a user/channel.wave. """
    origin = event.origin or event.channel
    try:
        blocks = block.data[origin]
    except KeyError:
        event.reply('no blocks for %s' % origin)
        return
    event.reply('blocks for %s: ' % origin, blocks)

cmnds.add('relay-blocklist', handle_relayblocklist, 'OPER')
examples.add('relay-blocklist', 'show blocked relays to us', 'relay-blocklist')
| Python |
# commonplugs/8b.py
#
#
""" run the eight ball. """
## gozerlib imports
from gozerlib.utils.exception import handle_exception
from gozerlib.commands import cmnds
from gozerlib.examples import examples
## basic imports
import re
import random
## defines
# canned magic 8-ball answers .. handle_8b picks one at random
balltxt=[
"Signs point to yes.",
"Yes.",
"Most likely.",
"Without a doubt.",
"Yes - definitely.",
"As I see it, yes.",
"You may rely on it.",
"Outlook good.",
"It is certain.",
"It is decidedly so.",
"Reply hazy, try again.",
"Better not tell you now.",
"Ask again later.",
"Concentrate and ask again.",
"Cannot predict now.",
"My sources say no.",
"Very doubtful.",
"My reply is no.",
"Outlook not so good.",
"Don't count on it."
]
## commands
def handle_8b(bot, ievent):
    """ throw the eight ball. """
    answer = random.choice(balltxt)
    ievent.reply(answer)

cmnds.add('8b', handle_8b, ['USER', 'GUEST'])
examples.add('8b', 'show what the magic 8 ball has to say.', '8b')
| Python |
# plugs/wikipedia.py
#
#
""" query wikipedia .. use countrycode to select a country specific wikipedia. """
## gozerlib imports
from gozerlib.utils.url import geturl, striphtml
from gozerlib.utils.generic import splittxt, handle_exception, fromenc
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.utils.rsslist import rsslist
## generic imports
from urllib import quote
import re
## defines
# matches everything between "start content" and "end content" markers
# NOTE(review): without re.S the '.' does not cross newlines -- confirm intended
wikire = re.compile('start content(.*?)end content', re.M)
## functions
def searchwiki(txt, lang='en'):
    """ look up txt on wikipedia; a '-xx' word in txt selects a language
        specific wikipedia. returns (text, url) or None. """
    # pick up an optional "-xx" two letter language flag from the query
    for i in txt.split():
        if i.startswith('-'):
            if len(i) != 3:
                continue
            else:
                lang = i[1:]
                continue
    # NOTE(review): this also rewrites queries containing a literal
    # "-<lang>" even when no flag was given -- confirm intended
    txt = txt.replace("-%s" % lang, '')
    txt = txt.strip().capitalize()
    what = txt.strip().replace(' ', '_')
    url = 'http://%s.wikipedia.org/wiki/Special:Export/%s' % (lang, \
quote(what.encode('utf-8')))
    url2 = 'http://%s.wikipedia.org/wiki/%s' % (lang, \
quote(what.encode('utf-8')))
    txt = getwikidata(url)
    if not txt:
        return None
    # wikipedia may point to an article with different capitalisation ..
    # retry with a title cased name
    if 'from other capitalisation' in txt:
        what = what.title()
        url = 'http://%s.wikipedia.org/wiki/Special:Export/%s' % (lang, \
quote(what.encode('utf-8')))
        url2 = 'http://%s.wikipedia.org/wiki/%s' % (lang, \
quote(what.encode('utf-8')))
        txt = getwikidata(url)
    # follow an explicit #REDIRECT found in the article source
    if '#REDIRECT' in txt or '#redirect' in txt:
        redir = ' '.join(txt.split()[1:])
        url = 'http://%s.wikipedia.org/wiki/Special:Export/%s' % (lang, \
quote(redir.encode('utf-8')))
        url2 = 'http://%s.wikipedia.org/wiki/%s' % (lang, \
quote(redir.encode('utf-8')))
        txt = getwikidata(url)
    return (txt, url2)
def getwikidata(url):
    """ fetch wiki data from url and return the cleaned up article text,
        or None when nothing usable came back. """
    result = fromenc(geturl(url))
    if not result:
        return
    res = rsslist(result)
    txt = ""
    # grab the first entry that carries a text payload.
    # (the original used a bare "except:", hiding any unrelated error)
    for i in res:
        try:
            txt = i['text']
            break
        except (KeyError, TypeError):
            continue
    if not txt:
        return
    #txt = re.sub('\[\[Image:([^\[\]]+|\[\[[^\]]+\]\])*\]\]', '', txt)
    # strip wiki link markup and collapse whitespace
    txt = txt.replace('[[', '')
    txt = txt.replace(']]', '')
    txt = re.sub('\s+', ' ', txt)
    return txt
## commands
def handle_wikipedia(bot, ievent):
    """ <what> .. search wikipedia. """
    if not ievent.rest:
        ievent.missing('<what>')
        return
    res = searchwiki(ievent.rest)
    if not res:
        ievent.reply('no result found')
        return
    (txt, url) = res
    # prefix the stripped article text with the article url
    ievent.reply('%s ===> ' % url, splittxt(striphtml(txt).strip()), raw=True)

cmnds.add('wikipedia', handle_wikipedia, ['USER', 'GUEST'])
examples.add('wikipedia', 'wikipedia ["-" <countrycode>] <what> .. search \
wikipedia for <what>','1) wikipedia gozerbot 2) wikipedia -nl bot')
| Python |
# feedprovider common plugins
#
#

""" this package contains all the plugins common to all drivers. """

import os

# build __all__ from the .py files and subpackages in this directory
(f, tail) = os.path.split(__file__)
__all__ = []
for i in os.listdir(f):
    if i.endswith('.py'):
        __all__.append(i[:-3])
    elif os.path.isdir(f + os.sep + i) and not i.startswith('.'):
        __all__.append(i)

# the package init itself is not a plugin .. the original used a bare
# except here, which would also mask unrelated errors
try:
    __all__.remove('__init__')
except ValueError:
    pass

__plugs__ = __all__
| Python |
# waveplugs/watcher.py
#
#
""" watch waves through xmpp. a wave is called a channel here. """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.callbacks import callbacks, gn_callbacks
from gozerlib.persist import PlugPersist
from gozerlib.fleet import fleet
from gozerlib.utils.exception import handle_exception
from gozerlib.examples import examples
from gozerlib.gae.wave.waves import Wave
## basic imports
import copy
import logging
## defines

# shortcut used throughout the plugins to deep-copy structures
cpy = copy.deepcopy
## classes

class Watched(PlugPersist):

    """ Watched object contains channels and subscribers. """

    def __init__(self, filename):
        PlugPersist.__init__(self, filename)
        # make sure the persisted attributes exist
        self.data.channels = self.data.channels or {}
        self.data.whitelist = self.data.whitelist or []
        self.data.descriptions = self.data.descriptions or {}

    def subscribe(self, botname, type, channel, jid):
        """ subscribe a jid to a channel. """
        jid = unicode(jid)
        if not self.data.channels.has_key(channel):
            self.data.channels[channel] = []
        # entries are stored as [botname, type, jid] triples, so the
        # duplicate check must compare the full triple (the original
        # compared [type, jid] and thus appended a duplicate every call)
        if not [botname, type, jid] in self.data.channels[channel]:
            self.data.channels[channel].append([botname, type, jid])
            self.save()
        return True

    def unsubscribe(self, botname, type, channel, jid):
        """ unsubscribe a jid from a channel. """
        try:
            self.data.channels[channel].remove([botname, type, unicode(jid)])
        except (KeyError, TypeError, ValueError):
            # ValueError: the subscription wasn't there to begin with
            return False
        self.save()
        return True

    def subscribers(self, channel):
        """ get all subscribers of a channel. """
        try:
            return self.data.channels[channel]
        except KeyError:
            return []

    def check(self, channel):
        """ check if channel has subscribers. """
        return self.data.channels.has_key(channel)

    def enable(self, channel):
        """ add channel to whitelist. """
        if not channel in self.data.whitelist:
            self.data.whitelist.append(channel)
            self.save()

    def disable(self, channel):
        """ remove channel from whitelist. """
        try:
            self.data.whitelist.remove(channel)
        except ValueError:
            return False
        self.save()
        return True

    def available(self, channel):
        """ check if channel is on whitelist. """
        return channel in self.data.whitelist

    def channels(self, channel):
        """ return the channels *channel* is subscribed to.

            NOTE: this method was defined twice .. the earlier definition
            (returning the subscribers of a channel, duplicating
            subscribers()) was shadowed dead code and has been removed.
        """
        res = []
        for chan, targets in self.data.channels.iteritems():
            if channel in str(targets):
                res.append(chan)
        return res
## defines

# singleton holding all watch subscriptions, persisted under 'channels'
watched = Watched('channels')
## callbacks

def prewatchcallback(bot, event):
    """ watcher callback precondition: the channel must have
        subscribers and the event must carry text. """
    logging.debug("watcher - pre - %s - %s - %s" % (event.channel, event.userhost, event.txt))
    has_subscribers = watched.check(event.channel)
    return has_subscribers and event.txt
def watchcallback(bot, event):
    """ the watcher callback, see if channels are followed and if so send data. """
    if not event.txt:
        return
    subscribers = watched.subscribers(event.channel)
    # remember the channel title so watcher-channels can show it
    # NOTE(review): not save()d here .. presumably persisted elsewhere, confirm
    watched.data.descriptions[event.channel] = event.title
    logging.debug("watcher - out - %s - %s" % (str(subscribers), event.txt))
    for item in subscribers:
        # entries are [botname, type, channel] triples .. skip malformed ones
        try:
            (botname, type, channel) = item
        except ValueError:
            continue
        watchbot = fleet.makebot(botname, type)
        if watchbot:
            orig = event.nick or event.userhost
            if event.cbtype == "OUTPUT":
                # try to decorate bot output with the wave title
                try:
                    from gozerlib.gae.wave.waves import Wave
                    wave = Wave(event.channel)
                except ImportError:
                    wave = None
                if wave:
                    txt = u"[%s] %s: %s" % (wave.data.title, event.ruserhost, event.txt)
                else:
                    txt = u"[%s] %s: %s" % (type, event.ruserhost, event.txt)
            else:
                txt = u"[%s] %s" % (orig, event.txt)
            logging.debug("watcher - %s - %s" % (type, txt))
            # a "] [" prefix means the message was already relayed once ..
            # skip it to avoid relay loops
            if txt.find('] [') > 1:
                continue
            watchbot.say(channel, txt)
# hook the watcher into the relevant event types (PRIVMSG on the generic
# callbacks is deliberately disabled)
gn_callbacks.add('BLIP_SUBMITTED', watchcallback, prewatchcallback)
callbacks.add('BLIP_SUBMITTED', watchcallback, prewatchcallback)
gn_callbacks.add('PRIVMSG', watchcallback, prewatchcallback)
#callbacks.add('PRIVMSG', watchcallback, prewatchcallback)
gn_callbacks.add('OUTPUT', watchcallback, prewatchcallback)
callbacks.add('OUTPUT', watchcallback, prewatchcallback)
gn_callbacks.add('MESSAGE', watchcallback, prewatchcallback)
## commands
def handle_watcherstart(bot, event):
""" [<channel>] .. start watching a target (channel/wave). """
if not event.rest:
target = event.origin
else:
target = event.rest
# you can only watch yourself on xmpp/wave
if '@' in target and not event.userhost == target:
event.reply('you are not allowed to watch %s' % target)
return
watched.subscribe(bot.name, bot.type, event.rest, event.channel)
event.done()
try:
wave = Wave(event.rest)
if wave:
wavebot = fleet.makebot(type='wave')
if wavebot:
wave.say(wavebot, "%s is now watching %s" % (event.channel, event.rest))
except Exception, ex:
handle_exception()
cmnds.add('watcher-start', handle_watcherstart, 'USER')
examples.add('watcher-start', 'start watching a channel/wave. ', 'watcher-start <channel>')
def handle_watcherstop(bot, event):
    """ [<channel>] .. stop watching a channel/wave. """
    # without an argument, stop watching the current channel/wave
    target = event.rest or event.origin
    watched.unsubscribe(bot.name, bot.type, target, event.channel)
    event.done()

cmnds.add('watcher-stop', handle_watcherstop, 'USER')
examples.add('watcher-stop', 'stop watching a channel', 'watcher-stop #dunkbots')
def handle_watcherchannels(bot, event):
    """ see what channels we are watching. """
    chans = watched.channels(event.channel)
    if not chans:
        return
    res = []
    for chan in chans:
        # show the stored description when we have one
        try:
            descr = watched.data.descriptions[chan]
        except KeyError:
            res.append(chan)
        else:
            res.append("%s (%s)" % (chan, descr))
    event.reply("channels watched on %s: " % event.channel, res)

cmnds.add('watcher-channels', handle_watcherchannels, ['USER'])
examples.add('watcher-channels', 'show what channels we are watching', 'watcher-channels')
def handle_watcherlist(bot, event):
    """ show channels that are watching us. """
    # FIX: the original opened the docstring with four quotes (""""),
    # making it start with a stray quote character
    event.reply("watchers for %s: " % event.channel, watched.subscribers(event.channel))

cmnds.add('watcher-list', handle_watcherlist, ['USER'])
examples.add('watcher-list', 'show channels that are watching us. ', 'watcher-list')
| Python |
# commonplugs/tinyurl.py
#
#
""" tinyurl.com feeder """
__author__ = "Wijnand 'tehmaze' Modderman - http://tehmaze.com"
__license__ = 'BSD'
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.utils.url import striphtml, useragent
from gozerlib.examples import examples
## google imports

try:
    from google.appengine.api.memcache import get, set
    import google
except ImportError:
    # outside app engine: stub out memcache so lookups always miss and
    # stores are no-ops (both return "")
    def get(name, *args, **kwargs):
        return ""
    def set(name, value, *args, **kwargs):
        return ""
## simpljejson
from simplejson import dumps, loads
## basic imports
import urllib
import urllib2
import urlparse
import re
import logging
## defines

# matches http(s) urls in message text
re_url_match = re.compile(u'((?:http|https)://\S+)')
# per-bot cache of the last url seen per target: {botname: {target: url}}
urlcache = {}
## functions

def valid_url(url):
    """ check if url is valid """
    if not re_url_match.search(url):
        return False
    parts = urlparse.urlparse(url)
    # rebuild the url from its components: scheme://netloc path ;params ?query
    cleanurl = '%s://%s' % (parts[0], parts[1])
    for (piece, sep) in ((parts[2], ''), (parts[3], ';'), (parts[4], '?')):
        if piece:
            cleanurl = '%s%s%s' % (cleanurl, sep, piece)
    return cleanurl
def precb(bot, ievent):
    """ precondition: only fire the callback when the text contains a url. """
    if re_url_match.search(ievent.txt):
        return 1
def privmsgcb(bot, ievent):
    """ callback for urlcaching """
    match = re_url_match.search(ievent.txt)
    if not match:
        return
    # remember the last url seen per bot/target for argument-less tinyurl
    botcache = urlcache.setdefault(bot.name, {})
    botcache[ievent.target] = match.group(1)

#callbacks.add('PRIVMSG', privmsgcb, precb)
def get_tinyurl(url):
""" grab a tinyurl. """
res = get(url, namespace='tinyurl')
logging.debug('tinyurl - cache - %s' % unicode(res))
if res and res[0] == '[':
return loads(res)
postarray = [
('submit', 'submit'),
('url', url),
]
postdata = urllib.urlencode(postarray)
req = urllib2.Request(url='http://tinyurl.com/create.php', data=postdata)
req.add_header('User-agent', useragent())
try:
res = urllib2.urlopen(req).readlines()
#raise Exception("mekker")
except google.appengine.api.urlfetch_errors.DownloadError, e:
logging.warn('tinyurl - %s - DownloadError: %s' % (url, str(e)))
return
except urllib2.URLError, e:
logging.error('tinyurl - %s - URLError: %s' % (url, str(e)))
return
except urllib2.HTTPError, e:
logging.error('tinyurl - %s - HTTP error: %s' % (url, str(e)))
return
urls = []
for line in res:
if line.startswith('<blockquote><b>'):
urls.append(striphtml(line.strip()).split('[Open')[0])
if len(urls) == 3:
urls.pop(0)
set(url, dumps(urls), namespace='tinyurl')
return urls
def handle_tinyurl(bot, ievent):
    """ get tinyurl from provided url. """
    if ievent.rest:
        url = ievent.rest
    else:
        # fall back to the last url seen in this channel
        try:
            url = urlcache[bot.name][ievent.target]
        except KeyError:
            ievent.missing('<url>')
            return
    url = valid_url(url)
    if not url:
        ievent.reply('invalid or bad URL')
        return
    tinyurl = get_tinyurl(url)
    if tinyurl:
        ievent.reply(' .. '.join(tinyurl))
    else:
        ievent.reply('failed to create tinyurl')

cmnds.add('tinyurl', handle_tinyurl, ['USER', 'GUEST'], threaded=True)
examples.add('tinyurl', 'show a tinyurl', 'tinyurl http://feedprovider.googlecode.com')
| Python |
# waveplugs/ask.py
#
#
""" ask a jabber user a question and relay back the response. """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.callbacks import callbacks
from gozerlib.persist import PlugPersist
from gozerlib.examples import examples
from gozerlib.fleet import fleet
## basic imports
import logging
## defines

# EDIT THIS
# fallback JID that receives questions on unknown subjects
defaultJID = 'bthate@gmail.com'
# END EDIT

# questions: expert jid -> list of [botname, type, asker userhost, channel]
questions = PlugPersist('questions')
# experts: subject -> list of expert jids
experts = PlugPersist('experts')
# subjects: expert jid -> list of subjects handled
subjects = PlugPersist('subjects')
## CALLBACKS

def askprecondition(bot, event):
    """ only fire when the sender has a pending question
        and the text is not a command. """
    if event.iscmnd:
        return
    if event.userhost in questions.data:
        return True
def askcallback(bot, event):
    """ the callback that handles the responses to questions.

        responses are expected to look like "<asker jid>: <answer>".
    """
    sendto = questions.data[event.userhost]
    # responses should start with "jid: txt" .. without a prefix no entry
    # will match below (removed unused jid/channels locals)
    try:
        (printto, txt) = event.txt.split(':', 1)
    except ValueError:
        printto = False
        txt = event.txt
    txt = txt.strip()
    done = []
    # loop over all the sendto items and check if we are waiting for a response
    for botname, type, userhost, channel in sendto:
        # skip entries not addressed by this response
        if not printto or userhost != printto:
            continue
        askbot = fleet.makebot(type)
        if not askbot:
            askbot = fleet.makebot('xmpp')
        logging.debug("ask - %s %s %s %s" % (botname, type, userhost, channel))
        if askbot:
            askbot.say(channel, "%s says: %s" % (event.userhost, txt))
        else:
            logging.warn("ask - can't find %s bot in fleet" % type)
            continue
        # remove response waiting data
        try:
            questions.data[event.userhost].remove([botname, type, userhost, channel])
            questions.save()
        except ValueError:
            pass
        done.append(channel)
        break
    # give reply back to expert whether we delivered the answer
    if done:
        event.reply('answer sent to ', done)
# answers can come in over xmpp messages, exec or the web interface
callbacks.add('MESSAGE', askcallback, askprecondition)
callbacks.add('EXEC', askcallback, askprecondition)
callbacks.add('WEB', askcallback, askprecondition)
## COMMANDS

def handle_ask(bot, event):
    """
    this command lets you ask a question that gets dispatched to jabber
    users that have registered themselves for that particular subject.
    """
    # determine subject and question from command request
    try:
        (subject, question) = event.rest.split(' ', 1)
    except ValueError:
        event.missing('<subject> <question>')
        return
    # lookup the experts on the subject
    try:
        expertslist = experts.data[subject]
    except KeyError:
        # a bare JID can be asked directly, otherwise fall back to the default
        if '@' in subject:
            expertslist = [subject, ]
        else:
            expertslist = [defaultJID, ]
    # ask the question (send xmpp msg to experts)
    try:
        from gozerlib.gae.xmpp.bot import XMPPBot
        xmppbot = XMPPBot()
    except ImportError:
        xmppbot = bot
    xmppbot.say(expertslist, "%s (%s) asks you: %s" % (event.userhost, bot.name, question))
    # register the question so we can wait for the response
    # (removed unused 'asker' local)
    for expert in expertslist:
        if not questions.data.has_key(expert):
            questions.data[expert] = []
        questions.data[expert].append([bot.name, bot.type, event.userhost, event.channel])
        questions.save()
    event.reply('question is sent to %s' % ' .. '.join(expertslist))
    logging.debug('options: %s' % str(event.options))
    if event.options and '-w' in event.options:
        event.reply('wave for this question is created for %s' % event.userhost)
        # FIX: use a separate name .. the original rebound 'bot', shadowing
        # the handler argument
        wavebot = fleet.makebot(type='wave')
        if wavebot:
            newwave = wavebot.newwave(event.context, participants=['feedprovider@appspot.com', event.userhost])
            newwave.SetTitle("ask-bot wave: %s" % question)

cmnds.add('ask', handle_ask, ['USER', 'GUEST'], options={'-w': False})
examples.add('ask', 'ask [group|JID] question .. ask a groups of users a question or use a specific JID', 'ask ask-bot what is the mercurial repository')
def handle_askstop(bot, event):
    """ remove any waiting data for the user giving the command. """
    try:
        del questions.data[event.userhost]
    except KeyError:
        event.reply('no question running')
        return
    # FIX: persist the removal .. the original never saved, so the
    # question reappeared after a reload .. also acknowledge success
    questions.save()
    event.done()

cmnds.add('ask-stop', handle_askstop, ['USER', 'GUEST'])
examples.add('ask-stop', 'stop listening to answers', 'ask-stop')
def handle_askjoin(bot, event):
    """ join the expert list of a subject. """
    if bot.type != 'xmpp':
        event.reply('this command only works in jabber')
        return
    try:
        subject = event.args[0]
    except IndexError:
        event.missing('<subject>')
        return
    # register the user as expert on the subject
    if not experts.data.has_key(subject):
        experts.data[subject] = []
    if not event.userhost in experts.data[subject]:
        experts.data[subject].append(event.userhost)
        experts.save()
    # register the subject for the expert (reverse mapping)
    expert = event.userhost
    if not subjects.data.has_key(expert):
        subjects.data[expert] = []
    # FIX: the original tested event.userhost against experts.data[expert],
    # the wrong mapping .. it could raise KeyError and never deduplicated
    if not subject in subjects.data[expert]:
        subjects.data[expert].append(subject)
        subjects.save()
    event.done()

cmnds.add('ask-join', handle_askjoin, ['USER', 'GUEST'])
examples.add('ask-join', 'ask-join <subject> .. join a subject as an expert', 'ask-join ask-bot')
def handle_askpart(bot, event):
    """ leave the expert list of a subject. """
    if bot.type != 'xmpp':
        event.reply('this command only works in jabber')
        return
    try:
        subject = event.args[0]
    except IndexError:
        event.missing('<subject>')
        # FIX: the original fell through here with 'subject' unbound,
        # raising NameError below
        return
    # best effort removal from both mappings, persisting each change ..
    # the original never called save(), so removals were lost on reload
    try:
        experts.data[subject].remove(event.userhost)
        experts.save()
    except (ValueError, KeyError):
        pass
    try:
        subjects.data[event.userhost].remove(subject)
        subjects.save()
    except (ValueError, KeyError):
        pass
    event.done()

cmnds.add('ask-part', handle_askpart, ['USER', 'GUEST'])
examples.add('ask-part', 'leave the subject expert list', 'ask-part ask-bot')
def handle_asklist(bot, event):
    """ show all available subjects. """
    subjectnames = experts.data.keys()
    event.reply('available subjects: ', subjectnames)

cmnds.add('ask-list', handle_asklist, ['USER', 'GUEST'])
examples.add('ask-list', 'list available subjects', 'ask-list')
def handle_askexperts(bot, event):
    """ show all the experts on a subject. """
    try:
        subject = event.args[0]
    except IndexError:
        event.missing('<subject>')
        return
    try:
        expertlist = experts.data[subject]
    except KeyError:
        event.reply('we dont know any experts on this subject yet')
        return
    event.reply('experts on %s: ' % subject, expertlist)

cmnds.add('ask-experts', handle_askexperts, ['USER', 'GUEST'])
examples.add('ask-experts', 'list all experts on a subject', 'ask-experts ask-bot')
def handle_asksubjects(bot, event):
    """ show all the subjects an expert handles. """
    try:
        expert = event.args[0]
    except IndexError:
        event.missing('<JID>')
        return
    try:
        handled = subjects.data[expert]
    except KeyError:
        event.reply('%s doesnt handle any subjects' % expert)
        return
    event.reply('subjects handled by %s: ' % expert, handled)

cmnds.add('ask-subjects', handle_asksubjects, ['USER', 'GUEST'])
examples.add('ask-subjects', 'list all the subjects an expert handles', 'ask-subjects bthate@gmail.com')
| Python |
# waveplugs/gadget.py
#
#
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.persist import PlugPersist
# persisted mapping of gadget name -> gadget xml url
gadgeturls = PlugPersist('gadgeturls')
# built-in gadgets .. NOTE(review): these are (re)set unconditionally on
# every import, overriding any persisted values for these four keys
gadgeturls.data['gadget'] = 'https://feedprovider.appspot.com/gadget.xml'
gadgeturls.data['poll'] = 'https://feedprovider.appspot.com/poll.xml'
gadgeturls.data['iframe'] = 'https://feedprovider.appspot.com/iframe.xml'
gadgeturls.data['loadiframe'] = 'https://feedprovider.appspot.com/loadiframe.xml'
def loadroot(event, url):
    """ append a gadget with *url* to the root blip.
        returns True on success, False when there is no root blip. """
    if not event.rootblip:
        event.reply("can't find root blip.")
        return False
    from waveapi import element
    event.rootblip.append(element.Gadget(url))
    return True
def load(event, url):
    """ append a gadget with *url* to the current blip.
        returns True on success, False when there is no blip. """
    if event.blip:
        from waveapi import element
        event.blip.append(element.Gadget(url))
        return True
    else:
        # FIX: the original error message said "root blip" (copy-paste
        # from loadroot), but this function operates on the current blip
        event.reply("can't find blip.")
        return False
def handle_gadgetload(bot, event):
    """ load a named gadget into the current blip. """
    if event.type != "wave":
        event.reply("this command only works in google wave.");
        return
    if not event.rest:
        event.missing('<gadgetname>')
        return
    try:
        url = gadgeturls.data[event.rest]
    except KeyError:
        event.reply("we don't have a url for %s" % event.rest)
        return
    if load(event, url):
        event.reply('loaded %s' % url)

cmnds.add("gadget-load", handle_gadgetload, 'USER')
examples.add("gadget-load", "load a gadget into a blip", "gadget-load")
def handle_gadgetloadroot(bot, event):
    """ load a named gadget into the root blip. """
    if event.type != "wave":
        event.reply("this command only works in google wave.");
        return
    if not event.rest:
        event.missing('<gadgetname>')
        return
    try:
        url = gadgeturls.data[event.rest]
    except KeyError:
        event.reply("we don't have a url for %s" % event.rest)
        return
    if loadroot(event, url):
        event.reply('loaded %s' % url)

cmnds.add("gadget-loadroot", handle_gadgetloadroot, 'USER')
examples.add("gadget-loadroot", "load a gadget into the root blip", "gadget-loadroot")
def handle_gadgetiframe(bot, event):
    """ load an url into the iframe gadget. """
    if event.type != "wave":
        event.reply("this command only works in google wave.");
        return
    if not event.rest:
        event.missing('<url>')
        return
    try:
        base = gadgeturls.data['loadiframe']
    except KeyError:
        event.reply("we don't have a iframe url")
        return
    url = base + "?&iframeurl=%s" % event.rest
    event.reply('loading %s' % url)
    load(event, url)

cmnds.add("gadget-iframe", handle_gadgetiframe, 'USER')
examples.add("gadget-iframe", "load a url into a iframe", "gadget-iframe")
def handle_gadgetaddurl(bot, event):
    """ store a gadget url under a name. """
    try:
        (name, url) = event.args
    except ValueError:
        event.missing('<name> <url>')
        return
    if gadgeturls.data.has_key(name):
        event.reply("we already have a %s gadget" % name)
        return
    gadgeturls.data[name] = url
    gadgeturls.save()

cmnds.add("gadget-addurl", handle_gadgetaddurl, 'USER')
examples.add("gadget-addurl", "store a gadget url", "gadget-addurl feedprovider https://feedprovider.appspot.com/iframe.xml")
def handle_gadgetdelurl(bot, event):
    """ delete a gadget url by name.

        FIX: the original unpacked (name, url) and *stored* the url
        instead of deleting the entry, so the command never did what its
        name and example ("gadget-delurl mygadget") promise.
    """
    if not event.rest:
        event.missing('<name>')
        return
    name = event.rest
    try:
        del gadgeturls.data[name]
    except KeyError:
        event.reply("we don't have a url for %s" % name)
        return
    gadgeturls.save()
    event.done()

cmnds.add("gadget-delurl", handle_gadgetdelurl, 'OPER')
examples.add("gadget-delurl", "delete a gadget url", "gadget-delurl mygadget")
def handle_gadgetlist(bot, event):
    """ list the known gadget urls. """
    known = ["%s - %s" % (name, url) for name, url in gadgeturls.data.iteritems()]
    event.reply("available gadgets: ", known)

cmnds.add("gadget-list", handle_gadgetlist, 'USER')
examples.add("gadget-list", "list known gadget urls", "gadget-list")
| Python |
# feedprovider wave plugins
#
#

""" this package contains all wave related plugins. """

import os

# build __all__ from the .py files and subpackages in this directory
(f, tail) = os.path.split(__file__)
__all__ = []
for i in os.listdir(f):
    if i.endswith('.py'):
        __all__.append(i[:-3])
    elif os.path.isdir(f + os.sep + i) and not i.startswith('.'):
        __all__.append(i)

# the package init itself is not a plugin .. the original used a bare
# except here, which would also mask unrelated errors
try:
    __all__.remove('__init__')
except ValueError:
    pass

__plugs__ = __all__
| Python |
# waveplugs/clone.py
#
#
""" clone the wave after x blips. """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.callbacks import callbacks
from gozerlib.gae.wave.waves import Wave
from gozerlib.plugins import plugs
## basic imports
import logging
## callbacks

def clonecallback(bot, event):
    """ clone the wave once the number of seen blips passes its threshold. """
    if event.type != "wave":
        return
    wave = event.chan
    threshold = wave.data.threshold
    # -1 means cloning is disabled for this wave
    if threshold == -1 or wave.data.seenblips <= threshold:
        return
    # disable further cloning on this wave before kicking off the clone
    wave.data.threshold = -1
    newwave = wave.clone(bot, event, event.title)
    plugs.load('commonplugs.hubbub')
    feeds = plugs['commonplugs.hubbub'].watcher.clone(bot.name, bot.type, newwave.data.waveid, event.waveid)
    event.reply("this wave is continued to %s with the following feeds: %s" % (newwave.data.url, feeds))

callbacks.add("BLIP_SUBMITTED", clonecallback)
callbacks.add('OUTPUT', clonecallback)
| Python |
# waveplugs/wave.py
#
#
""" wave related commands. """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.utils.exception import handle_exception
from gozerlib.persist import PlugPersist
from gozerlib.callbacks import callbacks
from gozerlib.plugins import plugs
from gozerlib.gae.wave.waves import Wave
## basic imports
import logging
def handle_waveclone(bot, event):
    """ clone the current wave and move its feeds over. """
    if event.type != "wave":
        event.reply("this command only works in google wave.");
        return
    event.reply("cloning ...")
    cloned = event.chan.clone(bot, event, event.root.title.strip())
    if not cloned:
        event.reply("can't create new wave")
        return
    plugs.load('commonplugs.hubbub')
    feeds = plugs['commonplugs.hubbub'].watcher.clone(bot.name, bot.type, cloned.data.waveid, event.waveid)
    event.reply("this wave is continued to %s with the following feeds: %s" % (cloned.data.url, feeds))

cmnds.add('wave-clone', handle_waveclone, 'USER')
examples.add('wave-clone', 'clone the wave', 'wave-clone')
def handle_wavenew(bot, event):
    """ create a new wave, optionally titled with the given text. """
    if event.type != "wave":
        event.reply("this command only works in google wave.");
        return
    participants = ['feedprovider@appspot.com', event.userhost]
    newwave = bot.newwave(event.domain, participants)
    if event.rest:
        newwave.SetTitle(event.rest)
    event.done()

cmnds.add('wave-new', handle_wavenew, 'USER')
examples.add('wave-new', 'make a new wave', 'wave-new')
def handle_wavepublic(bot, event):
    """ make the wave public by adding the public@a.gwave.com participant. """
    if event.type == "wave":
        event.root.participants.add('public@a.gwave.com')
        event.done()
    else:
        event.reply("this command only works in google wave.");

cmnds.add('wave-public', handle_wavepublic, 'USER')
examples.add('wave-public', 'make the wave public', 'wave-public')
def handle_waveinvite(bot, event):
    """ invite a user or bot into the wave. """
    if event.type != "wave":
        event.reply("this command only works in google wave.");
        return
    who = event.rest
    if not who:
        event.missing('<who>')
        return
    event.root.participants.add(who)
    event.done()

cmnds.add('wave-invite', handle_waveinvite, 'USER')
examples.add('wave-invite', 'invite a user/bot into the wave', 'wave-invite bthate@googlewave.com')
def handle_waveid(bot, event):
    """ show the id of the current wave. """
    if event.type == "wave":
        event.reply(event.waveid)
    else:
        event.reply("this command only works in google wave.");

cmnds.add('wave-id', handle_waveid, 'USER')
examples.add('wave-id', 'show the id of the wave the command is given in.', 'wave-id')
def handle_waveurl(bot, event):
    """ show the url of the current wave. """
    if event.type == "wave":
        event.reply(event.url)
    else:
        event.reply("this command only works in google wave.");

cmnds.add('wave-url', handle_waveurl, 'USER')
examples.add('wave-url', 'show the url of the wave the command is given in.', 'wave-url')
def handle_waveparticipants(bot, event):
    """ show the participants of the current wave. """
    if event.type == "wave":
        event.reply("participants: ", list(event.root.participants))
    else:
        event.reply("this command only works in google wave.");

cmnds.add('wave-participants', handle_waveparticipants, 'USER')
examples.add('wave-participants', 'show the participants of the wave the command is given in.', 'wave-participants')
def handle_wavepart(bot, event):
    """ leave the wave. """
    if event.type != "wave":
        event.reply("this command only works in google wave.");
        return
    # NOTE(review): this only says goodbye .. the robot does not remove
    # itself from the wave here; confirm whether the wave API handles the
    # actual part elsewhere
    event.reply('bye')
cmnds.add('wave-part', handle_wavepart, 'OPER')
examples.add('wave-part', 'leave the wave', 'wave-part')
def handle_wavetitle(bot, event):
    """ set the title of the current wave. """
    if event.type != "wave":
        event.reply("this command only works in google wave.");
        return
    title = event.rest
    if not title:
        event.missing("<title>")
        return
    event.set_title(title)
    event.reply('done')

cmnds.add('wave-title', handle_wavetitle, 'OPER')
examples.add('wave-title', 'set title of the wave', 'wave-title')
def handle_wavedata(bot, event):
    """ show the stored data of the current wave (minus secrets/blobs). """
    if event.type != "wave":
        event.reply("this command only works in google wave.");
        return
    wave = event.chan
    if wave:
        data = dict(wave.data)
        # don't leak credentials or dump the raw json blob .. pop with a
        # default so a missing key can't raise KeyError (the original used
        # bare del)
        data.pop('passwords', None)
        data.pop('json_data', None)
        event.reply(str(data))
    else:
        # FIX: the original dereferenced wave.waveid here, which blows up
        # when wave is falsy .. use the event's waveid instead
        event.reply("can't fetch wave data of wave %s" % event.waveid)

cmnds.add('wave-data', handle_wavedata, 'OPER')
examples.add('wave-data', 'show the waves stored data', 'wave-data')
def handle_wavethreshold(bot, event):
    """ get or set the number of blips after which the wave is cloned. """
    if event.type != "wave":
        event.reply("this command only works in google wave.");
        return
    try:
        nrblips = int(event.rest)
    except ValueError:
        nrblips = -1
    wave = event.chan
    if not wave:
        return
    if nrblips == -1:
        # no (valid) number given: just report the current threshold
        event.reply('threshold of "%s" is %s' % (wave.data.title, str(wave.data.threshold)))
    else:
        wave.data.threshold = nrblips
        wave.save()
        event.reply('threshold of "%s" set to %s' % (wave.data.title, str(wave.data.threshold)))

cmnds.add('wave-threshold', handle_wavethreshold, 'OPER')
examples.add('wave-threshold', 'set nr of blips after which we clone the wave', 'wave-threshold')
| Python |
# handler_hubbub.py
#
#
## gozerlib imports
from gozerlib.contrib import feedparser
from gozerlib.utils.generic import getversion
from gozerlib.plugins import plugs
## google imports
from google.appengine.api import urlfetch
from google.appengine.api import xmpp
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import xmpp_handlers
## basic imports
import base64
import logging
import urllib
import urlparse
import uuid
logging.warn(getversion('HUBBUB'))

# make sure the hubbub plugin is loaded .. it does the actual feed handling
if not plugs.has_key("commonplugs.hubbub"):
    p = plugs.load("commonplugs.hubbub")
else:
    p = plugs["commonplugs.hubbub"]
class CallbackHandler(webapp.RequestHandler):

    """ pubsubhubbub callback endpoint.

        GET answers the hub's (un)subscribe verification challenge, POST
        receives content notifications and hands them to the hubbub plugin.
    """

    def get(self):
        """ answer the hub's subscribe/unsubscribe challenge. """
        logging.warn('hubbub - incoming GET')
        # use .get() so a request without hub.mode yields a clean 400 ..
        # the original indexed GET[] directly, turning a missing parameter
        # into an unhandled KeyError (500)
        mode = self.request.get('hub.mode')
        if mode not in ('subscribe', 'unsubscribe'):
            self.error(400)
            return
        # echo the challenge back to confirm the (un)subscription
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write(self.request.get('hub.challenge'))

    def post(self):
        """Handles new content notifications."""
        logging.warn("hubbub - incoming POST")
        try:
            p.watcher.incoming(self.request.body)
        except IndexError:
            logging.error("hubbub plugin did not load properly")
# wsgi wiring: the hub posts notifications to /hubbub
application = webapp.WSGIApplication([('/(?:hubbub)', CallbackHandler)], debug=False)

def main():
    run_wsgi_app(application)

if __name__ == '__main__':
    main()
| Python |
# handler_remote.py
#
#
""" FEEDPROVIDER Remote Events Network. """
import time

# record module load time (useful for uptime reporting)
starttime = time.time()
## gozerlib imports
from gozerlib.remote.bot import RemoteBot
from gozerlib.remote.event import RemoteEvent
from gozerlib.utils.generic import fromenc, toenc, getversion
from gozerlib.utils.xmpp import stripped
from gozerlib.plugins import plugs
from gozerlib.persist import Persist
from gozerlib.utils.exception import handle_exception
from gozerlib.boot import boot
## google imports
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.api import users as gusers
## simplejson import
from simplejson import loads
## basic imports
import wsgiref.handlers
import sys
import time
import types
import os
import logging
logging.info(getversion('REMOTE'))
#boot()
# the remote bot instance used to dispatch incoming events
bot = RemoteBot()
#plugs.loadall()
class EventNetHandler(webapp.RequestHandler):

    """ the bots remote event dispatcher. """

    def post(self):
        """ this is where the command gets dispatched. """
        logging.debug("REMOTE incoming: %s" % self.request.remote_addr)
        event = RemoteEvent()
        # build the event straight off the request/response pair
        event.parse(self.response, self.request)
        event.bot = bot
        event.title = event.channel
        try:
            event.bot.doevent(event)
        except Exception, ex:
            handle_exception(event)

    # GET requests are handled exactly like POST requests
    get = post
# the application
application = webapp.WSGIApplication([('/remote', EventNetHandler),
                                      ('/remote/', EventNetHandler)],
                                     debug=True)

def main():
    """ wsgi entry point. """
    # the original declared 'global bot' and 'global application' without
    # assigning either .. the statements were no-ops and have been removed
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to run all unit tests in this package."""
import blip_test
import element_test
import module_test_runner
import ops_test
import robot_test
import util_test
import wavelet_test
def RunUnitTests():
    """Runs all registered unit tests."""
    runner = module_test_runner.ModuleTestRunner()
    runner.modules = [blip_test, element_test, ops_test,
                      robot_test, util_test, wavelet_test]
    runner.RunAllTests()


if __name__ == "__main__":
    RunUnitTests()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Tests for google3.walkabout.externalagents.api.commandline_robot_runner."""
__author__ = 'douwe@google.com (Douwe Osinga)'
import StringIO
from google3.pyglib import app
from google3.pyglib import flags
from google3.testing.pybase import googletest
from google3.walkabout.externalagents.api import commandline_robot_runner
from google3.walkabout.externalagents.api import events
FLAGS = flags.FLAGS

# wire-format fixtures mirroring what the wave server sends the robot
# runner: one blip, its wavelet, and a participants-changed event
BLIP_JSON = ('{"wdykLROk*13":'
             '{"lastModifiedTime":1242079608457,'
             '"contributors":["someguy@test.com"],'
             '"waveletId":"test.com!conv+root",'
             '"waveId":"test.com!wdykLROk*11",'
             '"parentBlipId":null,'
             '"version":3,'
             '"creator":"someguy@test.com",'
             '"content":"\\nContent!",'
             '"blipId":"wdykLROk*13",'
             '"annotations":[{"range":{"start":0,"end":1},'
             '"name":"user/e/otherguy@test.com","value":"Other"}],'
             '"elements":{},'
             '"childBlipIds":[]}'
             '}')

WAVELET_JSON = ('{"lastModifiedTime":1242079611003,'
                '"title":"A title",'
                '"waveletId":"test.com!conv+root",'
                '"rootBlipId":"wdykLROk*13",'
                '"dataDocuments":null,'
                '"creationTime":1242079608457,'
                '"waveId":"test.com!wdykLROk*11",'
                '"participants":["someguy@test.com","monty@appspot.com"],'
                '"creator":"someguy@test.com",'
                '"version":5}')

EVENTS_JSON = ('[{"timestamp":1242079611003,'
               '"modifiedBy":"someguy@test.com",'
               '"properties":{"participantsRemoved":[],'
               '"participantsAdded":["monty@appspot.com"]},'
               '"type":"WAVELET_PARTICIPANTS_CHANGED"}]')

# complete request payload assembled from the pieces above
TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % (
    BLIP_JSON, WAVELET_JSON, EVENTS_JSON)
class CommandlineRobotRunnerTest(googletest.TestCase):

  """Exercises the commandline robot runner end to end on canned JSON."""

  def testSimpleFlow(self):
    # configure an eventdef for the participants-changed event that
    # retitles the wavelet, then feed the fixture through the runner
    FLAGS.eventdef_wavelet_participants_changed = 'x'
    flag = 'eventdef_' + events.WaveletParticipantsChanged.type.lower()
    setattr(FLAGS, flag, 'w.title="New title!"')
    stdin = StringIO.StringIO(TEST_JSON)
    stdout = StringIO.StringIO()
    commandline_robot_runner.run_bot(stdin, stdout)
    self.assertTrue('wavelet.setTitle' in stdout.getvalue())
def main(unused_argv):
    """ googletest entry point. """
    googletest.main()

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the util module."""
__author__ = 'davidbyttow@google.com (David Byttow)'
import unittest
import ops
import util
class TestUtils(unittest.TestCase):
  """Tests utility functions."""

  def testIsIterable(self):
    # Containers count as iterable; scalars, strings and classes do not
    # (strings are deliberately excluded by util.is_iterable).
    for value in ([], {}, set(), ()):
      self.assertTrue(util.is_iterable(value))
    for value in (42, 'list?', object):
      self.assertFalse(util.is_iterable(value))

  def testIsDict(self):
    cases = [([], False), ({}, True), (set(), False), ((), False),
             (42, False), ('dict?', False), (object, False)]
    for value, expected in cases:
      self.assertEqual(expected, util.is_dict(value))

  def testIsUserDefinedNewStyleClass(self):
    class OldClass:
      pass
    class NewClass(object):
      pass
    self.assertFalse(util.is_user_defined_new_style_class(OldClass()))
    self.assertTrue(util.is_user_defined_new_style_class(NewClass()))
    for value in ({}, (), 42, 'instance?'):
      self.assertFalse(util.is_user_defined_new_style_class(value))

  def testLowerCamelCase(self):
    # (expected, input) pairs; underscores separate words, runs collapse.
    cases = [('foo', 'foo'), ('fooBar', 'foo_bar'), ('fooBar', 'fooBar'),
             ('blipId', 'blip_id'), ('fooBar', 'foo__bar'),
             ('fooBarBaz', 'foo_bar_baz'), ('f', 'f'), ('f', 'f_'),
             ('', ''), ('', '_'), ('aBCDEF', '_a_b_c_d_e_f_')]
    for expected, source in cases:
      self.assertEqual(expected, util.lower_camel_case(source))

  def testUpperCamelCase(self):
    cases = [('Foo', 'foo'), ('FooBar', 'foo_bar'), ('FooBar', 'foo__bar'),
             ('FooBarBaz', 'foo_bar_baz'), ('F', 'f'), ('F', 'f_'),
             ('', ''), ('', '_'), ('ABCDEF', '_a_b_c_d_e_f_')]
    for expected, source in cases:
      self.assertEqual(expected, util.upper_camel_case(source))

  def assertListsEqual(self, a, b):
    """Asserts both sequences have equal length and pairwise-equal items."""
    self.assertEqual(len(a), len(b))
    for left, right in zip(a, b):
      self.assertEqual(left, right)

  def assertDictsEqual(self, a, b):
    """Asserts both dicts are the same size and agree on all of a's keys."""
    self.assertEqual(len(a.keys()), len(b.keys()))
    for key in a:
      self.assertEqual(a[key], b[key])

  def testSerializeList(self):
    data = [1, 2, 3]
    self.assertListsEqual(data, util.serialize(data))

  def testSerializeDict(self):
    data = {'key': 'value'}
    self.assertDictsEqual(data, util.serialize(data))

  def testSerializeAttributes(self):
    class Data(object):
      def __init__(self):
        self.public = 1
        self._protected = 2
        self.__private = 3
      def Func(self):
        pass
    serialized = util.serialize(Data())
    # Functions and non-public fields should not be serialized.
    self.assertEqual(1, len(serialized.keys()))
    self.assertEqual(1, serialized['public'])

  def testStringEnum(self):
    util.StringEnum()  # an empty enum is allowed
    single = util.StringEnum('foo')
    self.assertEqual('foo', single.foo)
    multi = util.StringEnum('foo', 'bar')
    self.assertEqual('foo', multi.foo)
    self.assertEqual('bar', multi.bar)

  def testParseMarkup(self):
    # (expected plain text, markup input): tags stripped, br/p become newlines.
    cases = [('foo', 'foo'),
             ('foo bar', 'foo <b>bar</b>'),
             ('foo\nbar', 'foo<br>bar'),
             ('foo\nbar', 'foo<p indent="3">bar')]
    for expected, markup in cases:
      self.assertEqual(expected, util.parse_markup(markup))
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for operations that can be applied to the server.
Contains classes and utilities for creating operations that are to be
applied on the server.
"""
import errors
import random
import util
import sys
# Version of the wire protocol spoken by this client library.
PROTOCOL_VERSION = '0.2'

# Operation Types
# Each value is the JSON-RPC method name sent to the server for that
# operation (see Operation.serialize below).
WAVELET_APPEND_BLIP = 'wavelet.appendBlip'
WAVELET_SET_TITLE = 'wavelet.setTitle'
WAVELET_ADD_PARTICIPANT = 'wavelet.participant.add'
WAVELET_DATADOC_SET = 'wavelet.datadoc.set'
WAVELET_MODIFY_TAG = 'wavelet.modifyTag'
BLIP_CREATE_CHILD = 'blip.createChild'
BLIP_DELETE = 'blip.delete'
DOCUMENT_APPEND_MARKUP = 'document.appendMarkup'
DOCUMENT_INLINE_BLIP_INSERT = 'document.inlineBlip.insert'
DOCUMENT_MODIFY = 'document.modify'
ROBOT_CREATE_WAVELET = 'robot.createWavelet'
ROBOT_FETCH_WAVE = 'robot.fetchWave'
ROBOT_NOTIFY_CAPABILITIES_HASH = 'robot.notifyCapabilitiesHash'
class Operation(object):
  """A single generic operation destined for the server.

  The payload interpretation depends on the operation type. An Operation
  can be used directly, but doing so gives no local, transient reflection
  of state on the blips: queuing a 'delete blip' operation, for instance,
  does not remove the blip from the local context for this session. Prefer
  the OpBased model classes for that.
  """

  def __init__(self, method, opid, params):
    """Initializes this operation with contextual data.

    Args:
      method: Method to call or type of operation.
      opid: The id of the operation. Any callbacks will refer to these.
      params: An operation type dependent dictionary
    """
    self.method = method
    self.id = opid
    self.params = params

  def __str__(self):
    return '%s[%s]%s' % (self.method, self.id, str(self.params))

  def set_param(self, param, value):
    """Stores one parameter; returns self so calls can be chained."""
    self.params[param] = value
    return self

  def serialize(self, method_prefix=''):
    """Serialize the operation to a plain dict.

    Args:
      method_prefix: prefixed for each method name to allow for specifying
        a namespace.

    Returns:
      a dict representation of the operation.
    """
    prefix = method_prefix
    # Guarantee exactly one dot between a non-empty prefix and the method.
    if prefix and not prefix.endswith('.'):
      prefix += '.'
    return {'method': prefix + self.method,
            'id': self.id,
            'params': util.serialize(self.params)}

  def set_optional(self, param, value):
    """Sets an optional parameter.

    A None or "" value is a no op; anything else is forwarded to set_param.
    """
    if value is None or value == '':
      return self
    return self.set_param(param, value)
class OperationQueue(object):
  """Wraps the queuing of operations using easily callable functions.

  The operation queue wraps single operations as functions and queues the
  resulting operations in-order. Typically there shouldn't be a need to
  call this directly unless operations are needed on entities outside
  of the scope of the robot. For example, to modify a blip that
  does not exist in the current context, you might specify the wave, wavelet
  and blip id to generate an operation.

  Any calls to this will not be reflected in the robot in any way.
  For example, calling wavelet_append_blip will not result in a new blip
  being added to the robot, only an operation to be applied on the
  server.
  """

  # Some class global counters:
  # Shared by all queue instances so every operation created in this
  # process gets a unique id.
  _next_operation_id = 1

  def __init__(self, proxy_for_id=None):
    # Queued Operation instances. Name-mangled; proxy_for() below shares
    # this exact list object between queue instances.
    self.__pending = []
    self._capability_hash = 0
    self._proxy_for_id = proxy_for_id

  def _new_blipdata(self, wave_id, wavelet_id, initial_content='',
                    parent_blip_id=None):
    """Creates JSON of the blip used for this session."""
    # Temporary id; the server assigns the real blip id on creation.
    temp_blip_id = 'TBD_%s_%s' % (wavelet_id,
                                  hex(random.randint(0, sys.maxint)))
    return {'waveId': wave_id,
            'waveletId': wavelet_id,
            'blipId': temp_blip_id,
            'content': initial_content,
            'parentBlipId': parent_blip_id}

  def _new_waveletdata(self, domain, participants):
    """Creates an ephemeral WaveletData instance used for this session.

    Args:
      domain: the domain to create the data for.
      participants: the participants initially on the wavelet.

    Returns:
      Blipdata (for the rootblip), WaveletData.
    """
    wave_id = domain + '!TBD_%s' % hex(random.randint(0, sys.maxint))
    wavelet_id = domain + '!conv+root'
    root_blip_data = self._new_blipdata(wave_id, wavelet_id)
    participants = set(participants)
    wavelet_data = {'waveId': wave_id,
                    'waveletId': wavelet_id,
                    'rootBlipId': root_blip_data['blipId'],
                    'participants': participants}
    return root_blip_data, wavelet_data

  def __len__(self):
    return len(self.__pending)

  def __iter__(self):
    return self.__pending.__iter__()

  def clear(self):
    # NOTE(review): rebinds rather than clearing in place, which detaches
    # this queue from any views created via proxy_for() — confirm intended.
    self.__pending = []

  def proxy_for(self, proxy):
    """Return a view of this operation queue with the proxying for set to proxy.

    This method returns a new instance of an operation queue that shares the
    operation list, but has a different proxying_for_id set so the robot using
    this new queue will send out operations with the proxying_for field set.
    """
    res = OperationQueue()
    # Share the pending list (not a copy): ops queued on either instance
    # are visible to both.
    res.__pending = self.__pending
    res._capability_hash = self._capability_hash
    res._proxy_for_id = proxy
    return res

  def set_capability_hash(self, capability_hash):
    """Sets the capabilities hash reported in the next serialize() call."""
    self._capability_hash = capability_hash

  def serialize(self):
    """Serializes the queue, prefixed with a notifyCapabilitiesHash op."""
    # Operation '0' always reports the capabilities version this robot
    # was built against.
    first = Operation(ROBOT_NOTIFY_CAPABILITIES_HASH,
                      '0',
                      {'capabilitiesHash': self._capability_hash})
    operations = [first] + self.__pending
    res = util.serialize(operations)
    return res

  def copy_operations(self, other_queue):
    """Copy the pending operations from other_queue into this one."""
    for op in other_queue:
      self.__pending.append(op)

  def new_operation(self, method, wave_id, wavelet_id, props=None, **kwprops):
    """Creates and adds a new operation to the operation list."""
    if props is None:
      props = {}
    props.update(kwprops)
    props['waveId'] = wave_id
    props['waveletId'] = wavelet_id
    if self._proxy_for_id:
      props['proxyingFor'] = self._proxy_for_id
    # Ids are drawn from the class-level counter so they are unique even
    # across multiple queues (and shared proxy views).
    operation = Operation(method,
                          'op%s' % OperationQueue._next_operation_id,
                          props)
    self.__pending.append(operation)
    OperationQueue._next_operation_id += 1
    return operation

  def wavelet_append_blip(self, wave_id, wavelet_id, initial_content=''):
    """Appends a blip to a wavelet.

    Args:
      wave_id: The wave id owning the containing wavelet.
      wavelet_id: The wavelet id that this blip should be appended to.
      initial_content: optionally the content to start with

    Returns:
      JSON representing the information of the new blip.
    """
    blip_data = self._new_blipdata(wave_id, wavelet_id, initial_content)
    self.new_operation(WAVELET_APPEND_BLIP, wave_id,
                       wavelet_id, blipData=blip_data)
    return blip_data

  def wavelet_add_participant(self, wave_id, wavelet_id, participant_id):
    """Adds a participant to a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      participant_id: Id of the participant to add.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_ADD_PARTICIPANT, wave_id, wavelet_id,
                              participantId=participant_id)

  def wavelet_datadoc_set(self, wave_id, wavelet_id, name, data):
    """Sets a key/value pair on the data document of a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      name: The key name for this data.
      data: The value of the data to set.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_DATADOC_SET, wave_id, wavelet_id,
                              datadocName=name, datadocValue=data)

  def robot_create_wavelet(self, domain, participants=None, message=''):
    """Creates a new wavelet.

    Args:
      domain: the domain to create the wave in
      participants: initial participants on this wavelet or None if none
      message: an optional payload that is returned with the corresponding
        event.

    Returns:
      data for the root_blip, wavelet
    """
    if participants is None:
      participants = []
    blip_data, wavelet_data = self._new_waveletdata(domain, participants)
    op = self.new_operation(ROBOT_CREATE_WAVELET,
                            wave_id=wavelet_data['waveId'],
                            wavelet_id=wavelet_data['waveletId'],
                            waveletData=wavelet_data)
    op.set_optional('message', message)
    return blip_data, wavelet_data

  def robot_fetch_wave(self, wave_id, wavelet_id):
    """Requests a snapshot of the specified wave.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(ROBOT_FETCH_WAVE, wave_id, wavelet_id)

  def wavelet_set_title(self, wave_id, wavelet_id, title):
    """Sets the title of a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      title: The title to set.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_SET_TITLE, wave_id, wavelet_id,
                              waveletTitle=title)

  def wavelet_modify_tag(self, wave_id, wavelet_id, tag, modify_how=None):
    """Modifies a tag in a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      tag: The tag (a string).
      modify_how: (optional) how to apply the tag. The default is to add
        the tag. Specify 'remove' to remove. Specify None or 'add' to
        add.

    Returns:
      The operation created.
    """
    # NOTE(review): other operations use camelCase parameter keys; confirm
    # the server expects snake_case "modify_how" here.
    return self.new_operation(WAVELET_MODIFY_TAG, wave_id, wavelet_id,
                              name=tag).set_optional("modify_how", modify_how)

  def blip_create_child(self, wave_id, wavelet_id, blip_id):
    """Creates a child blip of another blip.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      JSON of blip for which further operations can be applied.
    """
    blip_data = self._new_blipdata(wave_id, wavelet_id, parent_blip_id=blip_id)
    self.new_operation(BLIP_CREATE_CHILD, wave_id, wavelet_id,
                       blipId=blip_id,
                       blipData=blip_data)
    return blip_data

  def blip_delete(self, wave_id, wavelet_id, blip_id):
    """Deletes the specified blip.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(BLIP_DELETE, wave_id, wavelet_id, blipId=blip_id)

  def document_append_markup(self, wave_id, wavelet_id, blip_id, content):
    """Appends content with markup to a document.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.
      content: The markup content to append.

    Returns:
      The operation created.
    """
    return self.new_operation(DOCUMENT_APPEND_MARKUP, wave_id, wavelet_id,
                              blipId=blip_id, content=content)

  def document_modify(self, wave_id, wavelet_id, blip_id):
    """Creates and queues a document modify operation

    The returned operation still needs to be filled with details before
    it makes sense.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(DOCUMENT_MODIFY,
                              wave_id,
                              wavelet_id,
                              blipId=blip_id)

  def document_inline_blip_insert(self, wave_id, wavelet_id, blip_id, position):
    """Inserts an inline blip at a specific location.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.
      position: The position in the document to insert the blip.

    Returns:
      JSON data for the blip that was created for further operations.
    """
    inline_blip_data = self._new_blipdata(wave_id, wavelet_id)
    inline_blip_data['parentBlipId'] = blip_id
    self.new_operation(DOCUMENT_INLINE_BLIP_INSERT, wave_id, wavelet_id,
                       blipId=blip_id,
                       index=position,
                       blipData=inline_blip_data)
    return inline_blip_data
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defines the ModuleTestRunnerClass."""
import unittest
class ModuleTestRunner(object):
  """Responsible for executing all test cases in a list of modules."""

  def __init__(self, module_list=None, module_test_settings=None):
    """Remembers the modules to run and the settings to apply to them.

    Args:
      module_list: modules whose tests should be executed.
      module_test_settings: mapping of attribute name to value, pushed onto
        each module before its tests run.
    """
    self.modules = module_list or []
    self.settings = module_test_settings or {}

  def RunAllTests(self):
    """Executes all tests present in the list of modules."""
    runner = unittest.TextTestRunner()
    for module in self.modules:
      # Apply the configured settings to the module first so its tests can
      # read them; a failed setattr is reported but is not fatal.
      for name, value in self.settings.items():
        try:
          setattr(module, name, value)
        except AttributeError:
          print('\nError running ' + str(name))
      print('\nRunning all tests in module ' + module.__name__)
      runner.run(unittest.defaultTestLoader.loadTestsFromModule(module))
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to run wave robots on app engine."""
import logging
import sys
import events
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class GetHandler(webapp.RequestHandler):
  """Handler to forward a GET request to a method of a robot."""

  def __init__(self, method, contenttype):
    """Initializes this handler with a specific robot.

    Args:
      method: zero-argument callable whose return value is the response body.
      contenttype: mime type the response is served as.
    """
    self._method = method
    self._contenttype = contenttype

  def get(self):
    """Handles HTTP GET request."""
    self.response.headers['Content-Type'] = self._contenttype
    self.response.out.write(self._method())
class RobotEventHandler(webapp.RequestHandler):
  """Handler for the dispatching of events to various handlers to a robot.

  This handler only responds to post events with a JSON post body. Its primary
  task is to separate out the context data from the events in the post body
  and dispatch all events in order. Once all events have been dispatched
  it serializes the context data and its associated operations as a response.
  """

  def __init__(self, robot):
    """Initializes self with a specific robot."""
    self._robot = robot

  def get(self):
    """Handles the get event for debugging.

    This is useful for debugging but since event bundles tend to be
    rather big it often won't fit for more complex requests.
    """
    ops = self.request.get('events')
    if ops:
      # Bug fix: forward the 'events' query parameter value itself.
      # Previously the imported `events` module object was assigned here,
      # which broke this debugging path.
      self.request.body = ops
      self.post()

  def post(self):
    """Handles HTTP POST requests."""
    json_body = self.request.body
    if not json_body:
      # TODO(davidbyttow): Log error?
      return
    # Redirect stdout to stderr while executing handlers. This way, any stray
    # "print" statements in bot code go to the error logs instead of breaking
    # the JSON response sent to the HTTP channel.
    saved_stdout, sys.stdout = sys.stdout, sys.stderr
    try:
      json_body = unicode(json_body, 'utf8')
      logging.info('Incoming: %s', json_body)
      json_response = self._robot.process_events(json_body)
      logging.info('Outgoing: %s', json_response)
    finally:
      # Always restore stdout, even when a handler raises, so later
      # requests served by this process are not left writing to stderr.
      sys.stdout = saved_stdout
    # Build the response.
    self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
    self.response.out.write(json_response.encode('utf-8'))
def operation_error_handler(event, wavelet):
  """Default operation error handler, logging what went wrong."""
  # Guard clause: anything other than an OperationError is ignored.
  if not isinstance(event, events.OperationError):
    return
  logging.error('Previously operation failed: id=%s, message: %s',
                event.operation_id, event.error_message)
def appengine_post(url, data, headers):
  """POSTs data to url via App Engine urlfetch.

  Returns:
    A (status_code, response_body) tuple.
  """
  response = urlfetch.fetch(url=url,
                            payload=data,
                            method='POST',
                            headers=headers,
                            deadline=10)
  return response.status_code, response.content
class RobotVerifyTokenHandler(webapp.RequestHandler):
  """Handler for the token_verify request."""

  def __init__(self, robot):
    """Initializes self with a specific robot."""
    self._robot = robot

  def get(self):
    """Handles the get event for debugging. Ops usually too long."""
    token, st = self._robot.get_verification_token_info()
    # Bug fix: log lazily with %s instead of string concatenation, which
    # raised TypeError when token was None — before the 404 below could
    # be served.
    logging.info('token=%s', token)
    if token is None:
      self.error(404)
      self.response.out.write('No token set')
      return
    if st is not None:
      # An 'st' value was configured; the request must echo it back.
      if self.request.get('st') != st:
        self.response.out.write('Invalid st value passed')
        return
    self.response.out.write(token)
def create_robot_webapp(robot, debug=False, extra_handlers=None):
  """Returns an instance of webapp.WSGIApplication with robot handlers."""
  # App Engine instantiates handler classes with no arguments, so each
  # route binds its constructor arguments through a closure.
  handlers = [
      ('/_wave/capabilities.xml',
       lambda: GetHandler(robot.capabilities_xml, 'application/xml')),
      ('/_wave/robot/profile',
       lambda: GetHandler(robot.profile_json, 'application/json')),
      ('/_wave/robot/jsonrpc',
       lambda: RobotEventHandler(robot)),
      ('/_wave/verify_token',
       lambda: RobotVerifyTokenHandler(robot)),
  ]
  return webapp.WSGIApplication(handlers + (extra_handlers or []),
                                debug=debug)
def run(robot, debug=False, log_errors=True, extra_handlers=None):
  """Sets up the webapp handlers for this robot and starts listening.

  A robot is typically setup in the following steps:
    1. Instantiate and define robot.
    2. Register various handlers that it is interested in.
    3. Call Run, which will setup the handlers for the app.

  For example:
    robot = Robot('Terminator',
                  image_url='http://www.sky.net/models/t800.png',
                  profile_url='http://www.sky.net/models/t800.html')
    robot.register_handler(WAVELET_PARTICIPANTS_CHANGED, KillParticipant)
    run(robot)

  Args:
    robot: the robot to run. This robot is modified to use app engines
        urlfetch for posting http.
    debug: Optional variable that defaults to False and is passed through
        to the webapp application to determine if it should show debug info.
    log_errors: Optional flag that defaults to True and determines whether
        a default handlers to catch errors should be setup that uses the
        app engine logging to log errors.
    extra_handlers: Optional list of tuples that are passed to the webapp
        to install more handlers. For example, passing
        [('/about', AboutHandler),] would install an extra about handler
        for the robot.
  """
  # App Engine expects to construct a class with no arguments, so we
  # pass a lambda that constructs the appropriate handler with
  # arguments from the enclosing scope.
  if log_errors:
    robot.register_handler(events.OperationError, operation_error_handler)
  # Side effect on the caller's robot: outgoing HTTP is routed via urlfetch.
  robot.http_post = appengine_post
  app = create_robot_webapp(robot, debug, extra_handlers)
  run_wsgi_app(app)
| Python |
"""JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']

# Matches a JSON number: integer part with no leading zeros, optional
# fraction, optional exponent (groups: integer, frac, exp).
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
    """Build a pure-Python ``scan_once(string, idx)`` closure for *context*.

    The closure decodes one JSON value starting at ``idx`` and returns a
    ``(value, end_index)`` tuple; it raises StopIteration when no value
    can be parsed there (the decoder reports that as an error).
    """
    # Bind context attributes to locals once; the closure is the hot path.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook

    def _scan_once(string, idx):
        try:
            nextchar = string[idx]
        except IndexError:
            # Past end of input: no value starts here.
            raise StopIteration
        # Dispatch on the first character of the value.
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict,
                                _scan_once, object_hook, object_pairs_hook)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        # Numbers are matched by regex; note this runs before the
        # NaN/Infinity constant checks below.
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration

    return _scan_once

# Prefer the C-accelerated scanner when the _speedups extension is present.
make_scanner = c_make_scanner or py_make_scanner
| Python |
import simplejson
import cgi
class JSONFilter(object):
    """WSGI middleware that decodes JSON POSTs and JSON-encodes responses.

    On a POST whose Content-Type matches ``mime_type``, the request body is
    parsed and exposed to the wrapped app as ``environ['jsonfilter.json']``.
    The wrapped app's return value is serialized to JSON; a ``jsonp`` query
    parameter wraps the result in a JSONP callback.
    """

    def __init__(self, app, mime_type='text/x-json'):
        self.app = app
        self.mime_type = mime_type

    def __call__(self, environ, start_response):
        # Read JSON POST input to jsonfilter.json if matching mime type
        response = {'status': '200 OK', 'headers': []}
        def json_start_response(status, headers):
            # Capture the wrapped app's status/headers; they are replayed
            # below once the body length is known.
            response['status'] = status
            response['headers'].extend(headers)
        environ['jsonfilter.mime_type'] = self.mime_type
        if environ.get('REQUEST_METHOD', '') == 'POST':
            if environ.get('CONTENT_TYPE', '') == self.mime_type:
                # Read exactly CONTENT_LENGTH bytes when given, else read all.
                args = [_ for _ in [environ.get('CONTENT_LENGTH')] if _]
                data = environ['wsgi.input'].read(*map(int, args))
                environ['jsonfilter.json'] = simplejson.loads(data)
        res = simplejson.dumps(self.app(environ, json_start_response))
        jsonp = cgi.parse_qs(environ.get('QUERY_STRING', '')).get('jsonp')
        if jsonp:
            content_type = 'text/javascript'
            res = ''.join(jsonp + ['(', res, ')'])
        elif 'Opera' in environ.get('HTTP_USER_AGENT', ''):
            # Opera has bunk XMLHttpRequest support for most mime types
            content_type = 'text/plain'
        else:
            content_type = self.mime_type
        headers = [
            ('Content-type', content_type),
            # Bug fix: WSGI (PEP 333) requires header values to be strings;
            # len() returns an int, which strict servers reject.
            ('Content-length', str(len(res))),
        ]
        headers.extend(response['headers'])
        start_response(response['status'], headers)
        return [res]
def factory(app, global_conf, **kw):
    """Paste filter factory; global_conf is accepted but unused."""
    return JSONFilter(app, **kw)
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from scanner import make_scanner
try:
from _speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Return (nan, inf, -inf) doubles decoded from raw IEEE-754 bytes."""
    # Two big-endian doubles: 7FF8... is a quiet NaN, 7FF0... is +Infinity.
    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
    # The struct module in Python 2.4 would get frexp() out of range here
    # when an endian is specified in the format string. Fixed in Python 2.5+
    if sys.byteorder != 'big':
        # Byte-swap each 8-byte double for little-endian hosts.
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf

NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """Subclass of ValueError with the following additional properties:

    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)
    """
    def __init__(self, msg, doc, pos, end=None):
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        if end is not None:
            # Bug fix: the end line/column must be computed from `end`,
            # not `pos` (they previously always equalled lineno/colno,
            # contradicting the docstring above).
            self.endlineno, self.endcolno = linecol(doc, end)
        else:
            self.endlineno, self.endcolno = None, None
def linecol(doc, pos):
    """Return the (line, column) of character index *pos* within *doc*.

    Lines are 1-based. Columns on the first line are 0-based; on later
    lines they are measured from the preceding newline (1-based), which is
    the historical behavior errmsg() relies on.
    """
    lineno = doc.count('\n', 0, pos) + 1
    if lineno > 1:
        colno = pos - doc.rindex('\n', 0, pos)
    else:
        colno = pos
    return lineno, colno
def errmsg(msg, doc, pos, end=None):
    """Format a parse-error message with line/column info for pos (and end)."""
    # Note that this function is called from _speedups
    lineno, colno = linecol(doc, pos)
    if end is None:
        #fmt = '{0}: line {1} column {2} (char {3})'
        #return fmt.format(msg, lineno, colno, pos)
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    #fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
    #return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
    return ('%s: line %d column %d - line %d column %d (char %d - %d)'
            % (msg, lineno, colno, endlineno, endcolno, pos, end))
# Literal tokens that map directly to float constants.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

# Matches a run of ordinary characters followed by a "terminator": a close
# quote, a backslash, or a literal control character.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Lookup table for the single-character backslash escapes.
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}

DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        # Consume ordinary characters up to the next quote/backslash/control.
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                #msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the literal control character.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                # A high surrogate must be followed by a \uXXXX low surrogate;
                # combine the pair into one code point above U+FFFF.
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end

# Use speedup if available
scanstring = c_scanstring or py_scanstring

WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
object_pairs_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
pairs = []
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = {}
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end + 1
elif nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end)
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise JSONDecodeError("Expecting : delimiter", s, end)
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise JSONDecodeError("Expecting object", s, end)
pairs.append((key, value))
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting , delimiter", s, end - 1)
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end - 1)
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = dict(pairs)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from ``s`` starting just past the opening '['.

    Returns a tuple of the list of decoded values and the index in ``s``
    of the first character after the closing ']'.
    """
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        # Skip whitespace, then look at the delimiter character.
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end)
        # Cheap one-or-two character whitespace skip after the comma; the
        # IndexError guard handles truncated input without a length check.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Decoder for JSON <http://json.org> documents.

    By default, decoding maps JSON objects to :class:`dict`, arrays to
    :class:`list`, strings to :class:`unicode`, numbers to
    :class:`int`/:class:`long` or :class:`float`, and ``true`` /
    ``false`` / ``null`` to ``True`` / ``False`` / ``None``.

    As an extension beyond the JSON specification, ``NaN``,
    ``Infinity`` and ``-Infinity`` are also recognised and produce the
    corresponding ``float`` values.
    """
    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* names the codec used to interpret :class:`str` input
        (``'utf-8'`` when not given); it has no effect on
        :class:`unicode` input.  Only ASCII-superset encodings currently
        work; other data should be passed in as :class:`unicode`.

        *object_hook*, when given, receives each decoded JSON object
        (a dict) and its return value is used in place of that dict —
        useful for custom deserialization such as JSON-RPC class
        hinting.

        *object_pairs_hook*, when given, receives an ordered list of
        ``(key, value)`` pairs for every object literal and its result
        replaces the :class:`dict`; this supports order-preserving
        containers such as :func:`collections.OrderedDict`.  If both
        hooks are supplied, *object_pairs_hook* takes priority.

        *parse_float* and *parse_int* receive the literal text of each
        JSON number; they default to :class:`float` and :class:`int`.
        Supply alternatives (e.g. :class:`decimal.Decimal`) to change
        the numeric types produced.

        *parse_constant*, when given, is called with ``'-Infinity'``,
        ``'Infinity'`` or ``'NaN'`` and can be used to reject such
        non-standard numbers.

        *strict* (default ``True``) makes unescaped control characters
        inside strings a parse error; pass ``False`` to allow them.
        """
        # Plain configuration attributes.
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.strict = strict
        # Number/constant parsers, with built-in fallbacks.
        self.parse_float = parse_float if parse_float else float
        self.parse_int = parse_int if parse_int else int
        self.parse_constant = (parse_constant
            if parse_constant else _CONSTANTS.__getitem__)
        # Per-type parse callbacks consumed by the scanner.
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Must be last: make_scanner reads the attributes set above.
        self.scan_once = make_scanner(self)
    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python object encoded by the JSON document ``s``
        (a ``str`` or ``unicode`` instance).  Raises JSONDecodeError
        if anything other than whitespace follows the document.
        """
        start = _w(s, 0).end()
        obj, end = self.raw_decode(s, idx=start)
        tail = _w(s, end).end()
        if tail != len(s):
            raise JSONDecodeError("Extra data", s, tail, len(s))
        return obj
    def raw_decode(self, s, idx=0):
        """Decode the JSON document beginning at ``s[idx]`` and return
        ``(obj, end)``, where ``end`` is the index just past the
        document.  Data following the document is ignored, so this can
        extract a JSON value from a longer string.
        """
        try:
            return self.scan_once(s, idx)
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
| Python |
"""Implementation of JSONEncoder
"""
import re
try:
from _speedups import encode_basestring_ascii as \
c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from _speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
from decoder import PosInf
# Characters that must be escaped in a JSON string: control characters,
# backslash, double quote and the common short-escape characters.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# For ASCII-only output: backslash, quote, and anything outside the
# printable ASCII range (space through tilde).
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Matches bytes >= 0x80, i.e. str input that needs decoding from UTF-8.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Characters with dedicated two-character JSON escapes.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    # Remaining control characters fall back to generic \uXXXX escapes.
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# repr() keeps round-trippable precision for floats.
FLOAT_REPR = repr
def encode_basestring(s):
    """Return the JSON string literal (with surrounding quotes) for *s*.

    Byte strings containing non-ASCII data are decoded as UTF-8 first so
    the escaping regex operates on unicode text.
    """
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    escaped = ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s)
    return u''.join([u'"', escaped, u'"'])
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string
    """
    # Decode UTF-8 byte strings first so escaping sees code points.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                #return '\\u{0:04x}'.format(n)
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                # Code points beyond the BMP are emitted as a UTF-16
                # surrogate pair, as the \uXXXX syntax requires.
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Prefer the C implementation when available; fall back to pure Python.
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.
    Supports the following objects and types by default:
    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Class-level defaults; instances may override via ``separators``.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """Constructor for JSONEncoder, with sensible defaults.
        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None. If
        skipkeys is True, such items are simply skipped.
        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be unicode object.
        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.
        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such. This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.
        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.
        If indent is a string, then JSON array elements and object members
        will be pretty-printed with a newline followed by that string repeated
        for each level of nesting. ``None`` (the default) selects the most compact
        representation without any newlines. For backwards compatibility with
        versions of simplejson earlier than 2.1.0, an integer is also accepted
        and is converted to a string with that many spaces.
        If specified, separators should be a (item_separator, key_separator)
        tuple. The default is (', ', ': '). To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.
        If specified, default is a function that gets called for objects
        that can't otherwise be serialized. It should return a JSON encodable
        version of the object or raise a ``TypeError``.
        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        # Pre-2.1.0 compatibility: an integer indent means that many spaces.
        if isinstance(indent, (int, long)):
            indent = ' ' * indent
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            self.default = default
        self.encoding = encoding
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).
        For example, to support arbitrary iterators, you could
        implement default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError(repr(o) + " is not JSON serializable")
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.
        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                # Non-UTF-8 byte strings are decoded up front so the
                # escaping helpers only see unicode.
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed. The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the chosen encoder so byte strings are decoded with the
            # configured (non-UTF-8) encoding before escaping.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)
        def floatstr(o, allow_nan=self.allow_nan,
                _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text
        # The C accelerator only handles the one-shot, unindented,
        # unsorted case; everything else uses the pure-Python encoder.
        if (_one_shot and c_make_encoder is not None
                and not self.indent and not self.sort_keys):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Build and return the recursive pure-Python ``_iterencode``
    generator.  All encoder configuration is captured in this closure;
    the keyword defaults above bind builtins as fast local names for
    the generated closures.
    """
    def _iterencode_list(lst, _current_indent_level):
        # Generator yielding the JSON chunks for a list or tuple.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            # Circular-reference bookkeeping keyed on the container's id.
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first item, ``buf`` holds only the separator
                # that precedes each subsequent item (the '[' has already
                # been emitted as part of the first chunk).
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(dct, _current_indent_level):
        # Generator yielding the JSON chunks for a dict.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        # Top-level dispatch generator for any encodable object.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            # Unknown type: let ``default`` convert it, then re-dispatch.
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
| Python |
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    all
except NameError:
    # Python 2.4 has no all() builtin; provide an equivalent fallback.
    def all(seq):
        for elem in seq:
            if not elem:
                return False
        return True
class OrderedDict(dict, DictMixin):
    """Dictionary that remembers insertion order.
    Implemented as a dict plus a circular doubly linked list of keys:
    ``self.__map`` maps each key to its ``[key, prev, next]`` node and
    ``self.__end`` is the list's sentinel node.
    """
    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__end
        except AttributeError:
            # First initialization: create the sentinel and the key map.
            self.clear()
        self.update(*args, **kwds)
    def clear(self):
        self.__end = end = []
        end += [None, end, end] # sentinel node for doubly linked list
        self.__map = {} # key --> [key, prev, next]
        dict.clear(self)
    def __setitem__(self, key, value):
        if key not in self:
            # Link the new key at the tail of the ordering list.
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the key's node from the doubly linked list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev
    def __iter__(self):
        # Walk the linked list forward from the sentinel.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        # Walk the linked list backward from the sentinel.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def popitem(self, last=True):
        """Remove and return a ``(key, value)`` pair: the most recently
        inserted by default, the oldest when ``last`` is false."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value
    def __reduce__(self):
        # Pickle support: temporarily remove the unpicklable linked-list
        # attributes so vars(self) contains only user state.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def keys(self):
        return list(self)
    # Generic implementations from DictMixin, built on the methods above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    def copy(self):
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        # Order-sensitive comparison against other OrderedDicts,
        # order-insensitive against plain dicts.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.1.0'
# Names exported by ``from simplejson import *``.
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
    'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
try:
from collections import OrderedDict
except ImportError:
from ordered_dict import OrderedDict
# Module-level encoder reused by dump()/dumps() whenever every option has
# its default value; building it once avoids per-call construction cost.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).
    If ``skipkeys`` is true, dict keys that are not of a basic type
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``,
    ``None``) are skipped instead of raising ``TypeError``.
    If ``ensure_ascii`` is false, some chunks written to ``fp`` may be
    ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules; unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is
    likely to cause an error.
    If ``check_circular`` is false, the circular reference check for
    container types is skipped; a circular reference then results in an
    ``OverflowError`` (or worse).
    If ``allow_nan`` is false, serializing out-of-range ``float``
    values (``nan``, ``inf``, ``-inf``) raises ``ValueError`` in strict
    compliance with the JSON specification, instead of emitting the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    If *indent* is a string, array elements and object members are
    pretty-printed with a newline followed by that string repeated per
    nesting level; ``None`` (the default) selects the most compact
    representation.  For backwards compatibility with simplejson
    earlier than 2.1.0, an integer is also accepted and converted to
    that many spaces.
    ``separators``, when given, is an ``(item_separator,
    dict_separator)`` tuple replacing the default ``(', ', ': ')``;
    ``(',', ':')`` yields the most compact JSON.
    ``encoding`` is the character encoding for ``str`` instances,
    default UTF-8.
    ``default(obj)`` should return a serializable version of ``obj`` or
    raise ``TypeError``; the default simply raises ``TypeError``.
    To customize encoding (e.g. a ``.default()`` override), pass a
    ``JSONEncoder`` subclass via the ``cls`` kwarg.
    """
    # Fast path: every option is at its default value, so the shared
    # module-level encoder can be reused.
    defaults_only = (not skipkeys and ensure_ascii and check_circular
        and allow_nan and cls is None and indent is None
        and separators is None and encoding == 'utf-8'
        and default is None and not kw)
    if defaults_only:
        chunks = _default_encoder.iterencode(obj)
    else:
        encoder_cls = JSONEncoder if cls is None else cls
        encoder = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan,
            indent=indent, separators=separators, encoding=encoding,
            default=default, **kw)
        chunks = encoder.iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in chunks:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.
    If ``skipkeys`` is true, dict keys that are not of a basic type
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``,
    ``None``) are skipped instead of raising ``TypeError``.
    If ``ensure_ascii`` is false, the result is a ``unicode`` instance
    subject to normal Python ``str`` to ``unicode`` coercion rules,
    rather than an ASCII-escaped ``str``.
    If ``check_circular`` is false, the circular reference check for
    container types is skipped; a circular reference then results in an
    ``OverflowError`` (or worse).
    If ``allow_nan`` is false, serializing out-of-range ``float``
    values (``nan``, ``inf``, ``-inf``) raises ``ValueError`` in strict
    compliance with the JSON specification, instead of emitting the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    If ``indent`` is a string, array elements and object members are
    pretty-printed with a newline followed by that string repeated per
    nesting level; ``None`` (the default) selects the most compact
    representation.  For backwards compatibility with simplejson
    earlier than 2.1.0, an integer is also accepted and converted to
    that many spaces.
    ``separators``, when given, is an ``(item_separator,
    dict_separator)`` tuple replacing the default ``(', ', ': ')``;
    ``(',', ':')`` yields the most compact JSON.
    ``encoding`` is the character encoding for ``str`` instances,
    default UTF-8.
    ``default(obj)`` should return a serializable version of ``obj`` or
    raise ``TypeError``; the default simply raises ``TypeError``.
    To customize encoding (e.g. a ``.default()`` override), pass a
    ``JSONEncoder`` subclass via the ``cls`` kwarg.
    """
    # Fast path: every option is at its default value, so the shared
    # module-level encoder can be reused.
    defaults_only = (not skipkeys and ensure_ascii and check_circular
        and allow_nan and cls is None and indent is None
        and separators is None and encoding == 'utf-8'
        and default is None and not kw)
    if defaults_only:
        return _default_encoder.encode(obj)
    encoder_cls = JSONEncoder if cls is None else cls
    encoder = encoder_cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default, **kw)
    return encoder.encode(obj)
# Module-level decoder reused by loads() whenever all options are defaults.
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
    object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing a JSON document) to a Python object.
    *encoding* names the codec used to interpret :class:`str` input
    (``'utf-8'`` when not given); it has no effect on :class:`unicode`
    input.  Only ASCII-superset encodings currently work; other data
    should be passed in as :class:`unicode`.
    *object_hook*, when given, receives each decoded JSON object (a
    dict) and its return value is used in place of that dict — useful
    for custom deserialization such as JSON-RPC class hinting.
    *object_pairs_hook*, when given, receives an ordered list of
    ``(key, value)`` pairs for every object literal and its result
    replaces the :class:`dict`; this supports order-preserving
    containers such as :func:`collections.OrderedDict`.  If both hooks
    are supplied, *object_pairs_hook* takes priority.
    *parse_float* and *parse_int* receive the literal text of each JSON
    number; they default to :class:`float` and :class:`int`.  Supply
    alternatives (e.g. :class:`decimal.Decimal`) to change the numeric
    types produced.
    *parse_constant*, when given, is called with ``'-Infinity'``,
    ``'Infinity'`` or ``'NaN'`` and can be used to reject such
    non-standard numbers.
    To use a custom ``JSONDecoder`` subclass, specify it with the
    ``cls`` kwarg.
    """
    # Read the whole document, then delegate all decoding to loads().
    document = fp.read()
    return loads(document, encoding=encoding, cls=cls,
        object_hook=object_hook, parse_float=parse_float,
        parse_int=parse_int, parse_constant=parse_constant,
        object_pairs_hook=object_pairs_hook, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* names the codec used to interpret :class:`str` input
    (``'utf-8'`` by default); it is ignored for :class:`unicode` input and
    must be a superset of ASCII.

    *object_hook* receives each decoded JSON object (a :class:`dict`) and
    its return value is used instead — handy for custom deserialization
    such as JSON-RPC class hinting.

    *object_pairs_hook* receives each object literal as an ordered list of
    key/value pairs and its return value replaces the :class:`dict` (e.g.
    :func:`collections.OrderedDict` preserves insertion order). It takes
    priority over *object_hook* when both are given.

    *parse_float*, *parse_int* and *parse_constant* override the handling
    of JSON floats, ints, and the literals ``'-Infinity'``/``'Infinity'``/
    ``'NaN'`` respectively.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    # Fast path: nothing customized, reuse the shared module-level decoder.
    use_default = (cls is None and encoding is None and object_hook is None
                   and parse_int is None and parse_float is None
                   and parse_constant is None and object_pairs_hook is None
                   and not kw)
    if use_default:
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Fold only the explicitly-supplied hooks into the decoder kwargs.
    overrides = (
        ('object_hook', object_hook),
        ('object_pairs_hook', object_pairs_hook),
        ('parse_float', parse_float),
        ('parse_int', parse_int),
        ('parse_constant', parse_constant),
    )
    for option, value in overrides:
        if value is not None:
            kw[option] = value
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch the package between its C accelerators and pure-Python code.

    When *enabled* is true, each hook falls back to the Python version only
    if the corresponding C implementation is unavailable; when false, the
    pure-Python versions are forced. The module-level default encoder and
    decoder are rebuilt so they pick up the newly selected implementations.
    """
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    try:
        from simplejson._speedups import make_encoder as c_make_encoder
    except ImportError:
        c_make_encoder = None
    # Select the implementations first, then install them.
    if enabled:
        scanstring = dec.c_scanstring or dec.py_scanstring
        make_encoder = c_make_encoder
        encode_ascii = (enc.c_encode_basestring_ascii or
                        enc.py_encode_basestring_ascii)
        make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        scanstring = dec.py_scanstring
        make_encoder = None
        encode_ascii = enc.py_encode_basestring_ascii
        make_scanner = scan.py_make_scanner
    dec.scanstring = scanstring
    enc.c_make_encoder = make_encoder
    enc.encode_basestring_ascii = encode_ascii
    scan.make_scanner = make_scanner
    dec.make_scanner = scan.make_scanner
    # Recreate the shared codec objects so the fast paths in load()/loads()
    # and dump()/dumps() use the implementations chosen above.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import simplejson as json
def main():
    """Validate JSON from a file (or stdin) and pretty-print it.

    Usage: with no arguments, reads stdin and writes stdout; with one
    argument, reads that file; with two, writes the pretty-printed result
    to the second file.

    Raises:
        SystemExit: on bad usage, or with the parse error for invalid JSON.
    """
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
    try:
        # object_pairs_hook preserves the key order of the input document.
        obj = json.load(infile, object_pairs_hook=json.OrderedDict)
    except ValueError as e:
        # Fix: the original used the Python-2-only 'except ValueError, e'
        # form, a SyntaxError on Python 3; 'as' works on 2.6+ and 3.
        raise SystemExit(e)
    json.dump(obj, outfile, sort_keys=True, indent='    ')
    outfile.write('\n')
# Allow running this module directly: python -m simplejson.tool
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the generic robot classes.
This module provides the Robot class and RobotListener interface,
as well as some helper functions for web requests and responses.
"""
import base64
import logging
import sys
try:
__import__('google3') # setup internal test environment
except ImportError:
pass
import simplejson
import blip
import events
import ops
import util
import wavelet
import errors
# We only import oauth when we need it
# Placeholder for the lazily-imported oauth module; populated by
# Robot.setup_oauth() so there is no hard dependency when OAuth is unused.
oauth = None
# Fallback profile page advertised for robots that supply none.
DEFAULT_PROFILE_URL = (
    'http://code.google.com/apis/wave/extensions/robots/python-tutorial.html')
class Robot(object):
  """Robot metadata class.

  This class holds on to basic robot information like the name and profile.
  It also maintains the list of event handlers and cron jobs and
  dispatches events to the appropriate handlers.
  """

  def __init__(self, name, image_url='', profile_url=DEFAULT_PROFILE_URL):
    """Initializes self with robot information.

    Args:
      name: The name of the robot.
      image_url: (optional) url of an image that should be used as the avatar
          for this robot.
      profile_url: (optional) url of a webpage with more information about
          this robot.
    """
    # Maps event type -> list of (handler, event_class, context, filter).
    self._handlers = {}
    self._name = name
    # Verification token / ST parameter from /wave/robot/register.
    self._verification_token = None
    self._st = None
    # OAuth state; populated by setup_oauth().
    self._consumer_key = None
    self._consumer_secret = None
    self._server_rpc_base = None
    self._profile_handler = None
    self._image_url = image_url
    self._profile_url = profile_url
    # Rolling hash over the registered handlers; see register_handler().
    self._capability_hash = 0

  @property
  def name(self):
    """Returns the name of the robot."""
    return self._name

  @property
  def image_url(self):
    """Returns the URL of the avatar image."""
    return self._image_url

  @property
  def profile_url(self):
    """Returns the URL of an info page for the robot."""
    return self._profile_url

  def http_post(self, url, data, headers):
    """Execute an http post.

    Monkey patch this method to use something other than
    the default urllib.

    Args:
      url: to post to.
      data: post body.
      headers: extra headers to pass along.

    Returns:
      response_code, returned_page
    """
    import urllib2
    req = urllib2.Request(url,
                          data=data,
                          headers=headers)
    try:
      f = urllib2.urlopen(req)
      return f.code, f.read()
    except urllib2.URLError, e:
      # NOTE(review): a plain URLError (connection failure, no HTTP
      # response) has no .code/.read attributes; this handler appears to
      # assume an HTTPError — confirm.
      return e.code, e.read()

  def get_verification_token_info(self):
    """Returns the verification token and ST parameter."""
    return self._verification_token, self._st

  def capabilities_hash(self):
    """Return the capabilities hash as a hex string."""
    return hex(self._capability_hash)

  def register_handler(self, event_class, handler, context=None, filter=None):
    """Registers a handler on a specific event type.

    Multiple handlers may be registered on a single event type and are
    guaranteed to be called in order of registration.

    The handler takes two arguments, the event object and the corresponding
    wavelet.

    Args:
      event_class: An event to listen for from the classes defined in the
          events module.
      handler: A function handler which takes two arguments, the wavelet for
          the event and the event object.
      context: The context to provide for this handler.
      filter: Depending on the event, a filter can be specified that restricts
          for which values the event handler will be called from the server.
          Valuable to restrict the amount of traffic send to the robot.
    """
    payload = (handler, event_class, context, filter)
    self._handlers.setdefault(event_class.type, []).append(payload)
    if type(context) == list:
      context = ','.join(context)
    # Fold this registration into the rolling capability hash (masked to
    # 28 bits) so the server can detect when the set of handlers changes.
    self._capability_hash = (self._capability_hash * 13 +
                             hash(event_class.type) +
                             hash(context) +
                             hash(filter)) & 0xfffffff

  def set_verification_token_info(self, token, st=None):
    """Set the verification token used in the ownership verification.

    /wave/robot/register starts this process up and will produce this token.

    Args:
      token: the token provided by /wave/robot/register.
      st: optional parameter to verify the request for the token came from
          the wave server.
    """
    self._verification_token = token
    self._st = st

  def setup_oauth(self, consumer_key, consumer_secret,
                  server_rpc_base='http://gmodules.com/api/rpc'):
    """Configure this robot to use the oauth'd json rpc.

    Args:
      consumer_key: consumer key received from the verification process.
      consumer_secret: secret received from the verification process.
      server_rpc_base: url of the rpc gateway to use. Specify None for
          default. For wave preview, http://gmodules.com/api/rpc should be
          used. For wave sandbox, http://sandbox.gmodules.com/api/rpc
          should be used.
    """
    # Import oauth inline and using __import__ for pyexe compatibility
    # when oauth is not installed.
    global oauth
    __import__('waveapi.oauth')
    oauth = sys.modules['waveapi.oauth']
    self._server_rpc_base = server_rpc_base
    self._consumer_key = consumer_key
    self._consumer_secret = consumer_secret
    self._oauth_signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
    self._oauth_consumer = oauth.OAuthConsumer(self._consumer_key,
                                               self._consumer_secret)

  def register_profile_handler(self, handler):
    """Sets the profile handler for this robot.

    The profile handler will be called when a profile is needed. The handler
    gets passed the name for which a profile is needed or None for the
    robot itself. A dictionary with keys for name, imageUrl and
    profileUrl should be returned.
    """
    self._profile_handler = handler

  def _hash(self, value):
    """Return the b64encoded sha1 hash of value."""
    try:
      hashlib = __import__('hashlib') # 2.5
      hashed = hashlib.sha1(value)
    except ImportError:
      import sha # deprecated
      hashed = sha.sha(value)
    return base64.b64encode(hashed.digest())

  def make_rpc(self, operations):
    """Make an rpc call, submitting the specified operations.

    Serializes the operations to JSON, signs the request with OAuth
    (including an oauth_body_hash of the post body) and posts it to the
    configured rpc gateway.

    Raises:
      errors.Error: if setup_oauth() has not been called.
      IOError: if the server responds with a non-200 status.
    """
    if not oauth or not self._oauth_consumer.key:
      raise errors.Error('OAuth has not been configured')
    # Accept a single operation, a list, or a whole OperationQueue.
    if (not type(operations) == list and
        not isinstance(operations, ops.OperationQueue)):
      operations = [operations]
    rpcs = [op.serialize(method_prefix='wave') for op in operations]
    post_body = simplejson.dumps(rpcs)
    body_hash = self._hash(post_body)
    params = {
        'oauth_consumer_key': 'google.com:' + self._oauth_consumer.key,
        'oauth_timestamp': oauth.generate_timestamp(),
        'oauth_nonce': oauth.generate_nonce(),
        'oauth_version': oauth.OAuthRequest.version,
        'oauth_body_hash': body_hash,
    }
    oauth_request = oauth.OAuthRequest.from_request('POST',
                                                    self._server_rpc_base,
                                                    parameters=params)
    oauth_request.sign_request(self._oauth_signature_method,
                               self._oauth_consumer,
                               None)
    code, content = self.http_post(
        url=oauth_request.to_url(),
        data=post_body,
        headers={'Content-Type': 'application/json'})
    logging.info(oauth_request.to_url())
    if code != 200:
      logging.info(oauth_request.to_url())
      logging.info(content)
      raise IOError('HttpError ' + str(code))
    return simplejson.loads(content)

  def _first_rpc_result(self, result):
    """result is returned from make_rpc. Get the first data record
    or throw an exception if it was an error."""
    if type(result) == list:
      result = result[0]
    error = result.get('error')
    if error:
      raise errors.Error('RPC Error' + str(error['code'])
                         + ': ' + error['message'])
    data = result.get('data')
    if data:
      return data
    raise errors.Error('RPC Error: No data record.')

  def capabilities_xml(self):
    """Return this robot's capabilities as an XML string."""
    lines = []
    for capability, payloads in self._handlers.items():
      for payload in payloads:
        handler, event_class, context, filter = payload
        line = ' <w:capability name="%s"' % capability
        if context and type(context) == list:
          line += ' context="%s"' % (',').join(context)
        elif context:
          line += ' context="%s"' % context
        if filter:
          line += ' filter="%s"' % filter
        line += '/>\n'
        lines.append(line)
    if self._consumer_key:
      oauth_tag = '<w:consumer_key>%s</w:consumer_key>\n' % self._consumer_key
    else:
      oauth_tag = ''
    return ('<?xml version="1.0"?>\n'
            '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
            '<w:version>%s</w:version>\n'
            '%s'
            '<w:protocolversion>%s</w:protocolversion>\n'
            '<w:capabilities>\n'
            '%s'
            '</w:capabilities>\n'
            '</w:robot>\n') % (self.capabilities_hash(),
                               oauth_tag,
                               ops.PROTOCOL_VERSION,
                               '\n'.join(lines))

  def profile_json(self, name=None):
    """Returns a JSON representation of the profile.

    This method is called both for the basic profile of the robot and to
    get a proxying for profile, in which case name is set. By default
    the information supplied at registration is returned.
    Use register_profile_handler to override this default behavior.
    """
    if self._profile_handler:
      data = self._profile_handler(name)
    else:
      data = {'name': self.name,
              'imageUrl': self.image_url,
              'profileUrl': self.profile_url}
    return simplejson.dumps(data)

  def _wavelet_from_json(self, json, pending_ops):
    """Construct a wavelet from the passed json.

    The json should either contain a wavelet and a blips record that
    define those respective objects. The returned wavelet
    will be constructed using the passed pending_ops
    OperationQueue.

    Alternatively the json can be the result of a previous
    wavelet.serialize() call. In that case the blips will
    be contained in the wavelet record.
    """
    if isinstance(json, basestring):
      json = simplejson.loads(json)
    blips = {}
    for blip_id, raw_blip_data in json['blips'].iteritems():
      blips[blip_id] = blip.Blip(raw_blip_data, blips, pending_ops)
    # The wavelet record may live under one of two keys, or be the whole
    # payload (the wavelet.serialize() case).
    if 'wavelet' in json:
      raw_wavelet_data = json['wavelet']
    elif 'waveletData' in json:
      raw_wavelet_data = json['waveletData']
    else:
      raw_wavelet_data = json
    # Only attach the blips that actually belong to this wavelet.
    wavelet_blips = {}
    wavelet_id = raw_wavelet_data['waveletId']
    wave_id = raw_wavelet_data['waveId']
    for blip_id, instance in blips.items():
      if instance.wavelet_id == wavelet_id and instance.wave_id == wave_id:
        wavelet_blips[blip_id] = instance
    result = wavelet.Wavelet(raw_wavelet_data, wavelet_blips, self, pending_ops)
    robot_address = json.get('robotAddress')
    if robot_address:
      result.robot_address = robot_address
    return result

  def process_events(self, json):
    """Process an incoming set of events encoded as json.

    Dispatches each event to the handlers registered for its type and
    returns the queued-up operations serialized as json.
    """
    parsed = simplejson.loads(json)
    pending_ops = ops.OperationQueue()
    event_wavelet = self._wavelet_from_json(parsed, pending_ops)
    for event_data in parsed['events']:
      for payload in self._handlers.get(event_data['type'], []):
        handler, event_class, context, filter = payload
        event = event_class(event_data, event_wavelet)
        # Keep the raw json around for handlers that want it.
        event.json = json
        handler(event, event_wavelet)
    pending_ops.set_capability_hash(self.capabilities_hash())
    return simplejson.dumps(pending_ops.serialize())

  def new_wave(self, domain, participants=None, message='', proxy_for_id=None,
               submit=False):
    """Create a new wave with the initial participants on it.

    A new wave is returned with its own operation queue. It is the
    responsibility of the caller to make sure this wave gets
    submitted to the server, either by calling robot.submit() or
    by calling .submit_with() on the returned wave.

    Args:
      domain: the domain to create the wavelet on. This should
          in general correspond to the domain of the incoming
          wavelet. (wavelet.domain). Exceptions are situations
          where the robot is calling new_wave outside of an
          event or when the server is handling multiple domains.
      participants: initial participants on the wave. The robot
          as the creator of the wave is always added.
      message: a string that will be passed back to the robot
          when the WAVELET_CREATOR event is fired. This is a
          lightweight way to pass around state.
      submit: if true, use the active gateway to make a round
          trip to the server. This will return immediately an
          actual waveid/waveletid and blipId for the root blip.
    """
    operation_queue = ops.OperationQueue(proxy_for_id)
    if not isinstance(message, basestring):
      message = simplejson.dumps(message)
    blip_data, wavelet_data = operation_queue.robot_create_wavelet(
        domain=domain,
        participants=participants,
        message=message)
    blips = {}
    root_blip = blip.Blip(blip_data, blips, operation_queue)
    blips[root_blip.blip_id] = root_blip
    created = wavelet.Wavelet(wavelet_data,
                              blips=blips,
                              robot=self,
                              operation_queue=operation_queue)
    if submit:
      # Round-trip to the server, then rebuild the wavelet with the real
      # ids the server assigned.
      result = self._first_rpc_result(self.submit(created))
      if type(result) == list:
        result = result[0]
      # Currently, data is sometimes wrapped in an outer 'data'
      # Remove these 2 lines when that is no longer an issue.
      if 'data' in result and len(result) == 2:
        result = result['data']
      if 'blipId' in result:
        blip_data['blipId'] = result['blipId']
        wavelet_data['rootBlipId'] = result['blipId']
      for field in 'waveId', 'waveletId':
        if field in result:
          wavelet_data[field] = result[field]
          blip_data[field] = result[field]
      blips = {}
      root_blip = blip.Blip(blip_data, blips, operation_queue)
      blips[root_blip.blip_id] = root_blip
      created = wavelet.Wavelet(wavelet_data,
                                blips=blips,
                                robot=self,
                                operation_queue=operation_queue)
    return created

  def fetch_wavelet(self, wave_id, wavelet_id, proxy_for_id=None):
    """Use the REST interface to fetch a wave and return it.

    The returned wavelet contains a snapshot of the state of the
    wavelet at that point. It can be used to modify the wavelet,
    but the wavelet might change in between, so treat carefully.

    Also note that the wavelet returned has its own operation
    queue. It is the responsibility of the caller to make sure this
    wavelet gets submitted to the server, either by calling
    robot.submit() or by calling .submit_with() on the returned
    wavelet.
    """
    operation_queue = ops.OperationQueue(proxy_for_id)
    operation_queue.robot_fetch_wave(wave_id, wavelet_id)
    result = self._first_rpc_result(self.make_rpc(operation_queue))
    # Hand the wavelet a fresh queue; the fetch queue is spent.
    return self._wavelet_from_json(result, ops.OperationQueue(proxy_for_id))

  def blind_wavelet(self, json, proxy_for_id=None):
    """Construct a blind wave from a json string.

    Call this method if you have a snapshot of a wave that you
    want to operate on outside of an event. Since the wave might
    have changed since you last saw it, you should take care to
    submit operations that are as safe as possible.

    Args:
      json: a json object or string containing at least a key
          wavelet defining the wavelet and a key blips defining the
          blips in the view.
      proxy_for_id: the proxying information that will be set on the
          wavelet's operation queue.

    Returns:
      A new wavelet with its own operation queue. It is the
      responsibility of the caller to make sure this wavelet gets
      submitted to the server, either by calling robot.submit() or
      by calling .submit_with() on the returned wavelet.
    """
    return self._wavelet_from_json(json, ops.OperationQueue(proxy_for_id))

  def submit(self, wavelet_to_submit):
    """Submit the pending operations associated with wavelet_to_submit.

    Typically the wavelet will be the result of open_wavelet, blind_wavelet
    or new_wavelet.
    """
    pending = wavelet_to_submit.get_operation_queue()
    res = self.make_rpc(pending)
    # Clear the queue so the same operations are not submitted twice.
    pending.clear()
    logging.info('submit returned:%s', res)
    return res
| Python |
import cgi
import urllib
import time
import random
import urlparse
import hmac
import base64
# Protocol version advertised as oauth_version.
VERSION = '1.0' # Hi Blaine!
# Defaults used when a request does not specify its own method/signature.
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
# Generic exception class
# Generic exception class
class OAuthError(RuntimeError):
    """Generic exception raised for any OAuth protocol failure."""
    def __init__(self, message='OAuth error occured.'):
        # Fix: initialize the base class too, so str(e) and e.args carry
        # the message (the original left them empty). The legacy .message
        # attribute is kept for existing callers.
        RuntimeError.__init__(self, message)
        self.message = message
# optional WWW-Authenticate header (401 error)
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
    """Return the optional WWW-Authenticate header for a 401 response."""
    challenge = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': challenge}
# url escape
# url escape
def escape(s):
    """Percent-encode *s* for use in OAuth parameters.

    Unlike urllib's default, '/' is escaped too: the safe set is just '~'.
    """
    oauth_safe_chars = '~'
    return urllib.quote(s, safe=oauth_safe_chars)
# util function: current timestamp
# seconds since epoch (UTC)
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
    """Return the current time as whole seconds since the epoch (UTC)."""
    now = time.time()
    return int(now)
# util function: nonce
# pseudorandom number
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
    """Return a pseudorandom nonce of *length* decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
    """Identity of a Consumer: its key and the secret it shares with the
    Service Provider."""
    # Class-level defaults mirror the instance attributes.
    key = None
    secret = None

    def __init__(self, key, secret):
        self.key, self.secret = key, secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
    """An End User credential: either a request token or an access token.

    Attributes:
        key: the token value.
        secret: the token secret.
    """
    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

    def to_string(self):
        """Serialize as a urlencoded query fragment."""
        fields = {'oauth_token': self.key, 'oauth_token_secret': self.secret}
        return urllib.urlencode(fields)

    # return a token from something like:
    # oauth_token_secret=digg&oauth_token=digg
    @staticmethod
    def from_string(s):
        """Parse a token out of a urlencoded string."""
        parsed = cgi.parse_qs(s, keep_blank_values=False)
        token_key = parsed['oauth_token'][0]
        token_secret = parsed['oauth_token_secret'][0]
        return OAuthToken(token_key, token_secret)

    def __str__(self):
        return self.to_string()
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
    '''Represents an OAuth request and serializes it in several forms.

    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
        ... any additional parameters, as defined by the Service Provider.
    '''
    parameters = None # oauth parameters
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION

    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Store the method, url and initial parameter dict."""
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}

    def set_parameter(self, parameter, value):
        """Set a single parameter (oauth_* or extra)."""
        self.parameters[parameter] = value

    def get_parameter(self, parameter):
        """Return a parameter's value; raise OAuthError when absent."""
        try:
            return self.parameters[parameter]
        except:
            raise OAuthError('Parameter not found: %s' % parameter)

    def _get_timestamp_nonce(self):
        """Return the (oauth_timestamp, oauth_nonce) pair."""
        return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')

    # get any non-oauth parameters
    def get_nonoauth_parameters(self):
        """Return the parameters that are not oauth_* protocol fields."""
        parameters = {}
        for k, v in self.parameters.iteritems():
            # ignore oauth parameters
            if k.find('oauth_') < 0:
                parameters[k] = v
        return parameters

    # serialize as a header for an HTTPAuth request
    def to_header(self, realm=''):
        """Serialize as an Authorization header value dict."""
        auth_header = 'OAuth realm="%s"' % realm
        # add the oauth parameters
        if self.parameters:
            for k, v in self.parameters.iteritems():
                auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}

    # serialize as post data for a POST request
    def to_postdata(self):
        """Serialize all parameters as escaped k=v pairs joined by '&'."""
        return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems())

    # serialize as a url for a GET request
    def to_url(self):
        """Serialize as a full GET url with query string."""
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())

    # return a string that consists of all the parameters that need to be signed
    def get_normalized_parameters(self):
        """Return the sorted, escaped parameter string that gets signed.

        NOTE(review): this deletes 'oauth_signature' from self.parameters
        in place — the request object is mutated by a signing round trip.
        """
        params = self.parameters
        try:
            # exclude the signature if it exists
            del params['oauth_signature']
        except:
            pass
        key_values = params.items()
        # sort lexicographically, first after key, then after value
        key_values.sort()
        # combine key value pairs in string and escape
        return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values)

    # just uppercases the http method
    def get_normalized_http_method(self):
        """Return the http method, uppercased."""
        return self.http_method.upper()

    # parses the url and rebuilds it to be scheme://host/path
    def get_normalized_http_url(self):
        """Return the url reduced to scheme://host/path (no query/fragment)."""
        parts = urlparse.urlparse(self.http_url)
        url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path
        return url_string

    # set the signature parameter to the result of build_signature
    def sign_request(self, signature_method, consumer, token):
        """Compute and attach oauth_signature_method and oauth_signature."""
        # set the signature method
        self.set_parameter('oauth_signature_method', signature_method.get_name())
        # set the signature
        self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))

    def build_signature(self, signature_method, consumer, token):
        """Delegate signature computation to the strategy object."""
        # call the build signature method within the signature method
        return signature_method.build_signature(self, consumer, token)

    @staticmethod
    def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
        """Build a request by combining header, query-string and url
        parameters; returns None if no parameters were found anywhere."""
        # combine multiple parameter sources
        if parameters is None:
            parameters = {}
        # headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # check that the authorization header is OAuth
            if auth_header.index('OAuth') > -1:
                try:
                    # get the parameters from the header
                    header_params = OAuthRequest._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
        # GET or POST query string
        if query_string:
            query_params = OAuthRequest._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = OAuthRequest._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return OAuthRequest(http_method, http_url, parameters)
        return None

    @staticmethod
    def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a request pre-populated with the standard oauth_* defaults
        for the given consumer (and token, if any)."""
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': oauth_consumer.key,
            'oauth_timestamp': generate_timestamp(),
            'oauth_nonce': generate_nonce(),
            'oauth_version': OAuthRequest.version,
        }
        # caller-supplied parameters win over the defaults
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
        return OAuthRequest(http_method, http_url, parameters)

    @staticmethod
    def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a request carrying a token and an optional callback url."""
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = escape(callback)
        return OAuthRequest(http_method, http_url, parameters)

    # util function: turn Authorization: header into parameters, has to do some unescaping
    @staticmethod
    def _split_header(header):
        """Parse an Authorization header value into a parameter dict."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # ignore realm parameter
            if param.find('OAuth realm') > -1:
                continue
            # remove whitespace
            param = param.strip()
            # split key-value
            param_parts = param.split('=', 1)
            # remove quotes and unescape the value
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params

    # util function: turn url string into parameters, has to do some unescaping
    @staticmethod
    def _split_url_string(param_str):
        """Parse a query string into a dict of unescaped single values."""
        parameters = cgi.parse_qs(param_str, keep_blank_values=False)
        for k, v in parameters.iteritems():
            # parse_qs returns lists; keep only the first value, unescaped
            parameters[k] = urllib.unquote(v[0])
        return parameters
# OAuthServer is a worker to check a requests validity against a data store
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
    """Worker that checks an OAuth request's validity against a data store.

    Persistence (consumers, tokens, nonces) is delegated to an
    OAuthDataStore; signature math is delegated to the registered
    OAuthSignatureMethod strategies.
    """
    timestamp_threshold = 300 # in seconds, five minutes
    version = VERSION
    signature_methods = None
    data_store = None

    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}

    def set_data_store(self, oauth_data_store):
        """Replace the backing data store."""
        # Fix: the original assigned the undefined name 'data_store',
        # raising NameError whenever this setter was called.
        self.data_store = oauth_data_store

    def get_data_store(self):
        """Return the backing data store."""
        return self.data_store

    def add_signature_method(self, signature_method):
        """Register a signature method under its protocol name."""
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods

    # process a request_token request
    # returns the request token on success
    def fetch_request_token(self, oauth_request):
        """Process a request_token request; return the request token."""
        try:
            # get the request token for authorization
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # no token required for the initial token request
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            self._check_signature(oauth_request, consumer, None)
            # fetch a new token
            token = self.data_store.fetch_request_token(consumer)
        return token

    # process an access_token request
    # returns the access token on success
    def fetch_access_token(self, oauth_request):
        """Process an access_token request; return the new access token."""
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # get the request token
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(consumer, token)
        return new_token

    # verify an api call, checks all the parameters
    def verify_request(self, oauth_request):
        """Verify an api call; return (consumer, token, extra parameters)."""
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # get the access token
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters

    # authorize a request token
    def authorize_token(self, token, user):
        """Authorize a request token on behalf of *user*."""
        return self.data_store.authorize_request_token(token, user)

    # get the callback url
    def get_callback(self, oauth_request):
        """Return the oauth_callback url from the request."""
        return oauth_request.get_parameter('oauth_callback')

    # optional support for the authenticate header
    def build_authenticate_header(self, realm=''):
        """Return the WWW-Authenticate header for a 401 response."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    # verify the correct version request for this server
    def _get_version(self, oauth_request):
        """Check oauth_version (defaulting it when absent) and return it."""
        try:
            version = oauth_request.get_parameter('oauth_version')
        except OAuthError:
            # a missing version parameter defaults to ours
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version

    # figure out the signature with some defaults
    def _get_signature_method(self, oauth_request):
        """Resolve the signature method object named by the request."""
        try:
            signature_method = oauth_request.get_parameter('oauth_signature_method')
        except OAuthError:
            signature_method = SIGNATURE_METHOD
        try:
            # get the signature method object
            signature_method = self.signature_methods[signature_method]
        except KeyError:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
        return signature_method

    def _get_consumer(self, oauth_request):
        """Resolve the consumer named by oauth_consumer_key."""
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        if not consumer_key:
            raise OAuthError('Invalid consumer key.')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer

    # try to find the token for the provided request token key
    def _get_token(self, oauth_request, token_type='access'):
        """Look up the token for the request's oauth_token parameter."""
        token_field = oauth_request.get_parameter('oauth_token')
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token

    def _check_signature(self, oauth_request, consumer, token):
        """Validate timestamp, nonce and signature; raise OAuthError if bad."""
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except OAuthError:
            raise OAuthError('Missing signature.')
        # validate the signature
        valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
            raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
        # Fix: the original recomputed the signature here on the success
        # path and discarded the result; that dead call has been removed.

    def _check_timestamp(self, timestamp):
        """Verify the timestamp is within timestamp_threshold of now."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))

    def _check_nonce(self, consumer, token, nonce):
        """Verify the nonce has not been used before for this consumer/token."""
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))
# OAuthClient is a worker to attempt to execute a request.
class OAuthClient(object):
    """Base class for consumers that talk to an OAuth server."""

    consumer = None
    token = None

    def __init__(self, oauth_consumer, oauth_token):
        """Store the consumer/token pair this client acts on behalf of."""
        self.consumer = oauth_consumer
        self.token = oauth_token

    def get_consumer(self):
        """Return the consumer this client was created with."""
        return self.consumer

    def get_token(self):
        """Return the token this client was created with."""
        return self.token

    def fetch_request_token(self, oauth_request):
        """Exchange the request for a request token. -> OAuthToken"""
        raise NotImplementedError

    def fetch_access_token(self, oauth_request):
        """Exchange the request for an access token. -> OAuthToken"""
        raise NotImplementedError

    def access_resource(self, oauth_request):
        """Access some protected resource with the request."""
        raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens.
class OAuthDataStore(object):
    """Abstract storage interface; subclasses supply the persistence."""

    def lookup_consumer(self, key):
        """Return the OAuthConsumer registered under *key*."""
        raise NotImplementedError

    def lookup_token(self, oauth_consumer, token_type, token_token):
        """Return the OAuthToken of *token_type* matching *token_token*."""
        raise NotImplementedError

    def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
        """Return a previously seen matching nonce, if any. -> OAuthToken"""
        raise NotImplementedError

    def fetch_request_token(self, oauth_consumer):
        """Create and return a new request token. -> OAuthToken"""
        raise NotImplementedError

    def fetch_access_token(self, oauth_consumer, oauth_token):
        """Exchange a request token for an access token. -> OAuthToken"""
        raise NotImplementedError

    def authorize_request_token(self, oauth_token, user):
        """Mark a request token as authorized by *user*. -> OAuthToken"""
        raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method.
class OAuthSignatureMethod(object):
    """Abstract strategy for signing and verifying OAuth requests."""

    def get_name(self):
        """Return the protocol name of this signature method. -> str"""
        raise NotImplementedError

    def build_signature_base_string(self, oauth_request, oauth_consumer,
                                    oauth_token):
        """Return (key, raw) strings used to produce the signature."""
        raise NotImplementedError

    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        """Return the signature string for the request. -> str"""
        raise NotImplementedError

    def check_signature(self, oauth_request, consumer, token, signature):
        """Return True when *signature* equals the one we build ourselves."""
        return self.build_signature(oauth_request, consumer, token) == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    """HMAC-SHA1 signature method (OAuth Core 1.0, section 9.2)."""

    def get_name(self):
        """Return the protocol name of this method."""
        return 'HMAC-SHA1'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return (key, raw): the HMAC key and the signature base string."""
        sig = (
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        )
        # Key is consumer secret and token secret joined by '&'; the
        # token part is empty for two-legged requests.
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw

    def build_signature(self, oauth_request, consumer, token):
        """Return the base64-encoded HMAC-SHA1 digest of the base string."""
        # build the base signature string
        key, raw = self.build_signature_base_string(oauth_request, consumer,
                                                    token)
        # hmac object
        try:
            import hashlib  # 2.5
            hashed = hmac.new(key, raw, hashlib.sha1)
        except ImportError:
            # Was a bare except; only the import can legitimately fail here.
            import sha  # deprecated
            hashed = hmac.new(key, raw, sha)
        # calculate the digest base 64
        return base64.b64encode(hashed.digest())
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signature method (OAuth Core 1.0, section 9.4)."""

    def get_name(self):
        """Return the protocol name of this method."""
        return 'PLAINTEXT'

    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return the consumer secret and token secret joined by '&'."""
        parts = [escape(consumer.secret), '&']
        if token:
            parts.append(escape(token.secret))
        return ''.join(parts)

    def build_signature(self, oauth_request, consumer, token):
        """The PLAINTEXT signature is simply the base string itself."""
        return self.build_signature_base_string(oauth_request, consumer, token)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the ops module."""
import unittest
import ops
class TestOperation(unittest.TestCase):
    """Test case for the Operation class and OperationQueue builders."""

    def testFields(self):
        """An Operation exposes its method, id and params."""
        operation = ops.Operation(ops.WAVELET_SET_TITLE, 'opid02',
                                  {'waveId': 'wavelet-id',
                                   'title': 'a title'})
        self.assertEqual(ops.WAVELET_SET_TITLE, operation.method)
        self.assertEqual('opid02', operation.id)
        self.assertEqual(2, len(operation.params))

    def testConstructModifyTag(self):
        """wavelet_modify_tag only sends modify_how when supplied."""
        queue = ops.OperationQueue()
        operation = queue.wavelet_modify_tag('waveid', 'waveletid', 'tag')
        self.assertEqual(3, len(operation.params))
        operation = queue.wavelet_modify_tag(
            'waveid', 'waveletid', 'tag', modify_how='remove')
        self.assertEqual(4, len(operation.params))

    def testConstructRobotFetchWave(self):
        """robot_fetch_wave carries the queue's proxying-for id."""
        queue = ops.OperationQueue('proxyid')
        operation = queue.robot_fetch_wave('wave1', 'wavelet1')
        self.assertEqual(3, len(operation.params))
        self.assertEqual('proxyid', operation.params['proxyingFor'])
        self.assertEqual('wave1', operation.params['waveId'])
        self.assertEqual('wavelet1', operation.params['waveletId'])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the blip module."""
import unittest
import blip
import element
import ops
import simplejson
# Canonical blip JSON used as the baseline for every test below;
# individual tests override fields via keyword args to new_blip().
TEST_BLIP_DATA = {
    'childBlipIds': [],
    'content': '\nhello world!\nanother line',
    'contributors': ['robot@test.com', 'user@test.com'],
    'creator': 'user@test.com',
    'lastModifiedTime': 1000,
    'parentBlipId': None,
    'annotations': [{'range': {'start': 2, 'end': 3},
                     'name': 'key', 'value': 'val'}],
    'waveId': 'test.com!w+g3h3im',
    'waveletId': 'test.com!root+conv',
    # A gadget element sitting at document position 14.
    'elements': {'14': {'type': 'GADGET', 'properties': {'url': 'http://a/b.xml'}}},
}
# Blip ids used when constructing parent/child test structures.
CHILD_BLIP_ID = 'b+42'
ROOT_BLIP_ID = 'b+43'
class TestBlip(unittest.TestCase):
    """Tests the primary data structures for the wave model."""

    def assertBlipStartswith(self, expected, totest):
        # Custom assertion: compare only the leading len(expected) chars
        # of the blip's text.
        actual = totest.text[:len(expected)]
        self.assertEquals(expected, actual)

    def new_blip(self, **args):
        """Create a blip for testing."""
        # Start from the canonical fixture and overlay any overrides.
        data = TEST_BLIP_DATA.copy()
        data.update(args)
        res = blip.Blip(data, self.all_blips, self.operation_queue)
        self.all_blips[res.blip_id] = res
        return res

    def setUp(self):
        # Fresh blip registry and operation queue for each test.
        self.all_blips = {}
        self.operation_queue = ops.OperationQueue()

    def testBlipProperties(self):
        # Exercises the read-only accessors of a root and a child blip.
        root = self.new_blip(blipId=ROOT_BLIP_ID,
                             childBlipIds=[CHILD_BLIP_ID])
        child = self.new_blip(blipId=CHILD_BLIP_ID,
                              parentBlipId=ROOT_BLIP_ID)
        self.assertEquals(ROOT_BLIP_ID, root.blip_id)
        self.assertEquals(set([CHILD_BLIP_ID]), root.child_blip_ids)
        self.assertEquals(set(TEST_BLIP_DATA['contributors']), root.contributors)
        self.assertEquals(TEST_BLIP_DATA['creator'], root.creator)
        self.assertEquals(TEST_BLIP_DATA['content'], root.text)
        self.assertEquals(TEST_BLIP_DATA['lastModifiedTime'],
                          root.last_modified_time)
        self.assertEquals(TEST_BLIP_DATA['parentBlipId'], root.parent_blip_id)
        self.assertEquals(TEST_BLIP_DATA['waveId'], root.wave_id)
        self.assertEquals(TEST_BLIP_DATA['waveletId'], root.wavelet_id)
        self.assertEquals(TEST_BLIP_DATA['content'][3], root[3])
        # Position 14 holds the gadget element from the fixture.
        self.assertEquals(element.Gadget.type, root[14].type)
        self.assertEquals('http://a/b.xml', root[14].url)
        self.assertEquals('a', root.text[14])
        self.assertEquals(len(TEST_BLIP_DATA['content']), len(root))
        self.assertTrue(root.is_root())
        self.assertFalse(child.is_root())
        self.assertEquals(root, child.parent_blip)

    def testBlipSerialize(self):
        # A serialize/deserialize round trip must preserve all fields.
        root = self.new_blip(blipId=ROOT_BLIP_ID,
                             childBlipIds=[CHILD_BLIP_ID])
        serialized = root.serialize()
        unserialized = blip.Blip(serialized, self.all_blips, self.operation_queue)
        self.assertEquals(root.blip_id, unserialized.blip_id)
        self.assertEquals(root.child_blip_ids, unserialized.child_blip_ids)
        self.assertEquals(root.contributors, unserialized.contributors)
        self.assertEquals(root.creator, unserialized.creator)
        self.assertEquals(root.text, unserialized.text)
        self.assertEquals(root.last_modified_time, unserialized.last_modified_time)
        self.assertEquals(root.parent_blip_id, unserialized.parent_blip_id)
        self.assertEquals(root.wave_id, unserialized.wave_id)
        self.assertEquals(root.wavelet_id, unserialized.wavelet_id)
        self.assertTrue(unserialized.is_root())

    def testDocumentOperations(self):
        # NOTE: the local name 'blip' shadows the imported blip module in
        # this test and the ones below; kept as-is.
        blip = self.new_blip(blipId=ROOT_BLIP_ID)
        newlines = [x for x in blip.find('\n')]
        self.assertEquals(2, len(newlines))
        blip.first('world').replace('jupiter')
        bits = blip.text.split('\n')
        self.assertEquals(3, len(bits))
        self.assertEquals('hello jupiter!', bits[1])
        blip.range(2, 5).delete()
        self.assertBlipStartswith('\nho jupiter', blip)
        blip.first('ho').insert_after('la')
        self.assertBlipStartswith('\nhola jupiter', blip)
        blip.at(3).insert(' ')
        self.assertBlipStartswith('\nho la jupiter', blip)

    def testElementHandling(self):
        blip = self.new_blip(blipId=ROOT_BLIP_ID)
        url = 'http://www.test.com/image.png'
        org_len = len(blip)
        blip.append(element.Image(url=url))
        elems = [elem for elem in blip.find(element.Image, url=url)]
        self.assertEquals(1, len(elems))
        elem = elems[0]
        self.assertTrue(isinstance(elem, element.Image))
        # Inserting text before the element shifts its position by 12.
        blip.at(1).insert('twelve chars')
        self.assertTrue(blip.text.startswith('\ntwelve charshello'))
        elem = blip[org_len + 12].value()
        self.assertTrue(isinstance(elem, element.Image))
        blip.first('twelve ').delete()
        self.assertTrue(blip.text.startswith('\nchars'))
        elem = blip[org_len + 12 - len('twelve ')].value()
        self.assertTrue(isinstance(elem, element.Image))
        blip.first('chars').replace(element.Image(url=url))
        elems = [elem for elem in blip.find(element.Image, url=url)]
        self.assertEquals(2, len(elems))
        self.assertTrue(blip.text.startswith('\n hello'))
        elem = blip[1].value()
        self.assertTrue(isinstance(elem, element.Image))

    def testAnnotationHandling(self):
        key = 'style/fontWeight'

        def get_bold():
            # Helper: first annotation under key with value 'bold', or None.
            for an in blip.annotations[key]:
                if an.value == 'bold':
                    return an
            return None

        json = ('[{"range":{"start":3,"end":6},"name":"%s","value":"bold"}]'
                % key)
        blip = self.new_blip(blipId=ROOT_BLIP_ID,
                             annotations=simplejson.loads(json))
        self.assertEquals(1, len(blip.annotations))
        self.assertNotEqual(None, get_bold().value)
        self.assertTrue(key in blip.annotations)
        # extend the bold annotation by adding:
        blip.range(5, 8).annotate(key, 'bold')
        self.assertEquals(1, len(blip.annotations))
        self.assertEquals(8, get_bold().end)
        # clip by adding a same keyed:
        blip[4:12].annotate(key, 'italic')
        self.assertEquals(2, len(blip.annotations[key]))
        self.assertEquals(4, get_bold().end)
        # now split the italic one:
        blip.range(6, 7).clear_annotation(key)
        self.assertEquals(3, len(blip.annotations[key]))
        # test names and iteration
        self.assertEquals(1, len(blip.annotations.names()))
        self.assertEquals(3, len([x for x in blip.annotations]))
        blip[3: 5].annotate('foo', 'bar')
        self.assertEquals(2, len(blip.annotations.names()))
        self.assertEquals(4, len([x for x in blip.annotations]))
        blip[3: 5].clear_annotation('foo')
        # clear the whole thing
        blip.all().clear_annotation(key)
        # getting to the key should now throw an exception
        self.assertRaises(KeyError, blip.annotations.__getitem__, key)

    def testBlipOperations(self):
        # reply() and insert_inline_blip() should register new blips.
        blip = self.new_blip(blipId=ROOT_BLIP_ID)
        self.assertEquals(1, len(self.all_blips))
        otherblip = blip.reply()
        otherblip.append('hello world')
        self.assertEquals('hello world', otherblip.text)
        self.assertEquals(blip.blip_id, otherblip.parent_blip_id)
        self.assertEquals(2, len(self.all_blips))
        inline = blip.insert_inline_blip(3)
        self.assertEquals(blip.blip_id, inline.parent_blip_id)
        self.assertEquals(3, len(self.all_blips))

    def testDocumentModify(self):
        blip = self.new_blip(blipId=ROOT_BLIP_ID)
        blip.all().replace('a text with text and then some text')
        blip[7].insert('text ')
        blip.all('text').replace('thing')
        self.assertEquals('a thing thing with thing and then some thing',
                          blip.text)

    def testBlipRefValue(self):
        blip = self.new_blip(blipId=ROOT_BLIP_ID)
        content = blip.text
        # Mirror each blip mutation on a plain string and compare.
        content = content[:4] + content[5:]
        del blip[4]
        self.assertEquals(content, blip.text)
        content = content[:2] + content[3:]
        del blip[2:3]
        self.assertEquals(content, blip.text)
        blip[2:3] = 'bike'
        content = content[:2] + 'bike' + content[3:]
        self.assertEquals(content, blip.text)
        url = 'http://www.test.com/image.png'
        blip.append(element.Image(url=url))
        self.assertEqual(url, blip.first(element.Image).url)
        url2 = 'http://www.test.com/another.png'
        blip[-1].update_element({'url': url2})
        self.assertEqual(url2, blip.first(element.Image).url)
        self.assertTrue(blip[3:5] == blip.text[3:5])
        blip.append('geheim')
        self.assertTrue(blip.first('geheim'))
        self.assertFalse(blip.first(element.Button))

    def testReplace(self):
        blip = self.new_blip(blipId=ROOT_BLIP_ID)
        blip.all().replace('\nxxxx')
        # Replacing a pattern that does not occur must be a no-op.
        blip.all('yyy').replace('zzz')
        self.assertEqual('\nxxxx', blip.text)

    def testDeleteRangeThatSpansAcrossAnnotationEndPoint(self):
        json = ('[{"range":{"start":1,"end":3},"name":"style","value":"bold"}]')
        blip = self.new_blip(blipId=ROOT_BLIP_ID,
                             annotations=simplejson.loads(json),
                             content='\nFoo bar.')
        blip.range(2, 4).delete()
        self.assertEqual('\nF bar.', blip.text)
        self.assertEqual(1, blip.annotations['style'][0].start)
        self.assertEqual(2, blip.annotations['style'][0].end)

    def testInsertBeforeAnnotationStartPoint(self):
        json = ('[{"range":{"start":4,"end":9},"name":"style","value":"bold"}]')
        blip = self.new_blip(blipId=ROOT_BLIP_ID,
                             annotations=simplejson.loads(json),
                             content='\nFoo bar.')
        blip.at(4).insert('d and')
        self.assertEqual('\nFood and bar.', blip.text)
        self.assertEqual(9, blip.annotations['style'][0].start)
        self.assertEqual(14, blip.annotations['style'][0].end)

    def testDeleteRangeInsideAnnotation(self):
        json = ('[{"range":{"start":1,"end":5},"name":"style","value":"bold"}]')
        blip = self.new_blip(blipId=ROOT_BLIP_ID,
                             annotations=simplejson.loads(json),
                             content='\nFoo bar.')
        blip.range(2, 4).delete()
        self.assertEqual('\nF bar.', blip.text)
        self.assertEqual(1, blip.annotations['style'][0].start)
        self.assertEqual(3, blip.annotations['style'][0].end)

    def testReplaceInsideAnnotation(self):
        json = ('[{"range":{"start":1,"end":5},"name":"style","value":"bold"}]')
        blip = self.new_blip(blipId=ROOT_BLIP_ID,
                             annotations=simplejson.loads(json),
                             content='\nFoo bar.')
        blip.range(2, 4).replace('ooo')
        self.assertEqual('\nFooo bar.', blip.text)
        self.assertEqual(1, blip.annotations['style'][0].start)
        self.assertEqual(6, blip.annotations['style'][0].end)
        blip.range(2, 5).replace('o')
        self.assertEqual('\nFo bar.', blip.text)
        self.assertEqual(1, blip.annotations['style'][0].start)
        self.assertEqual(4, blip.annotations['style'][0].end)

    def testReplaceSpanAnnotation(self):
        json = ('[{"range":{"start":1,"end":4},"name":"style","value":"bold"}]')
        blip = self.new_blip(blipId=ROOT_BLIP_ID,
                             annotations=simplejson.loads(json),
                             content='\nFoo bar.')
        blip.range(2, 9).replace('')
        self.assertEqual('\nF', blip.text)
        self.assertEqual(1, blip.annotations['style'][0].start)
        self.assertEqual(2, blip.annotations['style'][0].end)

    def testBlipsRemoveWithId(self):
        # Removing a child must also detach it from its parent's ids.
        blip_dict = {
            ROOT_BLIP_ID: self.new_blip(blipId=ROOT_BLIP_ID,
                                        childBlipIds=[CHILD_BLIP_ID]),
            CHILD_BLIP_ID: self.new_blip(blipId=CHILD_BLIP_ID,
                                         parentBlipId=ROOT_BLIP_ID)
        }
        blips = blip.Blips(blip_dict)
        blips._remove_with_id(CHILD_BLIP_ID)
        self.assertEqual(1, len(blips))
        self.assertEqual(0, len(blips[ROOT_BLIP_ID].child_blip_ids))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines event types that are sent from the wave server.
This module defines all of the event types currently supported by the wave
server. Each event type is sub classed from Event and has its own
properties depending on the type.
"""
class Context(object):
    """Specifies constants representing different context requests.

    These values tell the server which extra blips to include in the
    event payload delivered to the robot.
    """
    #: Requests the root blip.
    ROOT = 'ROOT'
    #: Requests the parent blip of the event blip.
    PARENT = 'PARENT'
    #: Requests the siblings blip of the event blip.
    SIBLINGS = 'SIBLINGS'
    #: Requests the child blips of the event blip.
    CHILDREN = 'CHILDREN'
    #: Requests the event blip itself.
    SELF = 'SELF'
    #: Requests all of the blips of the event wavelet.
    ALL = 'ALL'
class Event(object):
    """Object describing a single event received from the wave server.

    Attributes:
        modified_by: Participant id that caused this event.
        timestamp: Timestamp that this event occurred on the server.
        type: Type string of this event.
        properties: Dictionary of all extra properties. Typically the
            derived event type should have these explicitly set as
            attributes, but experimental features might appear in
            properties before that.
        blip_id: The blip_id of the blip for blip related events or the
            root blip for wavelet related events.
        blip: If available, the blip with id equal to the event's blip_id.
        proxying_for: If available, the proxyingFor id of the robot that
            caused the event.
    """

    def __init__(self, json, wavelet):
        """Inits this event with JSON data.

        Args:
            json: JSON data from Wave server.
            wavelet: the wavelet the event occurred on; used to resolve
                blip_id to an actual blip.
        """
        properties = json.get('properties', {})
        self.modified_by = json.get('modifiedBy')
        self.timestamp = json.get('timestamp', 0)
        self.type = json.get('type')
        self.raw_data = json
        self.properties = properties
        self.blip_id = properties.get('blipId')
        self.blip = wavelet.blips.get(self.blip_id)
        self.proxying_for = json.get('proxyingFor')
class WaveletBlipCreated(Event):
    """Event triggered when a new blip is created.

    Attributes:
        new_blip_id: The id of the newly created blip.
        new_blip: If in context, the actual new blip.
    """
    type = 'WAVELET_BLIP_CREATED'

    def __init__(self, json, wavelet):
        """Resolve the newly created blip from the event properties."""
        super(WaveletBlipCreated, self).__init__(json, wavelet)
        created_id = self.properties['newBlipId']
        self.new_blip_id = created_id
        self.new_blip = wavelet.blips.get(created_id)
class WaveletBlipRemoved(Event):
    """Event triggered when a blip is removed.

    Attributes:
        removed_blip_id: The id of the removed blip.
        removed_blip: If in context, the removed blip.
    """
    type = 'WAVELET_BLIP_REMOVED'

    def __init__(self, json, wavelet):
        """Resolve the removed blip from the event properties."""
        super(WaveletBlipRemoved, self).__init__(json, wavelet)
        removed_id = self.properties['removedBlipId']
        self.removed_blip_id = removed_id
        self.removed_blip = wavelet.blips.get(removed_id)
class WaveletParticipantsChanged(Event):
    """Event triggered when the participants on a wave change.

    Attributes:
        participants_added: List of participants added.
        participants_removed: List of participants removed.
    """
    type = 'WAVELET_PARTICIPANTS_CHANGED'

    def __init__(self, json, wavelet):
        """Pull the added/removed participant lists out of the payload."""
        super(WaveletParticipantsChanged, self).__init__(json, wavelet)
        props = self.properties
        self.participants_added = props['participantsAdded']
        self.participants_removed = props['participantsRemoved']
class WaveletSelfAdded(Event):
    """Event triggered when this robot is added to the wavelet."""
    type = 'WAVELET_SELF_ADDED'
class WaveletSelfRemoved(Event):
    """Event triggered when this robot is removed from the wavelet."""
    type = 'WAVELET_SELF_REMOVED'
class WaveletTitleChanged(Event):
    """Event triggered when the title of the wavelet has changed.

    Attributes:
        title: The new title.
    """
    type = 'WAVELET_TITLE_CHANGED'

    def __init__(self, json, wavelet):
        """Extract the new title from the event payload."""
        super(WaveletTitleChanged, self).__init__(json, wavelet)
        self.title = self.properties['title']
class BlipContributorsChanged(Event):
    """Event triggered when the contributors to this blip change.

    Attributes:
        contributors_added: List of contributors that were added.
        contributors_removed: List of contributors that were removed.
        contibutors_added: Deprecated misspelled alias of
            contributors_added, kept for backward compatibility.
        contibutors_removed: Deprecated misspelled alias of
            contributors_removed, kept for backward compatibility.
    """
    type = 'BLIP_CONTRIBUTORS_CHANGED'

    def __init__(self, json, wavelet):
        super(BlipContributorsChanged, self).__init__(json, wavelet)
        # Correctly spelled attributes; the original code only set the
        # misspelled 'contibutors_*' names.
        self.contributors_added = self.properties['contributorsAdded']
        self.contributors_removed = self.properties['contributorsRemoved']
        # Keep the historical misspellings so existing callers still work.
        self.contibutors_added = self.contributors_added
        self.contibutors_removed = self.contributors_removed
class BlipSubmitted(Event):
    """Event triggered when a blip is submitted."""
    type = 'BLIP_SUBMITTED'
class DocumentChanged(Event):
    """Event triggered when a document is changed.

    This event is fired after any change in the document; use it
    carefully (ideally with filters) to keep the amount of traffic to
    the robot reasonable.
    """
    type = 'DOCUMENT_CHANGED'
class FormButtonClicked(Event):
    """Event triggered when a form button is clicked.

    Attributes:
        button_name: The name of the button that was clicked.
    """
    type = 'FORM_BUTTON_CLICKED'

    def __init__(self, json, wavelet):
        """Extract the clicked button's name from the payload."""
        super(FormButtonClicked, self).__init__(json, wavelet)
        self.button_name = self.properties['buttonName']
class GadgetStateChanged(Event):
    """Event triggered when the state of a gadget changes.

    Attributes:
        index: The index of the gadget that changed in the document.
        old_state: The old state of the gadget.
    """
    type = 'GADGET_STATE_CHANGED'

    def __init__(self, json, wavelet):
        """Extract the gadget index and its previous state."""
        super(GadgetStateChanged, self).__init__(json, wavelet)
        props = self.properties
        self.index = props['index']
        self.old_state = props['oldState']
class AnnotatedTextChanged(Event):
    """Event triggered when text with an annotation has changed.

    Mainly useful in combination with a filter on the annotation name.

    Attributes:
        name: The name of the annotation.
        value: The value of the annotation that changed (may be absent).
    """
    type = 'ANNOTATED_TEXT_CHANGED'

    def __init__(self, json, wavelet):
        """Extract the annotation name and (optional) value."""
        super(AnnotatedTextChanged, self).__init__(json, wavelet)
        self.name = self.properties['name']
        self.value = self.properties.get('value')
class OperationError(Event):
    """Triggered when an error occurred on the server.

    Attributes:
        operation_id: The operation id of the failing operation.
        error_message: More information as to what went wrong.
    """
    type = 'OPERATION_ERROR'

    def __init__(self, json, wavelet):
        """Extract the failing operation id and the error text."""
        super(OperationError, self).__init__(json, wavelet)
        props = self.properties
        self.operation_id = props['operationId']
        self.error_message = props['errorMessage']
class WaveletCreated(Event):
    """Triggered when a new wavelet is created.

    Only fires for wavelets the robot itself creates, and can be used to
    initialize the newly created wave. Wavelets created by other
    participants remain invisible to the robot until it is added to the
    wave, in which case WaveletSelfAdded is triggered.

    Attributes:
        message: Whatever string was passed into the new_wave call as
            message (if any).
    """
    type = 'WAVELET_CREATED'

    def __init__(self, json, wavelet):
        """Extract the creation message from the payload."""
        super(WaveletCreated, self).__init__(json, wavelet)
        self.message = self.properties['message']
class WaveletFetched(Event):
    """Triggered when a new wavelet is fetched.

    Fires after a robot requests to see another wavelet; the robot must
    already be a participant on that wavelet.

    Attributes:
        message: Whatever string was passed into the new_wave call as
            message (if any).
    """
    type = 'WAVELET_FETCHED'

    def __init__(self, json, wavelet):
        """Extract the fetch message from the payload."""
        super(WaveletFetched, self).__init__(json, wavelet)
        self.message = self.properties['message']
def is_event(cls):
    """Return True when *cls* is a concrete event class.

    A concrete event is a subclass of Event that declares a 'type'
    attribute (the wire name of the event).
    """
    try:
        return issubclass(cls, Event) and hasattr(cls, 'type')
    except TypeError:
        # issubclass raises TypeError for non-class arguments.
        return False
ALL = [item for item in globals().copy().values() if is_event(item)]
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the element module."""
import unittest
import element
import util
class TestElement(unittest.TestCase):
    """Tests for the element.Element class."""

    def testProperties(self):
        # Arbitrary keyword properties become attributes on the element.
        el = element.Element(element.Gadget.type,
                             key='value')
        self.assertEquals('value', el.key)

    def testFormElement(self):
        el = element.Input('input', label='label')
        self.assertEquals(element.Input.type, el.type)
        self.assertEquals(el.value, '')
        self.assertEquals(el.name, 'input')
        self.assertEquals(el.label, 'label')

    def testImage(self):
        image = element.Image('http://test.com/image.png', width=100, height=100)
        self.assertEquals(element.Image.type, image.type)
        self.assertEquals(image.url, 'http://test.com/image.png')
        self.assertEquals(image.width, 100)
        self.assertEquals(image.height, 100)

    def testGadget(self):
        gadget = element.Gadget('http://test.com/gadget.xml')
        self.assertEquals(element.Gadget.type, gadget.type)
        self.assertEquals(gadget.url, 'http://test.com/gadget.xml')

    def testInstaller(self):
        installer = element.Installer('http://test.com/installer.xml')
        self.assertEquals(element.Installer.type, installer.type)
        self.assertEquals(installer.manifest, 'http://test.com/installer.xml')

    def testSerialize(self):
        image = element.Image('http://test.com/image.png', width=100, height=100)
        s = util.serialize(image)
        # NOTE(review): k is computed but never used below; the in-place
        # sort assumes Python 2's list-returning keys(). Left unchanged.
        k = s.keys()
        k.sort()
        # we should really only have three things to serialize
        props = s['properties']
        self.assertEquals(len(props), 3)
        self.assertEquals(props['url'], 'http://test.com/image.png')
        self.assertEquals(props['width'], 100)
        self.assertEquals(props['height'], 100)

    def testGadgetElementFromJson(self):
        url = 'http://www.foo.com/gadget.xml'
        json = {
            'type': element.Gadget.type,
            'properties': {
                'url': url,
            }
        }
        gadget = element.Element.from_json(json)
        self.assertEquals(element.Gadget.type, gadget.type)
        self.assertEquals(url, gadget.url)

    def testImageElementFromJson(self):
        url = 'http://www.foo.com/image.png'
        width = '32'
        height = '32'
        attachment_id = '2'
        caption = 'Test Image'
        json = {
            'type': element.Image.type,
            'properties': {
                'url': url,
                'width': width,
                'height': height,
                'attachmentId': attachment_id,
                'caption': caption,
            }
        }
        image = element.Element.from_json(json)
        self.assertEquals(element.Image.type, image.type)
        self.assertEquals(url, image.url)
        self.assertEquals(width, image.width)
        self.assertEquals(height, image.height)
        self.assertEquals(attachment_id, image.attachmentId)
        self.assertEquals(caption, image.caption)

    def testFormElementFromJson(self):
        name = 'button'
        value = 'value'
        default_value = 'foo'
        json = {
            'type': element.Label.type,
            'properties': {
                'name': name,
                'value': value,
                'defaultValue': default_value,
            }
        }
        el = element.Element.from_json(json)
        self.assertEquals(element.Label.type, el.type)
        self.assertEquals(name, el.name)
        self.assertEquals(value, el.value)

    def testCanInstantiate(self):
        # One instance of every element type; the set of constructed
        # types must exactly match element.ALL.
        bag = [element.Check(name='check', value='value'),
               element.Button(name='button', caption='caption'),
               element.Input(name='input', value='caption'),
               element.Label(label_for='button', caption='caption'),
               element.RadioButton(name='name', group='group'),
               element.RadioButtonGroup(name='name', value='value'),
               element.Password(name='name', value='geheim'),
               element.TextArea(name='name', value='\n\n\n'),
               element.Installer(manifest='test.com/installer.xml'),
               element.Line(line_type='type',
                            indent='3',
                            alignment='r',
                            direction='d'),
               element.Gadget(url='test.com/gadget.xml',
                              props={'key1': 'val1', 'key2': 'val2'}),
               element.Image(url='test.com/image.png', width=100, height=200)]
        types_constructed = set([type(x) for x in bag])
        types_required = set(element.ALL.values())
        self.assertEquals(types_required, types_constructed)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elements are non-text bits living in blips like images, gadgets etc.
This module defines the Element class and the derived classes.
"""
import logging
import sys
import util
class Element(object):
    """Elements are non-text content within a document.

    These are generally abstracted from the Robot. Although a Robot can
    query the properties of an element it can only interact with the
    specific types that the element represents.

    Properties of elements are both accessible directly (image.url) and
    through the properties dictionary (image.properties['url']). In
    general Element should not be instantiated by robots, but rather
    rely on the derived classes.
    """

    def __init__(self, element_type, **properties):
        """Initializes self with the specified type and any properties.

        Args:
            element_type: string typed member of ELEMENT_TYPE
            properties: either a dictionary of initial properties, or a
                dictionary with just one member 'properties' that is
                itself a dictionary of properties. This allows us to
                both use
                    e = Element(atype, prop1=val1, prop2=prop2...)
                and
                    e = Element(atype, properties={prop1:val1, ...})
        """
        # TODO: don't use setattr
        if 'properties' in properties and len(properties) == 1:
            properties = properties['properties']
        self.type = element_type
        # As long as the operation_queue of an element is None, it is
        # unattached. After an element is acquired by a blip, the blip
        # will set the operation_queue to make sure all changes to the
        # element are properly sent to the server.
        self._operation_queue = None
        for name, value in properties.items():
            setattr(self, name, value)

    @classmethod
    def from_json(cls, json):
        """Class method to instantiate an Element based on a json string."""
        element_type = json['type']
        props = json['properties'].copy()
        element_class = ALL.get(element_type)
        if element_class is None:
            # Unknown type. Server could be newer than we are; fall back
            # to a generic Element carrying the raw properties.
            return Element(element_type=element_type, properties=props)
        return element_class.from_props(props)

    def get(self, key, default=None):
        """Standard get interface."""
        return getattr(self, key, default)

    def serialize(self):
        """Custom serializer for Elements.

        Elements need their non-standard attributes returned in a dict
        named properties.
        """
        props = {}
        data = {}
        for attr in dir(self):
            if attr.startswith('_'):
                continue
            val = getattr(self, attr)
            if val is None or callable(val):
                continue
            # 'type' lives at the top level; everything else under
            # 'properties'.
            target = data if attr == 'type' else props
            target[attr] = util.serialize(val)
        data['properties'] = util.serialize(props)
        return data
class Input(Element):
    """A single-line input element."""
    type = 'INPUT'

    def __init__(self, name, value='', label=''):
        """Create an input field; default_value mirrors the initial value."""
        super(Input, self).__init__(Input.type,
                                    name=name,
                                    value=value,
                                    default_value=value,
                                    label=label)

    @classmethod
    def from_props(cls, props):
        """Construct an Input from a server-supplied property dict."""
        return Input(name=props['name'], value=props['value'],
                     label=props['label'])
class Check(Element):
    """A checkbox element."""
    type = 'CHECK'

    def __init__(self, name, value=''):
        """Create a checkbox; default_value mirrors the initial value."""
        super(Check, self).__init__(Check.type,
                                    name=name, value=value,
                                    default_value=value)

    @classmethod
    def from_props(cls, props):
        """Construct a Check from a server-supplied property dict."""
        return Check(name=props['name'], value=props['value'])
class Button(Element):
    """A button element."""
    type = 'BUTTON'

    def __init__(self, name, caption):
        """Create a button; its caption is stored as the value."""
        super(Button, self).__init__(Button.type,
                                     name=name, value=caption)

    @classmethod
    def from_props(cls, props):
        """Construct a Button from a server-supplied property dict."""
        return Button(name=props['name'], caption=props['value'])
class Label(Element):
    """A label element."""
    type = 'LABEL'

    def __init__(self, label_for, caption):
        """Create a label tied to *label_for*; caption is the value."""
        super(Label, self).__init__(Label.type,
                                    name=label_for, value=caption)

    @classmethod
    def from_props(cls, props):
        """Construct a Label from a server-supplied property dict."""
        return Label(label_for=props['name'], caption=props['value'])
class RadioButton(Element):
    """A radio button element."""
    type = 'RADIO_BUTTON'

    def __init__(self, name, group):
        """Create a radio button; its group name is stored as the value."""
        super(RadioButton, self).__init__(RadioButton.type,
                                          name=name, value=group)

    @classmethod
    def from_props(cls, props):
        """Construct a RadioButton from a server-supplied property dict."""
        return RadioButton(name=props['name'], group=props['value'])
class RadioButtonGroup(Element):
  """A group of radio buttons."""
  type = 'RADIO_BUTTON_GROUP'

  def __init__(self, name, value):
    """Initializes the group holding the selected value."""
    super(RadioButtonGroup, self).__init__(
        RadioButtonGroup.type, name=name, value=value)

  @classmethod
  def from_props(cls, props):
    """Builds a RadioButtonGroup from a server-supplied properties dict."""
    return cls(name=props['name'], value=props['value'])
class Password(Element):
  """A password element."""
  type = 'PASSWORD'

  def __init__(self, name, value):
    """Initializes a password field with a name and value."""
    super(Password, self).__init__(
        Password.type, name=name, value=value)

  @classmethod
  def from_props(cls, props):
    """Builds a Password from a server-supplied properties dict."""
    return cls(name=props['name'], value=props['value'])
class TextArea(Element):
  """A text area element."""
  type = 'TEXTAREA'

  def __init__(self, name, value):
    """Initializes a multi-line text area with a name and value."""
    super(TextArea, self).__init__(
        TextArea.type, name=name, value=value)

  @classmethod
  def from_props(cls, props):
    """Builds a TextArea from a server-supplied properties dict."""
    return cls(name=props['name'], value=props['value'])
class Line(Element):
  """A line element.

  Note that Lines are represented in the text as newlines.
  """
  type = 'LINE'

  def __init__(self, line_type=None, indent=None, alignment=None,
               direction=None):
    """Initializes a line; every styling property is optional."""
    # The server-side property name is camelCased ('lineType').
    super(Line, self).__init__(Line.type,
                               lineType=line_type,
                               indent=indent,
                               alignment=alignment,
                               direction=direction)

  @classmethod
  def from_props(cls, props):
    """Builds a Line from a server-supplied properties dict."""
    return cls(line_type=props.get('lineType'),
               indent=props.get('indent'),
               alignment=props.get('alignment'),
               direction=props.get('direction'))
class Gadget(Element):
  """A gadget element."""
  type = 'GADGET'

  def __init__(self, url, props=None):
    """Initializes a gadget.

    Args:
      url: the url of the gadget xml.
      props: optional dict of additional gadget properties. The dict is
          copied so the caller's mapping is never mutated (the previous
          implementation wrote 'url' into the caller's dict).
    """
    if props is None:
      props = {}
    else:
      props = dict(props)
    props['url'] = url
    super(Gadget, self).__init__(Gadget.type, properties=props)

  @classmethod
  def from_props(cls, props):
    """Builds a Gadget from a server-supplied properties dict."""
    return cls(props.get('url'), props)
class Installer(Element):
  """An installer element."""
  type = 'INSTALLER'

  def __init__(self, manifest):
    """Initializes an installer pointing at its manifest."""
    super(Installer, self).__init__(Installer.type, manifest=manifest)

  @classmethod
  def from_props(cls, props):
    """Builds an Installer from a server-supplied properties dict."""
    return cls(props.get('manifest'))
class Image(Element):
  """An image element."""
  type = 'IMAGE'

  def __init__(self, url='', width=None, height=None,
               attachmentId=None, caption=None):
    """Initializes an image; all properties are optional."""
    super(Image, self).__init__(Image.type, url=url, width=width,
        height=height, attachmentId=attachmentId, caption=caption)

  @classmethod
  def from_props(cls, props):
    """Builds an Image from a server-supplied properties dict."""
    # Keyword argument names must be byte strings on Python 2.
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    logging.info('from_props=' + str(props))
    # apply() has been deprecated since Python 2.3; expand the properties
    # directly as keyword arguments instead.
    return cls(**props)
def is_element(cls):
  """Returns whether the passed class is an element."""
  try:
    # An element is any Element subclass that declares a wire 'type'.
    return issubclass(cls, Element) and hasattr(cls, 'type')
  except TypeError:
    # Not a class at all.
    return False
# Registry mapping wire type strings (e.g. 'INPUT') to their element
# classes; used by Element.from_json to resolve incoming element types.
ALL = dict([(item.type, item) for item in globals().copy().values()
            if is_element(item)])
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the wavelet module."""
import unittest
import blip
import ops
import wavelet
# Address used as both the robot under test and the wavelet creator.
ROBOT_NAME = 'robot@appspot.com'

# Canned wavelet metadata used as a fixture by the tests below.
TEST_WAVELET_DATA = {
    'creator': ROBOT_NAME,
    'creationTime': 100,
    'lastModifiedTime': 101,
    'participants': [ROBOT_NAME],
    'rootBlipId': 'blip-1',
    'title': 'Title',
    'waveId': 'test.com!w+g3h3im',
    'waveletId': 'test.com!root+conv',
    'tags': ['tag1', 'tag2'],
}

# Canned root-blip payload, kept consistent with TEST_WAVELET_DATA.
TEST_BLIP_DATA = {
    'blipId': TEST_WAVELET_DATA['rootBlipId'],
    'childBlipIds': [],
    'content': '\ntesting',
    'contributors': [TEST_WAVELET_DATA['creator'], 'robot@google.com'],
    'creator': TEST_WAVELET_DATA['creator'],
    'lastModifiedTime': TEST_WAVELET_DATA['lastModifiedTime'],
    'parentBlipId': None,
    'waveId': TEST_WAVELET_DATA['waveId'],
    'elements': {},
    'waveletId': TEST_WAVELET_DATA['waveletId'],
}
class TestWavelet(unittest.TestCase):
  """Tests the wavelet class."""

  def setUp(self):
    # Build a one-blip wavelet from the canned fixture data above. The
    # robot argument is None; only the robot_address is needed here.
    self.operation_queue = ops.OperationQueue()
    self.all_blips = {}
    self.blip = blip.Blip(TEST_BLIP_DATA,
                          self.all_blips,
                          self.operation_queue)
    self.all_blips[self.blip.blip_id] = self.blip
    self.wavelet = wavelet.Wavelet(TEST_WAVELET_DATA,
                                   self.all_blips,
                                   None,
                                   self.operation_queue)
    self.wavelet.robot_address = ROBOT_NAME

  def testWaveletProperties(self):
    # Every fixture field should round-trip into the matching property.
    w = self.wavelet
    self.assertEquals(TEST_WAVELET_DATA['creator'], w.creator)
    self.assertEquals(TEST_WAVELET_DATA['creationTime'], w.creation_time)
    self.assertEquals(TEST_WAVELET_DATA['lastModifiedTime'],
                      w.last_modified_time)
    self.assertEquals(len(TEST_WAVELET_DATA['participants']),
                      len(w.participants))
    self.assertTrue(TEST_WAVELET_DATA['participants'][0] in w.participants)
    self.assertEquals(TEST_WAVELET_DATA['rootBlipId'], w.root_blip.blip_id)
    self.assertEquals(TEST_WAVELET_DATA['title'], w.title)
    self.assertEquals(TEST_WAVELET_DATA['waveId'], w.wave_id)
    self.assertEquals(TEST_WAVELET_DATA['waveletId'], w.wavelet_id)
    # The domain is the part of the wave id before the '!'.
    self.assertEquals('test.com', w.domain)

  def testWaveletMethods(self):
    w = self.wavelet
    # reply() appends a blip; delete() removes it again.
    reply = w.reply()
    self.assertEquals(2, len(w.blips))
    w.delete(reply)
    self.assertEquals(1, len(w.blips))
    # Data documents behave like a dict; setting None deletes the entry.
    self.assertEquals(0, len(w.data_documents))
    self.wavelet.data_documents['key'] = 'value'
    self.assert_('key' in w.data_documents)
    self.assertEquals(1, len(w.data_documents))
    self.wavelet.data_documents['key'] = None
    self.assertEquals(0, len(w.data_documents))
    num_participants = len(w.participants)
    w.proxy_for('proxy').reply()
    self.assertEquals(2, len(w.blips))
    # check that the new proxy for participant was added
    self.assertEquals(num_participants + 1, len(w.participants))
    # A robot address that already carries a +proxy suffix must not add a
    # second proxying participant.
    w._robot_address = ROBOT_NAME.replace('@', '+proxy@')
    w.proxy_for('proxy').reply()
    self.assertEquals(num_participants + 1, len(w.participants))
    self.assertEquals(3, len(w.blips))

  def testSetTitle(self):
    # Setting the title queues a wavelet.setTitle op and rewrites the
    # first line of the root blip's content.
    self.blip._content = '\nOld title\n\nContent'
    self.wavelet.title = 'New title'
    self.assertEquals(1, len(self.operation_queue))
    self.assertEquals('wavelet.setTitle',
                      self.operation_queue.serialize()[1]['method'])
    self.assertEquals('\nNew title\n\nContent', self.blip._content)

  def testSetTitleAdjustRootBlipWithOneLineProperly(self):
    # Title-only root blip: no body content should be appended.
    self.blip._content = '\nOld title'
    self.wavelet.title = 'New title'
    self.assertEquals(1, len(self.operation_queue))
    self.assertEquals('wavelet.setTitle',
                      self.operation_queue.serialize()[1]['method'])
    self.assertEquals('\nNew title', self.blip._content)

  def testSetTitleAdjustEmptyRootBlipProperly(self):
    # Empty root blip: the title becomes its only line.
    self.blip._content = '\n'
    self.wavelet.title = 'New title'
    self.assertEquals(1, len(self.operation_queue))
    self.assertEquals('wavelet.setTitle',
                      self.operation_queue.serialize()[1]['method'])
    self.assertEquals('\nNew title', self.blip._content)

  def testTags(self):
    w = self.wavelet
    self.assertEquals(2, len(w.tags))
    w.tags.append('tag3')
    self.assertEquals(3, len(w.tags))
    # Appending a duplicate tag is a no-op.
    w.tags.append('tag3')
    self.assertEquals(3, len(w.tags))
    w.tags.remove('tag1')
    self.assertEquals(2, len(w.tags))
    self.assertEquals('tag2', w.tags[0])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains various API-specific exception classes.
This module contains various specific exception classes that are raised by
the library back to the client.
"""
class Error(Exception):
  """Base library error type.

  Specific errors raised by this library derive from this class.
  """
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility library containing various helpers used by the API."""
import re
# Name of the method serialize() looks up to let objects customize their
# own serialization.
CUSTOM_SERIALIZE_METHOD_NAME = 'serialize'
# Matches a single markup tag, capturing its contents (non-greedy).
MARKUP_RE = re.compile(r'<([^>]*?)>')
def parse_markup(markup):
  """Parses a bit of markup into robot compatible text.

  <p> and <br> tags (with or without attributes) become newlines; every
  other tag is dropped. For now this is a rough approximation.
  """
  def replace_tag(match):
    groups = match.groups()
    # Bug fix: the old code tested the bound method 'match.groups' itself,
    # which is always truthy; the method has to be called.
    if not groups:
      return ''
    # First whitespace-delimited token is the tag name; attributes ignored.
    tag = groups[0].split(' ', 1)[0]
    if tag == 'p' or tag == 'br':
      return '\n'
    return ''
  return MARKUP_RE.sub(replace_tag, markup)
def is_iterable(inst):
  """Returns whether or not this is a list, tuple, set or dict.

  Note that this does not return true for strings.
  """
  # Duck-typed check: anything exposing __iter__ counts as iterable.
  return getattr(inst, '__iter__', None) is not None
def is_dict(inst):
  """Returns whether or not the specified instance is a dict."""
  # Duck-typed: dict-like objects expose iteritems (Python 2).
  return getattr(inst, 'iteritems', None) is not None
def is_user_defined_new_style_class(obj):
  """Returns whether or not the specified instance is a user-defined type."""
  # Built-in types live in the __builtin__ module (Python 2).
  module = type(obj).__module__
  return module != '__builtin__'
def lower_camel_case(s):
  """Converts a string to lower camel case.

  Examples:
    foo => foo
    foo_bar => fooBar
    foo__bar => fooBar
    foo_bar_baz => fooBarBaz

  Args:
    s: The string to convert to lower camel case.

  Returns:
    The lower camel cased string.
  """
  result = ''
  for fragment in s.split('_'):
    if result:
      # Every fragment after the first non-empty one is capitalized;
      # empty fragments (from doubled underscores) contribute nothing.
      result += fragment.capitalize()
    else:
      result += fragment
  return result
def upper_camel_case(s):
  """Converts a string to upper camel case.

  Examples:
    foo => Foo
    foo_bar => FooBar
    foo__bar => FooBar
    foo_bar_baz => FooBarBaz

  Args:
    s: The string to convert to upper camel case.

  Returns:
    The upper camel cased string.
  """
  capitalized = [fragment.capitalize() for fragment in s.split('_')]
  return ''.join(capitalized)
def default_keywriter(key_name):
  """This key writer rewrites keys as lower camel case.

  Expects that the input is formed by '_' delimited words.

  Args:
    key_name: Name of the key to serialize.

  Returns:
    Key name in lower camel-cased form.
  """
  return lower_camel_case(key_name)
def _serialize_attributes(obj, key_writer=default_keywriter):
  """Serializes attributes of an instance.

  Iterates all attributes of an object and invokes serialize if they are
  public and not callable.

  Args:
    obj: The instance to serialize.
    key_writer: Optional function that takes a string key and optionally
        mutates it before serialization.

  Returns:
    The serialized object as a dict.
  """
  result = {}
  for name in dir(obj):
    # Private attributes, unset (None) values and methods are skipped.
    if name.startswith('_'):
      continue
    value = getattr(obj, name)
    if value is None or callable(value):
      continue
    result[key_writer(name)] = serialize(value)
  return result
def _serialize_dict(d, key_writer=default_keywriter):
  """Invokes serialize on all of its key/value pairs.

  Args:
    d: The dict instance to serialize.
    key_writer: Optional key writer function.

  Returns:
    The serialized dict.
  """
  return dict([(key_writer(k), serialize(v)) for k, v in d.items()])
def serialize(obj, key_writer=default_keywriter):
  """Serializes any instance.

  If this is a user-defined instance type, it will first check for a custom
  serialize() method and use that if it exists. Otherwise, it will invoke
  serialize on all of its public attributes. Lists and dicts are serialized
  trivially.

  Args:
    obj: The instance to serialize.
    key_writer: Optional key writer function.

  Returns:
    The serialized object.
  """
  if is_user_defined_new_style_class(obj):
    # Prefer the object's own custom serializer when one is available.
    custom = getattr(obj, CUSTOM_SERIALIZE_METHOD_NAME, None) if obj else None
    if callable(custom):
      return custom()
    return _serialize_attributes(obj, key_writer)
  if is_dict(obj):
    return _serialize_dict(obj, key_writer)
  if is_iterable(obj):
    return [serialize(v) for v in obj]
  # Primitives (strings, numbers, None) pass through unchanged.
  return obj
class StringEnum(object):
  """Enum like class that is configured with a list of values.

  This class effectively implements an enum for Elements, except for that
  the actual values of the enums will be the string values.
  """

  def __init__(self, *values):
    # Each value becomes an attribute whose value is its own name.
    for value in values:
      setattr(self, value, value)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines classes that are needed to model a wavelet."""
import blip
import errors
class DataDocs(object):
  """Class modeling a bunch of data documents in a pythonic way."""

  def __init__(self, init_docs, wave_id, wavelet_id, operation_queue):
    self._operation_queue = operation_queue
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._docs = init_docs

  def __contains__(self, key):
    return key in self._docs

  def __delitem__(self, key):
    if key not in self._docs:
      return
    # Queue the server-side clear before dropping the local copy.
    self._operation_queue.wavelet_datadoc_set(
        self._wave_id, self._wavelet_id, key, None)
    del self._docs[key]

  def __getitem__(self, key):
    return self._docs[key]

  def __setitem__(self, key, value):
    self._operation_queue.wavelet_datadoc_set(
        self._wave_id, self._wavelet_id, key, value)
    if value is None and key in self._docs:
      # Assigning None to an existing doc removes it.
      del self._docs[key]
    else:
      self._docs[key] = value

  def __len__(self):
    return len(self._docs)

  def serialize(self):
    """Returns a dictionary of the data documents."""
    return self._docs
class Participants(object):
  """Class modelling a set of participants in a pythonic way."""

  def __init__(self, participants, wave_id, wavelet_id, operation_queue):
    self._operation_queue = operation_queue
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._participants = set(participants)

  def __contains__(self, participant):
    return participant in self._participants

  def __len__(self):
    return len(self._participants)

  def __iter__(self):
    return iter(self._participants)

  def add(self, participant_id):
    """Adds a participant by their ID (address)."""
    # Queue the server-side add before updating the local set.
    self._operation_queue.wavelet_add_participant(
        self._wave_id, self._wavelet_id, participant_id)
    self._participants.add(participant_id)

  def serialize(self):
    """Returns a list of the participants."""
    return list(self._participants)
class Tags(object):
  """Class modelling a list of tags."""

  def __init__(self, tags, wave_id, wavelet_id, operation_queue):
    self._operation_queue = operation_queue
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._tags = list(tags)

  def __getitem__(self, index):
    return self._tags[index]

  def __len__(self):
    return len(self._tags)

  def __iter__(self):
    return iter(self._tags)

  def append(self, tag):
    """Appends a tag if it doesn't already exist."""
    if tag in self._tags:
      return
    self._operation_queue.wavelet_modify_tag(
        self._wave_id, self._wavelet_id, tag)
    self._tags.append(tag)

  def remove(self, tag):
    """Removes a tag if it exists."""
    if tag not in self._tags:
      return
    self._operation_queue.wavelet_modify_tag(
        self._wave_id, self._wavelet_id, tag, modify_how='remove')
    self._tags.remove(tag)

  def serialize(self):
    """Returns a list of tags."""
    return list(self._tags)
class Wavelet(object):
  """Models a single wavelet.

  A single wavelet is composed of metadata, participants, and its blips.
  To guarantee that all blips are available, specify Context.ALL for events.
  """

  def __init__(self, json, blips, robot, operation_queue):
    """Inits this wavelet with JSON data.

    Args:
      json: JSON data dictionary from Wave server.
      blips: a dictionary object that can be used to resolve blips.
      robot: the robot owning this wavelet.
      operation_queue: an OperationQueue object to be used to
          send any generated operations to.
    """
    self._robot = robot
    self._operation_queue = operation_queue
    self._wave_id = json.get('waveId')
    self._wavelet_id = json.get('waveletId')
    self._creator = json.get('creator')
    self._creation_time = json.get('creationTime', 0)
    self._data_documents = DataDocs(json.get('dataDocuments', {}),
                                    self._wave_id,
                                    self._wavelet_id,
                                    operation_queue)
    self._last_modified_time = json.get('lastModifiedTime')
    self._participants = Participants(json.get('participants', []),
                                      self._wave_id,
                                      self._wavelet_id,
                                      operation_queue)
    self._title = json.get('title', '')
    self._tags = Tags(json.get('tags', []),
                      self._wave_id,
                      self._wavelet_id,
                      operation_queue)
    self._raw_data = json
    self._blips = blip.Blips(blips)
    self._root_blip_id = json.get('rootBlipId')
    # Resolve the root blip only when it was actually supplied in blips.
    if self._root_blip_id and self._root_blip_id in self._blips:
      self._root_blip = self._blips[self._root_blip_id]
    else:
      self._root_blip = None
    self._robot_address = None

  @property
  def wavelet_id(self):
    """Returns this wavelet's id."""
    return self._wavelet_id

  @property
  def wave_id(self):
    """Returns this wavelet's parent wave id."""
    return self._wave_id

  @property
  def creator(self):
    """Returns the participant id of the creator of this wavelet."""
    return self._creator

  @property
  def creation_time(self):
    """Returns the time that this wavelet was first created in milliseconds."""
    return self._creation_time

  @property
  def data_documents(self):
    """Returns the data documents for this wavelet based on key name."""
    return self._data_documents

  @property
  def domain(self):
    """Return the domain that wavelet belongs to."""
    # Wave ids look like 'domain!id'; the domain is everything before '!'.
    p = self._wave_id.find('!')
    if p == -1:
      return None
    else:
      return self._wave_id[:p]

  @property
  def last_modified_time(self):
    """Returns the time that this wavelet was last modified in ms."""
    return self._last_modified_time

  @property
  def participants(self):
    """Returns a set of participants on this wavelet."""
    return self._participants

  @property
  def tags(self):
    """Returns a list of tags for this wavelet."""
    return self._tags

  @property
  def robot(self):
    """The robot that owns this wavelet."""
    return self._robot

  def _get_title(self):
    return self._title

  def _set_title(self, title):
    # Titles are single-line by definition; reject embedded newlines.
    if title.find('\n') != -1:
      raise errors.Error('Wavelet title should not contain a newline ' +
                         'character. Specified: ' + title)
    self._operation_queue.wavelet_set_title(self.wave_id, self.wavelet_id,
                                            title)
    self._title = title
    # Adjust the content of the root blip, if it is available in the context.
    # The first line of the root blip's content mirrors the wavelet title.
    if self._root_blip:
      content = ''
      splits = self._root_blip._content.split('\n', 2)
      if len(splits) == 3:
        content = '\n' + splits[2]
      self._root_blip._content = '\n' + title + content

  #: Returns or sets the wavelet's title.
  title = property(_get_title, _set_title)

  def _get_robot_address(self):
    return self._robot_address

  def _set_robot_address(self, address):
    # The robot address may be set only once per wavelet instance.
    if self._robot_address:
      raise errors.Error('robot address already set')
    self._robot_address = address

  """The address of the current robot."""
  robot_address = property(_get_robot_address, _set_robot_address)

  @property
  def root_blip(self):
    """Returns this wavelet's root blip."""
    return self._root_blip

  @property
  def blips(self):
    """Returns the blips for this wavelet."""
    return self._blips

  def get_operation_queue(self):
    """Returns the OperationQueue for this wavelet."""
    return self._operation_queue

  def serialize(self):
    """Return a dict of the wavelet properties."""
    return {'waveId': self._wave_id,
            'waveletId': self._wavelet_id,
            'creator': self._creator,
            'creationTime': self._creation_time,
            'dataDocuments': self._data_documents.serialize(),
            'lastModifiedTime': self._last_modified_time,
            'participants': self._participants.serialize(),
            'title': self._title,
            'blips': self._blips.serialize(),
            'rootBlipId': self._root_blip_id
           }

  def proxy_for(self, proxy_for_id):
    """Return a view on this wavelet that will proxy for the specified id.

    A shallow copy of the current wavelet is returned with the proxy_for_id
    set. Any modifications made to this copy will be done using the
    proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will
    be used.
    """
    self.add_proxying_participant(proxy_for_id)
    operation_queue = self.get_operation_queue().proxy_for(proxy_for_id)
    res = Wavelet(json={},
                  blips={},
                  robot=self.robot,
                  operation_queue=operation_queue)
    # Copy the state field by field. Mutable members (participants, blips,
    # data documents) are intentionally shared, not copied, so changes made
    # through the proxy view stay visible on the original wavelet.
    res._wave_id = self._wave_id
    res._wavelet_id = self._wavelet_id
    res._creator = self._creator
    res._creation_time = self._creation_time
    res._data_documents = self._data_documents
    res._last_modified_time = self._last_modified_time
    res._participants = self._participants
    res._title = self._title
    res._raw_data = self._raw_data
    res._blips = self._blips
    res._root_blip = self._root_blip
    return res

  def add_proxying_participant(self, id):
    """Adds a proxying participant to the wave.

    Proxying participants are of the form robot+proxy@domain.com. This
    convenience method constructs this id and then calls participants.add.
    """
    if not self.robot_address:
      raise errors.Error(
          'Need a robot address to add a proxying for participant')
    # Split the robot address into id, optional '#version' and domain.
    robotid, domain = self.robot_address.split('@', 1)
    if '#' in robotid:
      robotid, version = robotid.split('#')
    else:
      version = None
    # Replace any existing '+proxy' suffix with the requested one.
    if '+' in robotid:
      newid = robotid.split('+', 1)[0] + '+' + id
    else:
      newid = robotid + '+' + id
    if version:
      newid += '#' + version
    newid += '@' + domain
    self.participants.add(newid)

  def submit_with(self, other_wavelet):
    """Submit this wavelet when the passed other wavelet is submitted.

    Wavelets constructed outside of the event callback need to
    be either explicitly submitted using robot.submit(wavelet) or be
    associated with a different wavelet that will be submitted or
    is part of the event callback.
    """
    # From here on both wavelets share a single operation queue.
    other_wavelet._operation_queue.copy_operations(self._operation_queue)
    self._operation_queue = other_wavelet._operation_queue

  def reply(self, initial_content=None):
    """Replies to the conversation in this wavelet.

    Args:
      initial_content: if set, start with this content.

    Returns:
      A transient version of the blip that contains the reply.
    """
    if not initial_content:
      initial_content = '\n'
    blip_data = self._operation_queue.wavelet_append_blip(
        self.wave_id, self.wavelet_id, initial_content)
    instance = blip.Blip(blip_data, self._blips, self._operation_queue)
    self._blips._add(instance)
    return instance

  def delete(self, todelete):
    """Remove a blip from this wavelet.

    Args:
      todelete: either a blip or a blip id to be removed.
    """
    if isinstance(todelete, blip.Blip):
      blip_id = todelete.blip_id
    else:
      blip_id = todelete
    self._operation_queue.blip_delete(self.wave_id, self.wavelet_id, blip_id)
    self._blips._remove_with_id(blip_id)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import element
import errors
import util
class Annotation(object):
  """Models an annotation on a document.

  Annotations are key/value pairs over a range of content. Annotations
  can be used to store data or to be interpreted by a client when displaying
  the data.
  """

  # Reserved annotation keys that control how clients render text:
  #: Reserved annotation for setting background color of text.
  BACKGROUND_COLOR = "style/backgroundColor"
  #: Reserved annotation for setting color of text.
  COLOR = "style/color"
  #: Reserved annotation for setting font family of text.
  FONT_FAMILY = "style/fontFamily"
  #: Reserved annotation for setting font size of text.
  FONT_SIZE = "style/fontSize"
  #: Reserved annotation for setting font style of text.
  FONT_STYLE = "style/fontStyle"
  #: Reserved annotation for setting font weight of text.
  FONT_WEIGHT = "style/fontWeight"
  #: Reserved annotation for setting text decoration.
  TEXT_DECORATION = "style/textDecoration"
  #: Reserved annotation for setting vertical alignment.
  VERTICAL_ALIGN = "style/verticalAlign"

  def __init__(self, name, value, start, end):
    self._name = name
    self._value = value
    self._start = start
    self._end = end

  @property
  def name(self):
    """The annotation key, e.g. 'style/color'."""
    return self._name

  @property
  def value(self):
    """The value this annotation carries over its range."""
    return self._value

  @property
  def start(self):
    """Start index of the annotated range (inclusive)."""
    return self._start

  @property
  def end(self):
    """End index of the annotated range (exclusive)."""
    return self._end

  def _shift(self, where, inc):
    """Shift annotation by 'inc' if it (partly) overlaps with 'where'."""
    # The two bounds are adjusted independently; each moves only when it
    # lies at or beyond the shift point.
    if self._end >= where:
      self._end += inc
    if self._start >= where:
      self._start += inc

  def serialize(self):
    """Serializes the annotation.

    Returns:
      A dict containing the name, value, and range values.
    """
    result = {'name': self._name, 'value': self._value}
    result['range'] = {'start': self._start, 'end': self._end}
    return result
class Annotations(object):
  """A dictionary-like object containing the annotations, keyed by name."""

  def __init__(self, operation_queue, blip):
    self._operation_queue = operation_queue
    self._blip = blip
    # Maps annotation name -> list of Annotation instances.
    self._store = {}

  def __contains__(self, what):
    if isinstance(what, Annotation):
      what = what.name
    return what in self._store

  def _add_internal(self, name, value, start, end):
    """Internal add annotation; does not send out operations."""
    if name not in self._store:
      self._store[name] = [Annotation(name, value, start, end)]
      return
    # TODO: use bisect to make this more efficient.
    new_list = []
    for existing in self._store[name]:
      if start > existing.end or end < existing.start:
        # Disjoint: keep the existing annotation untouched.
        new_list.append(existing)
      elif existing.value == value:
        # Same value: grow the new range to swallow the existing one.
        start = min(existing.start, start)
        end = max(existing.end, end)
      else:
        # Different value: keep only the parts sticking out on either side.
        if existing.start < start:
          new_list.append(Annotation(
              existing.name, existing.value, existing.start, start))
        if existing.end > end:
          # Bug fix: the surviving tail spans (end, existing.end); the old
          # code constructed the inverted range (existing.end, end).
          new_list.append(Annotation(
              existing.name, existing.value, end, existing.end))
    new_list.append(Annotation(name, value, start, end))
    self._store[name] = new_list

  def _delete_internal(self, name, start=0, end=-1):
    """Remove the passed annotation from the internal representation."""
    if name not in self._store:
      return
    if end < 0:
      # A negative end counts back from the end of the blip content.
      end = len(self._blip) + end
    new_list = []
    for a in self._store[name]:
      if start > a.end or end < a.start:
        # Disjoint: keep untouched.
        new_list.append(a)
      elif start < a.start and end > a.end:
        # Fully covered: drop it.
        continue
      else:
        # Partial overlap: keep the uncovered pieces.
        if a.start < start:
          new_list.append(Annotation(name, a.value, a.start, start))
        if a.end > end:
          new_list.append(Annotation(name, a.value, end, a.end))
    if new_list:
      self._store[name] = new_list
    else:
      del self._store[name]

  def _shift(self, where, inc):
    """Shift annotations by 'inc' if they (partly) overlap with 'where'."""
    for annotations in self._store.values():
      for annotation in annotations:
        annotation._shift(where, inc)
    # Merge fragmented annotations that should be contiguous, for example:
    # Annotation('foo', 'bar', 1, 2) and Annotation('foo', 'bar', 2, 3).
    for name, annotations in self._store.items():
      # Bug fix: the old code deleted entries from the list it was
      # iterating using indices into a slice of that list; rebuild the
      # list instead of mutating it in place.
      merged = []
      for annotation in sorted(annotations, key=lambda a: a.start):
        if (merged and merged[-1].end == annotation.start
            and merged[-1].value == annotation.value):
          # Contiguous and same value: extend the previous annotation.
          merged[-1] = Annotation(name, annotation.value,
                                  merged[-1].start, annotation.end)
        else:
          merged.append(annotation)
      self._store[name] = merged

  def __len__(self):
    return len(self._store)

  def __getitem__(self, key):
    return self._store[key]

  def __iter__(self):
    for annotation_list in self._store.values():
      for annotation in annotation_list:
        yield annotation

  def names(self):
    """Return the names of the annotations in the store."""
    return self._store.keys()

  def serialize(self):
    """Return a list of the serialized annotations."""
    res = []
    for v in self._store.values():
      res += [a.serialize() for a in v]
    return res
class Blips(object):
  """A dictionary-like object containing the blips, keyed on blip ID."""

  def __init__(self, blips):
    self._blips = blips

  def __getitem__(self, blip_id):
    return self._blips[blip_id]

  def __iter__(self):
    return iter(self._blips)

  def __len__(self):
    return len(self._blips)

  def _add(self, ablip):
    # Internal: register a freshly constructed blip under its own id.
    self._blips[ablip.blip_id] = ablip

  def _remove_with_id(self, blip_id):
    # Internal: drop a blip and unlink it from its parent, if any.
    doomed = self._blips[blip_id]
    if doomed:
      # Remove the reference to this blip from its parent.
      parent = doomed.parent_blip
      if parent:
        parent._child_blip_ids.remove(blip_id)
      del self._blips[blip_id]

  def get(self, blip_id, default_value=None):
    """Retrieves a blip.

    Returns:
      A Blip object. If none found for the ID, it returns None,
      or if default_value is specified, it returns that.
    """
    return self._blips.get(blip_id, default_value)

  def serialize(self):
    """Serializes the blips.

    Returns:
      A dict of serialized blips keyed by blip ID.
    """
    return dict([(bid, item.serialize())
                 for bid, item in self._blips.items()])
class BlipRefs(object):
  """Represents a set of references to contents in a blip.

  For example, a BlipRefs instance can represent the results
  of a search, an explicitly set range, a regular expression,
  or refer to the entire blip. BlipRefs are used to express
  operations on a blip in a consistent way that can easily
  be transferred to the server.

  The typical way of creating a BlipRefs object is to use
  selector methods on the Blip object. Developers will not
  usually instantiate a BlipRefs object directly.
  """

  # Operation identifiers understood by _execute and the wire protocol.
  DELETE = 'DELETE'
  REPLACE = 'REPLACE'
  INSERT = 'INSERT'
  INSERT_AFTER = 'INSERT_AFTER'
  ANNOTATE = 'ANNOTATE'
  CLEAR_ANNOTATION = 'CLEAR_ANNOTATION'
  UPDATE_ELEMENT = 'UPDATE_ELEMENT'

  def __init__(self, blip, maxres=1):
    """Inits this BlipRefs.

    Args:
      blip: the Blip this reference set points into.
      maxres: maximum number of matches to consider.
    """
    self._blip = blip
    self._maxres = maxres

  @classmethod
  def all(cls, blip, findwhat, maxres=-1, **restrictions):
    """Construct an instance representing the search for text or elements.

    Args:
      blip: the blip to search in.
      findwhat: a string, an element class, or None to refer to the
          entire blip.
      maxres: number of results to return at most, or <= 0 for all.
      restrictions: if findwhat is a class, extra element attributes
          that have to match.

    Returns:
      A new BlipRefs describing the matches.
    """
    obj = cls(blip, maxres)
    obj._findwhat = findwhat
    obj._restrictions = restrictions
    obj._hits = lambda: obj._find(findwhat, maxres, **restrictions)
    if findwhat is None:
      # No findWhat, take the entire blip
      obj._params = {}
    else:
      query = {'maxRes': maxres}
      if isinstance(findwhat, basestring):
        query['textMatch'] = findwhat
      else:
        query['elementMatch'] = findwhat.type
        query['restrictions'] = restrictions
      obj._params = {'modifyQuery': query}
    return obj

  @classmethod
  def range(cls, blip, begin, end):
    """Constructs an instance representing an explicitly set range."""
    obj = cls(blip)
    obj._begin = begin
    obj._end = end
    obj._hits = lambda: [(begin, end)]
    obj._params = {'range': {'start': begin, 'end': end}}
    return obj

  def _elem_matches(self, elem, clz, **restrictions):
    """Return whether elem is a clz instance matching all restrictions."""
    if not isinstance(elem, clz):
      return False
    for key, val in restrictions.items():
      if getattr(elem, key) != val:
        return False
    return True

  def _find(self, what, maxres=-1, **restrictions):
    """Iterates where 'what' occurs in the associated blip.

    What can be either a string or a class reference.
    Examples:
      self._find('hello') will return the first occurence of the word hello
      self._find(element.Gadget, url='http://example.com/gadget.xml')
          will return the first gadget that has as url example.com.

    Args:
      what: what to search for. Can be a class or a string. The class
          should be an element from element.py
      maxres: number of results to return at most, or <= 0 for all.
      restrictions: if what specifies a class, further restrictions
          of the found instances.

    Yields:
      Tuples indicating the range of the matches. For a one
      character/element match at position x, (x, x+1) is yielded.
    """
    blip = self._blip
    if what is None:
      yield 0, len(blip)
      # Plain return (instead of raising StopIteration) keeps this
      # generator correct under PEP 479 as well.
      return
    if isinstance(what, basestring):
      idx = blip._content.find(what)
      count = 0
      while idx != -1:
        yield idx, idx + len(what)
        count += 1
        if count == maxres:
          return
        idx = blip._content.find(what, idx + len(what))
    else:
      count = 0
      for idx, el in blip._elements.items():
        if self._elem_matches(el, what, **restrictions):
          yield idx, idx + 1
          count += 1
          if count == maxres:
            return

  def _execute(self, modify_how, what):
    """Executes this BlipRefs object.

    Args:
      modify_how: What to do. Any of the operations declared at the top.
      what: Depending on the operation. For delete, has to be None.
          For the others it is a singleton, a list or a function returning
          what to do; for ANNOTATE tuples of (key, value), for the others
          either string or elements.
          If what is a function, it takes three parameters, the content of
          the blip, the beginning of the matching range and the end.

    Raises:
      IndexError when trying to access content outside of the blip.
      ValueError when called with the wrong values.

    Returns:
      self for chainability.
    """
    blip = self._blip

    if modify_how != BlipRefs.DELETE:
      if not isinstance(what, list):
        what = [what]
      next_index = 0

    matched = []
    # updated_elements is used to store the element type of the
    # element to update
    updated_elements = []

    # For now, if we find one markup, we'll use it everywhere.
    # ('current' is the payload applied at the current hit; renamed from
    # 'next', which shadowed the builtin.)
    current = None

    for start, end in self._hits():
      # Negative indices count from the end; end == 0 after a negative
      # start means "up to the end of the document".
      if start < 0:
        start += len(blip)
        if end == 0:
          end += len(blip)
      if end < 0:
        end += len(blip)

      if len(blip) == 0:
        if start != 0 or end != 0:
          raise IndexError('Start and end have to be 0 for empty document')
      elif start < 0 or end < 1 or start >= len(blip) or end > len(blip):
        raise IndexError('Position outside the document')

      if modify_how == BlipRefs.DELETE:
        for i in range(start, end):
          if i in blip._elements:
            del blip._elements[i]
        blip._delete_annotations(start, end)
        blip._shift(end, start - end)
        blip._content = blip._content[:start] + blip._content[end:]
      else:
        if callable(what):
          current = what(blip._content, start, end)
          matched.append(current)
        else:
          # Cycle through the supplied values, one per hit.
          current = what[next_index]
          next_index = (next_index + 1) % len(what)
        if isinstance(current, str):
          # Normalize byte strings to unicode (Python 2).
          current = current.decode('utf-8')
        if modify_how == BlipRefs.ANNOTATE:
          key, value = current
          blip.annotations._add_internal(key, value, start, end)
        elif modify_how == BlipRefs.CLEAR_ANNOTATION:
          blip.annotations._delete_internal(current, start, end)
        elif modify_how == BlipRefs.UPDATE_ELEMENT:
          el = blip._elements.get(start)
          if not el:
            # Bug fix: this previously tested the always-truthy 'element'
            # module, so a missing element was never reported here.
            raise ValueError('No element found at index %s' % start)
          # the passing around of types this way feels a bit dirty:
          updated_elements.append(element.Element(el.type, properties=current))
          for k, b in current.items():
            setattr(el, k, b)
        else:
          if modify_how == BlipRefs.INSERT:
            end = start
          elif modify_how == BlipRefs.INSERT_AFTER:
            start = end
          elif modify_how == BlipRefs.REPLACE:
            pass
          else:
            raise ValueError('Unexpected modify_how: ' + modify_how)

          if isinstance(current, element.Element):
            # Elements occupy a single placeholder character in the text.
            text = ' '
          else:
            text = current
          # in the case of a replace, and the replacement text is shorter,
          # delete the delta.
          if start != end and len(text) < end - start:
            blip._delete_annotations(start + len(text), end)
          blip._shift(end, len(text) + start - end)
          blip._content = blip._content[:start] + text + blip._content[end:]
          if isinstance(current, element.Element):
            blip._elements[start] = current

    # Queue a single document_modify operation describing what was done.
    operation = blip._operation_queue.document_modify(blip.wave_id,
                                                      blip.wavelet_id,
                                                      blip.blip_id)
    for param, param_value in self._params.items():
      operation.set_param(param, param_value)

    modify_action = {'modifyHow': modify_how}
    if modify_how == BlipRefs.DELETE:
      pass
    elif modify_how == BlipRefs.UPDATE_ELEMENT:
      modify_action['elements'] = updated_elements
    elif (modify_how == BlipRefs.REPLACE or
          modify_how == BlipRefs.INSERT or
          modify_how == BlipRefs.INSERT_AFTER):
      if callable(what):
        what = matched
      if what:
        if not isinstance(current, element.Element):
          modify_action['values'] = [str(value) for value in what]
        else:
          modify_action['elements'] = what
    elif modify_how == BlipRefs.ANNOTATE:
      modify_action['values'] = [x[1] for x in what]
      modify_action['annotationKey'] = what[0][0]
    elif modify_how == BlipRefs.CLEAR_ANNOTATION:
      modify_action['annotationKey'] = what[0]
    operation.set_param('modifyAction', modify_action)
    return self

  def insert(self, what):
    """Inserts what at the matched positions."""
    return self._execute(BlipRefs.INSERT, what)

  def insert_after(self, what):
    """Inserts what just after the matched positions."""
    return self._execute(BlipRefs.INSERT_AFTER, what)

  def replace(self, what):
    """Replaces the matched positions with what."""
    return self._execute(BlipRefs.REPLACE, what)

  def delete(self):
    """Deletes the content at the matched positions."""
    return self._execute(BlipRefs.DELETE, None)

  def annotate(self, name, value=None):
    """Annotates the content at the matched positions.

    You can either specify both name and value to set the
    same annotation, or supply as the first parameter something
    that yields name/value pairs.
    """
    if value is None:
      what = name
    else:
      what = (name, value)
    return self._execute(BlipRefs.ANNOTATE, what)

  def clear_annotation(self, name):
    """Clears the annotation at the matched positions."""
    return self._execute(BlipRefs.CLEAR_ANNOTATION, name)

  def update_element(self, new_values):
    """Update an existing element with a set of new values."""
    return self._execute(BlipRefs.UPDATE_ELEMENT, new_values)

  def __nonzero__(self):
    """Return whether there is at least one match (Python 2 truth value)."""
    for start, end in self._hits():
      return True
    return False

  def value(self):
    """Convenience method to convert a BlipRefs to value of its first match."""
    for start, end in self._hits():
      if end - start == 1 and start in self._blip._elements:
        return self._blip._elements[start]
      else:
        return self._blip.text[start:end]
    raise ValueError('BlipRefs has no values')

  def __getattr__(self, attribute):
    """Mirror the getattr of value().

    This allows for clever things like
      first(IMAGE).url
    or
      blip.annotate_with(key, value).upper()
    """
    return getattr(self.value(), attribute)

  def __radd__(self, other):
    """Make it possible to add this to a string."""
    return other + self.value()

  def __cmp__(self, other):
    """Support comparison with target (Python 2 rich-compare fallback)."""
    return cmp(self.value(), other)
class Blip(object):
  """Models a single blip instance.

  Blips are essentially the documents that make up a conversation. Blips can
  live in a hierarchy of blips. A root blip has no parent blip id, but all
  blips have the ids of the wave and wavelet that they are associated with.

  Blips also contain annotations, content and elements, which are accessed via
  the Document object.
  """

  def __init__(self, json, other_blips, operation_queue):
    """Inits this blip with JSON data.

    Args:
      json: JSON data dictionary from Wave server.
      other_blips: A dictionary like object that can be used to resolve
          ids of blips to blips.
      operation_queue: an OperationQueue object to store generated operations
          in.
    """
    self._blip_id = json.get('blipId')
    self._operation_queue = operation_queue
    self._child_blip_ids = set(json.get('childBlipIds', []))
    self._content = json.get('content', '')
    self._contributors = set(json.get('contributors', []))
    self._creator = json.get('creator')
    self._last_modified_time = json.get('lastModifiedTime', 0)
    self._version = json.get('version', 0)
    self._parent_blip_id = json.get('parentBlipId')
    self._wave_id = json.get('waveId')
    self._wavelet_id = json.get('waveletId')
    if isinstance(other_blips, Blips):
      self._other_blips = other_blips
    else:
      self._other_blips = Blips(other_blips)
    self._annotations = Annotations(operation_queue, self)
    for annjson in json.get('annotations', []):
      r = annjson['range']
      self._annotations._add_internal(annjson['name'],
                                      annjson['value'],
                                      r['start'],
                                      r['end'])
    # Elements are keyed by their integer index into the content.
    self._elements = {}
    json_elements = json.get('elements', {})
    for elem in json_elements:
      self._elements[int(elem)] = element.Element.from_json(json_elements[elem])
    self.raw_data = json

  @property
  def blip_id(self):
    """The id of this blip."""
    return self._blip_id

  @property
  def wave_id(self):
    """The id of the wave that this blip belongs to."""
    return self._wave_id

  @property
  def wavelet_id(self):
    """The id of the wavelet that this blip belongs to."""
    return self._wavelet_id

  @property
  def child_blip_ids(self):
    """The set of the ids of this blip's children."""
    return self._child_blip_ids

  @property
  def child_blips(self):
    """The set of blips that are children of this blip."""
    # (local renamed from the misspelled 'blid_id')
    return set([self._other_blips[blip_id] for blip_id in self._child_blip_ids
                if blip_id in self._other_blips])

  @property
  def contributors(self):
    """The set of participant ids that contributed to this blip."""
    return self._contributors

  @property
  def creator(self):
    """The id of the participant that created this blip."""
    return self._creator

  @property
  def last_modified_time(self):
    """The time in seconds since epoch when this blip was last modified."""
    return self._last_modified_time

  @property
  def version(self):
    """The version of this blip."""
    return self._version

  @property
  def parent_blip_id(self):
    """The parent blip_id or None if this is the root blip."""
    return self._parent_blip_id

  @property
  def parent_blip(self):
    """The parent blip or None if it is the root."""
    # if parent_blip_id is None, get will also return None
    return self._other_blips.get(self._parent_blip_id)

  def is_root(self):
    """Returns whether this is the root blip of a wavelet."""
    return self._parent_blip_id is None

  @property
  def annotations(self):
    """The annotations for this document."""
    return self._annotations

  @property
  def elements(self):
    """The elements for this document.

    The elements of a document are things like forms elements, gadgets
    that cannot be expressed as plain text. The elements property of
    a document is a dictionary like object from index in the document
    to element instance. In the text of the document you'll typically
    find a space as a place holder for the element.
    """
    return self._elements.values()

  def __len__(self):
    """Length of the blip is the length of its text content."""
    return len(self._content)

  def __getitem__(self, item):
    """returns a BlipRefs for the given slice."""
    if isinstance(item, slice):
      if item.step:
        raise errors.Error('Step not supported for blip slices')
      return self.range(item.start, item.stop)
    else:
      return self.at(item)

  def __setitem__(self, item, value):
    """short cut for self.range/at().replace(value)."""
    self.__getitem__(item).replace(value)

  def __delitem__(self, item):
    """short cut for self.range/at().delete()."""
    self.__getitem__(item).delete()

  def _shift(self, where, inc):
    """Move element and annotations after 'where' up by 'inc'."""
    new_elements = {}
    for idx, el in self._elements.items():
      if idx >= where:
        idx += inc
      new_elements[idx] = el
    self._elements = new_elements
    self._annotations._shift(where, inc)

  def _delete_annotations(self, start, end):
    """Delete all annotations between 'start' and 'end'."""
    for annotation_name in self._annotations.names():
      self._annotations._delete_internal(annotation_name, start, end)

  def all(self, findwhat=None, maxres=-1, **restrictions):
    """Returns a BlipRefs object representing all results for the search.

    If searching for an element, the restrictions can be used to specify
    additional element properties to filter on, like the url of a Gadget.
    """
    return BlipRefs.all(self, findwhat, maxres, **restrictions)

  def first(self, findwhat=None, **restrictions):
    """Returns a BlipRefs object representing the first result for the search.

    If searching for an element, the restrictions can be used to specify
    additional element properties to filter on, like the url of a Gadget.
    """
    return BlipRefs.all(self, findwhat, 1, **restrictions)

  def at(self, index):
    """Returns a BlipRefs object representing a 1-character range."""
    return BlipRefs.range(self, index, index + 1)

  def range(self, start, end):
    """Returns a BlipRefs object representing the range."""
    return BlipRefs.range(self, start, end)

  def serialize(self):
    """Return a dictionary representation of this blip ready for json."""
    return {'blipId': self._blip_id,
            'childBlipIds': list(self._child_blip_ids),
            'content': self._content,
            'creator': self._creator,
            'contributors': list(self._contributors),
            'lastModifiedTime': self._last_modified_time,
            'version': self._version,
            'parentBlipId': self._parent_blip_id,
            'waveId': self._wave_id,
            'waveletId': self._wavelet_id,
            'annotations': self._annotations.serialize(),
            'elements': dict([(index, e.serialize())
                              for index, e in self._elements.items()])
           }

  def proxy_for(self, proxy_for_id):
    """Return a view on this blip that will proxy for the specified id.

    A shallow copy of the current blip is returned with the proxy_for_id
    set. Any modifications made to this copy will be done using the
    proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will
    be used.
    """
    # Bug fix: _operation_queue is an object, not a callable, so it must
    # not be invoked; also removed the stray dead statement 'self.wavel'
    # that preceded this line (it raised AttributeError at runtime).
    operation_queue = self._operation_queue.proxy_for(proxy_for_id)
    res = Blip(json={},
               other_blips={},
               operation_queue=operation_queue)
    res._blip_id = self._blip_id
    res._child_blip_ids = self._child_blip_ids
    res._content = self._content
    res._contributors = self._contributors
    res._creator = self._creator
    res._last_modified_time = self._last_modified_time
    res._version = self._version
    res._parent_blip_id = self._parent_blip_id
    res._wave_id = self._wave_id
    res._wavelet_id = self._wavelet_id
    res._other_blips = self._other_blips
    res._annotations = self._annotations
    res._elements = self._elements
    res.raw_data = self.raw_data
    return res

  @property
  def text(self):
    """Returns the raw text content of this document."""
    return self._content

  def find(self, what, **restrictions):
    """Iterate to matching bits of contents.

    Yield either elements or pieces of text.
    """
    br = BlipRefs.all(self, what, **restrictions)
    for start, end in br._hits():
      if end - start == 1 and start in self._elements:
        yield self._elements[start]
      else:
        yield self._content[start:end]
    # No explicit StopIteration: falling off the end of a generator is the
    # correct (and PEP 479-safe) way to finish it.

  def append(self, what):
    """Convenience method covering a common pattern."""
    return BlipRefs.all(self, findwhat=None).insert_after(what)

  def reply(self):
    """Create and return a reply to this blip."""
    blip_data = self._operation_queue.blip_create_child(self.wave_id,
                                                        self.wavelet_id,
                                                        self.blip_id)
    new_blip = Blip(blip_data, self._other_blips, self._operation_queue)
    self._other_blips._add(new_blip)
    return new_blip

  def append_markup(self, markup):
    """Interpret the markup text as xhtml and append the result to the doc.

    Args:
      markup: The markup'ed text to append.
    """
    self._operation_queue.document_append_markup(self.wave_id,
                                                 self.wavelet_id,
                                                 self.blip_id,
                                                 markup)
    #TODO: at least strip the html out
    self._content += markup

  def insert_inline_blip(self, position):
    """Inserts an inline blip into this blip at a specific position.

    Args:
      position: Position to insert the blip at.

    Returns:
      The JSON data of the blip that was created.
    """
    blip_data = self._operation_queue.document_inline_blip_insert(
        self.wave_id,
        self.wavelet_id,
        self.blip_id,
        position)
    new_blip = Blip(blip_data, self._other_blips, self._operation_queue)
    self._other_blips._add(new_blip)
    return new_blip
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the robot module."""
import unittest
import events
import ops
import robot
import simplejson
# A single serialized blip (keyed by blip id) as the wave server sends it.
BLIP_JSON = ('{"wdykLROk*13":'
             '{"lastModifiedTime":1242079608457,'
             '"contributors":["someguy@test.com"],'
             '"waveletId":"test.com!conv+root",'
             '"waveId":"test.com!wdykLROk*11",'
             '"parentBlipId":null,'
             '"version":3,'
             '"creator":"someguy@test.com",'
             '"content":"\\nContent!",'
             '"blipId":"wdykLROk*13","'
             'annotations":[{"range":{"start":0,"end":1},'
             '"name":"user/e/davidbyttow@google.com","value":"David"}],'
             '"elements":{},'
             '"childBlipIds":[]}'
             '}')

# A serialized wavelet whose wave/wavelet ids match BLIP_JSON.
WAVELET_JSON = ('{"lastModifiedTime":1242079611003,'
                '"title":"A title",'
                '"waveletId":"test.com!conv+root",'
                '"rootBlipId":"wdykLROk*13",'
                '"dataDocuments":null,'
                '"creationTime":1242079608457,'
                '"waveId":"test.com!wdykLROk*11",'
                '"participants":["someguy@test.com","monty@appspot.com"],'
                '"creator":"someguy@test.com",'
                '"version":5}')

# A single WAVELET_PARTICIPANTS_CHANGED event.
EVENTS_JSON = ('[{"timestamp":1242079611003,'
               '"modifiedBy":"someguy@test.com",'
               '"properties":{"participantsRemoved":[],'
               '"participantsAdded":["monty@appspot.com"]},'
               '"type":"WAVELET_PARTICIPANTS_CHANGED"}]')

# A complete event bundle combining the three fixtures above.
TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % (
    BLIP_JSON, WAVELET_JSON, EVENTS_JSON)

# Server response for wave creation in the current protocol shape.
NEW_WAVE_JSON = [{"data":
                  {"waveletId": "wavesandbox.com!conv+root",
                   "blipId": "b+LrODcLZkDlu", "waveId":
                   "wavesandbox.com!w+LrODcLZkDlt"},
                  "id": "op2"}]

# Same response in the older, nested protocol shape (stray trailing
# semicolon removed).
NEW_WAVE_JSON_OLD = [{'data':
                      [{'data':
                        {'waveletId': 'googlewave.com!conv+root',
                         'blipId': 'b+VqQXQbZkCP1',
                         'waveId': 'googlewave.com!w+VqQXQbZkCP0'},
                        'id': 'wavelet.create1265055048410'}],
                      'id': 'op10'}]
class TestRobot(unittest.TestCase):
  """Tests for testing the basic parsing of json in robots."""

  def setUp(self):
    self.robot = robot.Robot('Testy')

  def testCreateWave(self):
    """new_wave must cope with both the new and the old response shapes."""
    self.robot.submit = lambda x: NEW_WAVE_JSON
    new_wave = self.robot.new_wave('wavesandbox.com', submit=True)
    self.assertEqual('wavesandbox.com!w+LrODcLZkDlt', new_wave.wave_id)
    self.robot.submit = lambda x: NEW_WAVE_JSON_OLD
    new_wave = self.robot.new_wave('googlewave.com', submit=True)
    self.assertEqual('googlewave.com!w+VqQXQbZkCP0', new_wave.wave_id)

  def testEventParsing(self):
    def check(event, wavelet):
      # Test some basic properties; the rest should be covered by
      # ops.CreateContext.
      root = wavelet.root_blip
      self.assertEqual(1, len(wavelet.blips))
      self.assertEqual('wdykLROk*13', root.blip_id)
      self.assertEqual('test.com!wdykLROk*11', root.wave_id)
      self.assertEqual('test.com!conv+root', root.wavelet_id)
      self.assertEqual('WAVELET_PARTICIPANTS_CHANGED', event.type)
      self.assertEqual({'participantsRemoved': [],
                        'participantsAdded': ['monty@appspot.com']},
                       event.properties)
      self.robot.test_called = True

    self.robot.test_called = False
    self.robot.register_handler(events.WaveletParticipantsChanged,
                                check)
    json = self.robot.process_events(TEST_JSON)
    self.assertTrue(self.robot.test_called)
    operations = simplejson.loads(json)
    # there should be one operation indicating the current version:
    self.assertEqual(1, len(operations))

  def testWrongEventsIgnored(self):
    """A handler for a different event type must not be invoked."""
    self.robot.test_called = False

    def check(event, wavelet):
      # Flag the robot so the assertion below fails if this ever runs.
      # (Previously this set a dead local 'called' variable, so the test
      # could never detect a wrongly invoked handler.)
      self.robot.test_called = True

    self.robot.register_handler(events.BlipSubmitted,
                                check)
    self.robot.process_events(TEST_JSON)
    self.assertFalse(self.robot.test_called)

  def testOperationParsing(self):
    """Operations generated in a handler appear exactly once in the output."""
    def check(event, wavelet):
      wavelet.reply()
      wavelet.title = 'new title'
      wavelet.root_blip.append_markup('<b>Hello</b>')

    self.robot.register_handler(events.WaveletParticipantsChanged,
                                check)
    json = self.robot.process_events(TEST_JSON)
    operations = simplejson.loads(json)
    expected = set([ops.ROBOT_NOTIFY_CAPABILITIES_HASH,
                    ops.WAVELET_APPEND_BLIP,
                    ops.WAVELET_SET_TITLE,
                    ops.DOCUMENT_APPEND_MARKUP])
    methods = [operation['method'] for operation in operations]
    for method in methods:
      self.assertTrue(method in expected)
      expected.remove(method)
    self.assertEquals(0, len(expected))

  def testSerializeWavelets(self):
    """A wavelet survives a serialize/deserialize roundtrip."""
    wavelet = self.robot.blind_wavelet(TEST_JSON)
    serialized = wavelet.serialize()
    unserialized = self.robot.blind_wavelet(serialized)
    self.assertEquals(wavelet.creator, unserialized.creator)
    self.assertEquals(wavelet.creation_time, unserialized.creation_time)
    self.assertEquals(wavelet.last_modified_time,
                      unserialized.last_modified_time)
    self.assertEquals(wavelet.root_blip.blip_id, unserialized.root_blip.blip_id)
    self.assertEquals(wavelet.title, unserialized.title)
    self.assertEquals(wavelet.wave_id, unserialized.wave_id)
    self.assertEquals(wavelet.wavelet_id, unserialized.wavelet_id)
    self.assertEquals(wavelet.domain, unserialized.domain)

  def testProxiedBlindWavelet(self):
    """Operations on a proxying blind wavelet carry proxyingFor."""
    def handler(event, wavelet):
      blind_wavelet = self.robot.blind_wavelet(TEST_JSON, 'proxyid')
      blind_wavelet.reply()
      blind_wavelet.submit_with(wavelet)

    self.robot.register_handler(events.WaveletParticipantsChanged, handler)
    json = self.robot.process_events(TEST_JSON)
    operations = simplejson.loads(json)
    self.assertEqual(2, len(operations))
    self.assertEquals(ops.ROBOT_NOTIFY_CAPABILITIES_HASH,
                      operations[0]['method'])
    self.assertEquals(ops.WAVELET_APPEND_BLIP, operations[1]['method'])
    self.assertEquals('proxyid', operations[1]['params']['proxyingFor'])

  def testCapabilitiesHashIncludesContextAndFilter(self):
    """The capabilities hash must change with handler context and filter."""
    robot1 = robot.Robot('Robot1')
    robot1.register_handler(events.WaveletSelfAdded, lambda: '')
    robot2 = robot.Robot('Robot2')
    robot2.register_handler(events.WaveletSelfAdded, lambda: '',
                            context=events.Context.ALL)
    self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash())
    robot3 = robot.Robot('Robot3')
    # Bug fix: this previously registered on robot2 a second time, leaving
    # robot3 without handlers and the filter influence untested.
    robot3.register_handler(events.WaveletSelfAdded, lambda: '',
                            context=events.Context.ALL, filter="foo")
    self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash())
    self.assertNotEqual(robot1.capabilities_hash(), robot3.capabilities_hash())
    self.assertNotEqual(robot2.capabilities_hash(), robot3.capabilities_hash())
class TestGetCapabilitiesXml(unittest.TestCase):
  """Checks the capabilities.xml document produced by a robot."""

  def setUp(self):
    self.robot = robot.Robot('Testy')
    # Pin the hash so the expected documents below are deterministic.
    self.robot.capabilities_hash = lambda: '1'

  def assertStringsEqual(self, s1, s2):
    self.assertEqual(s1, s2, 'Strings differ:\n%s--\n%s' % (s1, s2))

  def testDefault(self):
    """A robot without handlers produces an empty capabilities section."""
    want = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    self.assertStringsEqual(want, self.robot.capabilities_xml())

  def testUrls(self):
    """Supplying image/profile urls does not change the capabilities doc."""
    bot = robot.Robot(
        'Testy',
        image_url='http://example.com/image.png',
        profile_url='http://example.com/profile.xml')
    bot.capabilities_hash = lambda: '1'
    want = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    self.assertStringsEqual(want, bot.capabilities_xml())

  def testConsumerKey(self):
    """A configured consumer key shows up as a consumer_key element."""
    # setup_oauth doesn't work during testing, so heavy handed setting of
    # properties it is:
    self.robot._consumer_key = 'consumer'
    want = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:consumer_key>consumer</w:consumer_key>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    self.assertStringsEqual(want, self.robot.capabilities_xml())

  def testCapsAndEvents(self):
    """Registered handlers are listed with their context attribute."""
    self.robot.register_handler(events.BlipSubmitted, None,
                                context=[events.Context.SELF, events.Context.ROOT])
    want = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n'
        '  <w:capability name="%s" context="SELF,ROOT"/>\n'
        '</w:capabilities>\n'
        '</w:robot>\n') % (ops.PROTOCOL_VERSION, events.BlipSubmitted.type)
    self.assertStringsEqual(want, self.robot.capabilities_xml())
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Declares the api package."""
| Python |
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Run robot from the commandline for testing.
This robot_runner lets you define event handlers using flags and takes the
json input from the std in and writes out the json output to stdout.
for example
cat events | commandline_robot_runner.py \
--eventdef-blip_submitted="wavelet.title='title'"
"""
__author__ = 'douwe@google.com (Douwe Osinga)'
import sys
import urllib
from google3.pyglib import app
from google3.pyglib import flags
from google3.walkabout.externalagents import api
from google3.walkabout.externalagents.api import blip
from google3.walkabout.externalagents.api import element
from google3.walkabout.externalagents.api import errors
from google3.walkabout.externalagents.api import events
from google3.walkabout.externalagents.api import ops
from google3.walkabout.externalagents.api import robot
from google3.walkabout.externalagents.api import util
FLAGS = flags.FLAGS

# Define one --eventdef_<event_type> string flag per supported event so a
# handler body can be supplied on the command line for any event type.
for event in events.ALL:
  flags.DEFINE_string('eventdef_' + event.type.lower(),
                      '',
                      'Event definition for the %s event' % event.type)
def handle_event(src, bot, e, w):
  """Handle an event by executing the source code src.

  Args:
    src: python source for the handler body, as supplied through an
        --eventdef_* flag.
    bot: the robot instance handling the event.
    e: the event being handled.
    w: the wavelet the event occurred on.
  """
  # NOTE: executing flag-supplied source is this debugging tool's whole
  # purpose, but it runs with full privileges -- never feed it untrusted
  # input.
  globs = {'e': e, 'w': w, 'api': api, 'bot': bot,
           'blip': blip, 'element': element, 'errors': errors,
           'events': events, 'ops': ops, 'robot': robot,
           'util': util}
  # The tuple-style call form works under both Python 2 and Python 3
  # (the old 'exec src in globs' statement is a SyntaxError on 3.x).
  exec(src, globs)
def run_bot(input_file, output_file):
  """Build a robot from the --eventdef_* flags and process one event bundle.

  Reads a JSON event bundle from input_file, dispatches it through the
  registered handlers and writes the JSON response to output_file.
  """
  cmdbot = robot.Robot('Commandline bot')
  for event_class in events.ALL:
    handler_source = urllib.unquote_plus(
        getattr(FLAGS, 'eventdef_' + event_class.type.lower()))
    if not handler_source:
      continue
    # Bind the source and bot through default arguments so each lambda
    # keeps its own values (avoids the late-binding closure pitfall).
    cmdbot.register_handler(
        event_class,
        lambda event, wavelet, src=handler_source, bot=cmdbot:
            handle_event(src, bot, event, wavelet))
  json_body = unicode(input_file.read(), 'utf8')
  output_file.write(cmdbot.process_events(json_body))
def main(argv):
  """Entry point for app.run(): wire stdin/stdout into the bot."""
  run_bot(sys.stdin, sys.stdout)

if __name__ == '__main__':
  app.run()
| Python |
# handler_xmpp.py
#
#
""" xmpp request handler. """
# set start time at module import
# NOTE(review): 'starttime' is not referenced anywhere else in this
# handler -- confirm whether it is still needed.
import time
starttime = time.time()
## gozerlib imports
from gozerlib.utils.generic import fromenc, toenc, getversion
from gozerlib.utils.lazydict import LazyDict
from gozerlib.utils.exception import handle_exception
from gozerlib.plugins import plugs
from gozerlib.boot import boot
from gozerlib.admin import plugin_packages
from gozerlib.remote.event import RemoteEvent
from gozerlib.remote.bot import RemoteBot
## gaelib imports
from gozerlib.gae.xmpp.bot import XMPPBot
from gozerlib.gae.xmpp.event import XMPPEvent
from gozerlib.gae.utils.auth import checkuser
## google imports
from google.appengine.api import xmpp
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.api import users as gusers
from google.appengine.ext import db
from google.appengine.ext.webapp import xmpp_handlers
from simplejson import loads
import wsgiref.handlers
import sys
import time
import types
import logging
logging.warn(getversion('XMPP'))  # announce the bot version in the logs

## define

# Single module-level XMPP bot instance, reused across requests.
bot = XMPPBot()
## functions
def xmppbox(response):
    """Write a minimal HTML form for entering a command.

    The form posts to the App Engine XMPP chat endpoint so commands can be
    tried out from a browser.

    Args:
        response: a webapp response object; the form is written to its
            out stream.
    """
    # Bug fix: the input previously had type="commit", which is not a
    # valid HTML input type; "text" is what was intended.
    response.out.write("""
    <form action="/_ah/xmpp/message/chat/" method="post">
    <div><b>enter command:</b> <input type="text" name="body">
    </form>
""")
## classes
class XMPPHandler(webapp.RequestHandler):
    """ relay incoming messages to the bot. """

    def get(self):
        """Serve the command entry form."""
        xmppbox(self.response)

    def post(self):
        """Dispatch an incoming XMPP message to the appropriate bot."""
        logging.info("XMPP incoming: %s" % self.request.remote_addr)
        # 'has_key' was removed in Python 3; the 'in' operator works on
        # both versions.
        if 'from' not in self.request.POST:
            logging.debug('no from in POST: %s' % str(self.request.POST))
            return
        if 'to' not in self.request.POST:
            logging.debug('no to in POST: %s' % str(self.request.POST))
            return
        if 'body' not in self.request.POST:
            logging.debug('no body in POST: %s' % str(self.request.POST))
            return
        event = XMPPEvent().parse(self.request, self.response)
        event.bot = bot
        if event.txt.startswith('{') or 'appspotchat.com' in event.to:
            # Payload looks like a remote (gozernet) event: re-dispatch it
            # through a RemoteBot instead of the local XMPP bot.
            remote = RemoteEvent()
            remote.fromstring(event.txt)
            remote.isremote = True
            remote.remoteout = event.userhost
            remote.bot = RemoteBot()
            remote.title = event.channel
            logging.warn('gozernet - in - %s - %s' % (remote.userhost, remote.txt))
            remote.bot.doevent(remote)
        else:
            bot.doevent(event)
# Route the App Engine XMPP chat endpoint (with and without trailing
# slash) to XMPPHandler.
application = webapp.WSGIApplication([('/_ah/xmpp/message/chat/', XMPPHandler),
                                      ('/_ah/xmpp/message/chat', XMPPHandler)],
                                     debug=True)
def main():
    """CGI entry point: serve the XMPP WSGI application."""
    # The former 'global application/bot/gnbot' declarations were no-ops:
    # nothing is rebound here, and 'gnbot' was never defined at all.
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
| Python |
# handler_wave.py
#
#
""" this handler handles all the wave jsonrpc requests. """
## gozerlib imports
from gozerlib.utils.generic import getversion
from gozerlib.config import cfg
from gozerlib.errors import NoSuchCommand
## gaelib imports
from gozerlib.gae.wave.bot import WaveBot
## basic imports
import logging
import os
## defines
logging.warn(getversion('WAVE'))  # announce the bot version in the logs

# the bot: single module-level WaveBot instance, configured with the
# domain taken from the config.
bot = WaveBot(domain=cfg.domain)
def main():
    """CGI entry point: hand the request to the wave bot."""
    bot.run()

if __name__ == "__main__":
    main()
| Python |
# handler_web.py
#
#
""" web request handler. """
import time
import logging
starttime = time.time()
#logging.debug('start time: %s' % time.ctime(time.time()))
## gozerlib imports
from gozerlib.utils.generic import fromenc, toenc, getversion
from gozerlib.utils.xmpp import stripped
from gozerlib.plugins import plugs
from gozerlib.config import cfg
from gozerlib.utils.exception import handle_exception
from gozerlib.boot import boot, getcmndtable, getpluginlist
from gozerlib.persist import Persist
from gozerlib.errors import NoSuchCommand
from gozerlib.utils.log import setloglevel
## gaelib import
from gozerlib.gae.web.bot import WebBot
from gozerlib.gae.web.event import WebEvent
from gozerlib.gae.utils.auth import checkuser
from gozerlib.gae.utils.web import commandbox, start, closer, loginurl, logouturl
## google imports
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.api import users as gusers
## simplejson import
from simplejson import loads
## basic imports
import wsgiref.handlers
import sys
import time
import types
import os
import logging
## init

logging.warn(getversion('WEB'))  # announce the bot version in the logs

## define

# Single module-level web bot instance, reused across requests.
bot = WebBot()
## classes
class DispatchHandler(webapp.RequestHandler):
    """ the bots web command dispatcher. """

    def _stamp_starttime(self):
        """Record a start time on the response.

        The very first request reports the process (import) start time so
        cold-start latency is visible; later requests use the current time.
        """
        global starttime
        if starttime:
            self.response.starttime = starttime
            starttime = 0
        else:
            self.response.starttime = time.time()

    def get(self):
        """ show basic page. """
        self._stamp_starttime()
        (userhost, user, u, nick) = checkuser(self.response, self.request)
        login = loginurl(self.response)
        logout = logouturl(self.response)
        self.response.out.write('<br>')
        # Anonymous visitors are shown as 'login' instead of a userhost.
        if user:
            who = userhost
        else:
            who = 'login'
        start(self.response, {'appname': cfg['appname'] , 'plugins': getpluginlist() , 'who': who, 'loginurl': login, 'logouturl': logout, 'onload': 'void(0);'})
        self.response.out.write('<br><div class="body"><i>"enter a command in the box above."</i><br></div>')

    def post(self):
        """ this is where the command gets dispatched. """
        self._stamp_starttime()
        logging.debug("web - incoming - %s" % self.request.remote_addr)
        login = loginurl(self.response)
        logout = logouturl(self.response)
        event = WebEvent().parse(self.response, self.request)
        event.bot = bot
        event.cbtype = "WEB"
        self.response.out.write('<br>')
        if event.user:
            who = event.userhost
        else:
            who = 'login'
        start(self.response, {'appname': cfg['appname'] , 'plugins': getpluginlist() , 'who': who, 'loginurl': login, 'logouturl': logout, 'onload': 'putFocus(0,0);'})
        try:
            bot.doevent(event)
        except NoSuchCommand:
            self.response.out.write("sorry no %s command found." % event.usercmnd)
        except Exception:
            # The bound exception name was unused; handle_exception picks
            # the active exception up from sys.exc_info itself.
            handle_exception(event)
        closer(self.response)
class FeedListHandler(webapp.RequestHandler):

    """ list the hubbub feeds the bot is watching. """
    # NOTE: the previous docstring ("the bots web command dispatcher") and a
    # stale, unused "global starttime" declaration were copy-paste residue
    # from DispatchHandler and have been removed; behavior is unchanged.

    def get(self):
        """ write one "<name> <url>" line per watched feed. """
        # lazy import: only load the hubbub plugin when this page is hit
        from waveplugs.hubbub import HubbubWatcher
        watcher = HubbubWatcher('hubbub')
        feeds = watcher.getall()
        for feed in feeds.values():
            self.response.out.write("%s %s<br>\n" % (feed.data.name, feed.data.url))
## the application
# route both the root and /dispatch to the command dispatcher
application = webapp.WSGIApplication([('/', DispatchHandler),
                                      ('/dispatch', DispatchHandler),
                                      ('/dispatch/', DispatchHandler),
                                      ('/feeds', FeedListHandler),
                                      ('/feeds/', FeedListHandler)],
                                     debug=True)

## main

def main():
    """ wsgi entry point. """
    global bot
    global application
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
| Python |
# handler_task.py
#
#
""" feedprovider task handler. """
## gozerlib imports
from gozerlib.plugins import plugs
from gozerlib.utils.generic import getversion
from gozerlib.utils.exception import handle_exception
from gozerlib.tasks import taskmanager
## google imports
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
## simplejson import
from waveapi.simplejson import loads
## basic imports
import wsgiref.handlers
import logging
## vars
# plugin names to reload at import time; currently empty
periodicals = []
# url mount points served by TaskHandler (under /tasks/)
mountpoints = ['periodical', ]
##
logging.info(getversion('TASK'))
# no-op while periodicals is empty
for plugin in periodicals:
    plugs.reload(plugin)
class TaskHandler(webapp.RequestHandler):
""" the bots task handler. """
def get(self):
""" this is where the task gets dispatched. """
path = self.request.path
if path.endswith('/'):
path = path[:-1]
taskname = path.split('/')[-1]
logging.debug("using taskname: %s" % taskname)
inputdict = {}
for name, value in self.request.environ.iteritems():
if not 'wsgi' in name:
inputdict[name] = value
try:
taskmanager.dispatch(taskname, inputdict)
except Exception, ex:
handle_exception()
def post(self):
""" this is where the task gets dispatched. """
path = self.request.path
if path.endswith('/'):
path = path[:-1]
taskname = path.split('/')[-1]
logging.debug("using taskname: %s taken from %s" % (taskname, path))
if not taskname:
return
inputdict = {}
for name, value in self.request.environ.iteritems():
if not 'wsgi' in name:
inputdict[name] = value
try:
taskmanager.dispatch(taskname, inputdict)
except Exception, ex:
handle_exception()
# the application
# build one route per configured mount point, all served by TaskHandler
mountlist = []
for mount in mountpoints:
    mountlist.append(('/tasks/%s' % mount, TaskHandler))
application = webapp.WSGIApplication(mountlist, debug=True)

def main():
    """ wsgi entry point. """
    global application
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
| Python |
# handler_gadgetexec.py
#
#
""" feedprovider exec handler. just return the results in a <div>. """
## gozerlib imports
from gozerlib.utils.generic import fromenc, toenc, getversion
from gozerlib.utils.xmpp import stripped
from gozerlib.plugins import plugs
from gozerlib.persist import Persist
from gozerlib.utils.exception import handle_exception
from gozerlib.boot import boot
from gozerlib.fleet import fleet
from gozerlib.config import cfg as maincfg
from gozerlib.errors import NoSuchCommand
## gaelib imports
from gozerlib.gae.wave.bot import WaveBot
from gozerlib.gae.web.bot import WebBot
from gozerlib.gae.web.event import WebEvent
from gozerlib.gae.utils.web import execbox, commandbox, closer
## google imports
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.api import users as gusers
## simplejson import
from simplejson import loads
## basic imports
import wsgiref.handlers
import sys
import time
import types
import os
import logging
# log the running version at import time
logging.warn(getversion('GADGETEXEC'))
#webbot = WebBot()
# bot instance used to handle gadget exec events
wavebot = WaveBot()
class HB_Handler(webapp.RequestHandler):

    """ the bots exec command dispatcher for gadgets: GET shows the
        command box, POST runs the command via the wave bot. """

    def get(self):
        """ render the command input box posting back to /gadgetexec/. """
        commandbox(self.response, "/gadgetexec/")

    def post(self):
        """ dispatch the submitted command to the wave bot. """
        logging.debug("HBEXEC incoming: %s" % self.request.remote_addr)
        event = WebEvent().parse(self.response, self.request)
        event.bot = wavebot
        event.cbtype = 'HBEXEC'
        # mark the event as coming from a wave gadget
        event.iswave = True
        event.isgadget = True
        try:
            wavebot.doevent(event)
        except NoSuchCommand:
            event.reply("no %s command found" % event.usercmnd)
        except Exception, ex:
            # log unexpected errors instead of returning a 500 traceback
            handle_exception(event)
# the application
application = webapp.WSGIApplication([('/gadgetexec', HB_Handler),
                                      ('/gadgetexec/', HB_Handler)],
                                     debug=True)

def main():
    """ wsgi entry point. """
    # NOTE: the stale "global webbot" declaration was removed -- the webbot
    # assignment above is commented out, so the name no longer exists.
    global application
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
| Python |
"""JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
# None when the C speedups extension is absent
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner']
# JSON number grammar: integer part, optional fraction, optional exponent
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
    """Build a pure-python ``scan_once(string, idx)`` for *context*.

    *context* is a JSONDecoder instance; its parse hooks and options are
    bound to locals here so the inner closure avoids attribute lookups.
    """
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo
    def _scan_once(string, idx):
        # dispatch on the first character of the next JSON value
        try:
            nextchar = string[idx]
        except IndexError:
            # end of input: signalled as StopIteration, handled by callers
            raise StopIteration
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict,
                _scan_once, object_hook, object_pairs_hook, memo)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            # a fraction or exponent makes it a float, otherwise an int
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration
    def scan_once(string, idx):
        try:
            return _scan_once(string, idx)
        finally:
            # the key memo only lives for one top-level scan
            memo.clear()
    return scan_once
make_scanner = c_make_scanner or py_make_scanner
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
def _import_c_scanstring():
try:
from simplejson._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Compute (NaN, +Infinity, -Infinity) from their IEEE-754 bit patterns."""
    # 16 bytes: big-endian NaN followed by big-endian +Infinity
    # (str.decode('hex') is Python 2 only)
    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
    # The struct module in Python 2.4 would get frexp() out of range here
    # when an endian is specified in the format string. Fixed in Python 2.5+
    if sys.byteorder != 'big':
        # byte-swap each 8-byte double for little-endian hosts
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """Subclass of ValueError with the following additional properties:
    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)
    """
    def __init__(self, msg, doc, pos, end=None):
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        if end is not None:
            # BUGFIX: was linecol(doc, pos), which made endlineno/endcolno
            # duplicate lineno/colno instead of describing the end position.
            self.endlineno, self.endcolno = linecol(doc, end)
        else:
            self.endlineno, self.endcolno = None, None
def linecol(doc, pos):
    """Return the (line, column) of index *pos* inside *doc*.

    Lines are 1-based. On the first line the column equals *pos*
    (0-based); on later lines it is 1-based, measured from the last
    newline before *pos*.
    """
    newline_count = doc.count('\n', 0, pos)
    if not newline_count:
        return 1, pos
    last_newline = doc.rindex('\n', 0, pos)
    return newline_count + 1, pos - last_newline
def errmsg(msg, doc, pos, end=None):
    """Format a decode error message with line/column information.

    Note that this function is called from _speedups, so its signature
    must stay stable.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return ('%s: line %d column %d - line %d column %d (char %d - %d)'
            % (msg, lineno, colno, endlineno, endcolno, pos, end))
# out-of-spec constants understood by the decoder
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}
# match a run of plain characters followed by a quote, backslash or
# control character (the string terminator/escape dispatch point)
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# simple one-character escape sequences
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.
    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    # remember where the string content started, for error messages
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                #msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # non-strict mode: keep the literal control character
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence: \uXXXX
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                # combine the high and low surrogate into one code point
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object starting just after its '{'.

    Returns (object, index-after-'}'). *memo* deduplicates key strings
    across objects. (Python 2 tuple-parameter syntax.)
    """
    # Backwards compatibility
    if memo is None:
        memo = {}
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        # share one string object per distinct key
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting : delimiter", s, end)
        end += 1
        try:
            # fast path: at most one space, else fall back to the regex
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        pairs.append((key, value))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end - 1)
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end - 1)
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array starting just after its '['.

    Returns (list, index-after-']'). (Python 2 tuple-parameter syntax.)
    """
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end)
        try:
            # fast path: at most one space, else fall back to the regex
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder
    Performs the following translations in decoding by default:
    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+
    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """
    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default).  It has no effect when decoding :class:`unicode` objects.
        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.
        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).
        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`.  This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.
        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded.  By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).
        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded.  By default, this is equivalent to
        ``int(num_str)``.  This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).
        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
        can be used to raise an exception if invalid JSON numbers are
        encountered.
        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # shared key memo, cleared after each top-level scan (see scanner)
        self.memo = {}
        self.scan_once = make_scanner(self)
    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        # anything but trailing whitespace after the document is an error
        end = _w(s, end).end()
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj
    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.
        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end
| Python |
"""Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
try:
from simplejson import _speedups
return _speedups.encode_basestring_ascii, _speedups.make_encoder
except ImportError:
return None, None
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from simplejson.decoder import PosInf
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
FLOAT_REPR = repr
def encode_basestring(s):
    """Return a JSON representation of a Python string
    """
    # byte strings with high bits are assumed utf-8 and decoded first
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    # run every escapable character through the escape table
    return u'"' + ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s) + u'"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string
    """
    # byte strings with high bits are assumed utf-8 and decoded first
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                #return '\\u{0:04x}'.format(n)
                return '\\u%04x' % (n,)
            else:
                # surrogate pair: split the supplementary code point
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.
    Supports the following objects and types by default:
    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # class-level defaults, overridable via the separators argument
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None,
            use_decimal=False):
        """Constructor for JSONEncoder, with sensible defaults.
        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.
        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.
        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.
        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.
        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.
        If indent is a string, then JSON array elements and object members
        will be pretty-printed with a newline followed by that string repeated
        for each level of nesting. ``None`` (the default) selects the most compact
        representation without any newlines. For backwards compatibility with
        versions of simplejson earlier than 2.1.0, an integer is also accepted
        and is converted to a string with that many spaces.
        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.
        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.
        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        If use_decimal is true (not the default), ``decimal.Decimal`` will
        be supported directly by the encoder. For the inverse, decode JSON
        with ``parse_float=decimal.Decimal``.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.use_decimal = use_decimal
        # pre-2.1.0 compatibility: an int means "that many spaces"
        if isinstance(indent, (int, long)):
            indent = ' ' * indent
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            self.default = default
        self.encoding = encoding
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).
        For example, to support arbitrary iterators, you could
        implement default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError(repr(o) + " is not JSON serializable")
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.
        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        # wrap the encoder so byte strings are decoded with self.encoding
        if self.encoding != 'utf-8':
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)
        def floatstr(o, allow_nan=self.allow_nan,
                _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text
        key_memo = {}
        # use the C encoder only for the compact one-shot case
        if (_one_shot and c_make_encoder is not None
                and not self.indent and not self.sort_keys):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan, key_memo, self.use_decimal)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot, self.use_decimal)
        try:
            return _iterencode(o, 0)
        finally:
            key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
    """An encoder that produces JSON safe to embed in HTML.

    The characters ``&``, ``<`` and ``>`` cannot be escaped with the
    usual HTML entities inside ``<script>`` tags, so they are emitted
    as ``\\uXXXX`` escapes instead.
    """

    # character -> unicode-escape replacements applied to every chunk
    _HTML_ESCAPES = (('&', '\\u0026'), ('<', '\\u003c'), ('>', '\\u003e'))

    def encode(self, o):
        # Override JSONEncoder.encode because it has hacks for
        # performance that make things more complicated.
        chunks = list(self.iterencode(o, True))
        if self.ensure_ascii:
            return ''.join(chunks)
        return u''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        for chunk in super(JSONEncoderForHTML, self).iterencode(o, _one_shot):
            for char, escape in self._HTML_ESCAPES:
                chunk = chunk.replace(char, escape)
            yield chunk
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
_use_decimal,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
Decimal=Decimal,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield buf + str(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield str(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield '}'
if markers is not None:
del markers[markerid]
    def _iterencode(o, _current_indent_level):
        """Top-level chunk generator: dispatch *o* by type, yield JSON text.

        The ``is True`` / ``is False`` identity checks come before the int
        check because bool is a subclass of int.
        """
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        elif _use_decimal and isinstance(o, Decimal):
            yield str(o)
        else:
            if markers is not None:
                # Guard against _default() returning something that
                # (indirectly) contains *o* again.
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
return _iterencode
| Python |
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    all
except NameError:
    # Python 2.4 has no all() builtin; install an equivalent fallback.
    def all(seq):
        """Return True only when every element of *seq* is truthy."""
        for item in seq:
            if not item:
                return False
        return True
class OrderedDict(dict, DictMixin):
    """Dict subclass that remembers insertion order.

    Implementation: a circular doubly linked list of [key, prev, next]
    nodes rooted at the ``__end`` sentinel, plus a ``__map`` from key to
    its node for O(1) deletion. DictMixin supplies the derived methods.
    """
    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Only initialise the linked list once (guards re-__init__).
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)
    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)
    def __setitem__(self, key, value):
        if key not in self:
            # Append a new node just before the sentinel (i.e. at the end).
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the node from the list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev
    def __iter__(self):
        # Walk the linked list forwards from the sentinel.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        # Walk the linked list backwards from the sentinel.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO when *last* is true."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value
    def __reduce__(self):
        # Pickle as (class, (items,)) so order survives a round-trip; the
        # linked-list internals are temporarily removed from the instance
        # dict so they are not pickled.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def keys(self):
        return list(self)
    # Derived methods come from DictMixin, expressed via the primitives above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    def copy(self):
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        # Order-sensitive comparison against another OrderedDict;
        # order-insensitive against a plain dict.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
| Python |
import unittest
import doctest
class OptionalExtensionTestSuite(unittest.TestSuite):
    """TestSuite that runs its tests twice: once as configured, once with
    the C speedups forced off, then re-enables the speedups."""
    def run(self, result):
        import simplejson
        run = unittest.TestSuite.run
        # First pass with whatever implementation is currently active.
        run(self, result)
        # Second pass against the pure-Python implementations.
        simplejson._toggle_speedups(False)
        run(self, result)
        # Restore the C extensions for any code that runs after the suite.
        simplejson._toggle_speedups(True)
        return result
def additional_tests(suite=None):
    """Append the simplejson doctests (modules and index.rst) to *suite*.

    Creates a fresh TestSuite when *suite* is None; returns the suite.
    """
    import simplejson
    import simplejson.encoder
    import simplejson.decoder
    if suite is None:
        suite = unittest.TestSuite()
    for mod in (simplejson, simplejson.encoder, simplejson.decoder):
        suite.addTest(doctest.DocTestSuite(mod))
    # Path is relative to the tests package directory.
    suite.addTest(doctest.DocFileSuite('../../index.rst'))
    return suite
def all_tests_suite():
    """Build the complete simplejson suite: all named test modules plus
    the doctests, wrapped so everything runs with and without speedups."""
    suite = unittest.TestLoader().loadTestsFromNames([
        'simplejson.tests.test_check_circular',
        'simplejson.tests.test_decode',
        'simplejson.tests.test_default',
        'simplejson.tests.test_dump',
        'simplejson.tests.test_encode_basestring_ascii',
        'simplejson.tests.test_encode_for_html',
        'simplejson.tests.test_fail',
        'simplejson.tests.test_float',
        'simplejson.tests.test_indent',
        'simplejson.tests.test_pass1',
        'simplejson.tests.test_pass2',
        'simplejson.tests.test_pass3',
        'simplejson.tests.test_recursion',
        'simplejson.tests.test_scanstring',
        'simplejson.tests.test_separators',
        'simplejson.tests.test_speedups',
        'simplejson.tests.test_unicode',
        'simplejson.tests.test_decimal',
    ])
    suite = additional_tests(suite)
    return OptionalExtensionTestSuite([suite])
def main():
    """Run the full simplejson test suite with a text runner."""
    unittest.TextTestRunner().run(all_tests_suite())
if __name__ == '__main__':
    import os
    import sys
    # Put the package root on sys.path so `simplejson` resolves when this
    # file is executed directly (it lives three levels below the root).
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
    main()
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.1.0rc3'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=False,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If *indent* is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder — only usable when every option matches the defaults
    # it was built with. BUGFIX: ``use_decimal`` must be checked too,
    # otherwise dump(obj, fp, use_decimal=True) would silently use the
    # cached encoder (built with use_decimal=False) and fail on Decimals.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.

    """
    # cached encoder — fast path when every option matches the defaults
    # that _default_encoder was constructed with.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal, **kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.

    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded. By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded. By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    # Thin wrapper: slurp the whole stream and delegate to loads().
    return loads(fp.read(),
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
        use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.

    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.

    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded. By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded. By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).

    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.

    """
    # Fast path: all options at their defaults -> reuse the cached decoder.
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and object_pairs_hook is None
            and not use_decimal and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Only forward hooks the caller actually supplied, so the decoder's
    # own defaults apply to the rest.
    if object_hook is not None:
        kw['object_hook'] = object_hook
    if object_pairs_hook is not None:
        kw['object_pairs_hook'] = object_pairs_hook
    if parse_float is not None:
        kw['parse_float'] = parse_float
    if parse_int is not None:
        kw['parse_int'] = parse_int
    if parse_constant is not None:
        kw['parse_constant'] = parse_constant
    if use_decimal:
        # use_decimal is shorthand for parse_float=Decimal; reject
        # contradictory arguments rather than silently picking one.
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch decoder/encoder/scanner between C and pure-Python versions.

    Used by the test suite. Also rebuilds the module-level cached default
    encoder and decoder so they pick up the newly selected implementations.
    """
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer the C implementations, falling back to Python if absent.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        # Force the pure-Python implementations.
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    # Rebuild the cached codecs so they bind the selected functions.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
| Python |
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import simplejson as json
def main():
    """Read JSON (stdin or argv[1]), pretty-print it (stdout or argv[2]).

    Exits via SystemExit with a usage message on bad arguments, or with
    the decode error message when the input is not valid JSON.
    """
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'rb')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'rb')
        outfile = open(sys.argv[2], 'wb')
    else:
        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
    try:
        # OrderedDict keeps object keys in document order and Decimal keeps
        # full numeric precision, so the round-trip is faithful.
        obj = json.load(infile,
                        object_pairs_hook=json.OrderedDict,
                        use_decimal=True)
    except ValueError, e:
        raise SystemExit(e)
    json.dump(obj, outfile, sort_keys=True, indent='    ', use_decimal=True)
    outfile.write('\n')
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/env python
#
#
from setuptools import setup
import glob
import os
upload = []
def uploadlist(dir):
    """Recursively collect file paths under *dir*.

    Hidden entries (leading '.') and compiled ``.pyc`` files are skipped;
    directories are descended into. The parameter keeps its historical
    name ``dir`` to preserve the call signature.
    """
    collected = []
    for entry in os.listdir(dir):
        if not entry or entry.startswith('.'):
            continue
        path = dir + os.sep + entry
        if os.path.isdir(path):
            collected.extend(uploadlist(path))
        elif not entry.endswith(".pyc"):
            collected.append(path)
    return collected
# Files shipped from the App Engine upload tree.
upload = uploadlist('gaeupload')

setup(
    name='feedprovider',
    version='0.2.1',
    url='http://feedprovider.googlecode.com/',
    download_url="http://code.google.com/p/feedprovider/downloads",
    author='Bart Thate',
    author_email='bthate@gmail.com',
    description='The hubbub bot of the future!',
    license='MIT',
    scripts = ['bin/fpr',
               'bin/fpr-irc',
               'bin/fpr-release',
               'bin/fpr-run',
               'bin/fpr-upload'],
    packages=['gozerlib',
              'gozerlib.utils',
              'gozerlib.gae',
              'gozerlib.gae.utils',
              'gozerlib.gae.web',
              'gozerlib.gae.wave',
              'gozerlib.gae.xmpp',
              'gozerlib.socket',
              'gozerlib.socket.irc',
              'gozerlib.socket.utils',
              'gozerlib.socket.rest',
              'gozerlib.remote',
              'gozerlib.contrib',
              'gozerlib.plugs',
              'waveplugs',
              'commonplugs',
              'socketplugs'],
    # NOTE(review): distutils expects package_dir values to be directory
    # strings, not lists — this mapping looks wrong; verify intent.
    package_dir={'feedprovider': ['gozerlib', 'waveplugs', 'commonplugs', 'socketplugs']},
    long_description = """ FEEDPROVIDER is a wave and xmpp bot for pushing pubsubhubbub feeds to Google Wave and Jabber (see feedprovider@appspot.com). Combined with a feed fetching service like superfeedr.com it can deliver your feeds on multiple platforms (wave and xmpp are suported now though xmpp conferences aren't yet) - FEEDPROVIDER runs on the Google Application Engine - IRC support is on its way. """,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Other Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Unix',
        'Operating System :: Other OS',
        'Programming Language :: Python',
        'Topic :: Communications :: Chat',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    zip_safe=False,
    test_suite = 'nose.collector',
    # Ship config, tests, bundled simplejson and the whole GAE tree.
    data_files=[('config', uploadlist('config')),
                ('tests', uploadlist('tests')),
                ('simplejson', uploadlist('simplejson')),
                ('gaeupload', uploadlist('gaeupload')),
                ('gaeupload/assets', uploadlist('gaeupload/assets')),
                ('gaeupload/templates', uploadlist('gaeupload/templates')),
                ('gaeupload/waveapi', uploadlist('gaeupload/waveapi')),
                ('gaeupload/waveapi/oauth', uploadlist('gaeupload/waveapi/oauth')),
                ('gaeupload/gadgets', uploadlist('gaeupload/gadgets'))],
    )
| Python |
# socketplugs/remoteserver.py
#
#
## gozerlib imports
from gozerlib.callbacks import callbacks
from gozerlib.utils.url import posturl, getpostdata
from gozerlib.persiststate import PlugState
from gozerlib.commands import cmnds
from gozerlib.socket.irc.monitor import outmonitor
from gozerlib.socket.rest.server import RestServer, RestRequestHandler
from gozerlib.remote.event import RemoteEvent
from gozerlib.remote.bot import RemoteBot
from gozerlib.utils.exception import handle_exception
from gozerlib.examples import examples
## socketplugs imports
from socketplugs.restserver import startserver, stopserver
from commonplugs.remote import state
## simplejson imports
from simplejson import dumps
## basic imports
import socket
import re
import logging
## VARS
outurl = "http://feedprovider.appspot.com/remote/"
## callbacks
def preremote(bot, event):
    """Precondition callback: True when the event's channel is configured
    for remote relaying, otherwise None (falsy)."""
    relay = state.data.relay
    if not relay:
        return
    if event.channel in relay:
        return True
def handle_doremote(bot, event):
    """Broadcast a locally generated event to the configured remote outs.

    NOTE(review): rebinds the *bot* parameter to a RemoteBot — the original
    bot is only used for its ``server`` attribute.
    """
    if event.isremote:
        # Never re-relay events that already arrived from the remote network.
        return
    e = RemoteEvent(bot.server, event.tojson())
    e.makeid()
    bot = RemoteBot(state.data.outs)
    bot.broadcast(e)
# Relay all supported event types, gated by preremote, in worker threads.
callbacks.add('PRIVMSG', handle_doremote, preremote, threaded=True)
callbacks.add('OUTPUT', handle_doremote, preremote, threaded=True)
callbacks.add('MESSAGE', handle_doremote, preremote, threaded=True)
callbacks.add('BLIP_SUBMITTED', handle_doremote, preremote, threaded=True)
outmonitor.add('remote', handle_doremote, preremote, threaded=True)
## server part
server = None
def remote_POST(server, request):
    """Handle a POSTed remote event: decode the 'container' payload and
    feed the reconstructed event to the callback chain.

    Returns a JSON string reporting success or the error.
    """
    # This module does not import EventBase at the top; import locally
    # (it was previously an unresolved name here).
    from gozerlib.eventbase import EventBase
    try:
        input = getpostdata(request)
        container = input['container']
    # BUGFIX: the original `except KeyError, AttributeError:` bound the
    # KeyError instance to the name AttributeError instead of catching
    # both exception types — a tuple is required.
    except (KeyError, AttributeError):
        logging.warn("remote - %s - can't determine eventin" % request.ip)
        return dumps(["can't determine eventin"])
    event = EventBase()
    event.load(container)
    callbacks.check(event)
    return dumps(['ok',])
def remote_GET(server, request):
try:
path, container = request.path.split('#', 1)
except ValueError:
logging.warn("remote - %s - can't determine eventin" % request.ip)
return dumps(["can't determine eventin", ])
try:
event = EventBase()
event.load(container)
callbacks.check(event)
except Exception, ex:
handle_exception()
return dumps(['ok', ])
def start():
    """Start (or reuse) the REST server and mount the /remote/ handlers."""
    global server
    server = startserver()
    if not server:
        # REST server disabled in config or unavailable (e.g. on GAE).
        return
    try:
        server.addhandler('/remote/', 'POST', remote_POST)
        server.addhandler('/remote/', 'GET', remote_GET)
        server.enable('/remote/')
    except Exception, ex:
        handle_exception()
## plugin init
def init():
    """Plugin init: mount the /remote/ handlers on the REST server."""
    start()
def shutdown():
    """Plugin shutdown: unmount the /remote/ handlers from the REST server.

    BUGFIX: the original called ``server.disable('/remote/')`` twice
    (copy-paste duplicate); once is sufficient.
    """
    global server
    if server:
        server.disable('/remote/')
def handle_remoteserver_start(bot, event):
    """ add the /remote/ mountpoints to the REST server. """
    init()
    # Mark the command event as handled.
    event.done()
cmnds.add('remoteserver-start', handle_remoteserver_start, 'OPER')
examples.add('remoteserver-start', 'initialize the FEEDPROVIDER remote event network server', 'remoteserver-start')
def handle_remoteserver_stop(bot, event):
    """ remove the /remote/ mountpoints from the REST server. """
    shutdown()
    # Mark the command event as handled.
    event.done()
cmnds.add('remoteserver-stop', handle_remoteserver_stop, 'OPER')
examples.add('remoteserver-stop', 'stop the FEEDPROVIDER remote event network server', 'remoteserver-stop')
| Python |
# socketplugs/restserver.py
#
#
## gozerlib imports
from gozerlib.callbacks import callbacks
from gozerlib.utils.url import posturl, getpostdata
from gozerlib.persistconfig import PersistConfig
from gozerlib.commands import cmnds
from gozerlib.socket.irc.monitor import outmonitor
from gozerlib.socket.rest.server import RestServer, RestRequestHandler
from gozerlib.eventbase import EventBase
from gozerlib.utils.exception import handle_exception
from gozerlib.examples import examples
## simplejson imports
from simplejson import dumps
## basic imports
import socket
import re
import logging
## defines
enable = True
try:
cfg = PersistConfig()
cfg.define('enable', 0)
cfg.define('host' , socket.gethostbyname(socket.getfqdn()))
cfg.define('name' , socket.getfqdn())
cfg.define('port' , 10102)
cfg.define('disable', [])
hp = "%s:%s" % (cfg.get('host'), cfg.get('port'))
url = "http://%s" % hp
except AttributeError:
# we are on the GAE
enable = False
## server part
server = None
## functions
def startserver(force=False):
    """Start the REST server and return it (or None on failure/disabled).

    :param force: start a new server even if one is already recorded
    """
    if not enable:
        # Module-level flag: set False when PersistConfig is unavailable (GAE).
        logging.warn("rest server is disabled")
        return
    global server
    if server and not force:
        logging.info("REST server is already running. ")
        return server
    try:
        server = RestServer((cfg.get('host'), cfg.get('port')), RestRequestHandler)
        if server:
            server.start()
            logging.warn('restserver - running at %s:%s' % (cfg.get('host'), cfg.get('port')))
            # Re-apply persisted per-mountpoint disables.
            for mount in cfg.get('disable'):
                server.disable(mount)
        else:
            logging.error('restserver - failed to start server at %s:%s' % (cfg.get('host'), cfg.get('port')))
    except socket.error, ex:
        logging.warn('restserver - start - socket error: %s' % str(ex))
    except Exception, ex:
        handle_exception()
    return server
def stopserver():
try:
if not server:
logging.warn('restserver - server is already stopped')
return
server.shutdown()
except Exception, ex:
handle_exception()
pass
## plugin init
def init():
    """Plugin init: start the REST server when enabled in the config."""
    if cfg['enable']:
        startserver()
def shutdown():
    """Plugin shutdown: stop the REST server when it was enabled."""
    if cfg['enable']:
        stopserver()
def handle_rest_start(bot, event):
    """rest-start command: persist the enable flag and start the server."""
    cfg['enable'] = 1
    cfg.save()
    startserver()
    event.done()
cmnds.add('rest-start', handle_rest_start, 'OPER')
examples.add('rest-start', 'start the REST server', 'rest-start')
def handle_rest_stop(bot, event):
    """rest-stop command: persist the disable flag and stop the server."""
    cfg['enable'] = 0
    cfg.save()
    stopserver()
    event.done()
cmnds.add('rest-stop', handle_rest_stop, 'OPER')
examples.add('rest-stop', 'stop the REST server', 'rest-stop')
| Python |
# feedprovider socket related plugins
#
#
""" this package contains all the socket related plugins. """
import os
# Discover plugin modules: every .py file and every non-hidden
# subdirectory next to this __init__ becomes a plugin name.
(f, tail) = os.path.split(__file__)
__all__ = []
for i in os.listdir(f):
    if i.endswith('.py'):
        __all__.append(i[:-3])
    elif os.path.isdir(f + os.sep + i) and not i.startswith('.'):
        __all__.append(i)
# This file should not list itself as a plugin.
# BUGFIX: catch only ValueError (raised when '__init__' is absent)
# instead of a bare except that hid every other error.
try:
    __all__.remove('__init__')
except ValueError:
    pass
__plugs__ = __all__
| Python |
import unittest | Python |
# gozerlib/fleet.py
#
#
""" fleet is a list of bots. """
## gozerlib imports
from utils.exception import handle_exception
from utils.generic import waitforqueue
from config import Config
from config import cfg as mainconfig
from users import users
from plugins import plugs
from persist import Persist
from errors import NoSuchBotType
## waveapi imports
from simplejson import load
## basic imports
import Queue
import os
import types
import time
import glob
import logging
## classes
class FleetBotAlreadyExists(Exception):
    """Raised when a bot with the same name is already in the fleet."""
    pass
class Fleet(Persist):
"""
a fleet contains multiple bots (list of bots).
"""
def __init__(self):
Persist.__init__(self, 'fleet')
if not self.data.has_key('names'):
self.data['names'] = []
self.bots = []
    def loadall(self):
        """Instantiate every bot recorded in the persisted name list."""
        for name in self.data.names:
            # type=None: makebot falls back to the type saved in the
            # bot's own config.
            self.makebot(None, name)
    def avail(self):
        """Return the (persisted) list of known bot names."""
        return self.data['names']
    def getfirstbot(self):
        """
        return the main bot of the fleet.

        :rtype: gozerlib.botbase.BotBase
        :raises IndexError: when no bots have been added yet

        .. literalinclude:: ../../gozerlib/fleet.py
            :pyobject: Fleet.getfirstbot
        """
        return self.bots[0]
def getfirstjabber(self):
"""
return the first jabber bot of the fleet.
:rtype: gozerlib.botbase.BotBase
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.getfirstjabber
"""
for bot in self.bots:
if bot.type == 'xmpp' or bot.type == 'jabber':
return bot
    def size(self):
        """
        return number of bots in fleet.

        :rtype: integer

        .. literalinclude:: ../../gozerlib/fleet.py
            :pyobject: Fleet.size
        """
        return len(self.bots)
def settype(self, name, type):
cfg = Config('fleet' + os.sep + name + os.sep + 'config')
cfg['name'] = name
logging.warn("fleet - %s - setting type to %s" % (self.cfile, type))
cfg.type = type
cfg.save()
def makebot(self, type=None, name=None, domain="", cfg={}):
"""
create a bot .. use configuration if provided.
:param name: the name of the bot
:type name: string
:param cfg: configuration file for the bot
:type cfg: gozerlib.config.Config
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.makebot
"""
logging.info('fleet - making %s (%s) bot - %s' % (type, name, str(cfg)))
bot = None
name = name or 'default-%s' % type
if not cfg:
cfg = Config('fleet' + os.sep + name + os.sep + 'config')
cfg['name'] = name
if not cfg.type and type:
logging.warn("fleet - %s - setting type to %s" % (cfg.cfile, type))
cfg.type = type
cfg.save()
if not cfg['type']:
try:
self.data['names'].remove(name)
self.save()
except ValueError:
pass
raise Exception("no bot type specified")
if not cfg.owner:
cfg.owner = mainconfig.owner
if not cfg['domain'] and domain:
cfg['domain'] = domain
cfg.save()
if not cfg:
raise Exception("can't make config for %s" % name)
type = type or cfg['type']
# create bot based on type
if type == 'xmpp' or type == 'jabber':
from gozerlib.gae.xmpp.bot import XMPPBot
bot = XMPPBot(cfg)
elif type == 'web':
from gozerlib.gae.web.bot import WebBot
bot = WebBot(cfg)
elif type == 'wave':
from gozerlib.gae.wave.bot import WaveBot
dom = cfg.domain or domain
bot = WaveBot(cfg, domain=dom)
elif type == 'remote':
from gozerlib.remote.bot import RemoteBot
bot = RemoteBot(cfg)
else:
raise NoSuchBotType('%s bot .. unproper type %s' % (name, type))
# set bot name and initialize bot
if bot:
if name and name not in self.data['names']:
self.data['names'].append(name)
self.save()
self.addbot(bot)
return bot
# failed to created the bot
raise Exception("can't make %s bot" % name)
def makewavebot(self, domain, cfg={}):
"""
create a bot .. use configuration if provided.
:param name: the name of the bot
:type name: string
:param cfg: configuration file for the bot
:type cfg: gozerlib.config.Config
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.makebot
"""
logging.info('fleet - making %s (%s) wave bot - %s' % (domain, 'wave', str(cfg)))
bot = None
type = 'wave'
name = domain
if not cfg:
cfg = Config('fleet' + os.sep + name + os.sep + 'config')
cfg['name'] = domain
if not cfg.owner:
cfg.owner = mainconfig.owner
if not cfg['domain']:
cfg['domain'] = domain
cfg.save()
if not cfg.type:
logging.warn("fleet - %s - setting type to %s" % (cfg.cfile, type))
cfg.type = type
cfg.save()
if not cfg['type']:
try:
self.data['names'].remove(name)
self.save()
except ValueError:
pass
raise Exception("no bot type specified")
if not cfg:
raise Exception("can't make config for %s" % name)
type = 'wave'
# create bot based on type
from gozerlib.gae.wave.bot import WaveBot
bot = WaveBot(cfg, domain=domain)
# set bot name and initialize bot
if bot:
if name not in self.data['names']:
self.data['names'].append(name)
self.save()
self.addbot(bot)
return bot
# failed to created the bot
raise Exception("can't make %s bot" % name)
def save(self):
"""
save fleet data and call save on all the bots.
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.save
"""
Persist.save(self)
for i in self.bots:
try:
i.save()
except Exception, ex:
handle_exception()
def list(self):
"""
return list of bot names.
:rtype: list
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.list
"""
result = []
for i in self.bots:
result.append(i.name)
return result
def stopall(self):
"""
call stop() on all fleet bots.
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.stopall
"""
for i in self.bots:
try:
i.stop()
except:
pass
def byname(self, name):
"""
return bot by name.
:param name: name of the bot
:type name: string
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.byname
"""
for i in self.bots:
if name == i.name:
return i
def bydomain(self, domain):
"""
return bot by name.
:param name: name of the bot
:type name: string
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.byname
"""
for i in self.bots:
if domain == i.domain:
return i
return self.makewavebot(domain)
def replace(self, name, bot):
"""
replace bot with a new bot.
:param name: name of the bot to replace
:type name: string
:param bot: bot to replace old bot with
:type bot: gozerlib.botbase.BotBase
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.replace
"""
for i in range(len(self.bots)):
if name == self.bots[i].name:
self.bots[i] = bot
return
def addbot(self, bot):
"""
add a bot to the fleet .. remove all existing bots with the
same name.
:param bot: bot to add
:type bot: gozerlib.botbase.BotBase
:rtype: boolean
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.addbot
"""
if bot:
for i in range(len(self.bots)-1, -1, -1):
if self.bots[i].name == bot.name:
logging.debug('fleet - removing %s from fleet' % bot.name)
del self.bots[i]
logging.debug('fleet - adding %s' % bot.name)
self.bots.append(bot)
return True
return False
def delete(self, name):
"""
delete bot with name from fleet.
:param name: name of bot to delete
:type name: string
:rtype: boolean
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.delete
"""
for i in self.bots:
if i.name == name:
i.exit()
self.remove(i)
i.cfg['enable'] = 0
i.cfg.save()
logging.debug('fleet - %s disabled' % i.name)
return True
return False
def remove(self, bot):
"""
delete bot by object.
:param bot: bot to delete
:type bot: gozerlib.botbase.BotBase
:rtype: boolean
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.remove
"""
try:
self.bots.remove(bot)
return True
except ValueError:
return False
def exit(self, name=None, jabber=False):
"""
call exit on all bots. if jabber=True only jabberbots will exit.
:param name: name of the bot to exit. if not provided all bots will exit.
:type name: string
:param jabber: flag to set when only jabberbots should exit
:type jabber: boolean
:rtype: boolean
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.exit
"""
if not name:
threads = []
for i in self.bots:
i.exit()
return
for i in self.bots:
if i.name == name:
try:
i.exit()
except:
handle_exception()
self.remove(i)
return True
return False
def cmnd(self, event, name, cmnd):
"""
do command on a bot.
:param event: event to pass on to the dispatcher
:type event: gozerlib.event.EventBase
:param name: name of the bot to pass on to the dispatcher
:type name: string
:param cmnd: command to execute on the fleet bot
:type cmnd: string
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.cmnd
"""
bot = self.byname(name)
if not bot:
return 0
from gozerlib.eventbase import EventBase
j = plugs.clonedevent(bot, event)
j.onlyqueues = True
j.txt = cmnd
q = Queue.Queue()
j.queues = [q]
j.speed = 3
plugs.trydispatch(bot, j)
result = waitforqueue(q)
if not result:
return
res = ["[%s]" % bot.name, ]
res += result
event.reply(res)
def cmndall(self, event, cmnd):
"""
do a command on all bots.
:param event: event to pass on to dispatcher
:type event: gozerlib.eventbase.EventBase
:param cmnd: the command string to execute
:type cmnd: string
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.cmndall
"""
for i in self.bots:
self.cmnd(event, i.name, cmnd)
def broadcast(self, txt):
"""
broadcast txt to all bots.
:param txt: text to broadcast on all bots
:type txt: string
.. literalinclude:: ../../gozerlib/fleet.py
:pyobject: Fleet.broadcast
"""
for i in self.bots:
i.broadcast(txt)
# main fleet object .. module-level singleton shared by the whole bot
fleet = Fleet()
| Python |
# gozerlib/config.py
#
#
""" config module. """
## gozerlib imports
from utils.trace import whichmodule
from utils.lazydict import LazyDict
from utils.exception import handle_exception
from datadir import datadir
from errors import CantSaveConfig
## simplejson imports
from simplejson import loads, dumps
## basic imports
import os
import types
import thread
import logging
## classes
class Config(LazyDict):
"""
config class is a dict containing json strings. is writable to file
and human editable.
:param filename: filename of the config file
:type filename: string
:param verbose: whether loading of config should ne verbose
:type verbose: boolean
:rtype: self
"""
def __init__(self, filename=None, verbose=False, *args, **kw):
LazyDict.__init__(self, *args, **kw)
#self.dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__ + os.sep + '..')))
self.filename = filename or 'mainconfig'
self.dir = 'config'
self.cfile = self.dir + os.sep + self.filename
self.init()
self.jsondb = None
try:
self.fromfile(self.cfile)
self.isdb = False
except IOError:
from persist import Persist
self.jsondb = Persist(self.cfile)
self.update(self.jsondb.data)
self.isdb = True
logging.debug("config - fromdb - %s - %s" % (self.cfile, str(self)))
def __getitem__(self, item):
if not self.has_key(item):
return None
else:
return dict.__getitem__(self, item)
def set(self, item, value):
"""
set item to value.
:param item: item to set value of
:type item: string
:param value: value to set
:type value: dict, list, string, number or boolean
"""
dict.__setitem__(self, item, value)
return self
def fromdb(self):
""" read config from database. """
from gozerlib.persist import Persist
logging.info("config - fromdb - %s" % self.cfile)
tmp = Persist(self.cfile)
self.update(tmp.data)
return self
def todb(self):
""" save config to database. """
cp = dict(self)
del cp['jsondb']
if not self.jsondb:
from gozerlib.persist import Persist
self.jsondb = Persist(self.cfile)
self.jsondb.data = cp
self.jsondb.save()
return self
def fromfile(self, filename):
"""
read config object from filename.
:param filename: name of file to write to
:type filename: string
:rtype: self
"""
curline = ""
# read file and set config values to loaded JSON entries
fname = filename
logging.info("config - fromfile - %s" % fname)
if not os.path.exists(fname):
return self
# open file
f = open(fname, 'r')
# loop over data in config file
for line in f:
curline = line
line = line.strip()
if not line or line.startswith('#'):
continue
else:
key, value = line.split('=', 1)
self[key.strip()] = loads(unicode(value.strip()))
return self
def tofile(self, filename=None):
""" save config object to file. """
if not filename:
filename = self.cfile
try:
from os import mkdir
except ImportError:
logging.debug("can't save %s to file .. os.mkdir() not suported" % filename)
return
ddir = "."
d = []
for p in filename.split(os.sep)[:-1]:
d.append(p)
ddir = os.sep.join(d)
if not os.path.isdir(ddir):
logging.warn("persist - creating %s dir" % ddir)
try:
os.mkdir(ddir)
except OSError, ex:
logging.warn("persist - not saving - failed to make %s - %s" % (ddir, str(ex)))
return
#logging.warn("config - tofile - %s" % ddir)
written = []
curitem = None
try:
# read existing config file if available
try:
configlist = open(filename, 'r').readlines()
except IOError:
configlist = []
# make temp file
configtmp = open(filename + '.tmp', 'w')
teller = 0
# write header if not already there
if not configlist:
configtmp.write('# %s\n\n' % self.cfile)
# loop over original lines replacing updated data
for line in configlist:
teller += 1
# skip comment
if line.startswith('#'):
configtmp.write(line)
continue
# take part after the =
try:
keyword = line.split('=')[0].strip()
curitem = keyword
except IndexError:
configtmp.write(line)
continue
# write JSON string of data
if self.has_key(keyword):
configtmp.write('%s = %s\n' % (keyword, dumps(self[keyword])))
written.append(keyword)
else:
configtmp.write(line)
# write data not found in original config file
for keyword, value in self.iteritems():
if keyword in written:
continue
if keyword == 'jsondb':
continue
if keyword == 'isdb':
continue
if keyword == 'optionslist':
continue
curitem = keyword
configtmp.write('%s = %s\n' % (keyword, dumps(value)))
# move temp file to original
configtmp.close()
try:
os.rename(filename + '.tmp', self.cfile)
except WindowsError:
# no atomic operation supported on windows! error is thrown when destination exists
os.remove(filename)
os.rename(filename + '.tmp', self.cfile)
return teller
except Exception, ex:
print "ERROR WRITING %s CONFIG FILE: %s .. %s" % (self.cfile, str(ex), curitem)
return self
def save(self):
if self.isdb:
self.todb()
else:
self.tofile()
def load(self, verbose=False):
"""
load the config file.
:param verbose: whether loading should be verbose
"""
self.init()
if self.isdb:
self.fromdb()
if verbose:
logging.debug('PRE LOAD config %s' % str(self))
return self
def init(self):
if self.filename == 'mainconfig':
self.setdefault('owner', [])
self.setdefault('loglist', [])
self.setdefault('quitmsg', "http://feedprovider.googelcode.com")
self.setdefault('debug', 0)
self.setdefault('plugdeny', [])
self.setdefault('dotchars', " .. ")
self.setdefault('floodallow', 1)
self.setdefault('auto_register', 1)
self.setdefault('ondemand', 1)
self['version'] = "FEEDPROVIDER 0.2.1"
return self
def reload(self):
"""
reload the config file.
"""
self.load()
return self
def ownercheck(userhost):
    """
    check whether userhost is a owner.

    :param userhost: userhost to check
    :type userhost: string
    :rtype: boolean
    """
    return bool(userhost and userhost in cfg['owner'])
## define

# global config object .. shared by all modules that import this one
cfg = Config()
| Python |
# gozerbot/persistconfig.py
#
#
""" allow data to be pickled to disk .. creating the persisted object
restores data.
usage:
!plug-cfg -> shows list of all config
!plug-cfg key value -> sets value to key
!plug-cfg key -> shows list of key
!plug-cfg key add value -> adds value to list
!plug-cfg key remove value -> removes value from list
!plug-cfg key clear -> clears entire list
!plug-cfgsave -> force save configuration to disk
todo:
- plugin api (more work needed?)
"""
__copyright__ = 'this file is in the public domain'
__author__ = 'Bas van Oostveen'
## gozerlib imports
from gozerlib.utils.trace import calledfrom, whichplugin
from gozerlib.commands import cmnds, Command
from gozerlib.examples import examples
from gozerlib.datadir import datadir
from gozerlib.persist import Persist
from gozerlib.config import Config
from gozerlib.config import cfg as config
## basic imports
import sys
import os
import types
import time
import logging
## classes
class PersistConfigError(Exception):
    """ generic persistconfig error. """
    pass
class PersistConfig(Config):
""" persist plugin configuration and create default handlers. """
def __init__(self):
self.hide = []
self.modname = whichplugin()
self.plugname = self.modname.split('.')[-1]
Config.__init__(self, 'plugs' + os.sep + self.plugname, "config")
cmndname = "%s-cfg" % self.plugname
logging.debug('persistconfig - added command %s (%s)' % (cmndname, self.plugname))
cmnds[cmndname] = Command(self.plugname, cmndname, self.cmnd_cfg, ['OPER', ])
examples.add(cmndname, "plugin configuration", cmndname)
cmndnamesave = cmndname + "save"
cmnds[cmndnamesave] = Command(self.plugname, cmndname, self.cmnd_cfgsave, ['OPER',])
examples.add(cmndnamesave, "save plugin configuration", cmndnamesave)
### cmnds
def show_cfg(self, bot, ievent):
""" show config options. """
s = []
for key, optionvalue in sorted(self.iteritems()):
if key in self.hide:
continue
v = optionvalue
if type(v) in [str, unicode]:
v = '"'+v+'"'
v = str(v)
s.append("%s=%s" % (key, v))
ievent.reply("options: " + ' .. '.join(s))
def cmnd_cfgsave(self, bot, ievent):
""" save config. """
self.save()
ievent.reply("config saved")
def cmnd_cfg_edit(self, bot, ievent, args, key, optionvalue):
""" edit config values. """
if not self.has_key(key):
ievent.reply('option %s is not defined' % key)
return
if key in self.hide:
return
if type(optionvalue) == types.ListType:
if args[0].startswith("[") and args[-1].endswith("]"):
values = []
for v in ' '.join(args)[1:-1].replace(", ", ",").split(","):
if v[0]=='"' and v[-1]=='"':
# string
v = v.replace('"', '')
elif v[0]=="'" and v[-1]=="'":
# string
v = v.replace("'", "")
elif '.' in v:
# float
try:
v = float(v)
except ValueError:
ievent.reply("invalid long literal: %s" % v)
return
else:
# int
try:
v = int(v)
except ValueError:
ievent.reply("invalid int literal: %s" % v)
return
values.append(v)
self.set(key, values)
self.save()
ievent.reply("%s set %s" % (key, values))
return
command = args[0]
value = ' '.join(args[1:])
if command == "clear":
self.clear(key)
self.save()
ievent.reply("list empty")
elif command == "add":
self.append(key, value)
self.save()
ievent.reply("%s added %s" % (key, value))
elif command == "remove" or command == "del":
try:
self.remove(key, value)
self.save()
ievent.reply("%s removed" % str(value))
except ValueError:
ievent.reply("%s is not in list" % str(value))
else:
ievent.reply("invalid command")
return
else:
value = ' '.join(args)
try:
value = type(optionvalue)(value)
except:
pass
if type(value) == type(optionvalue):
self.set(key, value)
self.save()
ievent.reply("%s set" % key)
elif type(value) == types.LongType and \
type(option.value) == types.IntType:
# allow upscaling from int to long
self.set(key, value)
self.save()
ievent.reply("%s set" % key)
else:
ievent.reply("value %s (%s) is not of the same type as %s \
(%s)" % (value, type(value), optionvalue, type(optionvalue)))
def cmnd_cfg(self, bot, ievent):
""" the config (cfg) command. """
if not ievent.args:
self.show_cfg(bot, ievent)
return
argc = len(ievent.args)
key = ievent.args[0]
try:
optionvalue = self[key]
except KeyError:
ievent.reply("%s option %s not found" % (self.plugname, key))
return
if key in self.hide:
return
if argc == 1:
ievent.reply(str(optionvalue))
return
self.cmnd_cfg_edit(bot, ievent, ievent.args[1:], key, optionvalue)
def generic_cmnd(self, key):
""" command for editing config values. """
def func(bot, ievent):
try:
optionvalue = self[key]
except KeyError:
ievent.reply("%s not found" % key)
# need return ?
if not isinstance(option, Option):
logging.warn('persistconfig - option %s is not a valid option' % key)
return
if ievent.args:
value = ' '.join(ievent.args)
try:
value = type(optionvalue)(value)
except:
pass
self.cmnd_cfg_edit(bot, ievent, ievent.args, key, optionvalue)
else:
ievent.reply(str(optionvalue))
return func
### plugin api
def define(self, key, value=None, desc="plugin option", perm='OPER', \
example="", name=None, exposed=True):
""" define initial value. """
if name:
name = name.lower()
if not exposed:
self.hide.append(key)
if not self.has_key(key):
if name == None:
name = "%s-cfg-%s" % (self.plugname, str(key))
self[key] = value
def undefine(self, key, throw=False):
""" remove a key. """
try:
del self[key]
return True
except KeyError, e:
if throw:
raise
self.save()
return False
def set(self, key, value, throw=False):
""" set a key's value. """
self[key] = value
def append(self, key, value):
""" append a value. """
self[key].append(value)
def remove(self, key, value):
""" remove a value. """
self[key].remove(value)
def clear(self, key):
""" clear a value. """
self[key] = []
def get(self, key, default=None):
""" get value of key. """
try:
return self[key]
except KeyError:
return default
| Python |
# gozerlib/remote/bot.py
#
#
""" remote bot. handlers incoming nodes. """
## gozerlib imports
from gozerlib.utils.url import posturl, getpostdata
from gozerlib.botbase import BotBase
from event import RemoteEvent, Container
import logging
class RemoteBot(BotBase):

    """ RemoteBot broadcasts events through HTTP POST calls. """

    def __init__(self, cfg=None, users=None, plugs=None, jid=None, *args, **kwargs):
        BotBase.__init__(self, cfg, users, plugs, jid, *args, **kwargs)
        if self.cfg:
            self.cfg['type'] = 'remote'
        self.type = "remote"
        # urls this bot posts its events to
        self.outs = []

    def addouts(self, outs):
        """ add urls to the output list, skipping duplicates. """
        for out in outs:
            if out not in self.outs:
                self.outs.append(out)
        return self

    def _raw(self, url, data, *args, **kwargs):
        """ post data to url, wrapped in a Container with a fresh id. """
        container = Container(self.jid, data)
        container.makeid()
        posturl(url, {}, {"container": container.dump()})

    def broadcast(self, data, *args, **kwargs):
        """ post data to every registered output url. """
        for url in self.outs:
            self._raw(url, data, *args, **kwargs)

    def say(self, channel, txt, event=None, *args, **kwargs):
        """ broadcast txt as a reply event .. event, when given, is used
            as the base of the outgoing event. (default was a shared
            mutable {} .. now None, same truthiness) """
        logging.warn('remote - out - %s - %s' % (channel, txt))
        re = RemoteEvent()
        if event:
            re.copyin(event)
        else:
            # assumes BotBase provides self.server -- TODO confirm
            re.userhost = "%s.%s" % (self.name, self.server or 'feedprovider.appspot.com')
            re.nick = self.name
        re.isreply = True
        re.iscallback = False
        re.fromm = self.jid
        re.txt = re.origtxt = txt
        re.iscmnd = False
        re.botoutput = True
        re.isresponse = True
        re.remotecmnd = False
        re.bot = self.target
        self.broadcast(re.dump(), *args, **kwargs)

    def cmnd(self, event, txt, *args, **kwargs):
        """ broadcast txt as a remote command event based on event. """
        logging.warn('remote - cmnd - %s - %s - %s' % (str(self.target), self.outs, txt))
        re = RemoteEvent()
        re.copyin(event)
        re.isreply = True
        re.printto = event.userhost
        re.target = event.userhost
        re.txt = re.origtxt = txt
        re.iscmnd = True
        re.remotecmnd = True
        re.remoteout = self.jid
        re.bot = self.target
        self.broadcast(re.dump(), *args, **kwargs)
| Python |
# gozerlib/remote/event.py
#
#
""" gozerlib remote event. """
## gozerlib imports
from gozerlib.eventbase import EventBase
from gozerlib.utils.generic import splittxt
from gozerlib.utils.lazydict import LazyDict
## simplejson imports
from simplejson import loads
## basic imports
import cgi
import logging
import copy
import time
import uuid
## defines
cpy = copy.deepcopy

# the attributes that together determine a container's unique id
idattributes = ['origin', 'type', 'payload', 'idtime']

## functions

def getid(container):
    """ build a uuid3 hex id from the container's identifying attributes. """
    parts = []
    for attr in idattributes:
        try:
            parts.append(str(container[attr]))
        except KeyError:
            pass
    return uuid.uuid3(uuid.NAMESPACE_URL, "".join(parts)).hex
## classes
class Container(LazyDict):

    """ carrier for a payload travelling between remote bots. """

    def __init__(self, origin, payload, type="event"):
        LazyDict.__init__(self)
        self.createtime = time.time()
        self.type = str(type)
        self.origin = origin
        self.payload = unicode(payload)

    def makeid(self):
        """ stamp the container with an id derived from its attributes. """
        self.idtime = time.time()
        self.id = getid(self)
class RemoteEvent(EventBase):

    """ event coming in from or going out to a remote bot. """

    def __init__(self):
        EventBase.__init__(self)
        self.type = "remote"

    def __deepcopy__(self, a):
        """ deepcopy by copying all attributes into a fresh event. """
        e = RemoteEvent()
        e.copyin(self)
        return e

    def parse(self, response, request):
        """ parse request/response into a RemoteEvent. """
        logging.warn(u'%s %s' % (dir(request), dir(response)))
        logging.warn(str(request))
        # the event payload comes in as a 'container' POST field or as the
        # raw query string
        eventin = request.get('container')
        if not eventin:
            eventin = request.environ.get('QUERY_STRING')
        if not eventin:
            return ["can't determine eventin", ]
        origin = request.get('origin')
        if not origin:
            origin = str(request.remote_addr)
        logging.warn(u"remote.event - %s - parsing %s" % (origin, unicode(eventin)))
        container = LazyDict(loads(eventin))
        self.load(container.payload)
        self.isremote = True
        self.response = response
        self.request = request
        self.remoteout = origin
        logging.info(u'remote.event - in - %s - %s' % (self.userhost, self.txt))
        return self

    def _raw(self, txt, end=""):
        """
        put txt onto the reponse object .. adding end string if provided.
        output is NOT escaped.
        """
        txt = unicode(txt)
        # was: "remove.event" .. typo in the log prefix
        logging.info(u'remote.event - out - %s - %s' % (self.userhost, txt))
        self.bot.say(self.remoteout, txt, self)

    def write(self, txt, start=u"", end=u"<br>", raw=False):
        """
        put txt onto the reponse object .. adding end string if provided.
        output IS escaped.
        """
        if not raw:
            self._raw(start + cgi.escape(txt) + end)
        else:
            self._raw(start + txt + end)

    def reply(self, txt, resultlist=None, nritems=False, dot=" .. ", raw=False, *args, **kwargs):
        """ send reply to the web user. (resultlist default was a shared
            mutable [] .. now None) """
        if resultlist is None:
            resultlist = []
        if self.checkqueues(resultlist):
            return
        result = self.makeresponse(txt, resultlist, nritems, dot, *args, **kwargs)
        self.write(result)
| Python |
# gozerlib/outputcache.py
#
#
## gozerlib imports
from persist import Persist
from utils.name import stripname
## basic imports
import os
## functions
def add(target, txtlist):
    """ append txtlist to the cached output for target, keeping only the
        30 most recent messages. """
    cache = Persist('outputcache' + os.sep + stripname(target))
    d = cache.data
    if not d.has_key('msg'):
        d['msg'] = []
    d['msg'].extend(txtlist)
    # keep the last 30 items .. one slice instead of popping the head in a
    # loop (same result, no quadratic behaviour)
    d['msg'] = d['msg'][-30:]
    cache.save()
def set(target, txtlist):
    """ replace the cached output for target with txtlist. """
    cache = Persist('outputcache' + os.sep + stripname(target))
    # was: initialised cache.data['msg'] to [] first .. dead code, the
    # value is overwritten unconditionally right after
    cache.data['msg'] = txtlist
    cache.save()
def get(target):
    """ return the cached messages for target, emptying the cache when
        something is handed out. """
    cache = Persist('outputcache' + os.sep + stripname(target))
    try:
        result = cache.data['msg']
    except KeyError:
        return []
    if not result:
        return []
    # messages are delivered once .. clear and persist the empty cache
    cache.data['msg'] = []
    cache.save()
    return result
| Python |
# gozerlib/threads.py
#
#
""" own threading wrapper. """
## lib imports
from utils.exception import handle_exception
## basic imports
import threading
import re
import time
import thread
import logging
## defines
# regular expressions used to pull a readable thread name out of
# repr(func), e.g. "<bound method Foo.bar of ...>" or
# "<function baz at 0x...>" .. raw strings so \s and \S are proper regex
# escapes instead of (deprecated) string escapes
methodre = re.compile(r'method\s+(\S+)', re.I)
funcre = re.compile(r'function\s+(\S+)', re.I)
## classes
class Botcommand(threading.Thread):
""" thread for running bot commands. """
def __init__(self, group, target, name, args, kwargs):
threading.Thread.__init__(self, None, target, name, args, kwargs)
self.name = name
self.ievent = args[1]
self.setDaemon(True)
def join(self):
""" join the thread. """
threading.Thread.join(self)
def run(self):
""" run the bot command. """
try:
result = threading.Thread.run(self)
if self.ievent.closequeue:
logging.debug('threads- closing queue for %s' % self.ievent.userhost)
for i in self.ievent.queues:
i.put_nowait(None)
except Exception, ex:
handle_exception(self.ievent)
time.sleep(1)
class Thr(threading.Thread):
""" thread wrapper. """
def __init__(self, group, target, name, args, kwargs):
threading.Thread.__init__(self, None, target, name, args, kwargs)
self.setDaemon(True)
self.name = name
def join(self):
""" join the thread. """
threading.Thread.join(self)
def run(self):
""" run the thread. """
try:
logging.debug('threads - running thread %s' % self.name)
threading.Thread.run(self)
except Exception, ex:
handle_exception()
time.sleep(1)
## functions
def getname(func):
    """ derive a readable name from a function or method repr. """
    rep = str(func)
    found = methodre.search(rep)
    if found:
        return found.group(1)
    found = funcre.search(rep)
    if found:
        return found.group(1)
    # neither a method nor a function repr .. fall back to the repr itself
    return rep
def start_new_thread(func, arglist, kwargs=None):
    """ start a new thread .. set name to function/method name. """
    # default was a shared mutable {} .. now None, handled the same way
    if not kwargs:
        kwargs = {}
    if not 'name' in kwargs:
        name = getname(func)
        if not name:
            name = str(func)
    else:
        name = kwargs['name']
    logging.warn("new thread: %s - %s" % (name, str(arglist)))
    try:
        # on app engine defer the call instead of spawning a real thread
        # NOTE(review): kwargs (possibly including the 'name' key) is
        # passed straight through to func here .. confirm callers expect
        # that
        from google.appengine.ext.deferred import defer
        defer(func, *arglist, **kwargs)
        return
    except ImportError:
        pass
    try:
        thread = Thr(None, target=func, name=name, args=arglist, kwargs=kwargs)
        thread.start()
        return thread
    except Exception:
        # was a bare except .. keep KeyboardInterrupt/SystemExit deliverable
        handle_exception()
        time.sleep(1)
def start_bot_command(func, arglist, kwargs=None):
    """ start a new thread .. set name to function/method name. """
    # default was a shared mutable {} .. now None, handled the same way
    if not kwargs:
        kwargs = {}
    try:
        name = getname(func)
        if not name:
            name = 'noname'
        thread = Botcommand(group=None, target=func, name=name, args=arglist, kwargs=kwargs)
        thread.start()
        return thread
    except Exception:
        # was a bare except .. keep KeyboardInterrupt/SystemExit deliverable
        handle_exception()
        time.sleep(1)
def threaded(func):
    """ decorator that fires the wrapped callable off on its own thread. """
    def threadedfunc(*args, **kwargs):
        """ run the wrapped callable in a freshly started thread. """
        start_new_thread(func, args, kwargs)
    return threadedfunc
| Python |
# gozerlib/sockets/utils/generic.py
#
#
""" generic functions """
## gozerlib imports
from gozerlib.persist import Persist
from gozerlib.utils.exception import handle_exception
from gozerlib.utils.trace import calledfrom
from gozerlib.config import cfg as config
from gozerlib.utils.lazydict import LazyDict
## simplejson
from simplejson import dumps
## generic imports
from stat import ST_UID, ST_MODE, S_IMODE
import time
import sys
import re
import getopt
import types
import os
import random
import socket
import Queue
def jsonstring(s):
    """ dump s to a JSON string .. tuples are serialized as lists. """
    # isinstance instead of type() == types.TupleType (also catches
    # tuple subclasses)
    if isinstance(s, tuple):
        s = list(s)
    return dumps(s)
def stripident(userhost):
    """ strip ident char from userhost .. returns None for empty input. """
    try:
        # jabber JIDs (objects with getNode) carry no ident prefix
        userhost.getNode()
        return str(userhost)
    except AttributeError:
        pass
    if not userhost:
        return None
    if userhost[0] in "~-+^":
        userhost = userhost[1:]
    elif len(userhost) > 1 and userhost[1] == '=':
        # guard the length .. a one character userhost used to raise
        # IndexError here
        userhost = userhost[2:]
    return userhost
def stripidents(ulist):
    """ strip ident chars from a list of userhosts. """
    return [stripident(userhost) for userhost in ulist]
def makedirs(datadir):
    """ create the datadir and its standard subdirectories. """
    if not os.path.isdir(datadir):
        os.mkdir(datadir)
    for sub in ('/states/', '/db/', '/configs/'):
        if not os.path.isdir(datadir + sub):
            os.mkdir(datadir + sub)
def cleanpyc():
    """ remove compiled .pyc files from the plugin directories and return
        the names of the removed files. """
    removed = []
    for directory in ('gozerplugs', 'gozerplugs/plugs'):
        try:
            for fname in os.listdir(directory):
                if fname.endswith('.pyc'):
                    os.unlink(directory + os.sep + fname)
                    removed.append(fname)
        except:
            # directory missing or not removable .. best effort
            pass
    return removed
def cleanpycfile(filename):
    """ remove a single .pyc file, ignoring any error. """
    try:
        if filename.endswith('.pyc'):
            os.unlink(filename)
            # NOTE(review): rlog is not imported or defined in this module,
            # so this call raises NameError, which the bare except below
            # swallows .. the log line never happens. confirm and port to
            # logging.
            rlog(10, 'generic', 'cleaned %s' % filename)
    except:
        pass
def getversion():
    """ return the version string, tagged with the user-store backend. """
    version = config['version']
    if config['nodb']:
        return version + ' JSON_USERS'
    return version + ' ' + config['dbtype'].upper()
def makeoptions(ievent, options={}):
    """ parse getopt style options from ievent.txt into ievent.options ..
        options maps flags ('--chan', '-f', ..) to their default values.
        NOTE(review): the mutable default is rebound to a LazyDict on the
        first line, so it is never mutated in place .. still worth
        replacing with None at some point. """
    options = LazyDict(options)
    try:
        optargs = ""
        optlist = []
        # fill in the standard options when the caller did not set them
        if not options.has_key('--filter'):
            options['--filter'] = ""
        if not options.has_key('--to'):
            options['--to'] = None
        if not options.has_key('--chan'):
            options['--chan'] = ievent.channel
        if not options.has_key('--how'):
            options['--how'] = "msg"
        if not options.has_key('--speed'):
            options['--speed'] = str(ievent.speed)
        else:
            options['--speed'] = str(options['--speed'])
        # build the getopt short-option string and long-option list from
        # the option names .. a truthy default means the flag takes a value
        for i, j in options.iteritems():
            if '--' in i:
                optlist.append("%s=" % i[2:])
                if j:
                    optlist.append(j)
                continue
            if '-' in i:
                if j:
                    optargs += ":%s" % i[1:]
                else:
                    optargs += i[1:]
        args = ievent.txt.split()
        # first word is the command itself .. parse the rest
        try:
            (opts, rest) = getopt.getopt(args[1:], optargs, optlist)
        except AttributeError, ex:
            print "option not allowed: %s" % str(ex), ievent.txt, options
            return 0
        except getopt.GetoptError, ex:
            return 0
        # remember which options were set explicitly on the command line
        if opts:
            for item in opts:
                ievent.optionset.append(item[0])
        # merge parsed options over the defaults
        o = dict(options)
        o.update(dict(opts))
        try:
            filter = o['--filter']
            if filter and filter not in ievent.filter:
                ievent.filter.append(filter)
        except KeyError:
            pass
        try:
            speed = o['--speed']
            ievent.speed = int(speed)
        except KeyError:
            pass
        try:
            ievent.channel = o['--chan'] or ievent.channel
        except KeyError:
            pass
        ievent.options.update(o)
        # rebuild ievent.txt without the consumed options and re-derive
        # args/rest/usercmnd from it
        if args:
            ievent.txt = args[0] + ' ' + ' '.join(rest)
            makeargrest(ievent)
    except Exception, ex:
        handle_exception()
        return
    return ievent.options
def makeargrest(ievent):
    """ set ievent.args, ievent.rest and ievent.usercmnd from ievent.txt ..
        needed because ircevents might be created outside the parse()
        function. """
    if not ievent.txt:
        return
    words = ievent.txt.split()
    ievent.args = words[1:]
    try:
        cmnd, ievent.rest = ievent.txt.split(' ', 1)
    except ValueError:
        # no space .. the command has no trailing text
        ievent.rest = ""
    ievent.usercmnd = words[0]
def setdefenc(encoding):
    """ set the interpreter wide default encoding (python2-only hack). """
    import sys
    # site.py deletes sys.setdefaultencoding at startup .. reload(sys)
    # restores it so it can be called here
    reload(sys)
    sys.setdefaultencoding(encoding)
def plugfile(datadir):
    """ return the datadir path belonging to the calling plugin. """
    caller = calledfrom(sys._getframe())
    return datadir + os.sep + caller
def cchar(bot, ievent):
    """ return the command character configured for the event's channel,
        falling back to the global default or '!'. """
    try:
        return bot.channels[ievent.channel]['cc']
    except (LookupError, TypeError):
        # channel unknown or channels store not subscriptable
        return config['defaultcc'] or '!'
def splittxt(what, l=375):
    """ split what into chunks of roughly l characters, breaking on the
        first space at or after each chunk boundary. """
    txtlist = []
    start = 0
    end = l
    length = len(what)
    # floor division .. correct both under python2 int division and when
    # true division is in effect
    for i in range(length // end + 1):
        endword = what.find(' ', end)
        if endword == -1:
            endword = length
        res = what[start:endword]
        if res:
            txtlist.append(res)
        start = endword
        end = start + l
    return txtlist
class istr(str):
    """ marker subclass of str .. purpose not evident from this module,
        presumably used elsewhere as a type tag (TODO confirm). """
    pass
def die():
    """ kill the current process hard .. signal 9 (SIGKILL) cannot be
        caught, so no cleanup handlers run. """
    os.kill(os.getpid(), 9)
def getlistensocket(listenip):
    """ scan ports 5001..65000 for a free one on listenip and return
        (port, listening socket) .. loops forever when none can be bound. """
    port = 5000
    while 1:
        # small sleep keeps the scan from spinning at full speed
        time.sleep(0.01)
        try:
            port += 1
            # IPv6 when the address contains a colon, IPv4 otherwise
            if ':' in listenip:
                sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.setblocking(1)
            # wrap around before running out of port numbers
            if port > 65000:
                port = 5000
            sock.bind((listenip, port))
            sock.listen(1)
            return (port, sock)
        except Exception, ex:
            # port taken or bind failed .. try the next one
            pass
def checkchan(bot, item):
    """ extract a ' chan <name>' target from item .. returns (channel,
        remaining text) or None when no channel is given. """
    found = re.search(' chan (\S+)', item)
    if not found:
        return None
    chan = str(found.group(1))
    rest = re.sub(' chan ' + re.escape(chan), '', item)
    return (chan.lower(), rest)
def getwho(bot, who):
    """ get userhost from bots userhost cache """
    # cache keys are lowercased nicks
    who = who.lower()
    try:
        result = bot.userhosts.data[who]
        if bot.cfg['stripident']:
            # NOTE(review): rlog is not defined in this module .. this line
            # raises NameError when stripident is enabled. confirm and port
            # to logging.
            rlog(10, 'getwho', 'removed ident from %s' % result)
            result = stripident(result)
        return result
    except KeyError:
        return None
def waitforuser(bot, userhost, timeout=15):
    """ wait for a PRIVMSG from userhost .. registers a queue with the
        bot's privwait mechanism (with a timeout) and blocks until a
        result is put on it. """
    queue = Queue.Queue()
    waitnr = bot.privwait.register(userhost, queue, timeout)
    result = queue.get()
    # deregister the wait before returning
    bot.privwait.delete(waitnr)
    return result
def getrandomnick():
    """ generate a random gbot<number> nick. """
    return "gbot%s" % random.randint(0, 100)
def waitforqueue(queue, timeout=10, maxitems=None):
    """ collect items from queue .. stops when the queue stays empty for
        timeout seconds, a falsy item arrives, or maxitems items have
        been gathered. returns the list of items. """
    result = []
    while 1:
        try:
            # Queue.get(block, timeout) .. block with timeout
            res = queue.get(1, timeout)
        except Queue.Empty:
            break
        if not res:
            # falsy item terminates the collection
            break
        result.append(res)
        if maxitems and len(result) == maxitems:
            break
    return result
def decodeperchar(txt, encoding='utf-8', what=""):
res = []
nogo = []
for i in txt:
try:
res.append(i.decode(encoding))
except UnicodeDecodeError:
if i not in nogo:
nogo.append(i)
continue
if nogo:
if what:
rlog(10, 'generic', "%s: can't decode %s characters to %s" % (what, nogo, encoding))
else:
rlog(10, 'generic', "can't decode %s characters to %s" % (nogo, encoding))
return u"".join(res)
def toenc(what, encoding='utf-8'):
    """ encode what to the given encoding .. returns u"" for empty input
        or when encoding fails (failure is logged). """
    if not what:
        return u""
    try:
        w = unicode(what)
        return w.encode(encoding)
    except UnicodeEncodeError:
        rlog(10, 'generic', "can't encode %s to %s" % (what, encoding))
        return u""
def fromenc(txt, encoding='utf-8', what=""):
if not txt:
return u""
try:
if type(txt) == types.UnicodeType:
t = txt.encode(encoding)
t = unicode(txt)
return unicode(t.decode(encoding))
except UnicodeDecodeError:
return decodeperchar(txt, encoding, what)
def toascii(what):
    """ encode what to ascii .. unencodable characters become '?'. """
    return what.encode('ascii', 'replace')
def tolatin1(what):
    """ encode what to latin-1 .. unencodable characters become '?'. """
    return what.encode('latin-1', 'replace')
def strippedtxt(what, allowed=[]):
    """ remove control characters (ord <= 31) from what .. tab and the
        irc formatting codes \\001 \\002 \\003 plus anything in allowed
        are kept. """
    keep = allowed + ['\001', '\002', '\003', '\t']
    return ''.join(ch for ch in what if ord(ch) > 31 or ch in keep)
def uniqlist(l):
    """ return list of stripped items with duplicates removed ..
        insertion order is preserved. uses a seen-set for O(1) membership
        instead of the original O(n) list scan per item. """
    seen = set()
    result = []
    for i in l:
        j = i.strip()
        if j not in seen:
            seen.add(j)
            result.append(j)
    return result
def fix_format(s):
    """ balance irc bold (\\002) and color (\\003) codes .. when a code
        occurs an odd number of times the closing code is appended. """
    for code in (chr(2), chr(3)):
        if s.count(code) % 2:
            s += code
    return s
def stripbold(s):
    """ remove irc bold (\\002) and color (\\003) codes from s. """
    for code in (chr(2), chr(3)):
        s = s.replace(code, '')
    return s
def jabberstrip(text, allowed=[]):
    """ remove control characters (ord <= 31) from text .. newline, tab
        and anything in allowed are kept. """
    keep = allowed + ['\n', '\t']
    return ''.join(ch for ch in text if ord(ch) > 31 or ch in keep)
def plugnames(dirname):
    """ return the plugin names found in dirname .. packages (directories
        containing __init__.py) and plain .py modules, with the
        '__init__' module itself filtered out. """
    result = []
    for i in os.listdir(dirname):
        path = dirname + os.sep + i
        if os.path.isdir(path):
            if os.path.exists(path + os.sep + '__init__.py'):
                result.append(i)
        elif i.endswith('.py'):
            result.append(i[:-3])
    try:
        result.remove('__init__')
    except ValueError:
        # no __init__ module present .. bare except narrowed to the
        # ValueError that list.remove actually raises
        pass
    return result
def filesize(path):
    """ return the size of path in bytes. """
    return os.stat(path).st_size
def touch(fname):
    """ create fname if it does not exist yet. """
    descriptor = os.open(fname, os.O_CREAT | os.O_WRONLY)
    os.close(descriptor)
def stringinlist(s, l):
    """ return 1 when s occurs as a substring of any element of l ..
        returns None otherwise. """
    if any(s in element for element in l):
        return 1
def stripped(userhost):
    """ return the userhost with any '/resource' part removed. """
    return userhost.partition('/')[0]
def checkpermissions(ddir, umode):
    """ make sure ddir is owned by the current user and has mode umode ..
        best effort: silently skipped on platforms without uid/gid
        support or when the path can't be statted. """
    try:
        uid = os.getuid()
        gid = os.getgid()
    except AttributeError:
        # no getuid/getgid (e.g. windows) .. nothing to check
        return
    try:
        stat = os.stat(ddir)
    except OSError:
        # path does not exist or can't be statted
        return
    if stat[ST_UID] != uid:
        try:
            os.chown(ddir, uid, gid)
        except:
            pass
    if S_IMODE(stat[ST_MODE]) != umode:
        try:
            os.chmod(ddir, umode)
        except:
            handle_exception()
            pass
def gethighest(ddir, ffile):
    """ scan ddir for directories named <ffile>.<nr> and return ffile
        with the next free sequence number appended. """
    highest = 0
    for entry in os.listdir(ddir):
        if not (os.path.isdir(ddir + os.sep + entry) and ffile in entry):
            continue
        try:
            seqnr = int(entry.split('.')[2])
        except (IndexError, ValueError):
            # no usable sequence number in this name
            continue
        if seqnr > highest:
            highest = seqnr
    return ffile + '.' + str(highest + 1)
def dosed(filename, sedstring):
    """ apply a sed style 's/from/to/' replacement to filename in place ..
        writes to a temp file first and renames it over the original.
        fixes: 'except WindowsError' raised NameError on non-windows
        platforms (OSError covers it everywhere); the input file was
        never closed. """
    try:
        f = open(filename, 'r')
    except IOError as ex:
        if 'Is a dir' in str(ex):
            return
        else:
            raise
    tmp = filename + '.tmp'
    fout = open(tmp, 'w')
    seds = sedstring.split('/')
    fr = seds[1].replace('\\', '')
    to = seds[2].replace('\\', '')
    try:
        for line in f:
            l = line.replace(fr, to)
            fout.write(l)
    finally:
        fout.flush()
        fout.close()
        f.close()
    try:
        os.rename(tmp, filename)
    except OSError:
        # no atomic rename onto an existing file on windows .. remove
        # the destination first (WindowsError is an OSError subclass)
        os.remove(filename)
        os.rename(tmp, filename)
def convertpickle(src, target):
    """ convert an old gozerbot pickle file at src into a new Persist
        file at target .. when saving fails on unpicklable keys the keys
        are converted with jsonstring and saved again. """
    import gozerbot.compat.persist
    p = gozerbot.compat.persist.Persist(src)
    if p and p.data:
        pers = Persist(target)
        if not pers.data:
            pers.data = {}
        pers.data.update(p.data)
        try:
            pers.save()
        except TypeError:
            # some keys can't be saved as-is .. retry with jsonstring keys
            pers2 = Persist(target)
            if not pers2.data:
                pers2.data = {}
            for item, value in p.data.iteritems():
                pers2.data[jsonstring(item)] = value
            pers2.save()
| Python |
# gozerbot/rest/server.py
#
#
## gozerlib imports
from gozerlib.utils.exception import handle_exception, exceptionmsg
from gozerlib.utils.trace import calledfrom
from gozerlib.config import cfg as config
from gozerlib.persiststate import ObjectState
from gozerlib.threads import start_new_thread
# basic imports
from SocketServer import BaseServer, ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urllib import unquote_plus
from asyncore import dispatcher
from cgi import escape
import time
import sys
import select
import types
import socket
import logging
class RestServerBase(HTTPServer):
    """ REST web server .. dispatches requests to registered handlers
        and supports ip white/blacklisting and disabling path elements. """
    allow_reuse_address = True
    daemon_thread = True
    def start(self):
        """ init server state and start serving in its own thread. """
        self.name = calledfrom(sys._getframe(0))
        # set to True to make the serve loop exit
        self.stop = False
        self.running = False
        # maps tuple-of-path-elements -> {http method: handler}
        self.handlers = {}
        self.webmods = {}
        self.state = ObjectState()
        self.state.define('whitelistenable', 0)
        self.state.define('whitelist', [])
        self.state.define('blacklist', [])
        self.state.define('disable', [])
        # poll the listen socket so handle_request is only called when
        # a connection is pending
        self.poll = select.poll()
        self.poll.register(self)
        start_new_thread(self.serve, ())
    def shutdown(self):
        """ stop the serve loop and close the listen socket. """
        try:
            self.stop = True
            #self.socket.shutdown(2)
            #self.socket.close()
            time.sleep(0.2)
            self.server_close()
        except Exception, ex:
            handle_exception()
    def serve(self):
        """ serve loop .. poll and handle requests until stopped. """
        logging.warn('rest.server - starting')
        time.sleep(1)
        while not self.stop:
            self.running = True
            try:
                # 100 ms poll timeout
                got = self.poll.poll(100)
            except Exception, ex:
                handle_exception()
            if got and not self.stop:
                try:
                    self.handle_request()
                except Exception, ex:
                    handle_exception()
            time.sleep(0.01)
        self.running = False
        logging.warn('rest.server - stopping')
    def entrypoint(self, request):
        """ access check .. send a 401 and return False when the client
            ip is denied by the white/blacklist. """
        ip = request.ip
        if not self.whitelistenable() and ip in self.blacklist():
            logging.warn('rest.server - denied %s' % ip)
            request.send_error(401)
            return False
        if self.whitelistenable() and ip not in self.whitelist():
            logging.warn('rest.server - denied %s' % ip)
            request.send_error(401)
            return False
        return True
    def whitelistenable(self):
        """ whether only whitelisted ips are allowed. """
        return self.state['whitelistenable']
    def whitelist(self):
        """ list of allowed ips. """
        return self.state['whitelist']
    def blacklist(self):
        """ list of denied ips. """
        return self.state['blacklist']
    def addhandler(self, path, type, handler):
        """ add a web handler """
        # normalise the path into a tuple of non-empty elements
        splitted = []
        for i in path.split('/'):
            if i:
                splitted.append(i)
        splitted = tuple(splitted)
        if not self.handlers.has_key(splitted):
            self.handlers[splitted] = {}
        self.handlers[splitted][type] = handler
        logging.info('rest.server - %s %s handler added' % (splitted, type))
    def enable(self, what):
        """ re-enable a disabled path element. """
        try:
            self.state['disable'].remove(what)
            logging.info('rest.server - enabled %s' % str(what))
        except ValueError:
            pass
    def disable(self, what):
        """ disable a path element .. requests containing it get a 404. """
        self.state['disable'].append(what)
        logging.info('rest.server - disabled %s' % str(what))
    def do(self, request):
        """ do a request """
        # strip query string and trailing slash
        path = request.path.split('?')[0]
        if path.endswith('/'):
            path = path[:-1]
        splitted = []
        for i in path.split('/'):
            if i:
                splitted.append(i)
        splitted = tuple(splitted)
        for i in self.state['disable']:
            if i in splitted:
                logging.warn('rest.server - %s - denied disabled %s' % (request.ip, i))
                request.send_error(404)
                return
        request.splitted = splitted
        request.value = None
        type = request.command
        try:
            func = self.handlers[splitted][type]
        except (KeyError, ValueError):
            # no exact match .. treat the last path element as an argument
            try:
                func = self.handlers[splitted[:-1]][type]
                request.value = splitted[-1]
            except (KeyError, ValueError):
                request.send_error(404)
                return
        result = func(self, request)
        logging.info('rest.server - %s - result: %s' % (request.ip, str(result)))
        return result
    def handle_error(self, request, addr):
        """ log the error """
        ip = request.ip
        exctype, excvalue, tb = sys.exc_info()
        # socket timeouts/errors are expected .. log and swallow them
        if exctype == socket.timeout:
            logging.warn('rest.server - %s - socket timeout' % (ip, ))
            return
        if exctype == socket.error:
            logging.warn('rest.server - %s - socket error: %s' % (ip, excvalue))
            return
        exceptstr = exceptionmsg()
        logging.warn('rest.server - %s - error %s %s => %s' % (ip, exctype, excvalue, exceptstr))
class RestServer(ThreadingMixIn, RestServerBase):
    """ threaded REST server .. each request is handled in its own thread. """
    pass
class RestServerAsync(RestServerBase, dispatcher):
    """ asyncore based REST server. """
    pass
class RestRequestHandler(BaseHTTPRequestHandler):
    """ timeserver request handler class """
    def setup(self):
        """ init per request state .. remember client ip and response size. """
        BaseHTTPRequestHandler.setup(self)
        self.ip = self.client_address[0]
        self.name = self.ip
        self.size = 0
    def writeheader(self, type='text/plain'):
        """ write a 200 response header with content type and server id. """
        self.send_response(200)
        self.send_header('Content-type', '%s; charset=%s ' % (type,sys.getdefaultencoding()))
        self.send_header('Server', config['version'])
        self.end_headers()
    def sendresult(self):
        """ dispatch the request to the server and write the result back ..
            sends a 501 when the handler raises. """
        try:
            result = self.server.do(self)
            if not result:
                return
            self.size = len(result)
        except Exception, ex:
            handle_exception()
            self.send_error(501)
            return
        self.writeheader()
        self.wfile.write(result)
        self.wfile.close()
    def handle_request(self):
        """ handle a single request .. access check first. """
        if not self.server.entrypoint(self):
            return
        self.sendresult()
    # all http methods are dispatched the same way
    do_DELETE = do_PUT = do_GET = do_POST = handle_request
    def log_request(self, code):
        """ log the request """
        try:
            ua = self.headers['user-agent']
        except:
            ua = "-"
        try:
            rf = self.headers['referer']
        except:
            rf = "-"
        if hasattr(self, 'path'):
            logging.debug('rest.server - %s "%s %s %s" %s %s "%s" "%s"' % (self.address_string(), self.command, self.path, self.request_version, code, self.size, rf, ua))
        else:
            logging.debug('rest.server - %s "%s %s %s" %s %s "%s" "%s"' % (self.address_string(), self.command, "none", self.request_version, code, self.size, rf, ua))
class SecureRestServer(RestServer):
    """ REST server over SSL .. server side certificate only. """
    def __init__(self, server_address, HandlerClass, keyfile, certfile):
        from OpenSSL import SSL
        BaseServer.__init__(self, server_address, HandlerClass)
        ctx = SSL.Context(SSL.SSLv23_METHOD)
        # NOTE(review): only SSLv2 is disabled here .. consider OP_NO_SSLv3 too
        ctx.set_options(SSL.OP_NO_SSLv2)
        logging.warn("rest.server - loading private key from %s" % keyfile)
        ctx.use_privatekey_file (keyfile)
        logging.warn('rest.server - loading certificate from %s' % certfile)
        ctx.use_certificate_file(certfile)
        logging.info('rest.server - creating SSL socket on %s' % str(server_address))
        # wrap a plain socket in an SSL connection
        self.socket = SSL.Connection(ctx, socket.socket(self.address_family,
                                                        self.socket_type))
        self.server_bind()
        self.server_activate()
class SecureAuthRestServer(SecureRestServer):
    """ SSL REST server that also requires a client certificate signed
        by the given chain. """
    def __init__(self, server_address, HandlerClass, chain, serverkey, servercert):
        from OpenSSL import SSL
        BaseServer.__init__(self, server_address, HandlerClass)
        ctx = SSL.Context(SSL.SSLv23_METHOD)
        logging.warn("rest.server - loading private key from %s" % serverkey)
        ctx.use_privatekey_file (serverkey)
        logging.warn('rest.server - loading certificate from %s' % servercert)
        ctx.use_certificate_file(servercert)
        logging.warn('rest.server - loading chain of certifications from %s' % chain)
        ctx.set_verify_depth(2)
        ctx.load_client_ca(chain)
        #ctx.load_verify_locations(chain)
        logging.info('rest.server - creating SSL socket on %s' % str(server_address))
        # verify callback just passes openssl's own verdict through
        callback = lambda conn,cert,errno,depth,retcode: retcode
        ctx.set_verify(SSL.VERIFY_FAIL_IF_NO_PEER_CERT | SSL.VERIFY_PEER, callback)
        ctx.set_session_id('feedprovider')
        self.socket = SSL.Connection(ctx, socket.socket(self.address_family,
                                                        self.socket_type))
        self.server_bind()
        self.server_activate()
class SecureRequestHandler(RestRequestHandler):
    """ request handler for SSL connections .. wraps the raw SSL socket
        in file objects for the base handler to use. """
    def setup(self):
        self.connection = self.request._sock
        self.request._sock.setblocking(1)
        self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
        self.wfile = socket._fileobject(self.request, "wb", self.rbufsize)
| Python |
# gozerlib/rest/client.py
#
#
""" Rest Client class """
## gozerlib imports
from gozerlib.utils.url import geturl3, geturl4, posturl, deleteurl, useragent
from gozerlib.utils.generic import toenc
from gozerlib.utils.exception import handle_exception, exceptionmsg
from gozerlib.utils.locking import lockdec
from gozerlib.utils.lazydict import LazyDict
## simplejson import
from simplejson import loads
## basic imports
from urllib2 import HTTPError, URLError
from httplib import InvalidURL
from urlparse import urlparse
import socket
import asynchat
import urllib
import sys
import thread
import re
import asyncore
import time
import logging
## defines
restlock = thread.allocate_lock()
locked = lockdec(restlock)
## classes
class RestResult(LazyDict):
    """ result of a single rest request .. carries url, decoded data,
        error and the http status/reason. """
    def __init__(self, url="", name=""):
        LazyDict.__init__(self)
        self.url = url
        self.name = name
        # decoded json payload (or None)
        self.data = None
        # error string or http status >= 400 (None when ok)
        self.error = None
        # http status code and reason phrase
        self.status = None
        self.reason = ""
class RestClient(object):
def __init__(self, url, keyfile=None, certfile=None, port=None):
if not url.endswith('/'):
url += '/'
try:
u = urlparse(url)
splitted = u[1].split(':')
if len(splitted) == 2:
host, port = splitted
else:
host = splitted[0]
port = port or 9999
path = u[2]
except Exception, ex:
raise
self.host = host
try:
self.ip = socket.gethostbyname(self.host)
except Exception, ex:
handle_exception()
self.path = path
self.port = port
self.url = url
self.keyfile = keyfile
self.certfile = certfile
self.callbacks = []
def addcb(self, callback):
if not callback:
return
self.callbacks.append(callback)
logging.debug('rest.client - added callback %s' % str(callback))
return self
def delcb(self, callback):
try:
del self.callbacks[callback]
logging.debug('rest.client - deleted callback %s' % str(callback))
except ValueError:
pass
def do(self, func, url, *args, **kwargs):
result = RestResult(url)
try:
logging.info("rest.client - %s - calling %s" % (url, str(func)))
res = func(url, {}, kwargs, self.keyfile, self.certfile, self.port)
result.status = res.status
result.reason = res.reason
if result.status >= 400:
result.error = result.status
else:
result.error = None
if result.status == 200:
r = res.read()
result.data = loads(r)
else:
result.data = None
logging.info("rest.client - %s - result: %s" % (url, str(result)))
except Exception, ex:
result.error = str(ex)
result.data = None
for cb in self.callbacks:
try:
cb(self, result)
logging.info('rest.client - %s - called callback %s' % (url, str(cb)))
except Exception, ex:
handle_exception()
return result
def post(self, *args, **kwargs):
return self.do(posturl, self.url, *args, **kwargs)
def add(self, *args, **kwargs):
return self.do(posturl, self.url, *args, **kwargs)
def delete(self, nr=None):
if nr:
return self.do(deleteurl, self.url + '/' + str(nr))
else:
return self.do(deleteurl, self.url)
def get(self, nr=None):
if not nr:
return self.do(geturl4, self.url)
else:
return self.do(geturl4, self.url + '/' + str(nr))
class RestClientAsync(RestClient, asynchat.async_chat):
def __init__(self, url, name=""):
RestClient.__init__(self, url)
asynchat.async_chat.__init__(self)
self.set_terminator("\r\n\r\n")
self.reading_headers = True
self.error = None
self.buffer = ''
self.name = name or self.url
self.headers = {}
self.status = None
def handle_error(self):
exctype, excvalue, tb = sys.exc_info()
if exctype == socket.error:
try:
errno, errtxt = excvalue
if errno in [11, 35, 9]:
logging.error("res.client - %s - %s %s" % (self.url, errno, errtxt))
return
except ValueError:
pass
self.error = str(excvalue)
else:
rlog(10, self.name, exceptionmsg())
self.error = exceptionmsg()
self.buffer = ''
result = RestResult(self.url, self.name)
result.error = self.error
result.data = None
for cb in self.callbacks:
try:
cb(self, result)
logging.info('rest.client - %s - called callback %s' % (url, str(cb)))
except Exception, ex:
handle_exception()
self.close()
def handle_expt(self):
handle_exception()
def handle_connect(self):
logging.info('rest.client - %s - connected %s' % (self.url, str(self)))
def start(self):
assert(self.host)
assert(int(self.port))
try:
logging.info('rest.client - %s - starting client' % self.url)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((self.ip, int(self.port)))
except socket.error, ex:
self.error = str(ex)
try:
self.connect((self.ip, int(self.port)))
except socket.error, ex:
self.error = str(ex)
except Exception, ex:
self.error = str(ex)
if self.error:
self.warn("rest.client - %s - can't start %s" % (self.url, self.error))
else:
return True
@locked
def found_terminator(self):
logging.info('rest.client - %s - found terminator' % self.url)
if self.reading_headers:
self.reading_headers = False
try:
self.headers = self.buffer.split('\r\n')
self.status = int(self.headers[0].split()[1])
except (ValueError, IndexError):
logging.warn("rest.client - %s - can't parse headers %s" % (self.url, self.headers))
return
self.set_terminator(None)
self.buffer = ''
logging.info('rest.client - %s - headers: %s' % (self.url, self.headers))
def collect_incoming_data(self, data):
self.buffer = self.buffer + data
def handle_close(self):
self.reading_headers = False
self.handle_incoming()
logging.info('rest.client - %s - closed' % self.url)
self.close()
def handle_incoming(self):
logging.info("rest.client - %s - incoming: %s" % (self.url, self.buffer))
if not self.reading_headers:
result = RestResult(self.url, self.name)
if self.status >= 400:
logging.warn('rest.client - %s - error status: %s' % (self.url, self.status))
result.error = self.status
result.data = None
elif self.error:
result.error = self.error
result.data = None
elif self.buffer == "":
result.data = ""
result.error = None
else:
try:
res = loads(self.buffer)
if not res:
self.buffer = ''
return
result.data = res
result.error = None
except ValueError, ex:
logging.info("rest.client - %s - can't decode %s" % (self.url, self.buffer))
result.error = str(ex)
except Exception, ex:
logging.error("rest.client - %s - %s" % (self.url, exceptionmsg()))
result.error = exceptionmsg()
result.data = None
for cb in self.callbacks:
try:
cb(self, result)
logging.info('rest.client - %s - called callback %s' % (self.url, str(cb)))
except Exception, ex:
handle_exception()
self.buffer = ''
@locked
def dorequest(self, method, path, postdata={}, headers={}):
if postdata:
postdata = urllib.urlencode(postdata)
if headers:
if not headers.has_key('Content-Length'):
headers['Content-Length'] = len(postdata)
headerstxt = ""
for i,j in headers.iteritems():
headerstxt += "%s: %s\r\n" % (i.lower(), j)
else:
headerstxt = ""
if method == 'POST':
s = toenc("%s %s HTTP/1.0\r\n%s\r\n%s\r\n\r\n" % (method, path, headerstxt, postdata), 'ascii')
else:
s = toenc("%s %s HTTP/1.0\r\n\r\n" % (method, path), 'ascii')
if self.start():
logging.info('rest.client - %s - sending %s' % (self.url, s))
self.push(s)
def sendpost(self, postdata):
headers = {'Content-Type': 'application/x-www-form-urlencoded', \
'Accept': 'text/plain; text/html', 'User-Agent': useragent()}
self.dorequest('POST', self.path, postdata, headers)
def sendget(self):
self.dorequest('GET', self.path)
def post(self, *args, **kwargs):
self.sendpost(kwargs)
def get(self):
self.sendget()
| Python |
# gozerlib/socket/partyline.py
#
#
""" provide partyline functionality .. manage dcc sockets. """
__copyright__ = 'this file is in the public domain'
__author__ = 'Aim'
## gozerlib imports
from gozerlib.utils.exception import handle_exception
from gozerlib.threads import start_new_thread
## simplejson import
from simplejson import load
## basic imports
import logging
import pickle
import socket
import thread
## classes
class PartyLine(object):
"""
partyline can be used to talk through dcc chat connections.
"""
def __init__(self):
self.socks = [] # partyline sockets list
self.jids = []
self.lock = thread.allocate_lock()
def _doresume(self, data, reto=None):
"""
resume a party line connection after reboot.
:param data: resume data
:type data: dict .. see PartyLine._resumedata
:param reto: nick of user to reply to
:type reto: string
"""
for i in data['partyline']:
bot = fleet.byname(i['botname'])
sock = socket.fromfd(i['fileno'], socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(1)
nick = i['nick']
userhost = i['userhost']
channel = i['channel']
if not bot:
rlog(10, 'partyline', "can't find %s bot in fleet" % i['botname'])
continue
self.socks.append({'bot': bot, 'sock': sock, 'nick': nick, 'userhost': userhost, 'channel': channel, 'silent': i['silent']})
bot._dccresume(sock, nick, userhost, channel)
if reto:
self.say_nick(nick, 'rebooting done')
def _resumedata(self):
"""
return data used for resume.
:rtype: list .. list of resumedata (dicts)
"""
result = []
for i in self.socks:
result.append({'botname': i['bot'].name, 'fileno': i['sock'].fileno(), 'nick': i['nick'], 'userhost': i['userhost'], 'channel': i['channel'], 'silent': i['silent']})
return result
def resume(self, sessionfile):
"""
resume from session file.
:param sessionfile: path to resume file
:type sessionfile: string
"""
session = load(open(sessionfile, 'r'))
try:
reto = session['channel']
self._doresume(session, reto)
except Exception, ex:
handle_exception()
def stop(self, bot):
"""
stop all users on bot.
:param bot: bot to stop users on
:type bot: gozerbot.eventbase.EventBase
"""
for i in self.socks:
if i['bot'] == bot:
try:
i['sock'].shutdown(2)
i['sock'].close()
except:
pass
def stop_all(self):
"""
stop every user on partyline.
"""
for i in self.socks:
try:
i['sock'].shutdown(2)
i['sock'].close()
except:
pass
def loud(self, nick):
"""
enable broadcasting of txt for nick.
:param nick: nick to put into loud mode
:type nick: string
"""
for i in self.socks:
if i['nick'] == nick:
i['silent'] = False
def silent(self, nick):
"""
disable broadcasting txt from/to nick.
:param nick: nick to put into silent mode
:type nick: string
"""
for i in self.socks:
if i['nick'] == nick:
i['silent'] = True
def add_party(self, bot, sock, nick, userhost, channel):
'''
add a socket with nick to the list.
:param bot: bot to add party on
:type bot: gozerbot.botbase.BotBase
:param sock: socket of party to add
:type sock: socket.socket
:param nick: nick of party to add
:type nick: string
:param userhost: userhost of party to add
:type userhost: string
:param channel: channel of party to add
:type channel: string
'''
for i in self.socks:
if i['sock'] == sock:
return
self.socks.append({'bot': bot, 'sock': sock, 'nick': nick, \
'userhost': userhost, 'channel': channel, 'silent': False})
rlog(1, 'partyline', 'added user %s on the partyline' % nick)
def del_party(self, nick):
'''
remove a socket with nick from the list.
:param nick: nick to remove
:type nick: string
'''
nick = nick.lower()
self.lock.acquire()
try:
for socknr in range(len(self.socks)-1, -1, -1):
if self.socks[socknr]['nick'].lower() == nick:
del self.socks[socknr]
rlog(1, 'partyline', 'removed user %s from the partyline' % nick)
finally:
self.lock.release()
def list_nicks(self):
'''
list all connected nicks.
:rtype: list
'''
result = []
for item in self.socks:
result.append(item['nick'])
return result
def say_broadcast(self, txt):
'''
broadcast a message to all ppl on partyline.
:param txt: txt to broadcast
:type txt: string
'''
for item in self.socks:
if not item['silent']:
item['sock'].send("%s\n" % txt)
def say_broadcast_notself(self, nick, txt):
'''
broadcast a message to all ppl on partyline, except the sender.
:param nick: nick to ignore
:type nick: string
:param txt: text to broadcast
:type txt: string
'''
nick = nick.lower()
for item in self.socks:
if item['nick'] == nick:
continue
if not item['silent']:
item['sock'].send("%s\n" % txt)
def say_nick(self, nickto, msg):
'''
say a message on the partyline to an user.
:param nickto: nick to send txt to
:type nickto: string
:param msg: msg to send
:type msg: string
'''
nickto = nickto.lower()
for item in self.socks:
if item['nick'].lower() == nickto:
if not '\n' in msg:
msg += "\n"
item['sock'].send("%s" % msg)
return
def is_on(self, nick):
'''
checks if user an is on the partyline.
:param nick: nick to check
:type nick: string
:rtype: boolean
'''
nick = nick.lower()
for item in self.socks:
if item['nick'].lower() == nick:
return True
return False
## INIT SECTION
# module level singleton .. all bots share this one partyline
partyline = PartyLine()
## END INIT
| Python |
# gozerlib/socket/irc/bot.py
#
#
#
"""
a bot object handles the dispatching of commands and check for callbacks
that need to be fired.
"""
## gozerlib imports
from gozerlib.utils.exception import handle_exception
from gozerlib.utils.generic import waitforqueue, uniqlist, strippedtxt
from gozerlib.commands import cmnds
from gozerlib.callbacks import callbacks
from gozerlib.plugins import plugs as plugins
from gozerlib.users import users
from gozerlib.datadir import datadir
from gozerlib.threads import start_new_thread, threaded
from gozerlib.utils.dol import Dol
from gozerlib.utils.pdod import Pdod
from gozerlib.persiststate import PersistState
from gozerlib.runner import runners_start
from gozerlib.errors import NoSuchCommand
from gozerlib.channelbase import ChannelBase
## gozerlib.socket.irc imports
from gozerlib.socket.partyline import partyline
from channels import Channels
from irc import Irc
from ircevent import Ircevent
from monitor import outmonitor
from wait import Privwait
from gozerlib.socket.utils.generic import getlistensocket, checkchan, makeargrest
## basic imports
import re
import socket
import struct
import Queue
import time
import os
import types
import logging
## defines
# matches a ctcp DCC CHAT request .. group(1) is the address, group(2) the port
dccchatre = re.compile('\001DCC CHAT CHAT (\S+) (\d+)\001', re.I)
## classes
class Bot(Irc):
""" class that dispatches commands and checks for callbacks to fire. """
    def __init__(self, cfg={}, users=None, plugs=None, *args, **kwargs):
        """ init the bot .. set up PRIVMSG waiting, channel state and the
            output monitor. NOTE(review): mutable default for cfg ..
            callers should not mutate it. """
        Irc.__init__(self, cfg, users, plugs, *args, **kwargs)
        # object used to wait for PRIVMSG
        self.privwait = Privwait()
        # channels where we are op
        if self.state:
            if not self.state.has_key('opchan'):
                self.state['opchan'] = []
        self.userchannels = Dol()
        if not self.state.has_key('joinedchannels'):
            self.state['joinedchannels'] = []
        self.monitor = outmonitor
        self.monitor.start()
def __str__(self):
return "name: %s nick: %s server: %s ipv6: %s ssl: %s port:%s" % (self.name, \
self.nick, self.server, self.ipv6, self.ssl, self.port)
    def _resume(self, data, reto):
        """ resume the bot after reboot .. re-WHO every joined channel.
            returns 1 on success, 0 when the base resume failed. """
        if not Irc._resume(self, data, reto):
            return 0
        for i in self.state['joinedchannels']:
            # NOTE(review): passes the bot twice (self.who(self, i)) ..
            # looks like it should be self.who(i) .. confirm against Irc.who
            self.who(self, i)
        return 1
    def _dccresume(self, sock, nick, userhost, channel=None):
        """ resume dcc loop .. restart the command loop on an inherited
            socket after reboot. """
        if not nick or not userhost:
            return
        start_new_thread(self._dccloop, (sock, nick, userhost, channel))
    def _dcclisten(self, nick, userhost, channel):
        """ accept dcc chat requests .. open a listen socket, send a ctcp
            DCC CHAT offer to nick and wait for the connect. """
        try:
            # get listen socket on host were running on
            listenip = socket.gethostbyname(socket.gethostname())
            (port, listensock) = getlistensocket(listenip)
            # convert ascii ip to network 32 bit (big endian unsigned long)
            ipip2 = socket.inet_aton(listenip)
            ipip = struct.unpack('>L', ipip2)[0]
            # send dcc chat request
            chatmsg = 'DCC CHAT CHAT %s %s' % (ipip, port)
            self.ctcp(nick, chatmsg)
            # go listen to response
            sock = listensock.accept()[0]
        except Exception, ex:
            logging.error('irc - dcc error: %s' % str(ex))
            return
        # connected
        self._dodcc(sock, nick, userhost, channel)
    def _dodcc(self, sock, nick, userhost, channel=None):
        """ send welcome message and start the loop for dcc commands. """
        if not nick or not userhost:
            return
        try:
            # send welcome message .. show list of commands for USER perms
            cmndslist = cmnds.list('USER')
            cmndslist.sort()
            sock.send('Welcome to the GOZERBOT partyline ' + nick + " ;]\n")
            partylist = partyline.list_nicks()
            if partylist:
                sock.send("people on the partyline: %s\n" % ' .. '.join(partylist))
            sock.send("control character is ! .. bot broadcast is @\n")
        except Exception, ex:
            logging.error('irc - dcc error: %s' % str(ex))
            return
        start_new_thread(self._dccloop, (sock, nick, userhost, channel))
    def _dccloop(self, sock, nick, userhost, channel=None):
        """ loop for dcc commands .. reads lines from the dcc socket,
            dispatches '!' commands, broadcasts '@' commands and relays
            plain text to the partyline. runs until the peer disconnects
            or the bot stops. """
        sockfile = sock.makefile('r')
        res = ""
        # add joined user to the partyline
        partyline.add_party(self, sock, nick, userhost, channel)
        while 1:
            time.sleep(0.001)
            try:
                # read from socket
                res = sockfile.readline()
                # if res == "" than the otherside had disconnected
                if self.stopped or not res:
                    logging.info('irc - closing dcc with ' + nick)
                    partyline.del_party(nick)
                    return
            except socket.timeout:
                # skip on timeout
                continue
            except socket.error, ex:
                # handle socket errors .. skip on errno 35 and 11 temp unavail
                try:
                    (errno, errstr) = ex
                except:
                    errno = 0
                    errstr = str(ex)
                if errno == 35 or errno == 11:
                    continue
                else:
                    raise
            except Exception, ex:
                # other exception occured .. close connection
                handle_exception()
                logging.info('irc - closing dcc with ' + nick)
                partyline.del_party(nick)
                return
            try:
                # see if user provided channel
                res = strippedtxt(res.strip())
                chan = checkchan(self, res)
                if chan != None:
                    (channel, res) = chan
                else:
                    channel = nick
                # create ircevent
                ievent = Ircevent()
                ievent.nick = nick
                ievent.userhost = userhost
                ievent.channel = channel
                ievent.origtxt = res
                ievent.txt = res
                ievent.cmnd = 'DCC'
                ievent.bot = self
                ievent.sock = sock
                ievent.speed = 1
                ievent.isdcc = True
                ievent.msg = True
                # check if its a command if so dispatch
                if ievent.txt[0] == "!":
                    ievent.txt = ievent.txt[1:]
                    self.doevent(ievent)
                    continue
                elif ievent.txt[0] == "@":
                    # command is broadcast so send response to the paryline
                    # members
                    partyline.say_broadcast_notself(ievent.nick, "[%s] %s" % (ievent.nick, ievent.txt))
                    # make queue and run trydispatch to see if command has
                    # fired
                    q = Queue.Queue()
                    ievent.queues = [q]
                    ievent.txt = ievent.txt[1:]
                    self.doevent(ievent)
                    # wait for result .. default timeout is 10 sec
                    result = waitforqueue(q, 5)
                    if result:
                        # broadcast result
                        for i in result:
                            partyline.say_broadcast("[bot] %s" % i)
                    continue
                else:
                    # not a command so send txt to partyline
                    partyline.say_broadcast_notself(ievent.nick, \
"[%s] %s" % (ievent.nick, ievent.txt))
                # check PRIVMSG wait
                self.privwait.check(ievent)
            except socket.error, ex:
                try:
                    (errno, errstr) = ex
                except:
                    errno = 0
                    errstr = str(ex)
                if errno == 35 or errno == 11:
                    continue
            except Exception, ex:
                handle_exception()
        sockfile.close()
        logging.warn('irc - closing dcc with ' + nick)
def _dccconnect(self, nick, userhost, addr, port):
""" connect to dcc request from nick. """
try:
port = int(port)
logging.warn("irc - dcc - connecting to %s:%s (%s)" % (addr, port, userhost))
if re.search(':', addr):
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.connect((addr, port))
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((addr, port))
except Exception, ex:
logging.error('irc - dcc error: %s' % str(ex))
return
# were connected .. start dcc loop
self._dodcc(sock, nick, userhost)
    @threaded
    def start(self):
        """ start the bot in its own thread and join the saved channels. """
        Irc.start(self)
        self.joinchannels()
    @threaded
    def reconnect(self):
        """ reconnect and if succesfull join channels. """
        if Irc.reconnect(self):
            self.joinchannels()
    def joinchannels(self):
        """ join all channels in state['joinedchannels'] .. each join runs
            in its own thread with a one second delay between them. """
        for i in self.state['joinedchannels']:
            try:
                channel = ChannelBase(self.datadir + os.sep + 'channels' + os.sep + i)
                # use the stored channel key when there is one
                if channel:
                    key = channel.getpass()
                else:
                    key=None
                logging.warn('irc - join %s' % i.split()[0])
                start_new_thread(self.join, (i, key))
                time.sleep(1)
            except Exception, ex:
                logging.warn('irc - failed to join %s: %s' % (i, str(ex)))
                handle_exception()
def broadcast(self, txt):
""" broadcast txt to all joined channels. """
for i in self.state['joinedchannels']:
self.say(i, txt)
    def send(self, txt):
        """ call Irc send and hand the outgoing text to the output
            monitor for callbacks. """
        Irc.send(self, str(txt))
        self.monitor.put(self, str(txt))
    def save(self):
        """ saves channels, userhosts and base state. """
        if self.channels:
            self.channels.save()
        if self.userhosts:
            self.userhosts.save()
        Irc.save(self)
    def stop(self):
        """ stop the bot .. the loops check self.stopped and exit. """
        self.stopped = 1
        # shut down handlers
        logging.warn('irc - stopped')
    def exit(self):
        """ save data, quit the bot and do shutdown .. returns 1. """
        # only send QUIT when we actually got connected
        if self.connectok.isSet():
            try:
                self._raw('QUIT :%s' % self.cfg['quitmsg'])
            except IOError:
                pass
        self.stop()
        partyline.stop(self)
        Irc.exit(self)
        self.save()
        logging.warn('irc - exit')
        return 1
    def getchannelmode(self, channel):
        """ send MODE request for channel .. queued with priority 9. """
        if not channel:
            return
        self.putonqueue(9, 'MODE %s' % channel)
def join(self, channel, password=None):
    """ join a channel .. use optional password.

        returns 1 on success or the Irc.join error result otherwise.
        also makes sure the channel's persisted settings (key, control
        character, perms, mode) are initialised, saving only on change.
    """
    result = Irc.join(self, channel, password)
    if result != 1:
        return result
    chan = ChannelBase(channel)
    # if password is provided set it
    got = False    # dirty flag: only save channel data when something changed
    if password:
        chan.setkey('IRC',password)
        got = True
    # check for control char .. if its not there init to !
    if not chan.data.cc:
        chan.data.cc = self.cfg.defaultcc or '!'
        got = True
    if not chan.data.perms:
        chan.data.perms = []
        got = True
    if not chan.data.mode:
        chan.data.mode = ""
        got = True
    if got:
        chan.save()
    # ask the server for the channel mode so cached state stays in sync
    self.getchannelmode(channel)
    return 1
def say(self, printto, what, who=None, how='msg', fromm=None, speed=5, groupchat=False):
    """ output what to printto.

        printto may be a channel/nick name or a Queue.Queue instance;
        queue targets get the text put on them directly instead of
        being sent over IRC.
    """
    # check if printto is a queue .. if so put output to the queue
    # BUGFIX: the old test compared type(printto) against type(Queue.Queue)
    # (the type of the class object itself), which can never match a queue
    # instance; use isinstance so queue targets are actually detected.
    if isinstance(printto, Queue.Queue):
        printto.put_nowait('[%s] %s' % (self.name, what))
        return
    # check if bot is in notice mode for the target
    notice = False
    try:
        notice = self.channels[printto]['notice']
    except (KeyError, TypeError):
        pass
    if notice:
        how = 'notice'
    Irc.say(self, printto, what, who, how, fromm, speed, groupchat)
def handle_privmsg(self, ievent):
    """ check if PRIVMSG is command, if so dispatch.

        dispatch order: dcc chat requests, ctcp (delegated to Irc),
        /msg directed at the bot, channel control character,
        'nick:' and 'nick,' addressing, and finally privmsg waiters.
    """
    # nick talked to us .. it is no longer unreachable
    if ievent.nick in self.nicks401:
        logging.debug("irc - %s is available again" % ievent.nick)
        self.nicks401.remove(ievent.nick)
    if not ievent.txt:
        return
    # check if it is a dcc chat request
    chat = re.search(dccchatre, ievent.txt)
    if chat:
        # check if the user is known
        if users.allowed(ievent.userhost, 'USER'):
            # start connection (address and port come from the dcc request)
            start_new_thread(self._dccconnect, (ievent.nick, ievent.userhost, chat.group(1), chat.group(2)))
        return
    # see if base class method would handle it (\001 marks ctcp)
    if '\001' in ievent.txt:
        Irc.handle_privmsg(self, ievent)
        return
    # set bot and socket in ircevent
    ievent.bot = self
    ievent.sock = self.sock
    chan = ievent.channel
    # check for /msg (target equals our own nick)
    if chan == self.nick:
        ievent.msg = 1
        ievent.speed = 7
        ievent.printto = ievent.nick
        ccs = ['!', '@', self.cfg['defaultcc']]
        # check for PRIVMSG waiting callback
        self.privwait.check(ievent)
        if ievent.isresponse:
            return
        # NOTE(review): 'self.msg' looks suspect here .. ievent.msg was just
        # set above and the bot object is not seen to define .msg; confirm
        # whether ievent.msg was intended.
        if self.cfg['noccinmsg'] and self.msg:
            self.doevent(ievent)
        elif ievent.txt[0] in ccs:
            self.doevent(ievent)
        return
    ievent.printto = chan
    # see if we can get channel control character
    try:
        channel = ChannelBase(chan)
        cchar = channel.data.cc
    except Exception, ex:
        handle_exception()
        cchar = "!"
    if not cchar:
        cchar = "!"
    # see if cchar matches, if so dispatch
    ievent.speed = 5
    if ievent.txt[0] in cchar:
        ievent.cc = ievent.txt[0]
        ievent.usercmnd = ievent.txt.split()[0]
        try:
            self.doevent(ievent)
        except NoSuchCommand:
            ievent.reply("no %s command found" % ievent.usercmnd)
        return
    # see if we are addressed as 'nick: cmnd', if so dispatch
    txtlist = ievent.txt.split(':', 1)
    if txtlist[0] == self.nick:
        if len(txtlist) < 2:
            return
        ievent.txt = txtlist[1].strip()
        ievent.usercmnd = ievent.txt.split()[0]
        ievent.makeargs()
        try:
            self.doevent(ievent)
        except NoSuchCommand:
            ievent.reply("no %s command found" % ievent.usercmnd)
        return
    # habbie addressing mode: 'nick, cmnd'
    txtlist = ievent.txt.split(',', 1)
    if txtlist[0] == self.nick:
        if len(txtlist) < 2:
            return
        ievent.txt = txtlist[1].strip()
        ievent.usercmnd = ievent.txt.split()[0]
        ievent.makeargs()
        try:
            self.doevent(ievent)
        except NoSuchCommand:
            ievent.reply("no %s command found" % ievent.usercmnd)
        return
    # check for PRIVMSG waiting callback (channel-directed waiters)
    self.privwait.check(ievent)
def handle_join(self, ievent):
    """ handle joins: init channel state when the bot joins, otherwise
        sync the joining user's userhost and channel caches. """
    # the joining nick is reachable again
    if ievent.nick in self.nicks401:
        logging.debug("irc - %s is available again" % ievent.nick)
        self.nicks401.remove(ievent.nick)
    chan = ievent.channel
    nick = ievent.nick
    # see if its the bot who is joining
    if nick == self.nick:
        # check if we already have a channels object, if not init it
        if self.channels:
            if not self.channels.has_key(chan):
                self.channels[chan] = {}
                self.channels[chan]['cc'] = self.cfg['defaultcc'] or '!'
        if not chan in self.state['joinedchannels']:
            self.state['joinedchannels'].append(chan)
            self.state.save()
        # op status is unknown after a fresh join .. reset it
        if chan in self.state['opchan']:
            self.state['opchan'].remove(chan)
            self.state.save()
        time.sleep(0.5)
        # NOTE(review): self.who(self, chan) passes self twice .. probably
        # self.who(chan) was intended; confirm who()'s signature.
        self.who(self, chan)
        return
    # sync joined user with userhosts cache
    if self.userhosts:
        self.userhosts.data[nick] = ievent.userhost
    if self.userchannels:
        self.userchannels.adduniq(nick, chan)
def handle_kick(self, ievent):
    """ handle kick event: forget the channel when the bot itself is kicked. """
    args = ievent.arguments
    # the kicked nick is the second argument; nothing to do without it
    if len(args) < 2:
        return
    who = args[1]
    chan = ievent.channel
    # only care when it is the bot that got kicked
    if who != self.nick:
        return
    joined = self.state['joinedchannels']
    if chan in joined:
        joined.remove(chan)
        self.state.save()
def handle_nick(self, ievent):
    """ update userhost cache on nick change. """
    nick = ievent.txt    # the new nick arrives in the trailing text
    self.userhosts.data[nick] = ievent.userhost
    # if the bot itself changed nick, persist it in the config
    if ievent.nick == self.nick:
        self.cfg['nick'] = nick
        self.cfg.save()
    # carry the channel list over to the new nick
    # NOTE(review): 'except: raise' is a no-op wrapper .. an unknown old
    # nick still raises KeyError; possibly 'except KeyError: pass' was meant.
    try:
        self.userchannels[nick] = self.userchannels[ievent.nick]
    except:
        raise
def handle_part(self, ievent):
    """ handle parts: forget a channel when the bot itself parts it. """
    chan = ievent.channel
    # only the bot's own parts matter here
    if ievent.nick != self.nick:
        return
    logging.warn('irc - parted channel %s' % chan)
    # drop the channel from the joined list and persist state
    joined = self.state['joinedchannels']
    if chan in joined:
        joined.remove(chan)
        self.state.save()
def handle_ievent(self, ievent):
    """ check for callbacks, call Irc method. """
    try:
        # call parent method
        Irc.handle_ievent(self, ievent)
        # check for callbacks
        if ievent.cmnd == 'JOIN' or ievent.msg:
            # activity from the nick .. drop it from the unreachable list
            if ievent.nick in self.nicks401:
                self.nicks401.remove(ievent.nick)
        # run registered callbacks on a copy bound to this bot and socket
        i = Ircevent()
        i.copyin(ievent)
        i.bot = self
        i.sock = self.sock
        callbacks.check(self, i)
    except:
        handle_exception()
def handle_quit(self, ievent):
    """ check if quit is because of a netsplit and record the nick. """
    # netsplit quit messages contain '*.' or our own server name
    is_split = '*.' in ievent.txt or self.server in ievent.txt
    if is_split:
        self.splitted.append(ievent.nick)
def handle_mode(self, ievent):
    """ check if mode is about channel if so request channel mode. """
    logging.warn("irc - mode change %s" % str(ievent.arguments))
    try:
        # EAFP probe: a third argument means this is not a plain
        # channel mode change, so nothing to do
        dummy = ievent.arguments[2]
    except IndexError:
        chan = ievent.channel
        # channel mode change has 2 arguments
        self.getchannelmode(chan)
        if self.channels:
            self.channels.set(chan, 'mode', ievent.arguments[1])
def handle_311(self, ievent):
    """ handle 311 (whois user) response .. sync with userhosts cache. """
    # 311 arguments: target, nick, user, host, '*'
    target, nick, user, host, dummy = ievent.arguments
    userhost = "%s@%s" % (user, host)
    logging.debug('irc - adding %s to userhosts: %s' % (nick, userhost))
    if self.userhosts:
        self.userhosts.data[nick] = userhost
def handle_352(self, ievent):
    """ handle 352 (who) response .. sync userhosts and userchannels caches. """
    args = ievent.arguments
    # 352 layout: args[1]=channel, args[2]=user, args[3]=host, args[5]=nick
    channel, user, host, nick = args[1], args[2], args[3], args[5]
    userhost = "%s@%s" % (user, host)
    logging.debug('adding %s to userhosts: %s' % (nick, userhost))
    if self.userhosts:
        self.userhosts.data[nick] = userhost
    if self.userchannels:
        self.userchannels.adduniq(nick, channel)
def handle_353(self, ievent):
    """ handle 353 (names) reply .. record when the bot has ops in the channel. """
    chan = ievent.channel
    opped = '@' + self.nick
    for entry in ievent.txt.split():
        # a '@nick' entry in the names list means nick is a channel operator
        if entry == opped and chan not in self.state['opchan']:
            self.state['opchan'].append(chan)
def handle_324(self, ievent):
    """ handle mode request responses .. cache the channel's mode string. """
    # 324 arguments: target, channel, modestring (see parse() / pfc['324'])
    self.channels.set(ievent.channel, 'mode', ievent.arguments[2])
def handle_invite(self, ievent):
    """ join channel if invited by a user with OPER permission. """
    allowed = self.users and self.users.allowed(ievent.userhost, ['OPER', ])
    if allowed:
        # the invited channel name is in the trailing text
        self.join(ievent.txt)
def settopic(self, channel, txt):
    """ set topic of channel to txt. """
    # both a channel and a topic text are required
    if channel and txt:
        self.putonqueue(7, 'TOPIC %s :%s' % (channel, txt))
def gettopic(self, channel):
    """ get topic data.

        returns a (topic, setter, time-set) tuple, or None on timeout
        or when the 333 reply cannot be parsed.
    """
    if not channel:
        return
    # queues the wait framework feeds the numeric replies into
    queue332 = Queue.Queue()
    queue333 = Queue.Queue()
    self.wait.register('332', channel, queue332)
    self.wait.register('333', channel, queue333)
    self.putonqueue(7, 'TOPIC %s' % channel)
    try:
        # positional Queue.get(block, timeout): block=1, wait up to 5 seconds
        res = queue332.get(1, 5)
    except Queue.Empty:
        return None
    # NOTE(review): Wait.delete puts a None sentinel on registered queues;
    # res may be None here (AttributeError) .. the Queue.Empty handler
    # does not cover that case.
    what = res.txt
    try:
        res = queue333.get(1, 5)
    except Queue.Empty:
        return None
    try:
        # 333 postfix layout: '<target> <channel> <setter> <timestamp>'
        splitted = res.postfix.split()
        who = splitted[2]
        when = float(splitted[3])
    except (IndexError, ValueError):
        return None
    return (what, who, when)
| Python |
# gozerbot/wait.py
#
#
""" wait for ircevent based on ircevent.CMND """
## gozerbot imports
from gozerlib.utils.locking import lockdec
import gozerlib.threads as thr
## basic imports
import logging
import thread
import time
## locks
# module-level lock serialising mutation of Wait.waitlist (used via @locked)
waitlock = thread.allocate_lock()
locked = lockdec(waitlock)
## classes
class Wait(object):
    """ lists of ircevents to wait for.

        each wait item is a (cmnd, catch, queue, ticket) tuple; a matching
        event is put on the item's queue and the item is removed.
    """
    def __init__(self):
        self.waitlist = []   # wait items, newest first
        self.ticket = 0      # monotonically increasing ticket number
    def register(self, cmnd, catch, queue, timeout=15):
        """ register wait for cmnd .. returns the wait item's ticket nr. """
        logging.debug('irc - wait - registering for cmnd ' + cmnd)
        self.ticket += 1
        self.waitlist.insert(0, (cmnd, catch, queue, self.ticket))
        if timeout:
            # start timeout thread that removes the item again after timeout
            thr.start_new_thread(self.dotimeout, (timeout, self.ticket))
        return self.ticket
    def check(self, ievent):
        """ check if there are wait items for ievent .. check if 'catch'
            matches on ievent.postfix if so put ievent on queue. """
        cmnd = ievent.cmnd
        # BUGFIX: iterate over a snapshot .. delete() mutates self.waitlist
        # while we loop, which could make iteration skip items
        for item in self.waitlist[:]:
            if item[0] == cmnd:
                if cmnd == "JOIN":
                    catch = ievent.txt + ievent.postfix
                else:
                    catch = ievent.nick + ievent.postfix
                if item[1] in catch:
                    ievent.ticket = item[3]
                    item[2].put_nowait(ievent)
                    self.delete(ievent.ticket)
                    logging.debug('irc - wait - got response for %s' % item[0])
                    ievent.isresponse = True
    @thr.threaded
    def dotimeout(self, timeout, ticket):
        """ timeout thread body: remove the wait item after timeout seconds. """
        time.sleep(float(timeout))
        self.delete(ticket)
    @locked
    def delete(self, ticket):
        """ delete wait item with ticket nr .. wakes the waiter with None. """
        for itemnr in range(len(self.waitlist)-1, -1, -1):
            if self.waitlist[itemnr][3] == ticket:
                # put a None sentinel so a blocked waiter wakes up
                self.waitlist[itemnr][2].put_nowait(None)
                del self.waitlist[itemnr]
                logging.debug('irc - deleted ' + str(ticket))
                return 1
class Privwait(Wait):
    """ wait for privmsg .. catch is matched against the event's userhost """
    def register(self, catch, queue, timeout=15):
        """ register wait for privmsg. """
        logging.debug('irc - privwait - registering for ' + catch)
        return Wait.register(self, 'PRIVMSG', catch, queue, timeout)
    def check(self, ievent):
        """ check if there are wait items for ievent. """
        # BUGFIX: iterate over a snapshot .. delete() mutates self.waitlist
        # while we loop, which could make iteration skip items
        for item in self.waitlist[:]:
            if item[0] == 'PRIVMSG':
                if ievent.userhost == item[1]:
                    ievent.ticket = item[3]
                    item[2].put_nowait(ievent)
                    self.delete(ievent.ticket)
                    logging.debug('irc - privwait - got response for %s' % item[0])
                    ievent.isresponse = True
| Python |
# gozerbot/ircevent.py
#
#
# http://www.irchelp.org/irchelp/rfc/rfc2812.txt
""" an ircevent is extracted from the IRC string received from the server. """
__copyright__ = 'this file is in the public domain'
## gozerbot imports
from gozerlib.socket.utils.generic import fix_format, toenc, fromenc, stripident, makeargrest
from gozerlib.eventbase import EventBase
#from gozerlib.utils.generic import makeargrest
from gozerlib.config import cfg as config
## basic imports
import time
import re
import types
import copy
import logging
cpy = copy.deepcopy
# separator used when joining list results into a single reply line;
# configurable via config['dotchars'], falling back to ' .. '
try:
    dotchars = config['dotchars']
    if not dotchars:
        dotchars = ' .. '
except KeyError:
    dotchars = ' .. '
def makeargrest(ievent):
    """ set ievent.args, ievent.rest and ievent.command from ievent.txt.

        needed because ircevents might be created outside the parse()
        function; does nothing when the event carries no text.
    """
    if not ievent.txt:
        return
    words = ievent.txt.split()
    ievent.args = words[1:]
    # everything after the first space is the 'rest'; empty when there is none
    head, sep, tail = ievent.txt.partition(' ')
    ievent.rest = tail if sep else ""
    ievent.command = ievent.txt.split(' ')[0]
class Ircevent(EventBase):
    """ represents an IRC event parsed from a raw server line. """
    def __copy__(self):
        return Ircevent(self)
    def __deepcopy__(self, bla):
        return Ircevent(self)
    def toirc(self):
        # no-op placeholder .. irc events need no conversion back to irc
        pass
    def parse(self, bot, rawstr):
        """ parse raw string into ircevent .. returns self. """
        self.bot = bot
        bot.nrevents += 1
        rawstr = rawstr.rstrip()
        splitted = re.split('\s+', rawstr)
        # check if there is a prefix (: in front)
        if not rawstr[0] == ':':
            # no prefix .. insert a dummy one so the layout below holds
            splitted.insert(0, ":none!none@none")
            rawstr = ":none!none@none " + rawstr
        self.prefix = splitted[0][1:]
        # get nick/userhost
        nickuser = self.prefix.split('!')
        if len(nickuser) == 2:
            self.nick = nickuser[0]
            if self.bot.cfg['stripident'] or config['stripident']:
                self.userhost = stripident(nickuser[1])
            else:
                self.userhost = nickuser[1]
        # set command
        self.cmnd = splitted[1]
        self.cbtype = self.cmnd
        # split string based on postfix count .. nr of items after the command
        if pfc.has_key(self.cmnd):
            self.arguments = splitted[2:pfc[self.cmnd]+2]
            txtsplit = re.split('\s+', rawstr, pfc[self.cmnd]+2)
            self.txt = txtsplit[-1]
        else:
            self.arguments = splitted[2:]
        # 1st argument is target
        if self.arguments:
            self.target = self.arguments[0]
            self.postfix = ' '.join(self.arguments)
        # check if target is text
        if self.target and self.target.startswith(':'):
            self.txt = ' '.join(self.arguments)
        # strip starting ':' from txt
        if self.txt:
            if self.txt[0] == ":":
                self.txt = self.txt[1:]
            self.usercmnd = self.txt.split()[0]
        #logging.debug("irc - event - %s %s %s" % (self.cmnd, self.arguments, self.txt))
        # set ircevent attributes (channel depends on the command type)
        if self.cmnd == 'PING':
            self.speed = 10
        if self.cmnd == 'PRIVMSG':
            self.channel = self.arguments[0]
            if '\001' in self.txt:
                self.isctcp = True
        elif self.cmnd == 'JOIN' or self.cmnd == 'PART':
            if self.arguments:
                self.channel = self.arguments[0]
            else:
                self.channel = self.txt
        elif self.cmnd == 'MODE':
            self.channel = self.arguments[0]
        elif self.cmnd == 'TOPIC':
            self.channel = self.arguments[0]
        elif self.cmnd == 'KICK':
            self.channel = self.arguments[0]
        elif self.cmnd == '353':
            self.channel = self.arguments[2]
        elif self.cmnd == '324':
            self.channel = self.arguments[1]
        if self.userhost:
            # userhost before possible stripident
            self.ruserhost = self.userhost
            # jabber compat .. this is userhost on irc
            self.stripped = self.userhost
            # determine user
            self.user = stripident(self.userhost).split('@')[0]
        self.origtxt = self.txt
        if self.channel:
            self.channel = self.channel.strip()
            self.origchannel = self.channel
        # log server error numerics (>399), except 422 (no MOTD)
        try:
            nr = int(self.cmnd)
            if nr > 399 and not nr == 422:
                logging.error('irc - %s - %s - %s' % (self.cmnd, self.arguments, self.txt))
        except ValueError:
            pass
        makeargrest(self)
        return self
    def reply(self, txt, result=None, nick=None, dot=False, nritems=False, nr=False, fromm=None, private=False, how=''):
        """ reply to this event .. formats txt/result and routes the
            response to queues, a dcc socket, a /msg or the channel. """
        # don't reply if result is an empty list
        if result == []:
            return
        if not how:
            how = 'msg'
        # init
        restxt = ""
        splitted = []
        # make reply if result is a dict
        if type(result) == types.DictType:
            for i, j in result.iteritems():
                if type(j) == types.ListType:
                    try:
                        z = dotchars.join(j)
                    except TypeError:
                        z = unicode(j)
                else:
                    z = j
                res = "%s: %s" % (i, z)
                splitted.append(res)
                if dot == True:
                    restxt += "%s%s" % (res, dotchars)
                else:
                    restxt += "%s %s" % (dot or ' ', res)
            if restxt:
                # strip the trailing separator again
                if dot == True:
                    restxt = restxt[:-6]
                elif dot:
                    restxt = restxt[:-len(dot)]
        lt = False # set if result is list
        # set vars if result is a list
        if type(txt) == types.ListType and not result:
            result = txt
            origtxt = u""
            lt = True
        else:
            origtxt = txt
            if result:
                lt = True
        # if queues are set write output to them
        if self.queues:
            for i in self.queues:
                if splitted:
                    for item in splitted:
                        i.put_nowait(item)
                elif lt:
                    for j in result:
                        i.put_nowait(j)
                elif restxt:
                    i.put_nowait(restxt)
                else:
                    i.put_nowait(txt)
        if self.onlyqueues:
            return
        # check if bot is set in event
        if not self.bot:
            # BUGFIX: was logging.eror (AttributeError when triggered)
            logging.error('irc - no bot defined in event')
            return
        # make response
        pretxt = origtxt
        if lt and not restxt:
            res = []
            # check if there are list in list
            for i in result:
                if type(i) == types.ListType or type(i) == types.TupleType:
                    try:
                        res.append(dotchars.join(i))
                    except TypeError:
                        res.extend(i)
                else:
                    res.append(i)
            # if nritems is set ..
            result = res
            if nritems:
                if len(result) > 1:
                    pretxt += "(%s items) .. " % len(result)
            txtlist = result
            # prepend item number for results
            if not nr is False:
                try:
                    start = int(nr)
                except ValueError:
                    start = 0
                txtlist2 = []
                teller = start
                for i in txtlist:
                    txtlist2.append(u"%s) %s" % (teller, i))
                    teller += 1
                txtlist = txtlist2
            # convert results to encoding
            txtl = []
            for item in txtlist:
                txtl.append(toenc(item))
            txtlist = txtl
            # join result with dot
            if dot == True:
                restxt = dotchars.join(txtlist)
            elif dot:
                restxt = dot.join(txtlist)
            else:
                restxt = ' '.join(txtlist)
        # see if txt needs to be prepended
        if pretxt:
            try:
                restxt = pretxt + restxt
            except TypeError:
                logging.warn("irc - event - can't add %s and %s" % (str(pretxt), str(restxt)))
        # if txt in result is filtered ignore the result
        #if self.filtered(restxt):
        #    return
        # if event is DCC based write result directly to socket
        if self.cmnd == 'DCC' and self.sock:
            self.bot.say(self.sock, restxt, speed=self.speed, how=how)
            return
        # if nick is set write result to nick in question
        if nick:
            self.bot.say(nick, restxt, fromm=nick, speed=self.speed, how=how)
            return
        # if originating event is a private message or the private flag is set
        if self.msg or private:
            self.bot.say(self.nick, restxt, fromm=self.nick, speed=self.speed, how=how)
            return
        # check if bot is in silent mode .. if so use /msg
        silent = False
        channel = self.printto or self.channel
        try:
            silent = self.bot.channels[channel]['silent']
        except (KeyError, TypeError):
            pass
        fromm = fromm or self.nick
        # check if notice needs to be used
        if silent:
            notice = False
            try:
                notice = self.bot.channels[channel]['notice']
            except (KeyError, TypeError):
                pass
            if notice:
                self.bot.say(self.nick, restxt, how='notice', fromm=fromm, speed=self.speed)
            else:
                self.bot.say(self.nick, restxt, fromm=fromm, speed=self.speed, how=how)
            return
        # if printto is set use that as the target
        if self.printto:
            self.bot.say(self.printto, restxt, fromm=fromm, speed=self.speed, how=how)
            return
        else:
            self.bot.say(self.channel, restxt, fromm=fromm, speed=self.speed, how=how)
# postfix count aka how many arguments
# postfix count: number of middle arguments each command / numeric carries
# before the trailing text (used by Ircevent.parse to split raw lines)
pfc = {
    # commands
    'NICK': 0, 'QUIT': 0, 'SQUIT': 1, 'JOIN': 0, 'PART': 1,
    'TOPIC': 1, 'KICK': 2, 'PRIVMSG': 1, 'NOTICE': 1, 'SQUERY': 1,
    'PING': 0, 'ERROR': 0, 'AWAY': 0, 'WALLOPS': 0, 'INVITE': 1,
    # welcome / server info numerics
    '001': 1, '002': 1, '003': 1, '004': 4, '005': 15,
    '302': 1, '303': 1, '301': 2, '305': 1, '306': 1,
    # whois / whowas numerics
    '311': 5, '312': 3, '313': 2, '317': 3, '318': 2,
    '319': 2, '314': 5, '369': 2,
    # list / channel numerics
    '322': 3, '323': 1, '325': 3, '324': 4, '331': 2,
    '332': 2, '341': 3, '342': 2, '346': 3, '347': 2,
    '348': 3, '349': 2, '351': 3, '352': 7, '315': 2,
    '353': 3, '366': 2, '364': 3, '365': 2, '367': 2,
    '368': 2, '371': 1, '374': 1, '375': 1, '372': 1,
    '376': 1, '381': 1, '382': 2, '383': 5, '391': 2,
    '392': 1, '393': 1, '394': 1, '395': 1,
    # stats / luser / admin numerics
    '262': 3, '242': 1, '235': 3, '250': 1, '251': 1,
    '252': 2, '253': 2, '254': 2, '255': 1, '256': 2,
    '257': 1, '258': 1, '259': 1, '263': 2, '265': 1, '266': 1,
    # error numerics
    '401': 2, '402': 2, '403': 2, '404': 2, '405': 2,
    '406': 2, '407': 2, '408': 2, '409': 1, '411': 1,
    '412': 1, '413': 2, '414': 2, '415': 2, '421': 2,
    '422': 1, '423': 2, '424': 1, '431': 1, '432': 2,
    '433': 2, '436': 2, '437': 2, '441': 3, '442': 2,
    '443': 3, '444': 2, '445': 1, '446': 1, '451': 1,
    '461': 2, '462': 1, '463': 1, '464': 1, '465': 1,
    '467': 2, '471': 2, '472': 2, '473': 2, '474': 2,
    '475': 2, '476': 2, '477': 2, '478': 3, '481': 1,
    '482': 2, '483': 1, '484': 1, '485': 1, '491': 1,
    '501': 1, '502': 1, '700': 2,
}
# default event used to initialise events
# (shared module-level template instance)
defaultevent = EventBase()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.