code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility library containing various helpers used by the API."""
import re
# Name of the method an object may define to override default serialization.
CUSTOM_SERIALIZE_METHOD_NAME = 'serialize'
# Matches one markup tag, capturing its contents (tag name plus attributes).
MARKUP_RE = re.compile(r'<([^>]*?)>')
def force_unicode(object):
  """Coerce any value to a unicode string.

  Unicode values pass through untouched; everything else is stringified
  and decoded as UTF-8.

  Args:
    object: The value to coerce.

  Returns:
    The unicode representation of object.
  """
  if not isinstance(object, unicode):
    object = unicode(str(object), 'utf-8')
  return object
def parse_markup(markup):
  """Parses a bit of markup into robot compatible text.

  Opening <p> and <br> tags become newlines; every other tag is dropped.
  For now this is a rough approximation, not a real HTML parser.

  Args:
    markup: The markup string to convert.

  Returns:
    The plain-text rendering of markup.
  """
  def replace_tag(match):
    # Bug fix: the original tested `match.groups` (a bound method, always
    # truthy) instead of calling it, so the guard never fired.
    if not match.groups():
      return ''
    # The first whitespace-delimited token of the tag body is the tag name;
    # any attributes after it are ignored.
    tag = match.groups()[0].split(' ', 1)[0]
    if tag == 'p' or tag == 'br':
      return '\n'
    return ''
  return MARKUP_RE.sub(replace_tag, markup)
def is_iterable(inst):
  """Return True if inst supports iteration (list, tuple, set or dict).

  Note: under Python 2, plain strings have no __iter__ and so do not count.
  """
  supports_iteration = hasattr(inst, '__iter__')
  return supports_iteration
def is_dict(inst):
  """Return True if inst quacks like a (Python 2) dict.

  Detection is duck-typed on the iteritems attribute.
  """
  looks_like_dict = hasattr(inst, 'iteritems')
  return looks_like_dict
def is_user_defined_new_style_class(obj):
  """Return True if obj is an instance of a user-defined type.

  Anything whose type lives in the builtin module is considered built in.
  """
  defining_module = type(obj).__module__
  return defining_module != '__builtin__'
def lower_camel_case(s):
  """Converts a string to lower camel case.

  Examples:
    foo => foo
    foo_bar => fooBar
    foo__bar => fooBar
    foo_bar_baz => fooBarBaz

  Args:
    s: The string to convert to lower camel case.

  Returns:
    The lower camel cased string.
  """
  # The original used the builtin reduce(), which is gone in Python 3, with
  # a dated `a and b.capitalize() or b` trick. The explicit loop keeps the
  # exact accumulation semantics: the first non-empty chunk is kept
  # verbatim, later chunks are capitalized, and empty chunks (from runs of
  # underscores or a leading underscore) contribute nothing.
  result = ''
  for chunk in s.split('_'):
    if result:
      result += chunk.capitalize()
    else:
      result += chunk
  return result
def non_none_dict(d):
  """Return a shallow copy of d with every None-valued entry dropped."""
  return dict((key, value) for key, value in d.items() if value is not None)
def _serialize_attributes(obj):
  """Serializes the public, non-callable attributes of an instance.

  Walks dir(obj), skipping private names (leading underscore), None values
  and anything callable, and serializes what remains under a
  lowerCamelCase key.

  Args:
    obj: The instance to serialize.

  Returns:
    A dict of lower camel cased attribute names to serialized values.
  """
  serialized = {}
  for name in dir(obj):
    if name.startswith('_'):
      continue  # private attribute
    value = getattr(obj, name)
    if value is None or callable(value):
      continue  # nothing useful to serialize
    # Looks okay, serialize it.
    serialized[lower_camel_case(name)] = serialize(value)
  return serialized
def _serialize_dict(d):
  """Invokes serialize on all key/value pairs of a dict.

  Args:
    d: The dict instance to serialize.

  Returns:
    A new dict with lower camel cased keys and serialized values.
  """
  return dict((lower_camel_case(key), serialize(value))
              for key, value in d.items())
def serialize(obj):
  """Serializes any instance.

  User-defined instances are serialized through their custom serialize()
  method when a callable one exists, and attribute by attribute otherwise.
  Dicts and other iterables are serialized element-wise; any remaining
  value is returned untouched.

  Args:
    obj: The instance to serialize.

  Returns:
    The serialized object.
  """
  if is_user_defined_new_style_class(obj):
    custom = None
    if obj and hasattr(obj, CUSTOM_SERIALIZE_METHOD_NAME):
      custom = getattr(obj, CUSTOM_SERIALIZE_METHOD_NAME)
    if callable(custom):
      return custom()
    return _serialize_attributes(obj)
  if is_dict(obj):
    return _serialize_dict(obj)
  if is_iterable(obj):
    return [serialize(item) for item in obj]
  return obj
class StringEnum(object):
  """Enum-like class whose members are their own string values.

  StringEnum('A', 'B') yields an object where obj.A == 'A' and
  obj.B == 'B', which effectively implements a string-valued enum for
  Elements.
  """

  def __init__(self, *values):
    # Each value doubles as both the attribute name and the attribute value.
    for value in values:
      setattr(self, value, value)
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the robot module."""
import unittest
import events
import ops
import robot
import simplejson
# Serialized blip keyed by blip id, as the server would send it.
BLIP_JSON = ('{"wdykLROk*13":'
    '{"lastModifiedTime":1242079608457,'
    '"contributors":["someguy@test.com"],'
    '"waveletId":"test.com!conv+root",'
    '"waveId":"test.com!wdykLROk*11",'
    '"parentBlipId":null,'
    '"version":3,'
    '"creator":"someguy@test.com",'
    '"content":"\\nContent!",'
    '"blipId":"wdykLROk*13","'
    'annotations":[{"range":{"start":0,"end":1},'
    '"name":"user/e/davidbyttow@google.com","value":"David"}],'
    '"elements":{},'
    '"childBlipIds":[]}'
    '}')
# Serialized wavelet metadata matching the blip above.
WAVELET_JSON = ('{"lastModifiedTime":1242079611003,'
    '"title":"A title",'
    '"waveletId":"test.com!conv+root",'
    '"rootBlipId":"wdykLROk*13",'
    '"dataDocuments":null,'
    '"creationTime":1242079608457,'
    '"waveId":"test.com!wdykLROk*11",'
    '"participants":["someguy@test.com","monty@appspot.com"],'
    '"creator":"someguy@test.com",'
    '"version":5}')
# A single WAVELET_PARTICIPANTS_CHANGED event.
EVENTS_JSON = ('[{"timestamp":1242079611003,'
    '"modifiedBy":"someguy@test.com",'
    '"properties":{"participantsRemoved":[],'
    '"participantsAdded":["monty@appspot.com"]},'
    '"type":"WAVELET_PARTICIPANTS_CHANGED"}]')
# Complete payload in the shape handed to Robot.process_events.
TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % (
    BLIP_JSON, WAVELET_JSON, EVENTS_JSON)
# Response to a wave creation request in the current wire format.
NEW_WAVE_JSON = [{"data":
                  {"waveletId": "wavesandbox.com!conv+root",
                   "blipId": "b+LrODcLZkDlu", "waveId":
                   "wavesandbox.com!w+LrODcLZkDlt"},
                  "id": "op2"}]
# The same response in the older, nested wire format. (The original ended
# this statement with a stray semicolon; removed.)
NEW_WAVE_JSON_OLD = [{'data':
                      [{'data':
                        {'waveletId': 'googlewave.com!conv+root',
                         'blipId': 'b+VqQXQbZkCP1',
                         'waveId': 'googlewave.com!w+VqQXQbZkCP0'},
                        'id': 'wavelet.create1265055048410'}],
                      'id': 'op10'}]
class TestRobot(unittest.TestCase):
  """Tests for testing the basic parsing of json in robots."""

  def setUp(self):
    self.robot = robot.Robot('Testy')

  def testCreateWave(self):
    # new_wave must cope with both the current and the old wire format.
    self.robot.submit = lambda x: NEW_WAVE_JSON
    new_wave = self.robot.new_wave('wavesandbox.com', submit=True)
    self.assertEqual('wavesandbox.com!w+LrODcLZkDlt', new_wave.wave_id)
    self.robot.submit = lambda x: NEW_WAVE_JSON_OLD
    new_wave = self.robot.new_wave('googlewave.com', submit=True)
    self.assertEqual('googlewave.com!w+VqQXQbZkCP0', new_wave.wave_id)

  def testEventParsing(self):
    def check(event, wavelet):
      # Test some basic properties; the rest should be covered by
      # ops.CreateContext.
      root = wavelet.root_blip
      self.assertEqual(1, len(wavelet.blips))
      self.assertEqual('wdykLROk*13', root.blip_id)
      self.assertEqual('test.com!wdykLROk*11', root.wave_id)
      self.assertEqual('test.com!conv+root', root.wavelet_id)
      self.assertEqual('WAVELET_PARTICIPANTS_CHANGED', event.type)
      self.assertEqual({'participantsRemoved': [],
                        'participantsAdded': ['monty@appspot.com']},
                       event.properties)
      self.robot.test_called = True

    self.robot.test_called = False
    self.robot.register_handler(events.WaveletParticipantsChanged, check)
    json = self.robot.process_events(TEST_JSON)
    self.assertTrue(self.robot.test_called)
    operations = simplejson.loads(json)
    # there should be one operation indicating the current version:
    self.assertEqual(1, len(operations))

  def testWrongEventsIgnored(self):
    def check(event, wavelet):
      # Flag delivery so the assertion below detects a misrouted event.
      # (The original assigned a dead local variable here, so a wrongly
      # delivered event would have gone unnoticed.)
      self.robot.test_called = True

    self.robot.test_called = False
    self.robot.register_handler(events.BlipSubmitted, check)
    self.robot.process_events(TEST_JSON)
    self.assertFalse(self.robot.test_called)

  def testOperationParsing(self):
    def check(event, wavelet):
      wavelet.reply()
      wavelet.title = 'new title'
      wavelet.root_blip.append_markup('<b>Hello</b>')

    self.robot.register_handler(events.WaveletParticipantsChanged, check)
    json = self.robot.process_events(TEST_JSON)
    operations = simplejson.loads(json)
    # Every expected method must appear exactly once.
    expected = set([ops.ROBOT_NOTIFY_CAPABILITIES_HASH,
                    ops.WAVELET_APPEND_BLIP,
                    ops.WAVELET_SET_TITLE,
                    ops.DOCUMENT_APPEND_MARKUP])
    methods = [operation['method'] for operation in operations]
    for method in methods:
      self.assertTrue(method in expected)
      expected.remove(method)
    self.assertEquals(0, len(expected))

  def testSerializeWavelets(self):
    # A wavelet must survive a serialize/deserialize round trip.
    wavelet = self.robot.blind_wavelet(TEST_JSON)
    serialized = wavelet.serialize()
    unserialized = self.robot.blind_wavelet(serialized)
    self.assertEquals(wavelet.creator, unserialized.creator)
    self.assertEquals(wavelet.creation_time, unserialized.creation_time)
    self.assertEquals(wavelet.last_modified_time,
                      unserialized.last_modified_time)
    self.assertEquals(wavelet.root_blip.blip_id, unserialized.root_blip.blip_id)
    self.assertEquals(wavelet.title, unserialized.title)
    self.assertEquals(wavelet.wave_id, unserialized.wave_id)
    self.assertEquals(wavelet.wavelet_id, unserialized.wavelet_id)
    self.assertEquals(wavelet.domain, unserialized.domain)

  def testProxiedBlindWavelet(self):
    def handler(event, wavelet):
      blind_wavelet = self.robot.blind_wavelet(TEST_JSON, 'proxyid')
      blind_wavelet.reply()
      blind_wavelet.submit_with(wavelet)

    self.robot.register_handler(events.WaveletParticipantsChanged, handler)
    json = self.robot.process_events(TEST_JSON)
    operations = simplejson.loads(json)
    self.assertEqual(2, len(operations))
    self.assertEquals(ops.ROBOT_NOTIFY_CAPABILITIES_HASH,
                      operations[0]['method'])
    self.assertEquals(ops.WAVELET_APPEND_BLIP, operations[1]['method'])
    self.assertEquals('proxyid', operations[1]['params']['proxyingFor'])

  def testCapabilitiesHashIncludesContextAndFilter(self):
    robot1 = robot.Robot('Robot1')
    robot1.register_handler(events.WaveletSelfAdded, lambda: '')
    robot2 = robot.Robot('Robot2')
    robot2.register_handler(events.WaveletSelfAdded, lambda: '',
                            context=events.Context.ALL)
    self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash())
    robot3 = robot.Robot('Robot3')
    # Register on robot3 (the original registered on robot2 a second time,
    # leaving robot3 without handlers and the filter comparison untested).
    robot3.register_handler(events.WaveletSelfAdded, lambda: '',
                            context=events.Context.ALL, filter="foo")
    self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash())
    self.assertNotEqual(robot1.capabilities_hash(), robot3.capabilities_hash())
    self.assertNotEqual(robot2.capabilities_hash(), robot3.capabilities_hash())
class TestGetCapabilitiesXml(unittest.TestCase):
  """Tests for the capabilities XML document the robot serves."""

  def setUp(self):
    self.robot = robot.Robot('Testy')
    # Pin the hash so the expected XML below is deterministic.
    self.robot.capabilities_hash = lambda: '1'

  def assertStringsEqual(self, s1, s2):
    # Like assertEqual, but prints both strings in full on failure.
    self.assertEqual(s1, s2, 'Strings differ:\n%s--\n%s' % (s1, s2))

  def testDefault(self):
    # A robot with no handlers yields an empty <w:capabilities> section.
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    xml = self.robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)

  def testUrls(self):
    # Note: the expected document matches the default one exactly --
    # image/profile urls do not appear in the capabilities XML.
    profile_robot = robot.Robot(
        'Testy',
        image_url='http://example.com/image.png',
        profile_url='http://example.com/profile.xml')
    profile_robot.capabilities_hash = lambda: '1'
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    xml = profile_robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)

  def testConsumerKey(self):
    # setup_oauth doesn't work during testing, so heavy handed setting of
    # properties it is:
    self.robot._consumer_key = 'consumer'
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:consumer_key>consumer</w:consumer_key>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n</w:capabilities>\n'
        '</w:robot>\n') % ops.PROTOCOL_VERSION
    xml = self.robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)

  def testCapsAndEvents(self):
    # A registered handler must show up as a <w:capability> entry listing
    # its event type and contexts.
    self.robot.register_handler(events.BlipSubmitted, None,
                                context=[events.Context.SELF,
                                         events.Context.ROOT])
    expected = (
        '<?xml version="1.0"?>\n'
        '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n'
        '<w:version>1</w:version>\n'
        '<w:protocolversion>%s</w:protocolversion>\n'
        '<w:capabilities>\n'
        '  <w:capability name="%s" context="SELF,ROOT"/>\n'
        '</w:capabilities>\n'
        '</w:robot>\n') % (ops.PROTOCOL_VERSION, events.BlipSubmitted.type)
    xml = self.robot.capabilities_xml()
    self.assertStringsEqual(expected, xml)
# Run all tests when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
| Python |
import cgi
import urllib
import time
import random
import urlparse
import hmac
import base64
# OAuth protocol version implemented by this module.
VERSION = '1.0' # Hi Blaine!
# Defaults used when a request does not specify its own method/signature.
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
# Generic exception class
class OAuthError(RuntimeError):
  """Generic OAuth error.

  Passing the message up to RuntimeError makes str(err) meaningful; the
  original only stashed it on self.message, leaving str(err) empty.
  """

  def __init__(self, message='OAuth error occured.'):
    RuntimeError.__init__(self, message)
    # Kept for callers that read err.message directly.
    self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
  """Build the optional WWW-Authenticate header for a 401 response."""
  header_value = 'OAuth realm="%s"' % realm
  return {'WWW-Authenticate': header_value}
# url escape
def escape(s):
  """Percent-encode s for OAuth; everything except '~' is escaped, '/' too."""
  quoted = urllib.quote(s, safe='~')
  return quoted
# util function: current timestamp
def generate_timestamp():
  """Return the current time as integer seconds since the epoch (UTC)."""
  now = time.time()
  return int(now)
# util function: nonce
def generate_nonce(length=8):
  """Return a pseudorandom nonce made of `length` decimal digits."""
  digits = [str(random.randint(0, 9)) for _ in range(length)]
  return ''.join(digits)
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
  """Identity of a Consumer: its key plus the shared secret."""

  key = None
  secret = None

  def __init__(self, key, secret):
    self.key, self.secret = key, secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
  """An End User token (request token or access token).

  Attributes:
    key: the token string.
    secret: the token secret.
  """

  key = None
  secret = None

  def __init__(self, key, secret):
    self.key = key
    self.secret = secret

  def to_string(self):
    """Serialize the token as a url-encoded query string."""
    return urllib.urlencode(
        {'oauth_token': self.key, 'oauth_token_secret': self.secret})

  @staticmethod
  def from_string(s):
    """Build a token from e.g. oauth_token_secret=digg&oauth_token=digg."""
    params = cgi.parse_qs(s, keep_blank_values=False)
    return OAuthToken(params['oauth_token'][0],
                      params['oauth_token_secret'][0])

  def __str__(self):
    return self.to_string()
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
  """An OAuth request: holds parameters, serializes and signs itself.

  OAuth parameters:
    - oauth_consumer_key
    - oauth_token
    - oauth_signature_method
    - oauth_signature
    - oauth_timestamp
    - oauth_nonce
    - oauth_version
    ... any additional parameters, as defined by the Service Provider.
  """
  parameters = None # oauth parameters
  http_method = HTTP_METHOD
  http_url = None
  version = VERSION

  def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
    self.http_method = http_method
    self.http_url = http_url
    self.parameters = parameters or {}

  def set_parameter(self, parameter, value):
    self.parameters[parameter] = value

  def get_parameter(self, parameter):
    """Return a parameter value; raises OAuthError when it is missing."""
    try:
      return self.parameters[parameter]
    except KeyError:
      # Narrowed from a bare except: only a missing key means "not found".
      raise OAuthError('Parameter not found: %s' % parameter)

  def _get_timestamp_nonce(self):
    return (self.get_parameter('oauth_timestamp'),
            self.get_parameter('oauth_nonce'))

  # get any non-oauth parameters
  def get_nonoauth_parameters(self):
    parameters = {}
    for k, v in self.parameters.iteritems():
      # ignore oauth parameters
      if k.find('oauth_') < 0:
        parameters[k] = v
    return parameters

  # serialize as a header for an HTTPAuth request
  def to_header(self, realm=''):
    auth_header = 'OAuth realm="%s"' % realm
    # add the oauth parameters
    if self.parameters:
      for k, v in self.parameters.iteritems():
        auth_header += ', %s="%s"' % (k, escape(str(v)))
    return {'Authorization': auth_header}

  # serialize as post data for a POST request
  def to_postdata(self):
    return '&'.join('%s=%s' % (escape(str(k)), escape(str(v)))
                    for k, v in self.parameters.iteritems())

  # serialize as a url for a GET request
  def to_url(self):
    return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())

  def get_normalized_parameters(self):
    """Return the sorted, escaped parameter string used for signing.

    Works on a copy of self.parameters so that excluding oauth_signature
    does not destructively mutate the request (the original deleted the
    key from the live dict as a side effect of signing/verifying).
    """
    params = dict(self.parameters)
    # exclude the signature if it exists
    params.pop('oauth_signature', None)
    key_values = params.items()
    # sort lexicographically, first after key, then after value
    key_values.sort()
    # combine key value pairs in string and escape
    return '&'.join('%s=%s' % (escape(str(k)), escape(str(v)))
                    for k, v in key_values)

  # just uppercases the http method
  def get_normalized_http_method(self):
    return self.http_method.upper()

  # parses the url and rebuilds it to be scheme://host/path
  def get_normalized_http_url(self):
    parts = urlparse.urlparse(self.http_url)
    url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path
    return url_string

  # set the signature parameter to the result of build_signature
  def sign_request(self, signature_method, consumer, token):
    # set the signature method
    self.set_parameter('oauth_signature_method', signature_method.get_name())
    # set the signature
    self.set_parameter(
        'oauth_signature',
        self.build_signature(signature_method, consumer, token))

  def build_signature(self, signature_method, consumer, token):
    # call the build signature method within the signature method
    return signature_method.build_signature(self, consumer, token)

  @staticmethod
  def from_request(http_method, http_url, headers=None, parameters=None,
                   query_string=None):
    """Combine header, query-string and url parameter sources."""
    if parameters is None:
      parameters = {}
    # headers
    if headers and 'Authorization' in headers:
      auth_header = headers['Authorization']
      # check that the authorization header is OAuth; find() returns -1
      # when absent (the original used index(), which raises ValueError
      # for non-OAuth Authorization headers instead of skipping them).
      if auth_header.find('OAuth') > -1:
        try:
          # get the parameters from the header
          header_params = OAuthRequest._split_header(auth_header)
          parameters.update(header_params)
        except Exception:
          raise OAuthError(
              'Unable to parse OAuth parameters from Authorization header.')
    # GET or POST query string
    if query_string:
      query_params = OAuthRequest._split_url_string(query_string)
      parameters.update(query_params)
    # URL parameters
    param_str = urlparse.urlparse(http_url)[4] # query
    url_params = OAuthRequest._split_url_string(param_str)
    parameters.update(url_params)
    if parameters:
      return OAuthRequest(http_method, http_url, parameters)
    return None

  @staticmethod
  def from_consumer_and_token(oauth_consumer, token=None,
                              http_method=HTTP_METHOD, http_url=None,
                              parameters=None):
    if not parameters:
      parameters = {}
    defaults = {
        'oauth_consumer_key': oauth_consumer.key,
        'oauth_timestamp': generate_timestamp(),
        'oauth_nonce': generate_nonce(),
        'oauth_version': OAuthRequest.version,
    }
    # explicit parameters win over the generated defaults
    defaults.update(parameters)
    parameters = defaults
    if token:
      parameters['oauth_token'] = token.key
    return OAuthRequest(http_method, http_url, parameters)

  @staticmethod
  def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
                              http_url=None, parameters=None):
    if not parameters:
      parameters = {}
    parameters['oauth_token'] = token.key
    if callback:
      parameters['oauth_callback'] = escape(callback)
    return OAuthRequest(http_method, http_url, parameters)

  # util function: turn Authorization: header into parameters, has to do
  # some unescaping
  @staticmethod
  def _split_header(header):
    params = {}
    parts = header.split(',')
    for param in parts:
      # ignore realm parameter
      if param.find('OAuth realm') > -1:
        continue
      # remove whitespace
      param = param.strip()
      # split key-value
      param_parts = param.split('=', 1)
      # remove quotes and unescape the value
      params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
    return params

  # util function: turn url string into parameters, has to do some unescaping
  @staticmethod
  def _split_url_string(param_str):
    parameters = cgi.parse_qs(param_str, keep_blank_values=False)
    for k, v in parameters.iteritems():
      parameters[k] = urllib.unquote(v[0])
    return parameters
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
  """Verifies OAuth requests (version, consumer, token, timestamp, nonce
  and signature) against a pluggable data store."""

  timestamp_threshold = 300 # in seconds, five minutes
  version = VERSION
  signature_methods = None
  data_store = None

  def __init__(self, data_store=None, signature_methods=None):
    self.data_store = data_store
    self.signature_methods = signature_methods or {}

  def set_data_store(self, oauth_data_store):
    # Bug fix: the original assigned the undefined name 'data_store',
    # which raised a NameError whenever this setter was called.
    self.data_store = oauth_data_store

  def get_data_store(self):
    return self.data_store

  def add_signature_method(self, signature_method):
    self.signature_methods[signature_method.get_name()] = signature_method
    return self.signature_methods

  # process a request_token request
  # returns the request token on success
  def fetch_request_token(self, oauth_request):
    try:
      # get the request token for authorization
      token = self._get_token(oauth_request, 'request')
    except OAuthError:
      # no token required for the initial token request
      version = self._get_version(oauth_request)
      consumer = self._get_consumer(oauth_request)
      self._check_signature(oauth_request, consumer, None)
      # fetch a new token
      token = self.data_store.fetch_request_token(consumer)
    return token

  # process an access_token request
  # returns the access token on success
  def fetch_access_token(self, oauth_request):
    version = self._get_version(oauth_request)
    consumer = self._get_consumer(oauth_request)
    # get the request token
    token = self._get_token(oauth_request, 'request')
    self._check_signature(oauth_request, consumer, token)
    new_token = self.data_store.fetch_access_token(consumer, token)
    return new_token

  # verify an api call, checks all the parameters
  def verify_request(self, oauth_request):
    # -> consumer and token
    version = self._get_version(oauth_request)
    consumer = self._get_consumer(oauth_request)
    # get the access token
    token = self._get_token(oauth_request, 'access')
    self._check_signature(oauth_request, consumer, token)
    parameters = oauth_request.get_nonoauth_parameters()
    return consumer, token, parameters

  # authorize a request token
  def authorize_token(self, token, user):
    return self.data_store.authorize_request_token(token, user)

  # get the callback url
  def get_callback(self, oauth_request):
    return oauth_request.get_parameter('oauth_callback')

  # optional support for the authenticate header
  def build_authenticate_header(self, realm=''):
    return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

  # verify the correct version request for this server
  def _get_version(self, oauth_request):
    try:
      version = oauth_request.get_parameter('oauth_version')
    except OAuthError:
      # the parameter is optional; fall back to the module default
      version = VERSION
    if version and version != self.version:
      raise OAuthError('OAuth version %s not supported.' % str(version))
    return version

  # figure out the signature with some defaults
  def _get_signature_method(self, oauth_request):
    try:
      signature_method = oauth_request.get_parameter('oauth_signature_method')
    except OAuthError:
      signature_method = SIGNATURE_METHOD
    try:
      # get the signature method object
      signature_method = self.signature_methods[signature_method]
    except KeyError:
      signature_method_names = ', '.join(self.signature_methods.keys())
      raise OAuthError(
          'Signature method %s not supported try one of the following: %s'
          % (signature_method, signature_method_names))
    return signature_method

  def _get_consumer(self, oauth_request):
    consumer_key = oauth_request.get_parameter('oauth_consumer_key')
    if not consumer_key:
      raise OAuthError('Invalid consumer key.')
    consumer = self.data_store.lookup_consumer(consumer_key)
    if not consumer:
      raise OAuthError('Invalid consumer.')
    return consumer

  # try to find the token for the provided request token key
  def _get_token(self, oauth_request, token_type='access'):
    token_field = oauth_request.get_parameter('oauth_token')
    token = self.data_store.lookup_token(token_type, token_field)
    if not token:
      raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
    return token

  def _check_signature(self, oauth_request, consumer, token):
    timestamp, nonce = oauth_request._get_timestamp_nonce()
    self._check_timestamp(timestamp)
    self._check_nonce(consumer, token, nonce)
    signature_method = self._get_signature_method(oauth_request)
    try:
      signature = oauth_request.get_parameter('oauth_signature')
    except OAuthError:
      raise OAuthError('Missing signature.')
    # validate the signature
    valid_sig = signature_method.check_signature(
        oauth_request, consumer, token, signature)
    if not valid_sig:
      key, base = signature_method.build_signature_base_string(
          oauth_request, consumer, token)
      raise OAuthError(
          'Invalid signature. Expected signature base string: %s' % base)
    # (A dead trailing build_signature call was removed here; its result
    # was never used.)

  def _check_timestamp(self, timestamp):
    # verify that timestamp is recentish
    timestamp = int(timestamp)
    now = int(time.time())
    lapsed = now - timestamp
    if lapsed > self.timestamp_threshold:
      raise OAuthError(
          'Expired timestamp: given %d and now %s has a greater difference '
          'than threshold %d' % (timestamp, now, self.timestamp_threshold))

  def _check_nonce(self, consumer, token, nonce):
    # verify that the nonce is uniqueish
    nonce = self.data_store.lookup_nonce(consumer, token, nonce)
    if nonce:
      raise OAuthError('Nonce already used: %s' % str(nonce))
# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
  """Base worker that executes OAuth requests against a Service Provider."""

  consumer = None
  token = None

  def __init__(self, oauth_consumer, oauth_token):
    self.consumer = oauth_consumer
    self.token = oauth_token

  def get_consumer(self):
    return self.consumer

  def get_token(self):
    return self.token

  def fetch_request_token(self, oauth_request):
    """Subclasses return an OAuthToken."""
    raise NotImplementedError

  def fetch_access_token(self, oauth_request):
    """Subclasses return an OAuthToken."""
    raise NotImplementedError

  def access_resource(self, oauth_request):
    """Subclasses return some protected resource."""
    raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):
  """Abstract storage interface; subclasses back it with a real database."""

  def lookup_consumer(self, key):
    """Subclasses return an OAuthConsumer."""
    raise NotImplementedError

  def lookup_token(self, oauth_consumer, token_type, token_token):
    """Subclasses return an OAuthToken."""
    raise NotImplementedError

  def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
    """Subclasses return an OAuthToken."""
    raise NotImplementedError

  def fetch_request_token(self, oauth_consumer):
    """Subclasses return an OAuthToken."""
    raise NotImplementedError

  def fetch_access_token(self, oauth_consumer, oauth_token):
    """Subclasses return an OAuthToken."""
    raise NotImplementedError

  def authorize_request_token(self, oauth_token, user):
    """Subclasses return an OAuthToken."""
    raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
  """Strategy base class for OAuth signature methods."""

  def get_name(self):
    """Subclasses return the signature method name as a str."""
    raise NotImplementedError

  def build_signature_base_string(self, oauth_request, oauth_consumer,
                                  oauth_token):
    """Subclasses return a (str key, str raw) pair."""
    raise NotImplementedError

  def build_signature(self, oauth_request, oauth_consumer, oauth_token):
    """Subclasses return the signature as a str."""
    raise NotImplementedError

  def check_signature(self, oauth_request, consumer, token, signature):
    """Compare a presented signature against a freshly built one."""
    expected = self.build_signature(oauth_request, consumer, token)
    return expected == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
  """HMAC-SHA1 signatures as described by the OAuth Core 1.0 spec."""

  def get_name(self):
    return 'HMAC-SHA1'

  def build_signature_base_string(self, oauth_request, consumer, token):
    """Return the (key, raw base string) pair used for signing."""
    sig = (
        escape(oauth_request.get_normalized_http_method()),
        escape(oauth_request.get_normalized_http_url()),
        escape(oauth_request.get_normalized_parameters()),
    )
    # The key is consumer secret and token secret joined by '&'; the token
    # part stays empty when no token is involved.
    key = '%s&' % escape(consumer.secret)
    if token:
      key += escape(token.secret)
    raw = '&'.join(sig)
    return key, raw

  def build_signature(self, oauth_request, consumer, token):
    """Build the base string, HMAC-SHA1 it and base64-encode the digest."""
    key, raw = self.build_signature_base_string(oauth_request, consumer,
                                                token)
    # hmac object; narrowed from a bare except so only a genuinely missing
    # hashlib (pre-2.5) falls back to the deprecated sha module -- a bare
    # except also swallowed real errors from hmac.new itself.
    try:
      import hashlib # 2.5
      hashed = hmac.new(key, raw, hashlib.sha1)
    except ImportError:
      import sha # deprecated
      hashed = hmac.new(key, raw, sha)
    # calculate the digest base 64
    return base64.b64encode(hashed.digest())
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
  """PLAINTEXT signatures: just the escaped secrets, no hashing."""

  def get_name(self):
    return 'PLAINTEXT'

  def build_signature_base_string(self, oauth_request, consumer, token):
    """Concatenate the escaped consumer secret and optional token secret."""
    sig = escape(consumer.secret) + '&'
    if token:
      sig += escape(token.secret)
    return sig

  def build_signature(self, oauth_request, consumer, token):
    # For PLAINTEXT the signature is the base string itself.
    return self.build_signature_base_string(oauth_request, consumer, token)
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elements are non-text bits living in blips like images, gadgets etc.
This module defines the Element class and the derived classes.
"""
import base64
import logging
import sys
import util
class Element(object):
  """Elements are non-text content within a document.

  These are generally abstracted from the Robot. Although a Robot can query
  the properties of an element it can only interact with the specific types
  that the element represents.

  Properties of elements are both accessible directly (image.url) and
  through the properties dictionary (image.properties['url']). In general
  Element should not be instantiated by robots, but rather rely on the
  derived classes.
  """

  # INLINE_BLIP_TYPE is not a separate type since it shouldn't be
  # instantiated, only be used for introspection
  INLINE_BLIP_TYPE = "INLINE_BLIP"

  def __init__(self, element_type, **properties):
    """Initializes self with the specified type and any properties.

    Args:
      element_type: string typed member of ELEMENT_TYPE
      properties: either a dictionary of initial properties, or a dictionary
          with just one member properties that is itself a dictionary of
          properties. This allows us to both use
          e = Element(atype, prop1=val1, prop2=prop2...)
          and
          e = Element(atype, properties={prop1:val1, prop2:prop2..})
    """
    if len(properties) == 1 and 'properties' in properties:
      properties = properties['properties']
    self._type = element_type
    # as long as the operation_queue of an element is None, it is
    # unattached. After an element is acquired by a blip, the blip
    # will set the operation_queue to make sure all changes to the
    # element are properly sent to the server.
    self._operation_queue = None
    self._properties = properties.copy()

  @property
  def type(self):
    """The type of this element."""
    return self._type

  @classmethod
  def from_json(cls, json):
    """Class method to instantiate an Element based on a json string."""
    etype = json['type']
    props = json['properties'].copy()
    element_class = ALL.get(etype)
    if not element_class:
      # Unknown type. Server could be newer than we are
      return Element(element_type=etype, properties=props)
    return element_class.from_props(props)

  def get(self, key, default=None):
    """Standard get interface."""
    return self._properties.get(key, default)

  def __getattr__(self, key):
    # Bug fix: raise AttributeError (not KeyError) for unknown properties
    # so hasattr() and getattr() with a default behave correctly and
    # copying/pickling don't break on missing special attributes.
    try:
      return self._properties[key]
    except KeyError:
      raise AttributeError(key)

  def serialize(self):
    """Custom serializer for Elements."""
    return util.serialize({'properties': util.non_none_dict(self._properties),
                           'type': self._type})
class Input(Element):
  """A single-line input element."""

  class_type = 'INPUT'

  def __init__(self, name, value=''):
    super(Input, self).__init__(
        Input.class_type, name=name, value=value, default_value=value)

  @classmethod
  def from_props(cls, props):
    """Rebuild an Input from a server-side property dict."""
    return Input(name=props.get('name'), value=props.get('value'))
class Check(Element):
  """A checkbox element."""

  class_type = 'CHECK'

  def __init__(self, name, value=''):
    super(Check, self).__init__(
        Check.class_type, name=name, value=value, default_value=value)

  @classmethod
  def from_props(cls, props):
    """Rebuild a Check from a server-side property dict."""
    return Check(name=props.get('name'), value=props.get('value'))
class Button(Element):
  """A button element."""

  class_type = 'BUTTON'

  def __init__(self, name, value):
    super(Button, self).__init__(Button.class_type, name=name, value=value)

  @classmethod
  def from_props(cls, props):
    """Rebuild a Button from a server-side property dict."""
    return Button(name=props.get('name'), value=props.get('value'))
class Label(Element):
  """A label element.

  The label is stored with the id of the element it labels as 'name'
  and its caption as 'value'.
  """
  class_type = 'LABEL'

  def __init__(self, label_for, caption):
    """Initializes a label for the given element id with a caption."""
    super(Label, self).__init__(
        Label.class_type, name=label_for, value=caption)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a Label from a property dictionary."""
    return cls(label_for=props.get('name'), caption=props.get('value'))
class RadioButton(Element):
  """A radio button element.

  The button's group name is stored as the element 'value'.
  """
  class_type = 'RADIO_BUTTON'

  def __init__(self, name, group):
    """Initializes a radio button belonging to the named group."""
    super(RadioButton, self).__init__(
        RadioButton.class_type, name=name, value=group)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a RadioButton from a property dictionary."""
    return cls(name=props.get('name'), group=props.get('value'))
class RadioButtonGroup(Element):
  """A group of radio buttons."""
  class_type = 'RADIO_BUTTON_GROUP'

  def __init__(self, name, value):
    """Initializes a radio button group with the given name and value."""
    super(RadioButtonGroup, self).__init__(
        RadioButtonGroup.class_type, name=name, value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a RadioButtonGroup from a property dictionary."""
    return cls(name=props.get('name'), value=props.get('value'))
class Password(Element):
  """A password element."""
  class_type = 'PASSWORD'

  def __init__(self, name, value):
    """Initializes a password field with the given name and value."""
    super(Password, self).__init__(
        Password.class_type, name=name, value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a Password from a property dictionary."""
    return cls(name=props.get('name'), value=props.get('value'))
class TextArea(Element):
  """A text area element."""
  class_type = 'TEXTAREA'

  def __init__(self, name, value):
    """Initializes a text area with the given name and value."""
    super(TextArea, self).__init__(
        TextArea.class_type, name=name, value=value)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a TextArea from a property dictionary."""
    return cls(name=props.get('name'), value=props.get('value'))
class Line(Element):
  """A line element.

  Note that Lines are represented in the text as newlines.
  """
  class_type = 'LINE'

  # Possible line types:
  #: Designates line as H1, largest heading.
  TYPE_H1 = 'h1'
  #: Designates line as H2 heading.
  TYPE_H2 = 'h2'
  #: Designates line as H3 heading.
  TYPE_H3 = 'h3'
  #: Designates line as H4 heading.
  TYPE_H4 = 'h4'
  #: Designates line as H5, smallest heading.
  TYPE_H5 = 'h5'
  #: Designates line as a bulleted list item.
  TYPE_LI = 'li'

  # Possible values for align:
  #: Sets line alignment to left.
  ALIGN_LEFT = 'l'
  #: Sets line alignment to right.
  ALIGN_RIGHT = 'r'
  #: Sets line alignment to centered.
  ALIGN_CENTER = 'c'
  #: Sets line alignment to justified.
  ALIGN_JUSTIFIED = 'j'

  def __init__(self, line_type=None, indent=None, alignment=None,
               direction=None):
    """Initializes a line with optional type, indent, alignment, direction."""
    super(Line, self).__init__(
        Line.class_type, lineType=line_type, indent=indent,
        alignment=alignment, direction=direction)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a Line from a property dictionary."""
    return cls(line_type=props.get('lineType'),
               indent=props.get('indent'),
               alignment=props.get('alignment'),
               direction=props.get('direction'))
class Gadget(Element):
  """A gadget element.

  The gadget's xml url is stored under the 'url' property; all other
  properties hold free-form gadget state.
  """
  class_type = 'GADGET'

  def __init__(self, url, props=None):
    """Initializes a gadget.

    Args:
      url: url of the gadget xml.
      props: optional dict of initial gadget properties. The dict is
          copied, so the caller's dictionary is never modified (the
          previous implementation mutated it by inserting 'url').
    """
    if props is None:
      props = {}
    else:
      # Copy to avoid mutating the dictionary passed in by the caller.
      props = dict(props)
    props['url'] = url
    super(Gadget, self).__init__(Gadget.class_type, properties=props)

  @classmethod
  def from_props(cls, props):
    """Reconstructs a Gadget from a property dictionary."""
    return cls(props.get('url'), props)

  def serialize(self):
    """Gadgets allow for None values, so skip the non-None filtering."""
    return {'properties': self._properties, 'type': self._type}

  def keys(self):
    """Get the valid (non-url) property keys for this gadget."""
    return [x for x in self._properties.keys() if x != 'url']
class Installer(Element):
  """An installer element, identified by its manifest url."""
  class_type = 'INSTALLER'

  def __init__(self, manifest):
    """Initializes an installer for the given manifest."""
    super(Installer, self).__init__(Installer.class_type, manifest=manifest)

  @classmethod
  def from_props(cls, props):
    """Reconstructs an Installer from a property dictionary."""
    return cls(props.get('manifest'))
class Image(Element):
  """An image element."""
  class_type = 'IMAGE'

  def __init__(self, url='', width=None, height=None,
               attachmentId=None, caption=None):
    """Initializes an image with url, optional size, attachment and caption."""
    super(Image, self).__init__(Image.class_type, url=url, width=width,
        height=height, attachmentId=attachmentId, caption=caption)

  @classmethod
  def from_props(cls, props):
    """Reconstructs an Image from a property dictionary."""
    # Keyword argument names must be byte strings in Python 2, but json
    # parsing produces unicode keys, so re-encode them first.
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    # apply() is deprecated; extended call syntax does the same thing.
    return cls(**props)
class Attachment(Element):
  """An attachment element.

  To create a new attachment, caption and data are needed.
  mimeType, attachmentId and attachmentUrl are sent via events.
  """
  class_type = 'ATTACHMENT'

  def __init__(self, caption=None, data=None, mimeType=None, attachmentId=None,
               attachmentUrl=None):
    # Keep the raw (unencoded) data on the instance. The original stored
    # it as a class attribute (Attachment.originalData), which was shared
    # by every Attachment instance and so returned the data of whichever
    # attachment was constructed last.
    self._original_data = data
    super(Attachment, self).__init__(Attachment.class_type, caption=caption,
        data=data, mimeType=mimeType, attachmentId=attachmentId,
        attachmentUrl=attachmentUrl)

  def __getattr__(self, key):
    # Serve 'data' from the raw per-instance copy so callers always see
    # the unencoded bytes, even after serialize() base64-encodes the
    # 'data' property.
    if key == 'data':
      return self._original_data
    return super(Attachment, self).__getattr__(key)

  @classmethod
  def from_props(cls, props):
    """Reconstructs an Attachment from a property dictionary."""
    # Keyword argument names must be byte strings in Python 2, but json
    # parsing produces unicode keys, so re-encode them first.
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    # apply() is deprecated; extended call syntax does the same thing.
    return cls(**props)

  def serialize(self):
    """Serializes the attachment object into JSON.

    The attachment data is base64 encoded.
    """
    if self.data:
      self._properties['data'] = base64.encodestring(self.data)
    return super(Attachment, self).serialize()
def is_element(cls):
  """Returns whether the passed class is an element.

  An element is a subclass of Element that exposes a class_type
  identifier.
  """
  try:
    if not issubclass(cls, Element):
      return False
    # (Removed a dead local that duplicated this expression.)
    return hasattr(cls, 'class_type')
  except TypeError:
    # issubclass raises TypeError when cls is not a class at all.
    return False
# Map from the wire 'type' identifier to the implementing element class.
ALL = dict((item.class_type, item)
           for item in globals().copy().values() if is_element(item))
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains various API-specific exception classes.
This module contains various specific exception classes that are raised by
the library back to the client.
"""
class Error(Exception):
  """Base exception type for errors raised by this library."""
| Python |
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Declares the api package."""
| Python |
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc. All Rights Reserved.
"""Tests for google3.walkabout.externalagents.api.commandline_robot_runner."""
__author__ = 'douwe@google.com (Douwe Osinga)'
import StringIO
from google3.pyglib import app
from google3.pyglib import flags
from google3.testing.pybase import googletest
from google3.walkabout.externalagents.api import commandline_robot_runner
from google3.walkabout.externalagents.api import events
FLAGS = flags.FLAGS
# Canned wire-format blip: one root blip with a single annotation and no
# elements or children.
BLIP_JSON = ('{"wdykLROk*13":'
             '{"lastModifiedTime":1242079608457,'
             '"contributors":["someguy@test.com"],'
             '"waveletId":"test.com!conv+root",'
             '"waveId":"test.com!wdykLROk*11",'
             '"parentBlipId":null,'
             '"version":3,'
             '"creator":"someguy@test.com",'
             '"content":"\\nContent!",'
             '"blipId":"wdykLROk*13",'
             '"annotations":[{"range":{"start":0,"end":1},'
             '"name":"user/e/otherguy@test.com","value":"Other"}],'
             '"elements":{},'
             '"childBlipIds":[]}'
             '}')
# Canned wavelet whose root blip is the one defined in BLIP_JSON.
WAVELET_JSON = ('{"lastModifiedTime":1242079611003,'
                '"title":"A title",'
                '"waveletId":"test.com!conv+root",'
                '"rootBlipId":"wdykLROk*13",'
                '"dataDocuments":null,'
                '"creationTime":1242079608457,'
                '"waveId":"test.com!wdykLROk*11",'
                '"participants":["someguy@test.com","monty@appspot.com"],'
                '"creator":"someguy@test.com",'
                '"version":5}')
# A single participants-changed event adding the robot to the wavelet.
EVENTS_JSON = ('[{"timestamp":1242079611003,'
               '"modifiedBy":"someguy@test.com",'
               '"properties":{"participantsRemoved":[],'
               '"participantsAdded":["monty@appspot.com"]},'
               '"type":"WAVELET_PARTICIPANTS_CHANGED"}]')
# Full bundle in the shape the runner reads from its input stream.
TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % (
    BLIP_JSON, WAVELET_JSON, EVENTS_JSON)
class CommandlineRobotRunnerTest(googletest.TestCase):

  def testSimpleFlow(self):
    """Feeds canned json to the runner and checks a title op is emitted."""
    FLAGS.eventdef_wavelet_participants_changed = 'x'
    flag_name = 'eventdef_' + events.WaveletParticipantsChanged.type.lower()
    setattr(FLAGS, flag_name, 'w.title="New title!"')
    incoming = StringIO.StringIO(TEST_JSON)
    outgoing = StringIO.StringIO()
    commandline_robot_runner.run_bot(incoming, outgoing)
    output = outgoing.getvalue()
    self.assertTrue('wavelet.setTitle' in output)
def main(unused_argv):
  # Delegate to the googletest runner; argv parsing was done by app.run().
  googletest.main()


if __name__ == '__main__':
  app.run()
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import element
import errors
import util
class Annotation(object):
  """Models an annotation on a document.

  Annotations are key/value pairs over a range of content. Annotations
  can be used to store data or to be interpreted by a client when displaying
  the data.
  """

  # Reserved annotation keys interpreted by the client for styling text.
  #: Reserved annotation for setting background color of text.
  BACKGROUND_COLOR = "style/backgroundColor"
  #: Reserved annotation for setting color of text.
  COLOR = "style/color"
  #: Reserved annotation for setting font family of text.
  FONT_FAMILY = "style/fontFamily"
  #: Reserved annotation for setting font family of text.
  FONT_SIZE = "style/fontSize"
  #: Reserved annotation for setting font style of text.
  FONT_STYLE = "style/fontStyle"
  #: Reserved annotation for setting font weight of text.
  FONT_WEIGHT = "style/fontWeight"
  #: Reserved annotation for setting text decoration.
  TEXT_DECORATION = "style/textDecoration"
  #: Reserved annotation for setting vertical alignment.
  VERTICAL_ALIGN = "style/verticalAlign"

  def __init__(self, name, value, start, end):
    """Initializes an annotation covering [start, end) with a name/value."""
    self._name = name
    self._value = value
    self._start = start
    self._end = end

  @property
  def name(self):
    """The key of this annotation."""
    return self._name

  @property
  def value(self):
    """The value of this annotation."""
    return self._value

  @property
  def start(self):
    """The start index of the annotated range."""
    return self._start

  @property
  def end(self):
    """The end index of the annotated range."""
    return self._end

  def _shift(self, where, inc):
    """Shift annotation by 'inc' if it (partly) overlaps with 'where'."""
    # Each boundary moves independently: only boundaries at or past
    # 'where' are affected.
    for attr in ('_start', '_end'):
      if getattr(self, attr) >= where:
        setattr(self, attr, getattr(self, attr) + inc)

  def serialize(self):
    """Serializes the annotation.

    Returns:
      A dict containing the name, value, and range values.
    """
    return {
        'name': self._name,
        'value': self._value,
        'range': {'start': self._start, 'end': self._end},
    }
class Annotations(object):
  """A dictionary-like object containing the annotations, keyed by name.

  Internally maps each annotation name to a list of Annotation
  instances for that name.
  """

  def __init__(self, operation_queue, blip):
    self._operation_queue = operation_queue
    self._blip = blip
    # name -> list of Annotation instances.
    self._store = {}

  def __contains__(self, what):
    """Membership by annotation name (or by an Annotation's name)."""
    if isinstance(what, Annotation):
      what = what.name
    return what in self._store

  def _add_internal(self, name, value, start, end):
    """Internal add annotation; does not send out operations.

    Overlapping annotations with the same value are merged into one
    range; overlapping annotations with a different value are chopped
    so the newly added range wins.
    """
    if name in self._store:
      # TODO: use bisect to make this more efficient.
      new_list = []
      for existing in self._store[name]:
        if start > existing.end or end < existing.start:
          # No overlap: keep the existing annotation untouched.
          new_list.append(existing)
        else:
          if existing.value == value:
            # merge the annotations:
            start = min(existing.start, start)
            end = max(existing.end, end)
          else:
            # chop the bits off the existing annotation
            if existing.start < start:
              new_list.append(Annotation(
                  existing.name, existing.value, existing.start, start))
            if existing.end > end:
              # Bug fix: the leftover piece runs from 'end' to
              # 'existing.end'; the original constructed the range
              # reversed as (existing.end, end).
              new_list.append(Annotation(
                  existing.name, existing.value, end, existing.end))
      new_list.append(Annotation(name, value, start, end))
      self._store[name] = new_list
    else:
      self._store[name] = [Annotation(name, value, start, end)]

  def _delete_internal(self, name, start=0, end=-1):
    """Remove the passed annotation range from the internal representation.

    A negative 'end' is interpreted relative to the blip length.
    Annotations partly covered by [start, end) are trimmed rather than
    dropped.
    """
    if not name in self._store:
      return
    if end < 0:
      end = len(self._blip) + end
    new_list = []
    for a in self._store[name]:
      if start > a.end or end < a.start:
        # No overlap: keep.
        new_list.append(a)
      elif start < a.start and end > a.end:
        # Fully covered: drop.
        continue
      else:
        # Partial overlap: keep the uncovered left/right remainders.
        if a.start < start:
          new_list.append(Annotation(name, a.value, a.start, start))
        if a.end > end:
          new_list.append(Annotation(name, a.value, end, a.end))
    if new_list:
      self._store[name] = new_list
    else:
      del self._store[name]

  def _shift(self, where, inc):
    """Shift annotation by 'inc' if it (partly) overlaps with 'where'."""
    for annotations in self._store.values():
      for annotation in annotations:
        annotation._shift(where, inc)
    # Merge fragmented annotations that should be contiguous, for example:
    # Annotation('foo', 'bar', 1, 2) and Annotation('foo', 'bar', 2, 3).
    for name, annotations in self._store.items():
      # Sort so contiguous pieces are adjacent, then merge in one pass.
      # (The original mutated the list while iterating it and deleted by
      # an index into a slice rather than the list itself.)
      pieces = sorted(annotations, key=lambda a: (a.start, a.end))
      merged = []
      for piece in pieces:
        if (merged and merged[-1].value == piece.value
            and merged[-1].end == piece.start):
          last = merged[-1]
          merged[-1] = Annotation(name, last.value, last.start, piece.end)
        else:
          merged.append(piece)
      self._store[name] = merged

  def __len__(self):
    # Number of distinct annotation names, not annotation instances.
    return len(self._store)

  def __getitem__(self, key):
    """Returns the list of annotations stored under 'key'."""
    return self._store[key]

  def __iter__(self):
    # Yields every Annotation instance, across all names.
    for l in self._store.values():
      for ann in l:
        yield ann

  def names(self):
    """Return the names of the annotations in the store."""
    return self._store.keys()

  def serialize(self):
    """Return a list of the serialized annotations."""
    res = []
    for v in self._store.values():
      res += [a.serialize() for a in v]
    return res
class Blips(object):
  """A dictionary-like object containing the blips, keyed on blip ID."""

  def __init__(self, blips):
    self._blips = blips

  def __getitem__(self, blip_id):
    return self._blips[blip_id]

  def __iter__(self):
    # Iterates blip ids, just like a dict.
    return iter(self._blips)

  def __len__(self):
    return len(self._blips)

  def _add(self, ablip):
    # Index the blip by its own id.
    self._blips[ablip.blip_id] = ablip

  def _remove_with_id(self, blip_id):
    doomed = self._blips[blip_id]
    if doomed:
      # Remove the reference to this blip from its parent.
      parent = doomed.parent_blip
      if parent:
        parent._child_blip_ids.remove(blip_id)
    del self._blips[blip_id]

  def get(self, blip_id, default_value=None):
    """Retrieves a blip.

    Returns:
      A Blip object. If none found for the ID, it returns None,
      or if default_value is specified, it returns that.
    """
    return self._blips.get(blip_id, default_value)

  def serialize(self):
    """Serializes the blips.

    Returns:
      A dict of serialized blips.
    """
    serialized = {}
    for blip_id, blip in self._blips.items():
      serialized[blip_id] = blip.serialize()
    return serialized
class BlipRefs(object):
  """Represents a set of references to contents in a blip.

  For example, a BlipRefs instance can represent the results
  of a search, an explicitly set range, a regular expression,
  or refer to the entire blip. BlipRefs are used to express
  operations on a blip in a consistent way that can easily
  be transfered to the server.

  The typical way of creating a BlipRefs object is to use
  selector methods on the Blip object. Developers will not
  usually instantiate a BlipRefs object directly.
  """

  # Operation identifiers understood by the server's document_modify op.
  DELETE = 'DELETE'
  REPLACE = 'REPLACE'
  INSERT = 'INSERT'
  INSERT_AFTER = 'INSERT_AFTER'
  ANNOTATE = 'ANNOTATE'
  CLEAR_ANNOTATION = 'CLEAR_ANNOTATION'
  UPDATE_ELEMENT = 'UPDATE_ELEMENT'

  def __init__(self, blip, maxres=1):
    self._blip = blip
    self._maxres = maxres

  @classmethod
  def all(cls, blip, findwhat, maxres=-1, **restrictions):
    """Construct an instance representing the search for text or elements."""
    obj = cls(blip, maxres)
    obj._findwhat = findwhat
    obj._restrictions = restrictions
    # _hits is evaluated lazily each time the refs are executed.
    obj._hits = lambda: obj._find(findwhat, maxres, **restrictions)
    if findwhat is None:
      # No findWhat, take the entire blip
      obj._params = {}
    else:
      query = {'maxRes': maxres}
      if isinstance(findwhat, basestring):
        query['textMatch'] = findwhat
      else:
        query['elementMatch'] = findwhat.class_type
        query['restrictions'] = restrictions
      obj._params = {'modifyQuery': query}
    return obj

  @classmethod
  def range(cls, blip, begin, end):
    """Constructs an instance representing an explicitly set range."""
    obj = cls(blip)
    obj._begin = begin
    obj._end = end
    obj._hits = lambda: [(begin, end)]
    obj._params = {'range': {'start': begin, 'end': end}}
    return obj

  def _elem_matches(self, elem, clz, **restrictions):
    # An element matches when it is of the requested class and every
    # restriction key/value equals the element's attribute.
    if not isinstance(elem, clz):
      return False
    for key, val in restrictions.items():
      if getattr(elem, key) != val:
        return False
    return True

  def _find(self, what, maxres=-1, **restrictions):
    """Iterates where 'what' occurs in the associated blip.

    What can be either a string or a class reference.

    Examples:
      self._find('hello') will return the first occurence of the word hello
      self._find(element.Gadget, url='http://example.com/gadget.xml')
      will return the first gadget that has as url example.com.

    Args:
      what: what to search for. Can be a class or a string. The class
          should be an element from element.py
      maxres: number of results to return at most, or <= 0 for all.
      restrictions: if what specifies a class, further restrictions
          of the found instances.

    Yields:
      Tuples indicating the range of the matches. For a one
      character/element match at position x, (x, x+1) is yielded.
    """
    blip = self._blip
    if what is None:
      yield 0, len(blip)
      # 'return' ends the generator; 'raise StopIteration' here would
      # become a RuntimeError under PEP 479 semantics.
      return
    if isinstance(what, basestring):
      idx = blip._content.find(what)
      count = 0
      while idx != -1:
        yield idx, idx + len(what)
        count += 1
        if count == maxres:
          return
        idx = blip._content.find(what, idx + len(what))
    else:
      count = 0
      for idx, el in blip._elements.items():
        if self._elem_matches(el, what, **restrictions):
          yield idx, idx + 1
          count += 1
          if count == maxres:
            return

  def _execute(self, modify_how, what, bundled_annotations=None):
    """Executes this BlipRefs object.

    Args:
      modify_how: What to do. Any of the operation declared at the top.
      what: Depending on the operation. For delete, has to be None.
          For the others it is a singleton, a list or a function returning
          what to do; for ANNOTATE tuples of (key, value), for the others
          either string or elements.
          If what is a function, it takes three parameters, the content of
          the blip, the beginning of the matching range and the end.
      bundled_annotations: Annotations to apply immediately.

    Raises:
      IndexError when trying to access content outside of the blip.
      ValueError when called with the wrong values.

    Returns:
      self for chainability, or None when no match was found.
    """
    blip = self._blip

    if modify_how != BlipRefs.DELETE:
      if type(what) != list:
        what = [what]
      next_index = 0

    matched = []

    # updated_elements is used to store the element type of the
    # element to update
    updated_elements = []

    # For now, if we find one markup, we'll use it everywhere.
    next = None
    hit_found = False

    for start, end in self._hits():
      hit_found = True
      # Negative indices count back from the end of the blip.
      if start < 0:
        start += len(blip)
        if end == 0:
          end += len(blip)
      if end < 0:
        end += len(blip)

      if len(blip) == 0:
        if start != 0 or end != 0:
          raise IndexError('Start and end have to be 0 for empty document')
      elif start < 0 or end < 1 or start >= len(blip) or end > len(blip):
        raise IndexError('Position outside the document')

      if modify_how == BlipRefs.DELETE:
        # Drop elements, annotations and text in the range, then close
        # the gap by shifting everything after it left.
        for i in range(start, end):
          if i in blip._elements:
            del blip._elements[i]
        blip._delete_annotations(start, end)
        blip._shift(end, start - end)
        blip._content = blip._content[:start] + blip._content[end:]
      else:
        if callable(what):
          next = what(blip._content, start, end)
          matched.append(next)
        else:
          # Cycle through the supplied values, one per hit.
          next = what[next_index]
          next_index = (next_index + 1) % len(what)
        if isinstance(next, str):
          next = util.force_unicode(next)

        if modify_how == BlipRefs.ANNOTATE:
          key, value = next
          blip.annotations._add_internal(key, value, start, end)
        elif modify_how == BlipRefs.CLEAR_ANNOTATION:
          blip.annotations._delete_internal(next, start, end)
        elif modify_how == BlipRefs.UPDATE_ELEMENT:
          el = blip._elements.get(start)
          if not el:
            # Bug fix: the original tested the always-truthy 'element'
            # module instead of the element instance 'el', so this
            # error could never fire.
            raise ValueError('No element found at index %s' % start)
          # the passing around of types this way feels a bit dirty:
          updated_elements.append(element.Element.from_json({'type': el.type,
                                                             'properties': next}))
          for k, b in next.items():
            setattr(el, k, b)
        else:
          if modify_how == BlipRefs.INSERT:
            end = start
          elif modify_how == BlipRefs.INSERT_AFTER:
            start = end
          elif modify_how == BlipRefs.REPLACE:
            pass
          else:
            raise ValueError('Unexpected modify_how: ' + modify_how)

          if isinstance(next, element.Element):
            # Elements occupy a single placeholder character in the text.
            text = ' '
          else:
            text = next

          # in the case of a replace, and the replacement text is shorter,
          # delete the delta.
          if start != end and len(text) < end - start:
            blip._delete_annotations(start + len(text), end)
          blip._shift(end, len(text) + start - end)
          blip._content = blip._content[:start] + text + blip._content[end:]

          if bundled_annotations:
            end_annotation = start + len(text)
            blip._delete_annotations(start, end_annotation)
            for key, value in bundled_annotations:
              blip.annotations._add_internal(key, value, start, end_annotation)

          if isinstance(next, element.Element):
            blip._elements[start] = next

    # No match found, return immediately without generating op.
    if not hit_found:
      return

    operation = blip._operation_queue.document_modify(blip.wave_id,
                                                      blip.wavelet_id,
                                                      blip.blip_id)
    for param, value in self._params.items():
      operation.set_param(param, value)

    modify_action = {'modifyHow': modify_how}
    if modify_how == BlipRefs.DELETE:
      pass
    elif modify_how == BlipRefs.UPDATE_ELEMENT:
      modify_action['elements'] = updated_elements
    elif (modify_how == BlipRefs.REPLACE or
          modify_how == BlipRefs.INSERT or
          modify_how == BlipRefs.INSERT_AFTER):
      if callable(what):
        what = matched
      if what:
        if not isinstance(next, element.Element):
          modify_action['values'] = [util.force_unicode(value) for value in what]
        else:
          modify_action['elements'] = what
    elif modify_how == BlipRefs.ANNOTATE:
      modify_action['values'] = [x[1] for x in what]
      modify_action['annotationKey'] = what[0][0]
    elif modify_how == BlipRefs.CLEAR_ANNOTATION:
      modify_action['annotationKey'] = what[0]
    if bundled_annotations:
      modify_action['bundledAnnotations'] = [
          {'key': key, 'value': value} for key, value in bundled_annotations]
    operation.set_param('modifyAction', modify_action)

    return self

  def insert(self, what, bundled_annotations=None):
    """Inserts what at the matched positions."""
    return self._execute(
        BlipRefs.INSERT, what, bundled_annotations=bundled_annotations)

  def insert_after(self, what, bundled_annotations=None):
    """Inserts what just after the matched positions."""
    return self._execute(
        BlipRefs.INSERT_AFTER, what, bundled_annotations=bundled_annotations)

  def replace(self, what, bundled_annotations=None):
    """Replaces the matched positions with what."""
    return self._execute(
        BlipRefs.REPLACE, what, bundled_annotations=bundled_annotations)

  def delete(self):
    """Deletes the content at the matched positions."""
    return self._execute(BlipRefs.DELETE, None)

  def annotate(self, name, value=None):
    """Annotates the content at the matched positions.

    You can either specify both name and value to set the
    same annotation, or supply as the first parameter something
    that yields name/value pairs. The name and value should both be strings.
    """
    if value is None:
      what = name
    else:
      what = (name, value)
    return self._execute(BlipRefs.ANNOTATE, what)

  def clear_annotation(self, name):
    """Clears the annotation at the matched positions."""
    return self._execute(BlipRefs.CLEAR_ANNOTATION, name)

  def update_element(self, new_values):
    """Update an existing element with a set of new values."""
    return self._execute(BlipRefs.UPDATE_ELEMENT, new_values)

  def __nonzero__(self):
    """Return whether we have a value."""
    for start, end in self._hits():
      return True
    return False

  def value(self):
    """Convenience method to convert a BlipRefs to value of its first match."""
    for start, end in self._hits():
      if end - start == 1 and start in self._blip._elements:
        return self._blip._elements[start]
      else:
        return self._blip.text[start:end]
    raise ValueError('BlipRefs has no values')

  def __getattr__(self, attribute):
    """Mirror the getattr of value().

    This allows for clever things like
    first(IMAGE).url
    or
    blip.annotate_with(key, value).upper()
    """
    return getattr(self.value(), attribute)

  def __radd__(self, other):
    """Make it possible to add this to a string."""
    return other + self.value()

  def __cmp__(self, other):
    """Support comparision with target."""
    return cmp(self.value(), other)

  def __iter__(self):
    for start_end in self._hits():
      yield start_end
class Blip(object):
"""Models a single blip instance.
Blips are essentially the documents that make up a conversation. Blips can
live in a hierarchy of blips. A root blip has no parent blip id, but all
blips have the ids of the wave and wavelet that they are associated with.
Blips also contain annotations, content and elements, which are accessed via
the Document object.
"""
  def __init__(self, json, other_blips, operation_queue):
    """Inits this blip with JSON data.

    Args:
      json: JSON data dictionary from Wave server.
      other_blips: A dictionary like object that can be used to resolve
          ids of blips to blips.
      operation_queue: an OperationQueue object to store generated operations
          in.
    """
    self._blip_id = json.get('blipId')
    self._operation_queue = operation_queue
    self._child_blip_ids = set(json.get('childBlipIds', []))
    self._content = json.get('content', '')
    self._contributors = set(json.get('contributors', []))
    self._creator = json.get('creator')
    self._last_modified_time = json.get('lastModifiedTime', 0)
    self._version = json.get('version', 0)
    self._parent_blip_id = json.get('parentBlipId')
    self._wave_id = json.get('waveId')
    self._wavelet_id = json.get('waveletId')
    # Wrap plain dicts so lookups go through the Blips interface.
    if isinstance(other_blips, Blips):
      self._other_blips = other_blips
    else:
      self._other_blips = Blips(other_blips)
    # Replay the serialized annotations into the internal store without
    # generating operations.
    self._annotations = Annotations(operation_queue, self)
    for annjson in json.get('annotations', []):
      r = annjson['range']
      self._annotations._add_internal(annjson['name'],
                                      annjson['value'],
                                      r['start'],
                                      r['end'])
    # Element positions arrive as string keys; convert them to int offsets.
    self._elements = {}
    json_elements = json.get('elements', {})
    for elem in json_elements:
      self._elements[int(elem)] = element.Element.from_json(json_elements[elem])
    self.raw_data = json
  @property
  def blip_id(self):
    """The id of this blip."""
    return self._blip_id

  @property
  def wave_id(self):
    """The id of the wave that this blip belongs to."""
    return self._wave_id

  @property
  def wavelet_id(self):
    """The id of the wavelet that this blip belongs to."""
    return self._wavelet_id

  @property
  def child_blip_ids(self):
    """The set of the ids of this blip's children.

    Note: this is the live internal set, not a copy.
    """
    return self._child_blip_ids
@property
def child_blips(self):
"""The set of blips that are children of this blip."""
return set([self._other_blips[blid_id] for blid_id in self._child_blip_ids
if blid_id in self._other_blips])
  @property
  def contributors(self):
    """The set of participant ids that contributed to this blip."""
    return self._contributors

  @property
  def creator(self):
    """The id of the participant that created this blip."""
    return self._creator

  @property
  def last_modified_time(self):
    """The time in seconds since epoch when this blip was last modified."""
    # NOTE(review): the canned server timestamps elsewhere look like
    # milliseconds, not seconds -- confirm the unit with the protocol docs.
    return self._last_modified_time

  @property
  def version(self):
    """The version of this blip."""
    return self._version

  @property
  def parent_blip_id(self):
    """The parent blip_id or None if this is the root blip."""
    return self._parent_blip_id

  @property
  def parent_blip(self):
    """The parent blip or None if it is the root."""
    # if parent_blip_id is None, get will also return None
    return self._other_blips.get(self._parent_blip_id)

  @property
  def inline_blip_offset(self):
    """The offset in the parent if this blip is inline or -1 if not.

    If the parent is not in the context, this function will always
    return -1 since it can't determine the inline blip status.
    """
    parent = self.parent_blip
    if not parent:
      return -1
    # Scan the parent's elements for an inline-blip marker pointing here.
    for offset, el in parent._elements.items():
      if el.type == element.Element.INLINE_BLIP_TYPE and el.id == self.blip_id:
        return offset
    return -1

  def is_root(self):
    """Returns whether this is the root blip of a wavelet."""
    return self._parent_blip_id is None
  @property
  def annotations(self):
    """The annotations for this document (an Annotations instance)."""
    return self._annotations

  @property
  def elements(self):
    """Returns a list of elements for this document.

    The elements of a blip are things like forms elements and gadgets
    that cannot be expressed as plain text. In the text of the blip, you'll
    typically find a space as a place holder for the element.

    If you want to retrieve the element at a particular index in the blip, use
    blip[index].value().
    """
    return self._elements.values()
  def __len__(self):
    # The length of a blip is the length of its text content.
    return len(self._content)

  def __getitem__(self, item):
    """returns a BlipRefs for the given slice."""
    if isinstance(item, slice):
      if item.step:
        # Arbitrary strides cannot be expressed as a single server range.
        raise errors.Error('Step not supported for blip slices')
      return self.range(item.start, item.stop)
    else:
      return self.at(item)

  def __setitem__(self, item, value):
    """short cut for self.range/at().replace(value)."""
    self.__getitem__(item).replace(value)

  def __delitem__(self, item):
    """short cut for self.range/at().delete()."""
    self.__getitem__(item).delete()
def _shift(self, where, inc):
"""Move element and annotations after 'where' up by 'inc'."""
new_elements = {}
for idx, el in self._elements.items():
if idx >= where:
idx += inc
new_elements[idx] = el
self._elements = new_elements
self._annotations._shift(where, inc)
def _delete_annotations(self, start, end):
"""Delete all annotations between 'start' and 'end'."""
for annotation_name in self._annotations.names():
self._annotations._delete_internal(annotation_name, start, end)
  def all(self, findwhat=None, maxres=-1, **restrictions):
    """Returns a BlipRefs object representing all results for the search.

    If searching for an element, the restrictions can be used to specify
    additional element properties to filter on, like the url of a Gadget.
    """
    return BlipRefs.all(self, findwhat, maxres, **restrictions)

  def first(self, findwhat=None, **restrictions):
    """Returns a BlipRefs object representing the first result for the search.

    If searching for an element, the restrictions can be used to specify
    additional element properties to filter on, like the url of a Gadget.
    """
    # Same as all() but capped at a single result.
    return BlipRefs.all(self, findwhat, 1, **restrictions)

  def at(self, index):
    """Returns a BlipRefs object representing a 1-character range."""
    return BlipRefs.range(self, index, index + 1)

  def range(self, start, end):
    """Returns a BlipRefs object representing the range."""
    return BlipRefs.range(self, start, end)
  def serialize(self):
    """Return a dictionary representation of this blip ready for json.

    Mirrors the wire format consumed by __init__; sets are converted
    back to lists and element offsets become dict keys.
    """
    return {'blipId': self._blip_id,
            'childBlipIds': list(self._child_blip_ids),
            'content': self._content,
            'creator': self._creator,
            'contributors': list(self._contributors),
            'lastModifiedTime': self._last_modified_time,
            'version': self._version,
            'parentBlipId': self._parent_blip_id,
            'waveId': self._wave_id,
            'waveletId': self._wavelet_id,
            'annotations': self._annotations.serialize(),
            'elements': dict([(index, e.serialize())
                              for index, e in self._elements.items()])
           }
def proxy_for(self, proxy_for_id):
"""Return a view on this blip that will proxy for the specified id.
A shallow copy of the current blip is returned with the proxy_for_id
set. Any modifications made to this copy will be done using the
proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will
be used.
"""
operation_queue = self._operation_queue.proxy_for(proxy_for_id)
res = Blip(json={},
other_blips={},
operation_queue=operation_queue)
res._blip_id = self._blip_id
res._child_blip_ids = self._child_blip_ids
res._content = self._content
res._contributors = self._contributors
res._creator = self._creator
res._last_modified_time = self._last_modified_time
res._version = self._version
res._parent_blip_id = self._parent_blip_id
res._wave_id = self._wave_id
res._wavelet_id = self._wavelet_id
res._other_blips = self._other_blips
res._annotations = self._annotations
res._elements = self._elements
res.raw_data = self.raw_data
return res
  @property
  def text(self):
    """Returns the raw text content of this document.

    Element positions used elsewhere (e.g. the keys of self._elements)
    index into this string.
    """
    return self._content
def find(self, what, **restrictions):
"""Iterate to matching bits of contents.
Yield either elements or pieces of text.
"""
br = BlipRefs.all(self, what, **restrictions)
for start, end in br._hits():
if end - start == 1 and start in self._elements:
yield self._elements[start]
else:
yield self._content[start:end]
raise StopIteration
  def append(self, what, bundled_annotations=None):
    """Convenience method covering a common pattern.

    Inserts 'what' after the whole-document reference (presumably appending
    at the end of the blip -- confirm against BlipRefs.insert_after),
    optionally applying bundled_annotations to the inserted content.
    """
    return BlipRefs.all(self, findwhat=None).insert_after(
        what, bundled_annotations=bundled_annotations)
def reply(self):
"""Create and return a reply to this blip."""
blip_data = self._operation_queue.blip_create_child(self.wave_id,
self.wavelet_id,
self.blip_id)
new_blip = Blip(blip_data, self._other_blips, self._operation_queue)
self._other_blips._add(new_blip)
return new_blip
def append_markup(self, markup):
"""Interpret the markup text as xhtml and append the result to the doc.
Args:
markup: The markup'ed text to append.
"""
markup = util.force_unicode(markup)
self._operation_queue.document_append_markup(self.wave_id,
self.wavelet_id,
self.blip_id,
markup)
self._content += util.parse_markup(markup)
def insert_inline_blip(self, position):
"""Inserts an inline blip into this blip at a specific position.
Args:
position: Position to insert the blip at. This has to be greater than 0.
Returns:
The JSON data of the blip that was created.
"""
if position <= 0:
raise IndexError(('Illegal inline blip position: %d. Position has to ' +
'be greater than 0.') % position)
blip_data = self._operation_queue.document_inline_blip_insert(
self.wave_id,
self.wavelet_id,
self.blip_id,
position)
new_blip = Blip(blip_data, self._other_blips, self._operation_queue)
self._other_blips._add(new_blip)
return new_blip
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines classes that are needed to model a wavelet."""
import blip
import errors
import util
class DataDocs(object):
  """Dict-like wrapper around a wavelet's data documents.

  Every mutation is mirrored to the server through the operation queue.
  """

  def __init__(self, init_docs, wave_id, wavelet_id, operation_queue):
    self._docs = init_docs
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __iter__(self):
    return iter(self._docs)

  def __contains__(self, key):
    return key in self._docs

  def __getitem__(self, key):
    return self._docs[key]

  def __delitem__(self, key):
    if key not in self._docs:
      return
    # Setting a data document to None deletes it server side.
    self._operation_queue.wavelet_datadoc_set(
        self._wave_id, self._wavelet_id, key, None)
    del self._docs[key]

  def __setitem__(self, key, value):
    self._operation_queue.wavelet_datadoc_set(
        self._wave_id, self._wavelet_id, key, value)
    if value is None and key in self._docs:
      del self._docs[key]
    else:
      self._docs[key] = value

  def __len__(self):
    return len(self._docs)

  def keys(self):
    return self._docs.keys()

  def serialize(self):
    """Returns a dictionary of the data documents."""
    return self._docs
class Participants(object):
  """Set-like wrapper around a wavelet's participants and their roles.

  Mutations are mirrored to the server through the operation queue.
  """

  #: Designates full access (read/write) role.
  ROLE_FULL = "FULL"
  #: Designates read-only role.
  ROLE_READ_ONLY = "READ_ONLY"

  def __init__(self, participants, roles, wave_id, wavelet_id, operation_queue):
    self._participants = set(participants)
    self._roles = roles.copy()
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __contains__(self, participant):
    return participant in self._participants

  def __len__(self):
    return len(self._participants)

  def __iter__(self):
    return iter(self._participants)

  def add(self, participant_id):
    """Adds a participant by their ID (address)."""
    self._operation_queue.wavelet_add_participant(
        self._wave_id, self._wavelet_id, participant_id)
    self._participants.add(participant_id)

  def get_role(self, participant_id):
    """Return the role for the given participant_id."""
    # Unknown participants default to full access.
    return self._roles.get(participant_id, Participants.ROLE_FULL)

  def set_role(self, participant_id, role):
    """Sets the role for the given participant_id."""
    if role not in (Participants.ROLE_FULL, Participants.ROLE_READ_ONLY):
      raise ValueError(role + ' is not a valid role')
    self._operation_queue.wavelet_modify_participant_role(
        self._wave_id, self._wavelet_id, participant_id, role)
    self._roles[participant_id] = role

  def serialize(self):
    """Returns a list of the participants."""
    return list(self._participants)
class Tags(object):
  """List-like wrapper around a wavelet's tags.

  Mutations are mirrored to the server through the operation queue.
  """

  def __init__(self, tags, wave_id, wavelet_id, operation_queue):
    self._tags = list(tags)
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __getitem__(self, index):
    return self._tags[index]

  def __len__(self):
    return len(self._tags)

  def __iter__(self):
    return iter(self._tags)

  def append(self, tag):
    """Appends a tag if it doesn't already exist."""
    tag = util.force_unicode(tag)
    if tag not in self._tags:
      self._operation_queue.wavelet_modify_tag(
          self._wave_id, self._wavelet_id, tag)
      self._tags.append(tag)

  def remove(self, tag):
    """Removes a tag if it exists."""
    tag = util.force_unicode(tag)
    if tag not in self._tags:
      return
    self._operation_queue.wavelet_modify_tag(
        self._wave_id, self._wavelet_id, tag, modify_how='remove')
    self._tags.remove(tag)

  def serialize(self):
    """Returns a list of tags."""
    return list(self._tags)
class Wavelet(object):
  """Models a single wavelet.

  A single wavelet is composed of metadata, participants, and its blips.
  To guarantee that all blips are available, specify Context.ALL for events.
  """

  def __init__(self, json, blips, robot, operation_queue):
    """Inits this wavelet with JSON data.

    Args:
      json: JSON data dictionary from Wave server.
      blips: a dictionary object that can be used to resolve blips.
      robot: the robot owning this wavelet.
      operation_queue: an OperationQueue object to be used to
          send any generated operations to.
    """
    self._robot = robot
    self._operation_queue = operation_queue
    self._wave_id = json.get('waveId')
    self._wavelet_id = json.get('waveletId')
    self._creator = json.get('creator')
    self._creation_time = json.get('creationTime', 0)
    self._data_documents = DataDocs(json.get('dataDocuments', {}),
                                    self._wave_id,
                                    self._wavelet_id,
                                    operation_queue)
    self._last_modified_time = json.get('lastModifiedTime')
    self._participants = Participants(json.get('participants', []),
                                      json.get('participantRoles', {}),
                                      self._wave_id,
                                      self._wavelet_id,
                                      operation_queue)
    self._title = json.get('title', '')
    self._tags = Tags(json.get('tags', []),
                      self._wave_id,
                      self._wavelet_id,
                      operation_queue)
    self._raw_data = json
    self._blips = blip.Blips(blips)
    self._root_blip_id = json.get('rootBlipId')
    # Resolve the root blip eagerly when it is part of the supplied context.
    if self._root_blip_id and self._root_blip_id in self._blips:
      self._root_blip = self._blips[self._root_blip_id]
    else:
      self._root_blip = None
    # Only known for actively fetched wavelets; see robot_address property.
    self._robot_address = None

  @property
  def wavelet_id(self):
    """Returns this wavelet's id."""
    return self._wavelet_id

  @property
  def wave_id(self):
    """Returns this wavelet's parent wave id."""
    return self._wave_id

  @property
  def creator(self):
    """Returns the participant id of the creator of this wavelet."""
    return self._creator

  @property
  def creation_time(self):
    """Returns the time that this wavelet was first created in milliseconds."""
    return self._creation_time

  @property
  def data_documents(self):
    """Returns the data documents for this wavelet based on key name."""
    return self._data_documents

  @property
  def domain(self):
    """Return the domain that wavelet belongs to."""
    # Wave ids have the form '<domain>!<id>'.
    p = self._wave_id.find('!')
    if p == -1:
      return None
    else:
      return self._wave_id[:p]

  @property
  def last_modified_time(self):
    """Returns the time that this wavelet was last modified in ms."""
    return self._last_modified_time

  @property
  def participants(self):
    """Returns a set of participants on this wavelet."""
    return self._participants

  @property
  def tags(self):
    """Returns a list of tags for this wavelet."""
    return self._tags

  @property
  def robot(self):
    """The robot that owns this wavelet."""
    return self._robot

  def _get_title(self):
    return self._title

  def _set_title(self, title):
    title = util.force_unicode(title)
    if title.find('\n') != -1:
      raise errors.Error('Wavelet title should not contain a newline ' +
                         'character. Specified: ' + title)
    self._operation_queue.wavelet_set_title(self.wave_id, self.wavelet_id,
                                            title)
    self._title = title
    # Adjust the content of the root blip, if it is available in the context.
    if self._root_blip:
      # The title is the first line of the root blip: rebuild the content
      # as '\n' + title + '\n' + everything after the original second '\n'.
      content = '\n'
      splits = self._root_blip._content.split('\n', 2)
      if len(splits) == 3:
        content += splits[2]
      self._root_blip._content = '\n' + title + content

  #: Returns or sets the wavelet's title.
  title = property(_get_title, _set_title,
                   doc='Get or set the title of the wavelet.')

  def _get_robot_address(self):
    return self._robot_address

  def _set_robot_address(self, address):
    # The address may only be set once per wavelet instance.
    if self._robot_address:
      raise errors.Error('robot address already set')
    self._robot_address = address

  robot_address = property(_get_robot_address, _set_robot_address,
                           doc='Get or set the address of the current robot.')

  @property
  def root_blip(self):
    """Returns this wavelet's root blip."""
    return self._root_blip

  @property
  def blips(self):
    """Returns the blips for this wavelet."""
    return self._blips

  def get_operation_queue(self):
    """Returns the OperationQueue for this wavelet."""
    return self._operation_queue

  def serialize(self):
    """Return a dict of the wavelet properties."""
    return {'waveId': self._wave_id,
            'waveletId': self._wavelet_id,
            'creator': self._creator,
            'creationTime': self._creation_time,
            'dataDocuments': self._data_documents.serialize(),
            'lastModifiedTime': self._last_modified_time,
            'participants': self._participants.serialize(),
            'title': self._title,
            'blips': self._blips.serialize(),
            'rootBlipId': self._root_blip_id
           }

  def proxy_for(self, proxy_for_id):
    """Return a view on this wavelet that will proxy for the specified id.

    A shallow copy of the current wavelet is returned with the proxy_for_id
    set. Any modifications made to this copy will be done using the
    proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will
    be used.

    If the wavelet was retrieved using the Active Robot API, that is
    by fetch_wavelet, then the address of the robot must be added to the
    wavelet by setting wavelet.robot_address before calling proxy_for().
    """
    self.add_proxying_participant(proxy_for_id)
    operation_queue = self.get_operation_queue().proxy_for(proxy_for_id)
    res = Wavelet(json={},
                  blips={},
                  robot=self.robot,
                  operation_queue=operation_queue)
    # Share all state with the original wavelet; only the operation queue
    # (and therefore the sending address) differs.
    res._wave_id = self._wave_id
    res._wavelet_id = self._wavelet_id
    res._creator = self._creator
    res._creation_time = self._creation_time
    res._data_documents = self._data_documents
    res._last_modified_time = self._last_modified_time
    res._participants = self._participants
    res._title = self._title
    res._raw_data = self._raw_data
    res._blips = self._blips
    res._root_blip = self._root_blip
    return res

  def add_proxying_participant(self, id):
    """Adds a proxying participant to the wave.

    Proxying participants are of the form robot+proxy@domain.com. This
    convenience method constructs this id and then calls participants.add.

    Raises:
      errors.Error: if robot_address has not been set.
    """
    if not self.robot_address:
      raise errors.Error(
          'Need a robot address to add a proxying for participant')
    # Split the robot address into id, optional '#version' and domain.
    robotid, domain = self.robot_address.split('@', 1)
    if '#' in robotid:
      robotid, version = robotid.split('#')
    else:
      version = None
    # Replace an existing '+proxy' part or append the new one.
    if '+' in robotid:
      newid = robotid.split('+', 1)[0] + '+' + id
    else:
      newid = robotid + '+' + id
    if version:
      newid += '#' + version
    newid += '@' + domain
    self.participants.add(newid)

  def submit_with(self, other_wavelet):
    """Submit this wavelet when the passed other wavelet is submitted.

    Wavelets constructed outside of the event callback need to
    be either explicitly submitted using robot.submit(wavelet) or be
    associated with a different wavelet that will be submitted or
    is part of the event callback.
    """
    other_wavelet._operation_queue.copy_operations(self._operation_queue)
    self._operation_queue = other_wavelet._operation_queue

  def reply(self, initial_content=None):
    """Replies to the conversation in this wavelet.

    Args:
      initial_content: If set, start with this (string) content.

    Returns:
      A transient version of the blip that contains the reply.
    """
    if not initial_content:
      initial_content = u'\n'
    initial_content = util.force_unicode(initial_content)
    blip_data = self._operation_queue.wavelet_append_blip(
        self.wave_id, self.wavelet_id, initial_content)
    instance = blip.Blip(blip_data, self._blips, self._operation_queue)
    self._blips._add(instance)
    return instance

  def delete(self, todelete):
    """Remove a blip from this wavelet.

    Args:
      todelete: either a blip or a blip id to be removed.
    """
    if isinstance(todelete, blip.Blip):
      blip_id = todelete.blip_id
    else:
      blip_id = todelete
    self._operation_queue.blip_delete(self.wave_id, self.wavelet_id, blip_id)
    self._blips._remove_with_id(blip_id)
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the util module."""
__author__ = 'davidbyttow@google.com (David Byttow)'
import unittest
import ops
import util
class TestUtils(unittest.TestCase):
  """Tests utility functions from the util module."""

  def testIsIterable(self):
    # Containers are iterable; numbers, strings (by design) and types are not.
    self.assertTrue(util.is_iterable([]))
    self.assertTrue(util.is_iterable({}))
    self.assertTrue(util.is_iterable(set()))
    self.assertTrue(util.is_iterable(()))
    self.assertFalse(util.is_iterable(42))
    self.assertFalse(util.is_iterable('list?'))
    self.assertFalse(util.is_iterable(object))

  def testIsDict(self):
    # Only real mappings count as dicts.
    self.assertFalse(util.is_dict([]))
    self.assertTrue(util.is_dict({}))
    self.assertFalse(util.is_dict(set()))
    self.assertFalse(util.is_dict(()))
    self.assertFalse(util.is_dict(42))
    self.assertFalse(util.is_dict('dict?'))
    self.assertFalse(util.is_dict(object))

  def testIsUserDefinedNewStyleClass(self):
    # Only instances of user-defined new-style classes qualify.
    class OldClass:
      pass
    class NewClass(object):
      pass
    self.assertFalse(util.is_user_defined_new_style_class(OldClass()))
    self.assertTrue(util.is_user_defined_new_style_class(NewClass()))
    self.assertFalse(util.is_user_defined_new_style_class({}))
    self.assertFalse(util.is_user_defined_new_style_class(()))
    self.assertFalse(util.is_user_defined_new_style_class(42))
    self.assertFalse(util.is_user_defined_new_style_class('instance?'))

  def testLowerCamelCase(self):
    # Consecutive and trailing underscores collapse; already camel-cased
    # input is passed through unchanged.
    self.assertEquals('foo', util.lower_camel_case('foo'))
    self.assertEquals('fooBar', util.lower_camel_case('foo_bar'))
    self.assertEquals('fooBar', util.lower_camel_case('fooBar'))
    self.assertEquals('blipId', util.lower_camel_case('blip_id'))
    self.assertEquals('fooBar', util.lower_camel_case('foo__bar'))
    self.assertEquals('fooBarBaz', util.lower_camel_case('foo_bar_baz'))
    self.assertEquals('f', util.lower_camel_case('f'))
    self.assertEquals('f', util.lower_camel_case('f_'))
    self.assertEquals('', util.lower_camel_case(''))
    self.assertEquals('', util.lower_camel_case('_'))
    self.assertEquals('aBCDEF', util.lower_camel_case('_a_b_c_d_e_f_'))

  def assertListsEqual(self, a, b):
    """Helper: assert both sequences have equal length and elements."""
    self.assertEquals(len(a), len(b))
    for i in range(len(a)):
      self.assertEquals(a[i], b[i])

  def assertDictsEqual(self, a, b):
    """Helper: assert both dicts have the same keys and values."""
    self.assertEquals(len(a.keys()), len(b.keys()))
    for k, v in a.iteritems():
      self.assertEquals(v, b[k])

  def testSerializeList(self):
    data = [1, 2, 3]
    output = util.serialize(data)
    self.assertListsEqual(data, output)

  def testSerializeDict(self):
    # Keys are converted to lower camel case during serialization.
    data = {'key': 'value', 'under_score': 'value2'}
    expected = {'key': 'value', 'underScore': 'value2'}
    output = util.serialize(data)
    self.assertDictsEqual(expected, output)

  def testNonNoneDict(self):
    # Entries with a None value are dropped.
    a = {'a': 1, 'b': 1}
    self.assertDictsEqual(a, util.non_none_dict(a))
    b = a.copy()
    b['c'] = None
    self.assertDictsEqual(a, util.non_none_dict(b))

  def testForceUnicode(self):
    # Byte strings are decoded as UTF-8; unicode is passed through.
    self.assertEquals(u"aaa", util.force_unicode("aaa"))
    self.assertEquals(u"12", util.force_unicode(12))
    self.assertEquals(u"\u0430\u0431\u0432",
                      util.force_unicode("\xd0\xb0\xd0\xb1\xd0\xb2"))
    self.assertEquals(u'\u30e6\u30cb\u30b3\u30fc\u30c9',
                      util.force_unicode(u'\u30e6\u30cb\u30b3\u30fc\u30c9'))

  def testSerializeAttributes(self):
    class Data(object):
      def __init__(self):
        self.public = 1
        self._protected = 2
        self.__private = 3
      def Func(self):
        pass

    data = Data()
    output = util.serialize(data)
    # Functions and non-public fields should not be serialized.
    self.assertEquals(1, len(output.keys()))
    self.assertEquals(data.public, output['public'])

  def testStringEnum(self):
    # Each argument becomes an attribute whose value is its own name.
    util.StringEnum()
    single = util.StringEnum('foo')
    self.assertEquals('foo', single.foo)
    multi = util.StringEnum('foo', 'bar')
    self.assertEquals('foo', multi.foo)
    self.assertEquals('bar', multi.bar)

  def testParseMarkup(self):
    # Tags are stripped; 'p' and 'br' become newlines.
    self.assertEquals('foo', util.parse_markup('foo'))
    self.assertEquals('foo bar', util.parse_markup('foo <b>bar</b>'))
    self.assertEquals('foo\nbar', util.parse_markup('foo<br>bar'))
    self.assertEquals('foo\nbar', util.parse_markup('foo<p indent="3">bar'))
# Run the test suite when executed directly.
if __name__ == '__main__':
  unittest.main()
| Python |
# handler_xmpp.py
#
#
""" xmpp request handler. """
## jsb imports
from jsb.utils.generic import fromenc, toenc
from jsb.version import getversion
from jsb.utils.lazydict import LazyDict
from jsb.utils.exception import handle_exception
from jsb.lib.plugins import plugs
from jsb.lib.boot import boot, plugin_packages
## gaelib imports
from jsb.drivers.gae.xmpp.bot import XMPPBot
from jsb.drivers.gae.xmpp.event import XMPPEvent
from jsb.utils.gae.auth import checkuser
## google imports
from google.appengine.api import xmpp
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import users as gusers
from google.appengine.ext import db
from google.appengine.ext.webapp import xmpp_handlers
## basic imports
import wsgiref.handlers
import sys
import time
import types
import logging
## boot

logging.info(getversion('GAE XMPP'))
boot()

## defines

# The single global XMPP bot instance used by the handler below.
bot = XMPPBot()
## functions
def xmppbox(response):
    """ write a minimal html form for entering a bot command to response.

    The form posts to the GAE XMPP chat endpoint so commands can be
    entered from a browser.
    """
    # The field is a text entry; the previous 'type="commit"' is not a valid
    # HTML input type (browsers silently fell back to a text field anyway).
    response.out.write("""
<form action="/_ah/xmpp/message/chat/" method="post">
  <div><b>enter command:</b> <input type="text" name="body">
</form>
""")
## classes
class XMPPHandler(webapp.RequestHandler):
    """ relay incoming messages to the bot. """

    def post(self):
        """ parse an incoming XMPP POST and hand the event to the global bot.

        Posts missing a 'from' or 'to' field are ignored; any other failure
        is logged via handle_exception() and still answered with 200.
        """
        try:
            logging.info("XMPP incoming: %s" % self.request.remote_addr)
            # GAE delivers XMPP messages as form posts; both endpoints must be present.
            if not self.request.POST.has_key('from'):
                logging.debug('no from in POST: %s' % str(self.request.POST))
                return
            if not self.request.POST.has_key('to'):
                logging.debug('no to in POST: %s' % str(self.request.POST))
                return
            event = XMPPEvent(bot=bot).parse(self.request, self.response)
            bot.doevent(event)
        except Exception, ex:
            handle_exception()
            #self.send_error(500)
## the application

application = webapp.WSGIApplication([('/_ah/xmpp/message/chat/', XMPPHandler)],
                                     debug=True)

## main

def main():
    """ run the XMPP WSGI application. """
    # No 'global' declarations needed: main() only reads the module-level
    # names, it never rebinds them.
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
| Python |
# handler_warmup.py
#
#
""" xmpp request handler. """
## jsb imports
from jsb.version import getversion
## google imports
import webapp2
## basic imports
import sys
import time
import types
import logging
## greet
logging.warn(getversion('WARMUP'))
## classes
class WarmupHandler(webapp2.RequestHandler):
    """ answer GAE warmup requests: any GET or POST on any url just logs. """

    def get(self, url=None):
        logging.warn("warmup")

    # POST requests are treated exactly like GET requests.
    post = get
## the application

application = webapp2.WSGIApplication([webapp2.Route(r'<url:.*>', WarmupHandler)],
                                      debug=True)

## main

def main():
    """ run the warmup WSGI application. """
    # No 'global' declaration needed: main() only reads 'application'.
    application.run()

if __name__ == "__main__":
    main()
| Python |
# handler_hubbub.py
#
#
## jsb imports

from jsb.contrib import feedparser
from jsb.lib.plugins import plugs
from jsb.utils.exception import handle_exception
from jsb.version import getversion
## google imports
from google.appengine.api import urlfetch
from google.appengine.api import xmpp
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import xmpp_handlers
## basic imports
import base64
import logging
import urllib
import urlparse
import uuid
logging.warn(getversion('GAE HUBBUB'))

# Load the hubbub plugin once at module import so the callback handler
# below can relay notifications to its watcher.
if not plugs.has_key("jsb.plugs.common.hubbub"):
    p = plugs.load("jsb.plugs.common.hubbub")
else:
    p = plugs["jsb.plugs.common.hubbub"]
class CallbackHandler(webapp.RequestHandler):
def get(self):
logging.warn('hubbub - incoming GET')
if self.request.GET['hub.mode'] == 'unsubscribe':
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(self.request.GET['hub.challenge'])
return
if self.request.GET['hub.mode'] != 'subscribe':
self.error(400)
return
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(self.request.GET['hub.challenge'])
def post(self):
"""Handles new content notifications."""
logging.warn("hubbub - incoming POST")
try:
p.watcher.incoming(self.request.body)
except IndexError:
logging.error("hubbub plugin did not load properly")
except Exception, ex:
handle_exception()
self.send_error(500)
# The application: the hub posts to /hubbub.
application = webapp.WSGIApplication([('/(?:hubbub)', CallbackHandler)], debug=False)

def main():
    """ run the hubbub callback WSGI application. """
    run_wsgi_app(application)

if __name__ == '__main__':
    main()
| Python |
# handler_web.py
#
#
""" web request handler. """
import time
import logging
## jsb imports
from jsb.version import getversion
from jsb.utils.exception import handle_exception
from jsb.lib.channelbase import ChannelBase
## gaelib import
from jsb.utils.gae.auth import finduser
from jsb.utils.gae.web import start, closer, loginurl, logouturl, login
## google imports
from webapp2 import RequestHandler, Route, WSGIApplication
from google.appengine.api import channel
## basic imports
import sys
import time
import types
import os
import logging
import google
## init
logging.info(getversion('GAE WEB'))
## classes
class HomePageHandler(RequestHandler):
""" the bots web command dispatcher. """
def options(self):
self.response.headers.add_header("Allow: *")
def get(self):
""" show basic page. """
logging.warn("web_handler - in")
try:
user = finduser()
if not user:
login(self.response, {'appname': 'JSONBOT' , 'who': 'not logged in yet', 'loginurl': 'not logged in', 'logouturl': 'JSONBOT', 'onload': 'consoleinit();'})
else:
logout = logouturl(self.request, self.response)
#channel = ChannelBase(user, botname="gae-web")
#(chan, token) = channel.gae_create()
#logging.warn("%s - %s" % (chan, token))
#channel.save()
start(self.response, {'appname': 'JSONBOT' , 'who': user, 'loginurl': 'logged in', 'logouturl': logout, 'onload': 'consoleinit();'})
except google.appengine.runtime.DeadlineExceededError:
self.response.out.write("DeadLineExceededError .. this request took too long to finish.")
except Exception, ex:
self.response.out.write("An exception occured: %s" % str(ex))
handle_exception()
try: os._exit(1)
except: pass
logging.warn("web_handler - out")
## the application

application = WSGIApplication([('/', HomePageHandler)],
                              debug=True)

## main

def main():
    """ run the web WSGI application. """
    # No 'global' declaration needed: main() only reads 'application'.
    application.run()

if __name__ == "__main__":
    main()
| Python |
# handler_wave.py
#
#
""" this handler handles all the wave jsonrpc requests. """
## jsb imports
from jsb.version import getversion
from jsb.lib.errors import NoSuchCommand
from jsb.lib.boot import boot
## gaelib imports
from jsb.drivers.gae.wave.bot import WaveBot
## basic imports
import logging
import os
## defines

logging.info(getversion('GAE WAVE'))
boot()

# the bot

# The single global wave bot, serving the googlewave.com domain.
bot = WaveBot(domain="googlewave.com")

def main():
    """ run the wave robot. """
    bot.run()

if __name__ == "__main__":
    main()
| Python |
# jsb/upload/handler_openid.py
#
#
""" Openid handler. """
## jsb imports
from jsb.utils.gae.web import loginurl
from jsb.version import getversion
from jsb.utils.exception import handle_exception
from jsb.utils.lazydict import LazyDict
## google imports
from google.appengine.api import users
import webapp2 as webapp
## basic imports
import os
import logging
import urlparse
import urllib
import warnings
import socket
## classes
class OpenIdLoginHandler(webapp.RequestHandler):
def create_openid_url(self, continue_url):
continue_url = urlparse.urljoin(self.request.url, continue_url)
return "/_ah/login?continue=%s" % urllib.quote(continue_url)
def get(self):
try:
cont = self.request.get('continue')
logging.info('openid - login form %s' % cont)
urlstring = self.create_openid_url(cont)
template_values = LazyDict({
'continue': cont,
'urlstring': urlstring,
'appname': getversion()
})
try: host = socket.gethostname()
except AttributeError:
if os.environ.get('HTTP_HOST'): host = os.environ['HTTP_HOST']
else: host = os.environ['SERVER_NAME']
inputdict = {'version': getversion(), 'host': host}
template_values.update(inputdict)
temp = os.path.join(os.getcwd(), 'templates/login.html')
outstr = template_values.render(temp)
self.response.out.write(outstr)
except Exception, ex:
handle_exception()
def post(self):
try:
cont = self.request.get('continue')
conturl = self.create_openid_url(cont)
logging.info('openid - %s' % cont)
openid = self.request.get('openid_identifier')
if openid:
login_url = users.create_login_url(cont, None, openid)
logging.info('openid - redirecting to url %s (%s)' % (login_url, openid))
self.redirect(login_url)
else:
logging.warn("denied access for %s - %s - %s" % (self.request.remote_addr, cont, openid))
self.send_error(400)
except Exception, ex:
handle_exception()
## the application

application = webapp.WSGIApplication([
    ('/_ah/login_required', OpenIdLoginHandler)],
    debug=True)

## main

def main():
    """ run the openid WSGI application. """
    # No 'global' declaration needed: main() only reads 'application'.
    application.run()

if __name__ == "__main__":
    main()
| Python |
# handler_task.py
#
#
""" jsb task handler. """
## jsb imports
from jsb.lib.plugins import plugs
from jsb.version import getversion
from jsb.utils.exception import handle_exception
from jsb.lib.tasks import taskmanager
## google imports
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
## simplejson import
from waveapi.simplejson import loads
## basic imports
import wsgiref.handlers
import logging
import google
## vars

# Plugins that need to run periodically and the task names they serve
# (mountpoints become /tasks/<name> urls below).
periodicals = ['jsb.plugs.common.rss', 'jsb.plugs.core.botevent']
mountpoints = ['rss', 'botevent']

##

logging.info(getversion('TASK'))

# Make sure the periodical plugins are freshly loaded before dispatching.
for plugin in periodicals:
    plugs.reload(plugin)
class TaskHandler(webapp.RequestHandler):
""" the bots task handler. """
def get(self):
try:
""" this is where the task gets dispatched. """
path = self.request.path
if path.endswith('/'):
path = path[:-1]
taskname = path.split('/')[-1].strip()
logging.debug("using taskname: %s" % taskname)
inputdict = {}
for name, value in self.request.environ.iteritems():
if not 'wsgi' in name:
inputdict[name] = value
taskmanager.dispatch(taskname, inputdict, self.request, self.response)
except google.appengine.runtime.DeadlineExceededError:
return
except Exception, ex:
handle_exception()
#self.send_error(500)
def post(self):
""" this is where the task gets dispatched. """
try:
path = self.request.path
if path.endswith('/'):
path = path[:-1]
taskname = path.split('/')[-1].strip()
logging.debug("using taskname: %s taken from %s" % (taskname, path))
if not taskname:
return
inputdict = {}
for name, value in self.request.environ.iteritems():
if not 'wsgi' in name:
inputdict[name] = value
taskmanager.dispatch(taskname, inputdict, self.request, self.response)
except google.appengine.runtime.DeadlineExceededError:
return
except Exception, ex:
handle_exception()
#self.send_error(500)
# the application

# Mount every task name under /tasks/<name>.
mountlist = []
for mount in mountpoints:
    mountlist.append(('/tasks/%s' % mount, TaskHandler))
application = webapp.WSGIApplication(mountlist, debug=True)

def main():
    """ run the task WSGI application. """
    # No 'global' declaration needed: main() only reads 'application'.
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
| Python |
# jsb common plugins
#
#

""" this package contains all the plugins common to all drivers. """

import os

# Build the plugin list from the package directory: every .py module and
# every non-hidden subdirectory becomes a plugin name.
(f, tail) = os.path.split(__file__)
__all__ = []
for i in os.listdir(f):
    if i.endswith('.py'):
        __all__.append(i[:-3])
    elif os.path.isdir(f + os.sep + i) and not i.startswith('.'):
        __all__.append(i)

# list.remove raises only ValueError when the item is absent; a bare
# 'except:' would also swallow KeyboardInterrupt and SystemExit.
try:
    __all__.remove('__init__')
except ValueError:
    pass

__plugs__ = __all__
| Python |
# jsb common plugins
#
#

""" this package contains all the plugins common to all drivers. """

import os

# Build the plugin list from the package directory: every .py module and
# every non-hidden subdirectory becomes a plugin name.
(f, tail) = os.path.split(__file__)
__all__ = []
for i in os.listdir(f):
    if i.endswith('.py'):
        __all__.append(i[:-3])
    elif os.path.isdir(f + os.sep + i) and not i.startswith('.'):
        __all__.append(i)

# list.remove raises only ValueError when the item is absent; a bare
# 'except:' would also swallow KeyboardInterrupt and SystemExit.
try:
    __all__.remove('__init__')
except ValueError:
    pass

__plugs__ = __all__
| Python |
# jsb common plugins
#
#

""" this package contains all the plugins common to all drivers. """

import os

# Build the plugin list from the package directory: every .py module and
# every non-hidden subdirectory becomes a plugin name.
(f, tail) = os.path.split(__file__)
__all__ = []
for i in os.listdir(f):
    if i.endswith('.py'):
        __all__.append(i[:-3])
    elif os.path.isdir(f + os.sep + i) and not i.startswith('.'):
        __all__.append(i)

# list.remove raises only ValueError when the item is absent; a bare
# 'except:' would also swallow KeyboardInterrupt and SystemExit.
try:
    __all__.remove('__init__')
except ValueError:
    pass

__plugs__ = __all__
| Python |
# jsb common plugins
#
#

""" this package contains all the plugins common to all drivers. """

import os

# Build the plugin list from the package directory: every .py module and
# every non-hidden subdirectory becomes a plugin name.
(f, tail) = os.path.split(__file__)
__all__ = []
for i in os.listdir(f):
    if i.endswith('.py'):
        __all__.append(i[:-3])
    elif os.path.isdir(f + os.sep + i) and not i.startswith('.'):
        __all__.append(i)

# list.remove raises only ValueError when the item is absent; a bare
# 'except:' would also swallow KeyboardInterrupt and SystemExit.
try:
    __all__.remove('__init__')
except ValueError:
    pass

__plugs__ = __all__
| Python |
# jsb/utils/trace.py
#
#
""" trace related functions """
## basic imports
import sys
import os
## defines

# Path components that mark a package root: the module-path walks in the
# functions below stop as soon as one of these components is seen.
stopmarkers = ['waveapi', 'jsb', 'myplugs', 'python2.5', 'python2.6']
## calledfrom function
def calledfrom(frame):
""" return the plugin name where given frame occured. """
try:
filename = frame.f_back.f_code.co_filename
plugfile = filename.split(os.sep)
if plugfile:
mod = []
for i in plugfile[::-1]:
mod.append(i)
if i in stopmarkers: break
modstr = '.'.join(mod[::-1])[:-3]
if 'handler_' in modstr: modstr = modstr.split('.')[-1]
except AttributeError: modstr = None
del frame
return modstr
## callstack function

def callstack(frame):
    """ return the callstack trace as a list of 'module:lineno' strings.

    Walks f_back from the given frame; the walk (and the loop) ends when
    any exception occurs, in particular the AttributeError raised once
    f_back is None at the top of the stack.
    """
    result = []
    loopframe = frame
    while 1:
        try:
            filename = loopframe.f_back.f_code.co_filename
            plugfile = filename.split(os.sep)
            if plugfile:
                mod = []
                # collect components right-to-left up to a package root marker
                for i in plugfile[::-1]:
                    mod.append(i)
                    if i in stopmarkers: break
                modstr = '.'.join(mod[::-1])[:-3]
                if 'handler_' in modstr: modstr = modstr.split('.')[-1]
                result.append("%s:%s" % (modstr, loopframe.f_back.f_lineno))
            loopframe = loopframe.f_back
        except: break
    # drop the frame reference to avoid reference cycles
    del frame
    return result
## whichmodule function
def whichmodule(depth=1):
    """ return filename:lineno of the module. """
    try:
        frame = sys._getframe(depth)
        components = frame.f_back.f_code.co_filename[:-3].split('/')
        lineno = frame.f_back.f_lineno
        pieces = []
        for component in components[::-1]:
            pieces.append(component)
            if component in stopmarkers:
                break
        modstr = '.'.join(pieces[::-1]) + ':' + str(lineno)
        if 'handler_' in modstr:
            modstr = modstr.split('.')[-1]
    except AttributeError:
        modstr = None
    del frame
    return modstr
## whichplugin function
def whichplugin(depth=1):
    """ return filename:lineno of the module. """
    try:
        frame = sys._getframe(depth)
        components = frame.f_back.f_code.co_filename[:-3].split('/')
        lineno = frame.f_back.f_lineno
        pieces = []
        for component in components[::-1]:
            pieces.append(component)
            if component in stopmarkers:
                break
        modstr = '.'.join(pieces[::-1])
        if 'handler_' in modstr:
            modstr = modstr.split('.')[-1]
    except AttributeError:
        modstr = None
    del frame
    return modstr
| Python |
# jsb/utils/opts.py
#
#
""" options related functions. """
## jsb imports
from jsb.lib.errors import NameNotSet
from jsb.lib.config import Config
from jsb.version import getversion
from jsb.utils.name import stripname
## basic imports
import os
import uuid
import logging
import optparse
## EventOptionParser class

class EventOptionParser(optparse.OptionParser):
    """ option parser that never terminates the process.

        optparse invokes error(msg) on a bad command line, and exit(status,
        msg) from error()/--help; both normally call sys.exit().  for parsing
        in-bot event text a parse failure must be harmless, so both are
        no-ops here.  BUGFIX: the overrides must accept the arguments
        optparse passes, otherwise the callback itself raised TypeError.
    """

    def exit(self, status=0, msg=None):
        """ swallow the exit optparse requests. """
        pass

    def error(self, msg=None):
        """ ignore parse errors instead of exiting. """
        pass
## makeopts function

def makeircopts(txt=""):
    """ create commandline parser options for the irc bot.

        when *txt* is given it is parsed instead of sys.argv (in-bot use);
        leftover positional arguments are attached as opts.args.
    """
    parser = optparse.OptionParser(usage='usage: %prog [options]', version=getversion())
    # optparse drops empty option strings, so ('', '-r') registers only -r
    parser.add_option('', '-r', type='string', default=False, dest='doresume', metavar='PATH',
        help="resume the bot from the folder specified")
    parser.add_option('-d', '--datadir', type='string', default=False, dest='datadir', help="datadir of the bot")
    parser.add_option('-o', '--owner', type='string', default=False, dest='owner', help="owner of the bot")
    parser.add_option('-s', '--server', type='string', default=False, dest='server', help="server to connect to (irc)")
    parser.add_option('-c', '--channel', type='string', default=False, dest='channel', help="channel to join")
    parser.add_option('-l', '--loglevel', type='string', default="", dest='loglevel', help="loglevel of the bot")
    parser.add_option('-p', '--password', type='string', default=False, dest='password', help="set password used to connect to the server")
    parser.add_option('', '--name', type='string', default=False, dest='name', help="bot's name")
    parser.add_option('', '--port', type='string', default=False, dest='port', help="set port of server to connect to")
    parser.add_option('', '--save', action='store_true', default=False, dest='save', help="save to config file")
    parser.add_option('-n', '--nick', type='string', default=False, dest='nick', help="nick of the bot")
    parser.add_option('', '--ssl', action='store_true', default=False, dest='ssl', help="use ssl")
    parser.add_option('-y', '--nossl', action='store_true', default=False, dest='nossl', help="don't use ssl")
    parser.add_option('-6', '--ipv6', action='store_true', default=False, dest='ipv6', help="enable ipv6 bot")
    if txt: opts, args = parser.parse_args(txt.split())
    else: opts, args = parser.parse_args()
    opts.args = args
    return opts
## makeconvoreopts

def makeconvoreopts(txt=""):
    """ create commandline parser options for the convore bot.

        when *txt* is given it is parsed instead of sys.argv (in-bot use);
        leftover positional arguments are attached as opts.args.
    """
    parser = optparse.OptionParser(usage='usage: %prog [options]', version=getversion())
    parser.add_option('-d', '--datadir', type='string', default=False, dest='datadir', help="datadir of the bot")
    parser.add_option('-o', '--owner', type='string', default=False, dest='owner', help="owner of the bot")
    parser.add_option('-l', '--loglevel', type='string', default="", dest='loglevel', help="loglevel of the bot")
    parser.add_option('-p', '--password', type='string', default=False, dest='password', help="set password used to connect to the server")
    parser.add_option('-c', '--channel', type='string', default=False, dest='channel', help="channel to join")
    parser.add_option('', '--name', type='string', default=False, dest='name', help="bot's name")
    parser.add_option('-u', '--username', type='string', default=False, dest='username', help="username of the bot")
    if txt: opts, args = parser.parse_args(txt.split())
    else: opts, args = parser.parse_args()
    opts.args = args
    return opts
## makexmppopts

def makesxmppopts(txt=""):
    """ create commandline parser options for the xmpp bot.

        when *txt* is given it is parsed instead of sys.argv (in-bot use);
        leftover positional arguments are attached as opts.args.
    """
    parser = optparse.OptionParser(usage='usage: %prog [options]', version=getversion())
    # optparse drops empty option strings, so ('', '-r') registers only -r
    parser.add_option('', '-r', type='string', default=False, dest='doresume', metavar='PATH',
        help="resume the bot from the folder specified")
    parser.add_option('-d', '--datadir', type='string', default=False, dest='datadir', help="datadir of the bot")
    parser.add_option('-u', '--user', type='string', default=False, dest='user', help="JID of the bot")
    parser.add_option('-o', '--owner', type='string', default=False, dest='owner', help="owner of the bot")
    parser.add_option('-s', '--server', type='string', default=False, dest='server', help="server to connect to (irc)")
    parser.add_option('-c', '--channel', type='string', default=False, dest='channel', help="channel to join")
    parser.add_option('-l', '--loglevel', type='string', default="", dest='loglevel', help="loglevel of the bot")
    parser.add_option('-p', '--password', type='string', default=False, dest='password', help="set password used to connect to the server")
    parser.add_option('', '--name', type='string', default=False, dest='name', help="bot's name")
    parser.add_option('', '--port', type='string', default=False, dest='port', help="set port of server to connect to")
    parser.add_option('', '--save', action='store_true', default=False, dest='save', help="save to config file")
    parser.add_option('-n', '--nick', type='string', default=False, dest='nick', help="nick of the bot")
    if txt: opts, args = parser.parse_args(txt.split())
    else: opts, args = parser.parse_args()
    opts.args = args
    return opts
## makeconsoleopts
def makeconsoleopts():
""" create option parser for events. """
parser = optparse.OptionParser(usage='usage: %prog [options]', version=getversion("CONSOLE"))
parser.add_option('-d', '--datadir', type='string', default=False, dest='datadir', help="datadir of the bot")
parser.add_option('-l', '--loglevel', type='string', default="", dest='loglevel', help="loglevel of the bot")
parser.add_option('', '--name', type='string', default=False, dest='name', help="bot's name")
parser.add_option('-x', '--exec', type='string', default="", dest='command', help="give a command to execute")
# parser.add_option('-z', '--forward', action='store_true', default=False, dest='forward', help="enable forwarding bot")
try: opts, args = parser.parse_args()
except Exception, ex: logging.warn("opts - can't parse %s" % txt) ; return
opts.args = args
return opts
## makefleetopts function
def makefleetopts():
""" create option parser for events. """
parser = optparse.OptionParser(usage='usage: %prog [options] [list of bot names]', version=getversion("FLEET"))
parser.add_option('-a', '--all', action='store_true', default=False, dest='all', help="show available fleet bots")
parser.add_option('-s', '--show', action='store_true', default=False, dest='show', help="print available fleet bots")
parser.add_option('-d', '--datadir', type='string', default=False, dest='datadir', help="datadir of the bot")
parser.add_option('-l', '--loglevel', type='string', default="", dest='loglevel', help="loglevel of the bot")
parser.add_option('-o', '--owner', type='string', default=False, dest='owner', help="owner of the bot")
parser.add_option('', '-r', type='string', default=False, dest='doresume', metavar='PATH',
help="resume the bot from the folder specified")
try: opts, args = parser.parse_args()
except Exception, ex: logging.warn("opts - can't parse %s" % txt) ; return
opts.args = args
return opts
## makeeventopts function

def makeeventopts(txt):
    """ create option parser for events.

        uses EventOptionParser so bad options inside bot commands never call
        sys.exit(); returns None when parsing fails.
    """
    parser = EventOptionParser()
    parser.add_option('-c', '--chan', type='string', default=False, dest='channel', help="target channel")
    parser.add_option('-s', '--silent', action='store_true', default=False, dest='silent', help="give bot response in /pm")
    try: opts, args = parser.parse_args(txt.split())
    except Exception, ex: logging.warn("opts - can't parse %s" % txt) ; return
    opts.args = args
    return opts
## makeconfig function

def makeconsoleconfig(opts=None, botname=None):
    """ make console config file based on options.

        BUGFIX: *opts* defaults to None but the old code unconditionally
        read opts.name, raising AttributeError when called without options;
        now guarded like the sibling make*config helpers.
    """
    if not opts: botname = botname or "default-console"
    else:
        if not botname: botname = opts.name or "default-console"
    botname = stripname(botname)
    cfg = Config('fleet' + os.sep + botname + os.sep + 'config')
    cfg.type = "console"
    cfg.botname = botname
    if opts and opts.loglevel: cfg.loglevel = opts.loglevel
    else: cfg.loglevel = cfg.loglevel or "error"
    return cfg
## makeircconfig function

def makeircconfig(opts=None, botname=None):
    """ make irc config file based on options.

        without *opts* the stored config is only backfilled with defaults;
        with *opts* the given values override the stored ones.
    """
    if not opts: botname = botname or "default-irc"
    else:
        if not botname: botname = opts.name or "default-irc"
    botname = stripname(botname)
    cfg = Config('fleet' + os.sep + botname + os.sep + 'config')
    cfg.bottype = 'irc'
    cfg.botname = botname
    if not opts:
        # no commandline input: keep stored values, fill in defaults
        cfg.password = cfg.password or ""
        cfg.ssl = cfg.ssl or False
        cfg.port = cfg.port or 6667
        cfg.server = cfg.server or "localhost"
        cfg.owner = cfg.owner or []
        cfg.ipv6 = cfg.ipv6 or False
        cfg.nick = cfg.nick or "jsb"
        return cfg
    if opts.password: cfg.password = opts.password
    if opts.ssl: cfg.ssl = True
    if opts.nossl: cfg.ssl = False
    if opts.port: cfg.port = opts.port
    # NOTE(review): absent --port resets a stored port back to 6667 instead
    # of keeping it (unlike server/nick below) -- confirm intended.
    else: cfg.port = 6667
    if opts.server: cfg.server = opts.server
    else: cfg.server = cfg.server or "localhost"
    if not cfg.owner: cfg.owner = []
    if opts.owner and opts.owner not in cfg.owner: cfg.owner.append(opts.owner)
    if opts.ipv6: cfg.ipv6 = opts.ipv6
    if opts.nick: cfg.nick = opts.nick
    else: cfg.nick = cfg.nick or "jsb"
    return cfg
## makexmppconfig function
def makesxmppconfig(opts=None, botname=None):
""" make config file based on options. """
if not opts: botname = botname or "default-sxmpp"
else:
if not botname: botname = opts.name or "default-sxmpp"
botname = stripname(botname)
cfg = Config('fleet' + os.sep + botname + os.sep + 'config')
cfg.type = "sxmpp"
cfg.botname = botname
if not opts:
cfg.user = cfg.user or ""
cfg.host = cfg.host or ""
cfg.password = cfg.passord or ""
cfg.server = cfg.server or ""
cfg.owner = cfg.owner or []
cfg.loglevel = cfg.lowlevel or "warn"
cfg.nick = cfg.nick or "jsb"
return cfg
if opts.user: cfg.user = opts.user
else: cfg.user = cfg.user or "%s@jsonbot.org" % cfg.uuid
if opts.user:
try: cfg.host = opts.user.split('@')[1]
except ValueError: print "user is not in the nick@server format"
if not cfg.host:
try: cfg.host = cfg.user.split('@')[1]
except ValueError: print "user is not in the nick@server format"
if opts.password: cfg.password = opts.password
if opts.server: cfg.server = opts.server
else: cfg.server = cfg.server or "localhost"
if opts.name: cfg.jid = opts.name
if not cfg.owner: cfg.owner = []
if opts.owner and opts.owner not in cfg.owner: cfg.owner.append(opts.owner)
if opts.nick: cfg.nick = opts.nick
else: cfg.nick = cfg.nick or "jsb"
return cfg
| Python |
# jsb/utils/mainloop.py
#
#
""" main loop used in jsb binairies. """
## jsb imports
from jsb.lib.eventhandler import mainhandler
from jsb.lib.exit import globalshutdown
from jsb.utils.exception import handle_exception
## basic imports
import os
import time
## mainloop function

def mainloop():
    """ function to be used as mainloop.

        polls the global event handler once per second until interrupted.
        KeyboardInterrupt (ctrl-c) triggers a clean shutdown with exit code
        0; any other exception is reported and exits with code 1.
    """
    while 1:
        try:
            time.sleep(1)
            mainhandler.handle_one()
        except KeyboardInterrupt: break
        except Exception, ex:
            # NOTE(review): handle_exception is not among this module's
            # visible imports -- it must be imported from
            # jsb.utils.exception or this line raises NameError when an
            # error actually occurs.
            handle_exception()
            globalshutdown()
            os._exit(1)
    globalshutdown()
    os._exit(0)
| Python |
# jsb/utils/locking.py
#
#
""" locking functions """
## jsb imports
from exception import handle_exception
from trace import whichmodule
from lockmanager import lockmanager, rlockmanager
from jsb.lib.threads import getname
## generic imports
import logging
import sys
## Locked-class

class Locked(object):
    """ class used to lock an entire object. UNTESTED"""

    def __getattribute__(self, attr):
        # every attribute access is serialized through the rlock manager
        where = whichmodule(1)
        logging.debug('locking - locking on %s' % where)
        # NOTE(review): the lock key passed here is the *builtin* ``object``
        # type, not ``self`` -- so every Locked instance shares one global
        # lock.  confirm whether a per-instance lock was intended.
        rlockmanager.acquire(object)
        res = None
        try: res = super(Locked, self).__getattribute__(attr)
        finally: rlockmanager.release(object)
        return res
## lockdec function

def lockdec(lock):
    """ locking decorator. """
    def locked(func):
        """ locking function for %s """ % str(func)
        def lockedfunc(*args, **kwargs):
            """ the locked function. """
            where = whichmodule(2)
            logging.debug('locking - locking on %s' % where)
            lock.acquire()
            result = None
            try:
                result = func(*args, **kwargs)
            finally:
                # release must never raise out of the finally clause
                try:
                    lock.release()
                    logging.debug('locking - releasing %s' % where)
                except:
                    pass
            return result
        return lockedfunc
    return locked
## locked decorator

def locked(func):
    """ function locking decorator """
    def lockedfunc(*args, **kwargs):
        """ the locked function. """
        key = getname(str(func))
        try:
            rlockmanager.acquire(key)
            result = func(*args, **kwargs)
        finally:
            rlockmanager.release(key)
        return result
    return lockedfunc
| Python |
# jsb/utils/rsslist.py
#
#
""" create a list of rss data """
## jsb imports
from exception import handle_exception
## basic imports
import xml.dom.minidom
## gettext function

def gettext(nodelist):
    """ get text data from nodelist """
    chunks = []
    for node in nodelist:
        # only plain text and CDATA sections carry character data
        if node.nodeType in (node.TEXT_NODE, node.CDATA_SECTION_NODE):
            data = node.data.strip()
            if data:
                chunks.append(data)
    return "".join(chunks)
def makersslist(xlist, nodes, d=None):
    """ recurse until txt is found.

        walks the dom *nodes*: each element node gets a nested dict, every
        non-empty nested dict is appended to *xlist*, and element text
        content replaces the dict entry.
    """
    # BUGFIX: the old signature used a mutable default (d={}) that is shared
    # across calls, leaking keys from previous invocations.
    if d is None: d = {}
    for i in nodes:
        if i.nodeType == i.ELEMENT_NODE:
            dd = d[i.nodeName] = {}
            makersslist(xlist, i.childNodes, dd)
            if dd: xlist.append(dd)
            txt = gettext(i.childNodes)
            if txt: d[i.nodeName] = txt
def rsslist(txt):
    """ create list of dictionaries with rss data """
    dom = xml.dom.minidom.parseString(txt)
    collected = []
    makersslist(collected, dom.childNodes)
    return collected
| Python |
# gozerbot/dol.py
#
#
""" dict of lists """
class Dol(dict):
    """ dol is dict of lists: every key maps to a list of issues. """

    def insert(self, nr, item, issue):
        """ insert issue at position nr of item's list """
        if item in self: self[item].insert(nr, issue)
        else: self[item] = [issue]
        return True

    def add(self, item, issue):
        """ append issue to item's list """
        if item in self: self[item].append(issue)
        else: self[item] = [issue]
        return True

    def adduniq(self, item, issue):
        """ only add issue to item if it is not already there """
        if item in self:
            if issue in self[item]: return False
        self.add(item, issue)
        return True

    def delete(self, item, number):
        """ del self[item][number] .. False when item or index is missing """
        number = int(number)
        if item in self:
            try:
                del self[item][number]
                return True
            except IndexError: return False
        # BUGFIX: previously fell through and returned None for unknown items
        return False

    def remove(self, item, issue):
        """ remove issue from item .. None when nothing was removed """
        try:
            self[item].remove(issue)
            return True
        # BUGFIX: also swallow KeyError so a missing item no longer raises,
        # matching the tolerant style of has()
        except (ValueError, KeyError): pass

    def has(self, item, issue):
        """ check if item has issue """
        try:
            if issue in self[item]: return True
            else: return False
        except KeyError: pass
| Python |
# jsb/utils/statdict.py
#
#
""" dictionairy to keep stats. """
## jsb imports
from jsb.utils.lazydict import LazyDict
## classes
class StatDict(LazyDict):
    """ dictionary to hold stats """

    def set(self, item, value):
        """ set item to value """
        self[item] = value

    def upitem(self, item, value=1):
        """ increase item """
        # first sighting initialises the counter to *value*
        if not self.has_key(item):
            self[item] = value
            return
        self[item] += value

    def downitem(self, item, value=1):
        """ decrease item """
        # NOTE(review): a first sighting sets the counter to +value (not
        # -value), mirroring upitem -- confirm the asymmetry is intended.
        if not self.has_key(item):
            self[item] = value
            return
        self[item] -= value

    def top(self, start=1, limit=None):
        """ return highest items """
        result = []
        for item, value in self.iteritems():
            if value >= start: result.append((item, value))
        # descending by count (python2 cmp-style comparator)
        result.sort(lambda b, a: cmp(a[1], b[1]))
        if limit: result = result[:limit]
        return result

    def down(self, end=100, limit=None):
        """ return lowest items """
        result = []
        for item, value in self.iteritems():
            if value <= end: result.append((item, value))
        # ascending by count
        result.sort(lambda a, b: cmp(a[1], b[1]))
        if limit: return result[:limit]
        else: return result
| Python |
# jsb/utils/id.py
#
#
""" id related functions. """
## jsb imports
from jsb.utils.generic import toenc
## basic imports
import uuid
## getrssid function

def getrssid(url, time):
    """ get an id based on url and time.

        returns a deterministic uuid3 string, so the same (url, time) pair
        always maps to the same id.  *time* is a value here, not the module.
    """
    key = unicode(url) + unicode(time)
    return str(uuid.uuid3(uuid.NAMESPACE_DNS, toenc(key)))
| Python |
# jsb/utils/format.py
#
#
""" provide formatting functions. """
## jsb imports
from jsb.utils.name import stripname
from jsb.utils.url import striphtml
from jsb.utils.lazydict import LazyDict
## basic imports
import time
import os
import logging
from datetime import datetime
## formats
# Formats are defined here. simple also provides default values if values
# are not supplied by the format, as well as format 'simple'.
# Parameters that should be supplied:
# * timestamp_format: format of timestamp in log files
# * all strftime vars supported.
# * filename: file name for log
# * var channel : full channel ie. #dunkbot
# * var channel_name : channel without '#' ie. dunkbot
# * event_filename:
# if event_filename exists, then it will be used for
# logging events (seperate from chat)
# * var channel : full channel ie. #dunkbot
# * var channel_name : channel without '#' ie. dunkbot
# * separator: the separator between the timestamp and message
# per-format chatlog settings; see the comment block above for the meaning
# of each key.  'simple' differs from 'log' only in its separator; 'supy'
# mimics supybot's log layout and logs events inline (event_filename None).
formats = {
    'log': {
        'timestamp_format': '%Y-%m-%d %H:%M:%S',
        'basepath': None,
        'filename': 'chatlogs/%%(network)s/simple/%%(target)s.%Y%m%d.slog',
        'event_prefix': '',
        'event_filename': 'chatlogs/%%(network)s/simple/%%(channel_name)s.%Y%m%d.slog',
        'separator': ' ',
    },
    'simple': {
        'timestamp_format': '%Y-%m-%d %H:%M:%S',
        'basepath': None,
        'filename': 'chatlogs/%%(network)s/simple/%%(target)s.%Y%m%d.slog',
        'event_prefix': '',
        'event_filename': 'chatlogs/%%(network)s/simple/%%(channel_name)s.%Y%m%d.slog',
        'separator': ' | ',
    },
    'supy': {
        'timestamp_format': '%Y-%m-%dT%H:%M:%S',
        'filename': 'chatlogs/%%(network)s/supy/%%(target)s/%%(target)s.%Y-%m-%d.log',
        'event_prefix': '*** ',
        'event_filename': None,
        'separator': ' ',
    }
}

# default message layout
format = "%(message)s"
## functions

def format_opt(name, format="log"):
    """ return option *name* of the given chatlog format, or None when the
        format (or the option) is unknown. """
    try: fmt = formats[format]
    except KeyError: return None
    # BUGFIX: the old code looked the format up a second time with
    # formats.get(format, 'log'), whose fallback is the *string* 'log'
    # rather than the formats['log'] dict; the KeyError check above makes
    # the key valid, so that dead fallback (an AttributeError in waiting)
    # is gone and a single lookup suffices.
    opt = fmt.get(name)
    return opt
## formatevent function

def formatevent(bot, ievent, channels, forwarded=False):
    """ build a LazyDict describing *ievent* for chatlog output.

        m.txt is rendered per irc command; QUIT/NICK events are targeted at
        every logged channel the user shares with the bot.
    """
    m = {
        'datetime': datetime.now(),
        'separator': format_opt('separator'),
        'event_prefix': format_opt('event_prefix'),
        'network': bot.cfg.networkname,
        'nick': ievent.nick,
        'target': stripname(ievent.channel),
        'botname': bot.cfg.name,
        'txt': ievent.txt,
        'type': ievent.cbtype
    }
    m = LazyDict(m)
    if ievent.cmnd == 'PRIVMSG':
        # \001ACTION ... \001 is a CTCP "/me" action
        if ievent.txt.startswith('\001ACTION'): m.txt = '* %s %s' % (m.nick, ievent.txt[7:-1].strip())
        else:
            if bot.type == "irc": m.txt = '<%s> %s' % (m.nick, striphtml(ievent.txt))
            elif not forwarded: m.txt = '<%s> %s' % (m.nick, bot.normalize(ievent.txt))
            else: m.txt = bot.normalize(ievent.txt)
    elif ievent.cmnd == 'NOTICE':
        m.target = ievent.arguments[0]
        m.txt = "-%s- %s"%(ievent.nick, ievent.txt)
    elif ievent.cmnd == 'TOPIC': m.txt = '%s changes topic to "%s"'%(ievent.nick, ievent.txt)
    elif ievent.cmnd == 'MODE':
        margs = ' '.join(ievent.arguments[1:])
        m.txt = '%s sets mode: %s'% (ievent.nick, margs)
    elif ievent.cmnd == 'JOIN': m.txt = '%s (%s) has joined %s'%(ievent.nick, ievent.userhost, ievent.channel)
    elif ievent.cmnd == 'KICK': m.txt = '%s was kicked by %s (%s)'% (ievent.arguments[1], ievent.nick, ievent.txt)
    elif ievent.cmnd == 'PART': m.txt = '%s (%s) has left %s'% (ievent.nick, ievent.userhost, ievent.channel)
    elif ievent.cmnd in ('QUIT', 'NICK'):
        if not ievent.user or not ievent.user.data.channels:
            logging.debug("chatlog - can't find joined channels for %s" % ievent.userhost)
            return m
        cmd = ievent.cmnd
        nick = cmd == 'NICK' and ievent.txt or ievent.nick
        # BUGFIX: this loop referenced the undefined name "event" (NameError
        # at runtime) and the wrong attribute path; the guard above shows
        # the channel list lives at ievent.user.data.channels.  the
        # pointless "if True:" nesting is dropped as well.
        for c in ievent.user.data.channels:
            if [bot.cfg.name, c] in channels:
                if cmd == 'NICK': m.txt = '%s (%s) is now known as %s'% (ievent.nick, ievent.userhost, ievent.txt)
                else: m.txt= '%s (%s) has quit: %s'% (ievent.nick, ievent.userhost, ievent.txt)
                m.type = ievent.cmnd.lower()
                m.target = c
    elif ievent.cbtype == 'PRESENCE':
        if ievent.type == 'unavailable': m.txt = "%s left" % ievent.nick
        else: m.txt = "%s joined" % ievent.nick
    return m
| Python |
# jsb/utils/lockmanager.py
#
#
""" manages locks """
## basic imports
import thread
import threading
import logging
## LockManager class

class LockManager(object):
    """ place to hold locks, keyed by arbitrary names. """

    def __init__(self):
        # name -> lock mapping
        self.locks = {}
        # on google appengine (waveapi importable) there are no threads,
        # so locking is disabled entirely
        try:
            import waveapi
            self.gae = True
        except ImportError: self.gae = False

    def allocate(self, name):
        """ allocate a new lock """
        if self.gae: self.locks[name] = None
        else: self.locks[name] = thread.allocate_lock()
        logging.debug('lockmanager - allocated %s' % name)

    def get(self, name):
        """ get lock, allocating it on first use """
        if not self.locks.has_key(name): self.allocate(name)
        return self.locks[name]

    def delete(self, name):
        """ delete lock """
        if self.locks.has_key(name): del self.locks[name]

    def acquire(self, name):
        """ acquire lock (no-op on appengine) """
        if self.gae: return
        if not self.locks.has_key(name): self.allocate(name)
        logging.debug('lockmanager - *acquire* %s' % name)
        self.locks[name].acquire()

    def release(self, name):
        """ release lock (no-op on appengine) """
        if self.gae: return
        logging.debug('lockmanager - *releasing* %s' % name)
        # releasing an unlocked lock raises; treat that as best effort
        try: self.locks[name].release()
        except RuntimeError: pass
## RLockManager class

class RLockManager(LockManager):
    """ lock manager that hands out re-entrant locks. """

    def allocate(self, name):
        """ allocate a new lock """
        # NOTE(review): unlike LockManager.allocate this ignores self.gae --
        # confirm RLocks are safe on appengine here.
        self.locks[name] = threading.RLock()
        logging.debug('lockmanager - allocated RLock %s' % name)

## global lockmanagers
lockmanager = LockManager()
rlockmanager = RLockManager()
| Python |
# jsb/utils/generic.py
#
#
""" generic functions. """
## lib imports
from exception import handle_exception
from trace import calledfrom, whichmodule
from lazydict import LazyDict
from jsb.imports import getjson
json = getjson()
## generic imports
from stat import ST_UID, ST_MODE, S_IMODE
import time
import sys
import re
import getopt
import types
import os
import os.path
import random
import Queue
import logging
## istr class

class istr(str):
    # NOTE(review): marker subclass of str adding no behavior; presumably
    # used elsewhere to tag strings needing special treatment -- confirm.
    pass
## fix_format function

def fix_format(s):
    """ balance IRC bold (\\x02) and color (\\x03) control codes by
        appending a closing marker for every unmatched one. """
    for marker in (chr(2), chr(3)):
        if s.count(marker) % 2:
            s += marker
    return s
## isdebian function

def isdebian():
    """ checks if we are on debian. """
    marker = "/etc/debian_version"
    return os.path.isfile(marker)
## isjsbuser function

def botuser():
    """ return the name of the user the bot runs as ("" when unknown). """
    try:
        import getpass
        username = getpass.getuser()
    except ImportError:
        return ""
    return username
## checkpermission function

def checkpermissions(ddir, umode):
    """ see if ddir has umode permission and if not set them.

        silently does nothing on platforms without getuid (windows) or when
        the path does not exist; chown failures are ignored (best effort),
        chmod failures are logged.
    """
    try:
        uid = os.getuid()
        gid = os.getgid()
    except AttributeError: return
    try: stat = os.stat(ddir)
    except OSError: return
    if stat[ST_UID] != uid:
        # may fail when not running as root
        try: os.chown(ddir, uid, gid)
        except: pass
    if S_IMODE(stat[ST_MODE]) != umode:
        try: os.chmod(ddir, umode)
        except: handle_exception()
## jsonstring function

def jsonstring(s):
    """ convert s to a jsonstring. """
    # tuples serialize to json arrays; isinstance also covers tuple
    # subclasses, which the old "type(s) == types.TupleType" check missed.
    if isinstance(s, tuple): s = list(s)
    return json.dumps(s)
## getwho function

def getwho(bot, who):
    """ get userhost from bots userhost cache.

        on irc falls back to a WHOIS, then blocks up to 3 seconds waiting
        for the whois response to fill the cache; returns None when the
        user stays unknown.
    """
    who = who.lower()
    for user in bot.userhosts:
        if user.lower() == who: return bot.userhosts[user]
    if bot.type == "irc":
        bot.whois(who)
        time.sleep(3)
        for user in bot.userhosts:
            if user.lower() == who: return bot.userhosts[user]
## splitxt function

def splittxt(what, l=375):
    """ split output into seperate chunks of roughly *l* characters.

        the split point prefers a closing html tag or a space at/after the
        chunk boundary; when neither exists the remainder is one chunk.
    """
    txtlist = []
    start = 0
    end = l
    length = len(what)
    # BUGFIX: explicit floor division -- the old "length/end" produced a
    # float under python 3 semantics (or future division), making range()
    # raise TypeError.
    for i in range(length//end+1):
        starttag = what.find("</", end)
        if starttag != -1: endword = what.find('>', end) + 1
        else:
            endword = what.find(' ', end)
            if endword == -1: endword = length
        res = what[start:endword]
        if res: txtlist.append(res)
        start = endword
        end = start + l
    return txtlist
## getrandomnick function

def getrandomnick():
    """ return a random nick. """
    return "jsb-%s" % random.randint(0, 100)
## decodeperchar function

def decodeperchar(txt, encoding='utf-8', what=""):
    """ decode a string char by char. strip chars that can't be decoded.

        NOTE(review): decoding one byte at a time strips every multi-byte
        sequence for encodings like utf-8 -- only single-byte-decodable
        characters survive.
    """
    res = []
    nogo = []
    for i in txt:
        try: res.append(i.decode(encoding))
        except UnicodeDecodeError:
            # remember each undecodable character once, for the log line
            if i not in nogo: nogo.append(i)
    if nogo:
        if what: logging.debug("%s: can't decode %s characters to %s" % (what, nogo, encoding))
        else: logging.debug("%s - can't decode %s characters to %s" % (whichmodule(), nogo, encoding))
    return u"".join(res)
## toenc function

def toenc(what, encoding='utf-8'):
    """ convert to encoding.

        falsy input becomes the empty string; a value that cannot be
        coerced/encoded is logged and the UnicodeDecodeError re-raised.
    """
    if not what: what = u""
    try:
        w = unicode(what)
        return w.encode(encoding)
    except UnicodeDecodeError:
        logging.debug("%s - can't encode %s to %s" % (whichmodule(2), what, encoding))
        raise
## fromenc function

def fromenc(txt, encoding='utf-8', what=""):
    """ convert from encoding.

        falls back to per-character decoding (stripping what can't be
        decoded) when a straight decode fails.
    """
    if not txt: txt = u""
    try: return txt.decode(encoding)
    except UnicodeDecodeError:
        logging.debug("%s - can't decode %s - decoding per char" % (whichmodule(), encoding))
        return decodeperchar(txt, encoding, what)
## toascii function

def toascii(what):
    """ convert to ascii, replacing unencodable characters with '?'. """
    return what.encode('ascii', 'replace')
## tolatin1 function

def tolatin1(what):
    """ convert to latin-1, replacing unencodable characters with '?'. """
    return what.encode('latin-1', 'replace')
## strippedtxt function

def strippedtxt(what, allowed=[]):
    """ strip control characters (ord <= 31) from txt, keeping any that
        appear in *allowed*. """
    kept = [ch for ch in what if ord(ch) > 31 or (allowed and ch in allowed)]
    return ''.join(kept)
## stripcolor function

# mIRC color sequence: \003, a digit, the colored text, closing \003
REcolor = re.compile(r"\003\d(.+?)\003")

def matchcolor(match):
    """ return just the text enclosed by the color markers. """
    return match.group(1)

def stripcolor(txt):
    """ strip mIRC color codes from txt, keeping the colored text. """
    return REcolor.sub(lambda m: m.group(1), txt)
## uniqlist function

def uniqlist(l):
    """ return unique elements in a list (as list), keeping first-seen
        order; works for unhashable elements too. """
    seen = []
    for elem in l:
        if elem not in seen:
            seen.append(elem)
    return seen
## jabberstrip function

def jabberstrip(text, allowed=[]):
    """ strip control characters for jabber transmission, always keeping
        newline and tab plus anything in *allowed*. """
    ok = allowed + ['\n', '\t']
    return ''.join([ch for ch in text if ord(ch) > 31 or ch in ok])
## filesize function

def filesize(path):
    """ return filesize of a file. """
    return os.stat(path).st_size
## touch function

def touch(fname):
    """ touch a file. """
    os.close(os.open(fname, os.O_WRONLY | os.O_CREAT))
## stringinlist function

def stringinlist(s, l):
    """ check whether s occurs as a substring of any string in l. """
    for candidate in l:
        if s in candidate:
            return True
    return False
## stripped function

def stripped(userhost):
    """ return a stripped userhost (everything before the '/'). """
    return userhost.partition('/')[0]
## gethighest function

def gethighest(ddir, ffile):
    """ get filename with the highest extension (number).

        scans subdirectories of *ddir* whose name contains *ffile* for a
        numeric suffix and returns ffile with the next number appended.
    """
    highest = 0
    for entry in os.listdir(ddir):
        if not (os.path.isdir(ddir + os.sep + entry) and ffile in entry):
            continue
        try:
            seqnr = entry.split('.')[-1]
        except IndexError:
            continue
        try:
            nr = int(seqnr)
        except ValueError:
            continue
        if nr > highest:
            highest = nr
    return ffile + '.' + str(highest + 1)
## waitevents function

def waitevents(eventlist, millisec=5000):
    """ wait for all events in eventlist to finish and collect their
        queued results. """
    result = []
    for e in eventlist:
        # appengine bots have no waitable events
        if not e or e.bot.isgae: continue
        logging.warn("waitevents - waiting for %s" % e.txt)
        # NOTE(review): threading.Event.wait takes *seconds*; passing the
        # millisec value straight through waits far longer than intended --
        # confirm the unit.
        e.finished.wait(millisec)
        res = waitforqueue(e.resqueue, 5000)
        result.append(res)
        e.finished.clear()
    return result
## waitforqueue function

def waitforqueue(queue, timeout=5000, maxitems=None):
    """ wait for results to arrive in a queue. return list of results.

        polls with a 1ms sleep, so *timeout* counts idle polls (roughly
        milliseconds of idle time).  a None item terminates collection
        early, as does reaching *maxitems*.
    """
    result = []
    counter = 0
    while 1:
        try: res = queue.get_nowait()
        except Queue.Empty:
            time.sleep(0.001)
            counter += 1
            if counter > timeout: break
            continue
        if res == None: break
        result.append(res)
        if maxitems and len(result) == maxitems: break
    logging.debug("waitforqueue - result is %s items" % len(result))
    return result
## checkqueues function

def checkqueues(self, queues, resultlist):
    """ send every item of resultlist to every queue.

        returns True when at least one queue was serviced, False otherwise.
    """
    # BUGFIX: the old loop did "return True" inside the for body, so only
    # the FIRST queue ever received the results; now all queues are fed.
    if not queues: return False
    for queue in queues:
        for item in resultlist: queue.put_nowait(item)
    return True
## dosed function

def dosed(filename, sedstring):
    """ apply a sedstring (s/from/to/) to the file, in place.

        lines mentioning the project hosting urls are left untouched; a
        missing file is a silent no-op.
    """
    try: f = open(filename, 'r')
    except IOError: return
    tmp = filename + '.tmp'
    fout = open(tmp, 'w')
    seds = sedstring.split('/')
    fr = seds[1].replace('\\', '')
    to = seds[2].replace('\\', '')
    try:
        for line in f:
            if 'googlecode' in line or 'github' in line or 'google.com' in line or 'jsonbot.org' in line: l = line
            else: l = line.replace(fr, to)
            fout.write(l)
    finally:
        # BUGFIX: the input handle was never closed
        f.close()
        fout.flush()
        fout.close()
    try: os.rename(tmp, filename)
    except OSError:
        # windows cannot rename over an existing file.  BUGFIX: this caught
        # WindowsError, which does not exist off-windows and raised
        # NameError there; WindowsError subclasses OSError, so this is safe
        # on every platform.
        os.remove(filename)
        os.rename(tmp, filename)
def stringsed(instring, sedstring):
    """ apply a sedstring (s/from/to/) to a string. """
    parts = sedstring.split('/')
    source = parts[1].replace('\\', '')
    target = parts[2].replace('\\', '')
    return instring.replace(source, target)
def copyfile(filename, filename2, sedstring=None):
    """ copy a file, optionally applying a sed transform (s/from/to/).

        directories are skipped; missing source or unwritable destination
        are silent no-ops.  missing destination directories are created.
    """
    if os.path.isdir(filename): return
    try: f = open(filename, 'r')
    except IOError: return
    # BUGFIX: the old directory builder prepended os.sep to every component,
    # so a *relative* destination had its directories created at the
    # filesystem root; os.makedirs on the real dirname handles both cases.
    ddir = os.path.dirname(filename2)
    if ddir and not os.path.isdir(ddir):
        try: os.makedirs(ddir)
        except OSError: pass
    try: fout = open(filename2, 'w')
    except:
        f.close()
        return
    if sedstring:
        seds = sedstring.split('/')
        fr = seds[1].replace('\\', '')
        to = seds[2].replace('\\', '')
    try:
        for line in f:
            if sedstring:
                l = line.replace(fr, to)
            else: l = line
            fout.write(l)
    finally:
        # BUGFIX: the input handle was never closed
        f.close()
        fout.flush()
        fout.close()
| Python |
# jsb/utils/log.py
#
#
""" log module. """
## basic imports
import logging
import logging.handlers
import os
import os.path
import getpass
## defines

# textual loglevel names -> logging module constants
LEVELS = {'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'warn': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL
}

# reverse mapping, used by getloglevel()
RLEVELS = {logging.DEBUG: 'debug',
    logging.INFO: 'info',
    logging.WARNING: 'warn',
    logging.ERROR: 'error',
    logging.CRITICAL: 'critical'
}

# on google appengine (waveapi importable) there is no writable filesystem,
# so the log directory is only set up when waveapi is absent; failures to
# create it are deliberately ignored (logging falls back to stream only)
try:
    import waveapi
except ImportError:
    LOGDIR = os.path.expanduser("~") + os.sep + ".jsb" + os.sep + "botlogs" # BHJTW change this for debian
    try:
        ddir = os.sep.join(LOGDIR.split(os.sep)[:-1])
        if not os.path.isdir(ddir): os.mkdir(ddir)
    except: pass
    try:
        if not os.path.isdir(LOGDIR): os.mkdir(LOGDIR)
    except: pass

# log line layouts; the long `format` is used at debug level only
format_short = "[!] %(asctime)-8s - %(module)+10s.%(funcName)-10s - %(message)s"
format = "[!] %(asctime)s.%(msecs)-13s - %(module)s.%(funcName)s:%(lineno)s - %(message)s - %(levelname)s - <%(threadName)s>"
datefmt = '%H:%M:%S'
formatter_short = logging.Formatter(format_short, datefmt=datefmt)
formatter = logging.Formatter(format, datefmt=datefmt)

# rotating file handler (daily at midnight); may end up None when the log
# file cannot be opened, callers must check before use
try:
    import waveapi
except ImportError:
    try:
        filehandler = logging.handlers.TimedRotatingFileHandler(LOGDIR + os.sep + "jsb.log", 'midnight')
    except IOError:
        filehandler = None
## setloglevel function

def setloglevel(level_name="warn"):
    """ set loglevel to level_name.

        replaces all of the root logger's handlers with a fresh stream
        handler (and the shared rotating file handler when available).
    """
    if not level_name: return
    level = LEVELS.get(str(level_name).lower(), logging.NOTSET)
    root = logging.getLogger()
    root.setLevel(level)
    if root and root.handlers:
        for handler in root.handlers: root.removeHandler(handler)
    ch = logging.StreamHandler()
    ch.setLevel(level)
    # verbose layout only at debug level
    if level_name in ["debug",]: fmt = formatter
    else: fmt = formatter_short
    ch.setFormatter(fmt)
    # BUGFIX: filehandler is None when the log file could not be opened; the
    # old code called filehandler.setFormatter() unguarded and crashed here
    # (only addHandler below was guarded).
    if filehandler: filehandler.setFormatter(fmt)
    try: import waveapi
    except ImportError:
        root.addHandler(ch)
        if filehandler: root.addHandler(filehandler)
    logging.warn("loglevel is %s (%s)" % (str(level), level_name))
def getloglevel():
    """ return the textual name of the root logger's current level. """
    import logging
    rootlogger = logging.getLogger()
    return RLEVELS.get(rootlogger.level)
| Python |
# jsb/utils/textutils.py
#
#
## basic imports
import cgi
import re
import htmlentitydefs
## unescape_charref function

def unescape_charref(ref):
    """ decode a numeric character reference like &#65; or &#x41;. """
    body = ref[2:-1]
    if body.startswith("x"):
        # hexadecimal form: &#x..;
        return unichr(int(body[1:], 16))
    return unichr(int(body, 10))
## replace_entities function

def replace_entities(match):
    """ resolve one matched html entity to its unicode character.

        numeric references go through unescape_charref; named entities are
        looked up in the standard table; unknown entities pass through
        unchanged.
    """
    ent = match.group()
    if ent[1] == "#": return unescape_charref(ent)
    # BUGFIX: the attribute is name2codepoint; "najsbcodepoint" was a
    # botched automated rename and raised AttributeError on every named
    # entity.
    repl = htmlentitydefs.name2codepoint.get(ent[1:-1])
    if repl is not None: repl = unichr(repl)
    else: repl = ent
    return repl
## html_unescape function

def html_unescape(data):
    ''' unescape (numeric) HTML entities.

        the pattern matches both named (&amp;) and numeric (&#38; / &#x26;)
        references; replace_entities decides which decoding applies.
    '''
    return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
## html_escape function

def html_escape(data):
    ''' escape HTML entities.

        NOTE(review): cgi.escape escapes & < > but not quotes (its default
        quote=False), and was removed in python 3.8 in favor of html.escape.
    '''
    return cgi.escape(data)
| Python |
# gozerbot/pdol.py
#
#
""" pickled dict of lists """
## jsb imports
from jsb.lib.persist import Persist
## Pdol class

class Pdol(Persist):

    """ pickled dict of lists """

    def __init__(self, fname):
        Persist.__init__(self, fname)
        # Persist leaves self.data empty for a new file .. default to a dict
        if not self.data: self.data = {}

    def __iter__(self, name):
        # NOTE(review): __iter__ taking an extra argument breaks the iterator
        # protocol (iter(pdol) raises TypeError); only explicit
        # pdol.__iter__(name) calls work - confirm intent.
        return self.data[name].__iter__()

    def __getitem__(self, item):
        # returns None (not KeyError) for a missing key
        if self.data.has_key(item): return self.data[item]

    def __delitem__(self, item):
        # returns 1 when the key existed, None otherwise
        if self.data.has_key(item):
            self.data.__delitem__(item)
            return 1

    def __setitem__(self, item, what):
        self.data[item] = what
        return self

    def add(self, item, what):
        """ add what to items list """
        # NOTE(review): this replaces the stored value rather than appending
        # to a list, despite the docstring - confirm against callers.
        return self.__setitem__(item, what)

    def adduniq(self, item, what):
        """ add what to items list if item not yet added """
        if not self.data.has_key(item): self.new(item)
        if what not in self.data[item]: return self.__setitem__(item, what)

    def get(self, item):
        """ get items list """
        return self.__getitem__(item)

    def new(self, what):
        """ reset list of what """
        self.data[what] = []

    def delete(self, item, what):
        """ remove what from item's list """
        # NOTE(review): 'what' is used as an index/key here, while remove()
        # below removes by value - confirm which is intended.
        del self.data[item][what]

    def extend(self, item, what):
        """ extend the list. """
        if not self.data.has_key(item): self.new(item)
        self.data[item].extend(what)

    def remove(self, item, what):
        """ remove from list. """
        try:
            self.data[item].remove(what)
            return True
        except (ValueError, KeyError): return False

    def insert(self, item, index, what):
        """ insert in list. """
        if not self.data.has_key(item): self.new(item)
        self.data[item].insert(index, what)
# jsb/utils/exception.py
#
#
""" exception related functions. """
## basic imports
import sys
import traceback
import logging
import thread
import os
import logging
## defines

# every unique exception message seen so far (see exceptionmsg)
exceptionlist = []
# (event, errormsg) pairs recorded by handle_exception
exceptionevents = []
# ANSI terminal escape codes used to colorize log output
ERASE_LINE = '\033[2K'
BOLD='\033[1m'
RED = '\033[91m'
YELLOW = '\033[93m'
GREEN = '\033[92m'
ENDC = '\033[0m'
## exceptionmsg function

def exceptionmsg():
    """ create a compact one-line message for the currently handled exception.

    each traceback frame is rendered as 'module:lineno funcname | ' and the
    exception type/value is appended. the message is also remembered in the
    module-level exceptionlist (unique entries only).
    """
    exctype, excvalue, tb = sys.exc_info()
    trace = traceback.extract_tb(tb)
    result = ""
    # BUGFIX(readability): the original reused loop variable 'i' for both the
    # frame loop and the path-element loop, shadowing the frame mid-iteration
    for frame in trace:
        fname = frame[0]
        linenr = frame[1]
        func = frame[2]
        plugfile = fname[:-3].split(os.sep)
        mod = []
        # walk path elements from the end until a known package root is hit
        for elem in plugfile[::-1]:
            if elem in ['jsb.upload']: break
            mod.append(elem)
            if elem in ['jsb', 'waveapi', 'google', 'data']: break
        ownname = '.'.join(mod[::-1])
        result += "%s:%s %s | " % (ownname, linenr, func)
    del trace
    res = "%s%s: %s" % (result, exctype, excvalue)
    if res not in exceptionlist: exceptionlist.append(res)
    return res
## handle_exception function

def handle_exception(event=None, log=True, txt="", stop=False):
    """ handle exception.. for now only print it.

    when an event is given the error is recorded on it and reported back
    to the originating user/channel. stop=True aborts the process.
    """
    errormsg = exceptionmsg()
    if txt: errormsg = "%s - %s" % (txt, errormsg)
    if log: logging.error(RED + txt + " " + errormsg + ENDC)
    if event:
        # remember which event triggered the exception
        exceptionevents.append((event, errormsg))
        if event.bot:
            event.bot.error = errormsg
            # on IRC reply to the user directly, elsewhere to the channel
            if event.bot.type == "irc": target = event.nick
            else: target = event.channel
            # saynocb - presumably "say without output callbacks" to avoid
            # recursing into plugins; confirm against bot implementation
            if target: event.bot.saynocb(target, "*sorry* - an exception occured - %s" % errormsg)
    if stop: os._exit(1)
| Python |
# jsb/utils/source.py
#
#
""" get the location of a source """
## basic imports
import os
import logging
## getsource function

def getsource(mod):
    """ return the filesystem location of dotted module name mod, or None. """
    source = None
    splitted = mod.split(".")
    # resource_filename needs a package plus a resource name .. pad single names
    if len(splitted) == 1: splitted.append("")
    try:
        import pkg_resources
        source = pkg_resources.resource_filename(".".join(splitted[:len(splitted)-1]), splitted[-1])
    except ImportError:
        # no setuptools .. fall back to treating the dotted name as a path
        thedir = mod.replace(".", os.sep)
        if os.path.isdir(thedir): source = thedir
        if not source and os.path.isdir("/usr/lib/jsb"): source = "/usr/lib" + os.sep + thedir # BHJTW set this to /var/cache on debian
    logging.info("datadir - source is %s" % source)
    return source
| Python |
# jsb/utils/url.py
#
# most code taken from maze
""" url related functions. """
## jsb imports
from generic import fromenc, toenc
## basic imports
import logging
import time
import sys
import re
import traceback
import Queue
import urllib
import urllib2
import urlparse
import socket
import random
import os
import sgmllib
import thread
import types
import httplib
import StringIO
import htmlentitydefs
import tempfile
import cgi
## defines

# matches http/https urls in a piece of text
re_url_match = re.compile(u'((?:http|https)://\S+)')
# chardet is optional .. used as a last resort by get_encoding()
try: import chardet
except ImportError: chardet = None

class istr(str):
    # plain str subclass so extra attributes (eg .info) can be attached
    pass
## useragent function

def useragent():
    """ provide useragent string """
    from jsb.version import version
    botname, botversion = version.split()[0:2]
    return 'Mozilla/5.0 (compatible; %s %s; http://jsonbot.appspot.com)' % (botname, botversion)
## CBURLopener class

class CBURLopener(urllib.FancyURLopener):

    """ our URLOpener """

    def __init__(self, version, *args):
        # 'version' becomes the user-agent string advertised to servers
        if version: self.version = version
        else: self.version = useragent()
        urllib.FancyURLopener.__init__(self, *args)
## geturl function

def geturl(url, version=None):
    """ fetch an url. returns the raw body bytes. """
    # install our opener globally so urllib.urlopen uses the custom user-agent
    urllib._urlopener = CBURLopener(version)
    logging.info('fetching %s' % url)
    result = urllib.urlopen(url)
    tmp = result.read()
    result.close()
    return tmp
## geturl2 function

def geturl2(url, decode=False):
    """ use urllib2 to fetch an url.

    returns an istr with the response headers attached as .info;
    decode=True additionally converts the body using the detected charset.
    """
    logging.info('fetching %s' % url)
    request = urllib2.Request(url)
    request.add_header('User-Agent', useragent())
    opener = urllib2.build_opener()
    result = opener.open(request)
    tmp = result.read()
    info = result.info()
    result.close()
    if decode:
        # best-effort charset detection, then decode to unicode
        encoding = get_encoding(tmp)
        logging.info('%s encoding: %s' % (url, encoding))
        res = istr(fromenc(tmp, encoding, url))
    else: res = istr(tmp)
    # istr allows attaching the response headers to the returned string
    res.info = info
    return res
## geturl4 function

def geturl4(url, myheaders={}, postdata={}, keyfile="", certfile="", port=80):
    """ use httplib to fetch an url. returns the HTTPResponse object.

    NOTE: mutable default arguments are kept for interface compatibility;
    they are only read here, never mutated. 'postdata' is encoded but not
    sent (this is a GET) - kept for signature compatibility.
    """
    headers = {'Content-Type': 'text/html', 'Accept': 'text/plain; text/html', 'User-Agent': useragent()}
    headers.update(myheaders)
    urlparts = urlparse.urlparse(url)
    # the netloc may carry an explicit port (host:port)
    try:
        port = int(urlparts[1].split(':')[1])
        host = urlparts[1].split(':')[0]
    except (IndexError, ValueError): host = urlparts[1]
    if keyfile: connection = httplib.HTTPSConnection(host, port, keyfile, certfile)
    elif 'https' in urlparts[0]: connection = httplib.HTTPSConnection(host, port)
    else: connection = httplib.HTTPConnection(host, port)
    if type(postdata) == types.DictType: postdata = urllib.urlencode(postdata)
    logging.info('fetching %s' % url)
    # BUGFIX: the headers dict built above was never passed to the request
    connection.request('GET', urlparts[2], headers=headers)
    return connection.getresponse()
## posturl function

def posturl(url, myheaders, postdata, keyfile=None, certfile="",port=80):
    """ very basic HTTP POST url retriever. returns the HTTPResponse. """
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain; text/html', 'User-Agent': useragent()}
    headers.update(myheaders)
    urlparts = urlparse.urlparse(url)
    # NOTE(review): 'port' is only honoured on the HTTPS path; plain HTTP
    # always connects to the default port - confirm that is intended
    if keyfile: connection = httplib.HTTPSConnection(urlparts[1], port, keyfile, certfile)
    else: connection = httplib.HTTPConnection(urlparts[1])
    if type(postdata) == types.DictType: postdata = urllib.urlencode(postdata)
    logging.info('fetching %s' % url)
    connection.request('POST', urlparts[2], postdata, headers)
    return connection.getresponse()
## delete url function

def deleteurl(url, myheaders={}, postdata={}, keyfile="", certfile="", port=80):
    """ very basic HTTP DELETE. returns the HTTPResponse. """
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain; text/html', 'User-Agent': useragent()}
    headers.update(myheaders)
    urlparts = urlparse.urlparse(url)
    # HTTPS only when both key and cert are supplied; port used on HTTPS only
    if keyfile and certfile: connection = httplib.HTTPSConnection(urlparts[1], port, keyfile, certfile)
    else: connection = httplib.HTTPConnection(urlparts[1])
    if type(postdata) == types.DictType: postdata = urllib.urlencode(postdata)
    logging.info('fetching %s' % url)
    connection.request('DELETE', urlparts[2], postdata, headers)
    return connection.getresponse()
## put url function

def puturl(url, myheaders={}, postdata={}, keyfile="", certfile="", port=80):
    """ very basic HTTP PUT. returns the HTTPResponse. """
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain; text/html', 'User-Agent': useragent()}
    headers.update(myheaders)
    urlparts = urlparse.urlparse(url)
    # NOTE(review): like posturl, 'port' is only honoured on the HTTPS path
    if keyfile: connection = httplib.HTTPSConnection(urlparts[1], port, keyfile, certfile)
    else: connection = httplib.HTTPConnection(urlparts[1])
    if type(postdata) == types.DictType: postdata = urllib.urlencode(postdata)
    logging.info('fetching %s' % url)
    connection.request('PUT', urlparts[2], postdata, headers)
    return connection.getresponse()
## getpostdata function

def getpostdata(event):
    """ retrive post data from url data. returns a {field: first value} dict. """
    # event.headers may expose getheader() or dict-style get() .. support both
    try:
        ctype, pdict = cgi.parse_header(event.headers.getheader('content-type'))
    except AttributeError: ctype, pdict = cgi.parse_header(event.headers.get('content-type'))
    body = cgi.FieldStorage(fp=event.rfile, headers=event.headers, environ = {'REQUEST_METHOD':'POST'}, keep_blank_values = 1)
    result = {}
    # flatten multi-valued fields to their first value
    for name in dict(body): result[name] = body.getfirst(name)
    return result
## decode_html_entities function

def decode_html_entities(s):
    """ smart decoding of html entities to utf-8 """
    re_ent_match = re.compile(u'&([^;]+);')
    re_entn_match = re.compile(u'&#([^;]+);')
    s = s.decode('utf-8', 'replace')

    def to_entn(match):
        """ convert a named entity to its character. """
        if htmlentitydefs.entitydefs.has_key(match.group(1)):
            # entitydefs values are latin-1 encoded byte strings
            return htmlentitydefs.entitydefs[match.group(1)].decode('latin1', 'replace')
        return match.group(0)

    def to_utf8(match):
        """ convert a numeric entity to its character. """
        return unichr(long(match.group(1)))

    # named entities first, then numeric ones
    s = re_ent_match.sub(to_entn, s)
    s = re_entn_match.sub(to_utf8, s)
    return s
## get_encoding function

def get_encoding(data):
    """ get encoding from web data.

    tries, in order: the content-type header attached to data (if any),
    a <meta http-equiv> tag in the body, chardet, the interpreter default.
    """
    # 1. charset from the content-type response header, when data carries one
    if hasattr(data, 'info') and data.info.has_key('content-type') and 'charset' in data.info['content-type'].lower():
        charset = data.info['content-type'].lower().split('charset', 1)[1].strip()
        if charset[0] == '=':
            charset = charset[1:].strip()
            if ';' in charset: return charset.split(';')[0].strip()
        return charset
    # 2. charset from a <meta http-equiv="content-type"> tag in the body
    if '<meta' in data.lower():
        metas = re.findall(u'<meta[^>]+>', data, re.I | re.M)
        if metas:
            for meta in metas:
                test_http_equiv = re.search('http-equiv\s*=\s*[\'"]([^\'"]+)[\'"]', meta, re.I)
                if test_http_equiv and test_http_equiv.group(1).lower() == 'content-type':
                    test_content = re.search('content\s*=\s*[\'"]([^\'"]+)[\'"]', meta, re.I)
                    if test_content:
                        test_charset = re.search('charset\s*=\s*([^\s\'"]+)', meta, re.I)
                        if test_charset: return test_charset.group(1)
    # 3. statistical detection via chardet, when installed
    if chardet:
        test = chardet.detect(data)
        if test.has_key('encoding'): return test['encoding']
    # 4. give up and use the interpreter default
    return sys.getdefaultencoding()
## Stripper class

class Stripper(sgmllib.SGMLParser):

    """ html stripper. """

    def __init__(self):
        sgmllib.SGMLParser.__init__(self)

    def strip(self, some_html):
        """ strip html. tags are dropped by the parser, text is kept. """
        self.theString = u""
        self.feed(some_html)
        self.close()
        return self.theString

    def handle_data(self, data):
        """ data handler. accumulates text between tags. """
        self.theString += data
## striphtml function

def striphtml(txt):
    """ strip html from txt. """
    return Stripper().strip(txt)
| Python |
# jsb/utils/xmpp.py
#
#
""" XMPP related helper functions. """
def stripped(userhost):
    """ strip resource from userhost. """
    bare, _sep, _resource = userhost.partition('/')
    return bare
def resource(userhost):
    """ return resource of userhost, or "" when there is none.

    BUGFIX: indexing a split without a '/' raises IndexError, not
    ValueError - the old handler never matched, so bare jids crashed here.
    """
    try: return userhost.split('/')[1]
    except IndexError: return ""
| Python |
# lib/utils/timeutils.py
#
#
""" time related helper functions. """
## jsb imports
from exception import handle_exception
## basic imports
import time
import re
import calendar
## defines

# fraction of a span contributed by leap days (6 hours per 365-day year)
leapfactor = float(6*60*60)/float(365*24*60*60)
# matches time.ctime() output, e.g. 'Sun Jun 20 23:21:05 1993'
timere = re.compile('(\S+)\s+(\S+)\s+(\d+)\s+(\d+):(\d+):(\d+)\s+(\d+)')
# month names indexed 1..12 ('Bo' pads index 0)
bdmonths = ['Bo', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
## elapsedstring function

def elapsedstring(nsec, ywd = None):
    """ given the number of seconds return a string of the elapsed time.

    ywd truthy limits the output to years/weeks/days. returns 0 (an int,
    not a string) when nothing elapsed.
    """
    nsec = int(float(nsec))
    year = 365*24*60*60
    week = 7*24*60*60
    day = 24*60*60
    hour = 60*60
    minute = 60
    # compensate for leap days over long spans
    nsec -= nsec * leapfactor
    years = int(nsec/year)
    nsec -= years*year
    weeks = int(nsec/week)
    nsec -= weeks*week
    days = int(nsec/day)
    nsec -= days*day
    hours = int(nsec/hour)
    nsec -= hours*hour
    minutes = int(nsec/minute)
    sec = int(nsec - minutes*minute)
    result = ''
    if (years > 1): result = str(years) + " years "
    if (years == 1): result = "1 year "
    if (weeks > 1): result += str(weeks) + " weeks "
    if (weeks == 1): result += "1 week "
    if (days > 1):
        if ywd: result += 'and '+ str(days) + " days"
        else: result += str(days) + " days "
    if (days == 1):
        if ywd: result += 'and 1 day'
        else: result += "1 day "
    # years/weeks/days only mode stops here
    if ywd: return result
    if (hours > 1): result += str(hours) + " hours "
    if (hours == 1): result += "1 hour "
    if (minutes > 1): result += str(minutes) + " minutes "
    if (minutes == 1): result += "1 minute "
    if sec == 0:
        if result: return result
        else: return 0
    if (sec == 1):
        if result: result += "and 1 second "
        else: result = "1 second"
    else:
        if result: result += "and " + str(sec) + " seconds"
        else: result = str(sec) + " seconds"
    return result.strip()
## hourmin function

def hourmin(ttime):
    """ return the hours:minutes of a unix timestamp, zero padded. """
    tstruct = time.localtime(ttime)
    return "%02d:%02d" % (tstruct[3], tstruct[4])
## striptime function

def striptime(what):
    """ strip time indicators from string. """
    txt = str(what)
    # drop dates (dd-mm-yyyy / dd-mm) and clock times (hh:mm)
    for pattern in (r'\d+-\d+-\d+', r'\d+-\d+', r'\d+:\d+'):
        txt = re.sub(pattern, '', txt)
    # collapse any runs of whitespace left behind
    txt = re.sub(r'\s+', ' ', txt)
    return txt.strip()
## now function

def now():
    """ return current time as a ctime() string. """
    # NOTE(review): adding time.timezone to time.time() shifts the stamp
    # away from local time (ctime already localizes) - looks suspicious;
    # behavior kept as-is, confirm against callers.
    if time.daylight: ttime = time.ctime(time.time() + int(time.timezone) + 3600)
    else: ttime = time.ctime(time.time() + int(time.timezone))
    return ttime
## today function

def today():
    """ return time of 0:00 today as a unix timestamp (None if ctime doesn't parse). """
    if time.daylight: ttime = time.ctime(time.time() + int(time.timezone) + 3600)
    else: ttime = time.ctime(time.time() + int(time.timezone))
    matched = re.search(timere, ttime)
    if matched:
        # rebuild 'day month year' and parse it back to obtain midnight
        temp = "%s %s %s" % (matched.group(3), matched.group(2), matched.group(7))
        timestring = time.strptime(temp, "%d %b %Y")
        result = time.mktime(timestring)
        return result
## strtotime function

def strtotime(what):
    """ convert string to time.

    accepts 'dd-mm-yyyy' or 'dd-mm' dates and 'hh:mm:ss' or 'hh:mm' times
    (alone or combined) and returns a unix timestamp, or None when nothing
    parseable is found or a value is out of range.
    """
    daymonthyear = 0
    hoursmin = 0
    try:
        dmyre = re.search('(\d+)-(\d+)-(\d+)', str(what))
        if dmyre:
            (day, month, year) = dmyre.groups()
            day = int(day)
            month = int(month)
            year = int(year)
            # reject days beyond the length of that month
            if day <= calendar.monthrange(year, month)[1]:
                date = "%s %s %s" % (day, bdmonths[month], year)
                daymonthyear = time.mktime(time.strptime(date, "%d %b %Y"))
            else: return None
        else:
            dmre = re.search('(\d+)-(\d+)', str(what))
            if dmre:
                # no year given .. assume the current one
                year = time.localtime()[0]
                (day, month) = dmre.groups()
                day = int(day)
                month = int(month)
                if day <= calendar.monthrange(year, month)[1]:
                    date = "%s %s %s" % (day, bdmonths[month], year)
                    daymonthyear = time.mktime(time.strptime(date, "%d %b %Y"))
                else: return None
        hmsre = re.search('(\d+):(\d+):(\d+)', str(what))
        if hmsre:
            (h, m, s) = hmsre.groups()
            h = int(h)
            m = int(m)
            s = int(s)
            if h > 24 or h < 0 or m > 60 or m < 0 or s > 60 or s < 0: return None
            hours = 60 * 60 * (int(hmsre.group(1)))
            hoursmin = hours + int(hmsre.group(2)) * 60
            hms = hoursmin + int(hmsre.group(3))
        else:
            hmre = re.search('(\d+):(\d+)', str(what))
            if hmre:
                (h, m) = hmre.groups()
                h = int(h)
                m = int(m)
                if h > 24 or h < 0 or m > 60 or m < 0: return None
                hours = 60 * 60 * (int(hmre.group(1)))
                hms = hours + int(hmre.group(2)) * 60
            else: hms = 0
        if not daymonthyear and not hms: return None
        # a bare time is taken relative to midnight today
        if daymonthyear == 0: heute = today()
        else: heute = daymonthyear
        return heute + hms
    except OverflowError: return None
    except ValueError:return None
    # NOTE(review): the final handler silently returns None on any other
    # error - confirm this best-effort behavior is intended
    except Exception, ex: pass
## uurminsec function

def uurminsec(ttime):
    """ return hours:minutes:seconds of the given time, zero padded. """
    tstruct = time.localtime(ttime)
    return "%02d:%02d:%02d" % (tstruct[3], tstruct[4], tstruct[5])
## getdaymonth function

def getdaymonth(ttime):
    """ return (day, month) of the given time, or (None, None). """
    matched = re.search(timere, time.ctime(ttime))
    if not matched: return (None, None)
    return (matched.group(3), matched.group(2))
## getdaymonthyear function

def getdaymonthyear(ttime):
    """ return (day, month, year) of the given time, or (None, None, None).

    BUGFIX: the year was read with ``result.group[7]`` - subscripting the
    bound method raises TypeError - instead of calling ``result.group(7)``.
    """
    timestr = time.ctime(ttime)
    result = re.search(timere, timestr)
    if result: return (result.group(3), result.group(2), result.group(7))
    else: return (None, None, None)
## dmy function

def dmy(ttime):
    """ return day month year as a string (None when ctime output doesn't match). """
    timestr = time.ctime(ttime)
    result = re.search(timere, timestr)
    if result: return "%s %s %s" % (result.group(3), result.group(2), result.group(7))
| Python |
# jsb/utils/twitter.py
#
#
""" twitter related helper functions .. uses tweepy. """
## tweepy imports
from jsb.contrib.tweepy.auth import OAuthHandler
from jsb.contrib.tweepy.api import API
from jsb.contrib.tweepy import oauth
## basic imports
import logging
## defines
go = True
## twitterapi function

def twitterapi(CONSUMER_KEY, CONSUMER_SECRET, token=None, *args, **kwargs):
    """ return twitter API object - with or without access token.

    returns None when no token is supplied.
    """
    if not go:
        # NOTE(review): 'go' is set True at module level and nothing in this
        # module sets it False - presumably meant to flag missing
        # credentials; confirm.
        logging.warn("the twitter plugin needs the credentials.py file in the .jsb/data/config dir. see .jsb/data/examples".upper())
        return None
    if token:
        auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
        auth.set_access_token(token.key, token.secret)
        return API(auth, *args, **kwargs)
## twittertoken function

def twittertoken(CONSUMER_KEY, CONSUMER_SECRET, twitteruser, username):
    """ get access token from stored token string.

    returns None when the user has no stored token.
    """
    token = twitteruser.data.get(username)
    if not token: return
    return oauth.OAuthToken(CONSUMER_KEY, CONSUMER_SECRET).from_string(token)
| Python |
# jsb/utils/popen.py
#
#
""" popen helper functions. """
## defines

# set True only when all imports below succeed .. the rest of the module
# is defined conditionally on it
go = False

## basic imports

try:
    from subprocess import Popen, PIPE
    from locking import lockdec
    import thread, StringIO, logging, types
    go = True
except: go = False

if go:

    ## locks

    popenlock = thread.allocate_lock()
    popenlocked = lockdec(popenlock)

    ## exceptions

    class PopenWhitelistError(Exception):

        """ raised when a user-supplied argument starts with '-'. """

        def __init__(self, item):
            Exception.__init__(self)
            self.item = item

        def __str__(self):
            return self.item

    class PopenListError(Exception):

        """ raised when arguments are not passed in as a list. """

        def __init__(self, item):
            Exception.__init__(self)
            self.item = item

        def __str__(self):
            return str(self.item)

    ## GozerStringIO class

    class GozerStringIO(StringIO.StringIO):

        """ provide readlines support on a StringIO object. """

        def readlines(self):
            """ read multiple lines. """
            return self.read().split('\n')

    ## GozerPopen4 class

    class GozerPopen4(Popen):

        """ extend the builtin Popen class with a close method. """

        def __init__(self, args):
            Popen.__init__(self, args, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            # aliases matching the old popen2.Popen4 interface
            self.fromchild = self.stdout
            self.tochild = self.stdin
            self.errors = self.stderr

        def close(self):
            """ shutdown: wait for the child, close all pipes, return exit code. """
            self.wait()
            try: self.stdin.close()
            except: pass
            try: self.stdout.close()
            except: pass
            try: self.errors.close()
            except: pass
            return self.returncode

    ## gozerpopen function

    def gozerpopen(args, userargs=[]):
        """ do the actual popen .. make sure the arguments are passed on as list. """
        if type(args) != types.ListType: raise PopenListError(args)
        # NOTE(review): reports 'args' instead of 'userargs' here - confirm
        if type(userargs) != types.ListType: raise PopenListError(args)
        # refuse user-supplied options so users cannot inject flags
        for i in userargs:
            if i.startswith('-'): raise PopenWhitelistError(i)
        proces = GozerPopen4(args + userargs)
        return proces
| Python |
# jsb/utils/fileutils.py
#
# Description: Various file utilities
# Author: Wijnand 'tehmaze' Modderman
# Author URL: http://tehmaze.com
# License: BSD
""" provide file related helpers. """
## jsb imports
from jsb.utils.generic import istr
## basic imports
import tarfile
import os
import types
import cStringIO
import bz2
import gzip
## tarextract function

def tarextract(package, fileobj=None, prefix=None, base=None):
    '''
    Extracts a tarball from ``package``, or, if ``fileobj`` is either a string or a seekable
    IO stream, it will extract the data from there. We only extract files from the tarball
    that are member of the ``base`` directory if a ``base`` is specified.

    Returns the list of extracted member names.
    '''
    extracted = []
    if fileobj:
        # a raw string is wrapped so tarfile can treat it as a stream
        if type(fileobj) == types.StringType: fileobj = cStringIO.StringIO(fileobj)
        tarf = tarfile.open(mode='r|', fileobj=fileobj)
    else: tarf = tarfile.open(package, 'r')
    for tarinfo in tarf:
        if tarinfo.name.startswith('/'): tarinfo.name = tarinfo.name[1:] # strip leading /
        # keep members inside 'base' (or everything when no base is given)
        if not base or ((tarinfo.name.rstrip('/') == base and tarinfo.isdir()) or tarinfo.name.startswith(base+os.sep)):
            if prefix: tarinfo.name = '/'.join([prefix, tarinfo.name])
            tarf.extract(tarinfo)
            extracted.append(tarinfo.name)
    tarf.close()
    if fileobj:
        try: fileobj.close()
        except: pass
        del fileobj
    return extracted
## unzip functions

def bunzip2(fileobj):
    """ bunzip2 the file object (bz2-compressed byte string). """
    decompressed = bz2.decompress(fileobj)
    return decompressed
def gunzip(fileobj):
    """ gunzip the file object. """
    # plain (byte)strings are wrapped in a StringIO so GzipFile can seek
    if type(fileobj) == types.StringType or isinstance(fileobj, istr): fileobj = cStringIO.StringIO(str(fileobj))
    return gzip.GzipFile(mode='rb', fileobj=fileobj).read()
## mtime functions

def mtime(path):
    """ return the last modification time of path, or None when it cannot be statted.

    BUGFIX: the old body referenced the uncalled ``os.getcwd``, an undefined
    ``package`` name and the unimported ``stat`` module, so every call raised
    and the bare except made the function always return None.
    """
    import stat
    try: return os.stat(path)[stat.ST_MTIME]
    except (OSError, IOError): return None
| Python |
# jsb/utils/lazydict.py
#
# thnx to maze
""" a lazydict allows dotted access to a dict .. dict.key. """
## jsb imports
from jsb.utils.locking import lockdec
from jsb.utils.exception import handle_exception
from jsb.utils.trace import whichmodule
from jsb.lib.errors import PropertyIgnored
from jsb.imports import getjson
json = getjson()
## basic imports
from xml.sax.saxutils import unescape
import copy
import logging
import uuid
import types
import threading
import os
import re
## locks

lock = threading.RLock()
locked = lockdec(lock)

## defines

# python types json can serialize directly
jsontypes = [types.StringType, types.UnicodeType, types.DictType, types.ListType, types.IntType]
# attribute names never serialized (runtime state, credentials, queues ..)
defaultignore = ['userhosts', 'owner', 'comments', 'result', 'plugs', 'origevent', 'passwords', 'key', 'finished', 'inqueue', 'resqueue', 'outqueue', 'waitlist', 'comments', 'createdfrom', 'modname', 'cfile', 'dir', 'filename', 'webchannels', 'tokens', 'token', 'cmndperms', 'gatekeeper', 'stanza', 'isremote', 'iscmnd', 'orig', 'bot', 'origtxt', 'body', 'subelements', 'args', 'rest', 'pass', 'password', 'fsock', 'sock', 'handlers', 'users', 'plugins']
# shorthand for deep copies used throughout this module
cpy = copy.deepcopy
## checkignore function

def checkignore(name, ignore):
    """ see whether a element attribute (name) should be ignored. """
    name = unicode(name)
    # private attributes are always skipped
    if name.startswith('_'): return True
    for item in ignore:
        if item == name:
            #logging.debug("lazydict - ignoring on %s" % name)
            return True
    return False
## stripignore function

def stripignore(d):
    """ remove all default-ignored keys from d in place and return it. """
    for name in defaultignore:
        try: del d[name]
        except KeyError: pass
    return d
#@locked
def dumpelement(element, prev={}, withtypes=False):
    """ check each attribute of element whether it is dumpable. """
    elem = cpy(element)
    # deepcopy of some objects yields a falsy result .. fall back to original
    if not elem: elem = element
    try: new = LazyDict(prev)
    except (TypeError, ValueError): new = LazyDict()
    for name in elem:
        #logging.debug("lazydict - trying dump of %s" % name)
        if checkignore(name, defaultignore): continue
        #if not elem[name]: continue
        try:
            # probe whether the value is json serializable
            json.dumps(elem[name])
            try: new[name] = stripignore(elem[name])
            except: new[name] = elem[name]
        except TypeError:
            if type(elem) not in jsontypes:
                # unserializable non-container .. record its type if requested
                if withtypes: new[name] = unicode(type(elem))
            else:
                # container holding unserializable values .. recurse into it
                logging.warn("lazydict - dumpelement - %s" % elem[name])
                new[name] = dumpelement(elem[name], new)
    return stripignore(new)
## LazyDict class

class LazyDict(dict):

    """ lazy dict allows dotted access to a dict """

    def __deepcopy__(self, a):
        return LazyDict(a)

    def __getattr__(self, attr, default=None):
        """ get attribute. missing keys yield None instead of AttributeError. """
        # NOTE(review): the 'default' parameter is accepted but never used
        if not self.has_key(attr):
            #mod = whichmodule()
            #if not "queue" in attr: logging.debug("lazydict - %s is not set - %s" % (attr, mod))
            return
        return self[attr]

    #def __str__(self): return self.tojson()

    def __setattr__(self, attr, value):
        """ set attribute. refuses to overwrite stored callables. """
        if self.has_key(attr) and type(self[attr]) in [types.FunctionType, types.MethodType]:
            mod = whichmodule()
            logging.error("lazydict - cannot change a function of method: %s - called from %s" % (attr, mod))
            return
        self[attr] = value

    def render(self, template):
        """ substitute {{ key }} placeholders in the template file with our values. """
        temp = open(template, 'r').read()
        for key, value in self.iteritems():
            # non-string values make replace() raise .. skip those keys
            try: temp = temp.replace("{{ %s }}" % key, value)
            except: pass
        return temp

    def dostring(self):
        """ return a string representation of the dict """
        res = ""
        cp = dict(self)
        for item, value in cp.iteritems(): res += "%r=%r " % (item, value)
        return res

    def tojson(self, withtypes=False):
        """ dump the lazydict object to json. """
        try: return json.dumps(dumpelement(self, withtypes))
        except RuntimeError, ex: handle_exception()

    def dump(self, withtypes=False):
        """ just dump the lazydict object. DON'T convert to json. """
        #logging.warn("lazydict - dumping - %s" % type(self))
        try: return dumpelement(cpy(self), withtypes)
        except RuntimeError, ex: handle_exception()

    def load(self, input):
        """ load from json string. returns self. """
        try: temp = json.loads(input)
        except ValueError:
            handle_exception()
            logging.error("lazydict - can't decode %s" % input)
            return self
        if type(temp) != dict:
            logging.error("lazydict - %s is not a dict" % str(temp))
            return self
        self.update(temp)
        return self

    def tofile(self, filename):
        """ write the json dump to filename (write tmp file, then rename). """
        f = open(filename + ".tmp", 'w')
        f.write(self.tojson())
        f.close()
        os.rename(filename + '.tmp', filename)

    def fromfile(self, filename):
        """ update self from the json stored in filename. """
        # NOTE(review): the file object is never closed here
        f = open(filename, "r")
        self.update(json.loads(f.read()))
| Python |
# gozerbot/pdod.py
#
#
""" pickled dicts of dicts """
## jsb imports
from jsb.utils.lazydict import LazyDict
from jsb.lib.persist import Persist
## Pdod class

class Pdod(Persist):

    """ pickled dicts of dicts """

    def __init__(self, filename):
        Persist.__init__(self, filename)
        if not self.data: self.data = LazyDict()

    def __getitem__(self, name):
        """ return item with name """
        # returns None instead of raising KeyError for missing names
        if self.data.has_key(name): return self.data[name]

    def save(self):
        Persist.save(self)

    def __delitem__(self, name):
        """ delete name item """
        if self.data.has_key(name): return self.data.__delitem__(name)

    def __setitem__(self, name, item):
        """ set name item """
        self.data[name] = item

    def __contains__(self, name):
        return self.data.__contains__(name)

    def setdefault(self, name, default):
        """ set default of name """
        return self.data.setdefault(name, default)

    def has_key(self, name):
        """ has name key """
        return self.data.has_key(name)

    # NOTE(review): 'najsb' in the methods below looks like search/replace
    # damage of an original parameter 'name2' - kept for compatibility.

    def has_key2(self, name1, najsb):
        """ has [name1][najsb] key """
        if self.data.has_key(name1): return self.data[name1].has_key(najsb)

    def get(self, name1, najsb):
        """ get data[name1][najsb], or None when missing """
        try:
            result = self.data[name1][najsb]
            return result
        except KeyError: pass

    def set(self, name1, najsb, item):
        """ set name, najsb item """
        if not self.data.has_key(name1): self.data[name1] = {}
        self.data[name1][najsb] = item
| Python |
# jsb/utils/web.py
#
#
""" google auth related functions. """
## jsb imports
from jsb.utils.trace import whichmodule
## finduser

def finduser():
    """ try to find the email of the current logged in user. """
    # GAE users API .. imported here so non-GAE runtimes can import the module
    from google.appengine.api import users as gusers
    user = gusers.get_current_user()
    if user: return user.email()
    return ""
## checkuser

def checkuser(response, request, event=None):
    """
    check for user based on web response. first try google
    otherwise return 'notauth@IP'. returns (userhost, user, u, nick).
    """
    from google.appengine.api import users as gusers
    userhost = "notauth"
    u = "notauth"
    nick = "notauth"
    user = gusers.get_current_user()
    # hostid identifies the remote end (optionally tied to the bot instance)
    if event: hostid = "%s-%s" % (request.remote_addr, event.bot.uuid)
    else: hostid = request.remote_addr
    if not user:
        # not logged in via google .. derive an identity from request fields
        try:
            email = request.get('USER_EMAIL')
            if not email: email = "notauth"
            auth_domain = request.get('AUTH_DOMAIN')
            who = request.get('who')
            if not who:who = email
            if auth_domain: userhost = nick = "%s@%s" % (who, auth_domain)
            else: userhost = nick = "%s@%s" % (who, hostid)
        except KeyError: userhost = nick = "notauth@%s" % hostid
    else:
        userhost = user.email() or user.nickname()
        if not userhost: userhost = nick = "notauth@%s" % hostid
        nick = user.nickname()
    u = userhost
    # figure out the first calling module outside of jsb
    cfrom = whichmodule()
    if 'jsb' in cfrom:
        cfrom = whichmodule(1)
        if 'jsb' in cfrom: cfrom = whichmodule(2)
    # NOTE(review): cfrom is computed but never used or returned - confirm
    return (userhost, user, u, nick)
| Python |
# jsb/utils/web.py
#
#
""" web related functions. """
## jsb imports
from jsb.utils.generic import fromenc
from jsb.version import getversion
from jsb.lib.config import Config
from jsb.utils.lazydict import LazyDict
## gaelib imports
from auth import finduser
## basic imports
import os
import time
import socket
import urlparse
## defines

# openid federated-login providers offered on the login page (see loginurl)
openIdProviders = [
    'Gmail.com',
    'Google.com',
    'Yahoo.com',
    'MySpace.com',
    'AOL.com',
    'MyOpenID.com',
]
## create_openid_url

def create_openid_url(continue_url):
    """ build the appengine login url that redirects back to continue_url.

    BUGFIX: the old body referenced ``self.request.url`` inside a plain
    module-level function (no ``self`` exists there) and ``urllib`` was
    never imported in this module, so every call raised NameError.
    """
    import urllib
    return "/_ah/login?continue=%s" % urllib.quote(continue_url)
## mini

def mini(response, input={}):
    """ display start html so that bot output can follow. """
    inputdict = LazyDict({'version': getversion()})
    if input: inputdict.update(input)
    temp = os.path.join(os.getcwd(), 'templates/mini.html')
    # BUGFIX: the template dict built above is 'inputdict' - the old code
    # called render() on the undefined name 'template' (NameError)
    outstr = inputdict.render(temp)
    response.out.write(outstr)
## start

def start(response, input={}):
    """ display start html so that bot output can follow. """
    # socket.gethostname is missing on GAE .. fall back to env vars
    try: host = socket.gethostname()
    except AttributeError:
        if os.environ.get('HTTP_HOST'): host = os.environ['HTTP_HOST']
        else: host = os.environ['SERVER_NAME']
    template = LazyDict({'version': getversion(), 'host': host, 'color': Config().color or "#C54848"})
    if input: template.update(input)
    temp = os.path.join(os.getcwd(), 'templates/console.html')
    outstr = template.render(temp)
    response.out.write(outstr)
## login

def login(response, input={}):
    """ display the login page html. """
    # socket.gethostname is missing on GAE .. fall back to env vars
    try: host = socket.gethostname()
    except AttributeError:
        if os.environ.get('HTTP_HOST'): host = os.environ['HTTP_HOST']
        else: host = os.environ['SERVER_NAME']
    template = LazyDict({'version': getversion(), 'host': host, 'color': Config().color or "#C54848"})
    if input: template.update(input)
    temp = os.path.join(os.getcwd(), 'templates/login.html')
    outstr = template.render(temp)
    response.out.write(outstr)
## commandbox (testing purposes)

def commandbox(response, url="/dispatch/"):
    """ write html data for the command box, posting to url. """
    response.out.write("""
    <form action="%s" method="post">
    <div><b>enter command:</b> <input type="commit" name="content"></div>
    </form>
    """ % url)
## execdbox (testing purposes)

def execbox(response, url="/exec/"):
    """ write html data for the exec box. """
    # NOTE(review): the 'url' parameter is unused - the form action is empty
    response.out.write("""
    <form action="" method="GET">
    <b>enter command:</b><input type="commit" name="input" value="">
    // <input type="button" value="go" onClick="makePOSTRequest(this.form)"
    </form>
    """)
## closer

def closer(response):
    """ send closing html .. comes after the bot output. """
    elapsed = time.time() - response.starttime
    for chunk in ('</div><div class="footer">',
                  '<b>%4f seconds</b></div>' % elapsed,
                  '</body></html>'):
        response.out.write(chunk)
## loginurl

def loginurl(request, response):
    """ return a dict of openid login urls keyed by provider name. """
    from google.appengine.api import users as gusers
    urls = {}
    for p in openIdProviders:
        # 'Gmail.com' -> name 'Gmail', identity url 'gmail.com'
        p_name = p.split('.')[-2]
        p_url = p.lower()
        try:
            url = gusers.create_login_url(federated_identity=p_url)
            if not url: url = create_openid_url(p_url)
        except TypeError: continue
        urls[p_name] = url
    return urls
## logouturl

def logouturl(request, response):
    """ return the google logout url redirecting back to request.uri. """
    from google.appengine.api import users as gusers
    return gusers.create_logout_url(request.uri)
| Python |
# jsb/utils/limlist.py
#
#
""" limited list """
class Limlist(list):

    """ list holding at most a fixed number of items """

    def __init__(self, limit):
        # maximum number of elements kept in the list
        self.limit = limit
        list.__init__(self)

    def insert(self, index, item):
        """ insert item at index .. drops the newest item when full, returns -1 for an out-of-range index """
        if index > len(self): return -1
        if len(self) >= self.limit: self.pop(-1)
        list.insert(self, index, item)

    def append(self, item):
        """ append item .. drops the oldest item when full """
        if len(self) >= self.limit: self.pop(0)
        list.append(self, item)
| Python |
# jsb/utils/name.py
#
#
"""
name related helper functions.
google requirements on file names:
- It must contain only letters, numbers, _, +, /, $, ., and -.
- It must be less than 256 chars.
- It must not contain "/./", "/../", or "//".
- It must not end in "/".
- All spaces must be in the middle of a directory or file name.
"""
## jsb imports
from jsb.utils.generic import toenc, fromenc
from jsb.lib.errors import NameNotSet
## basic imports
import string
import os
import re
## defines
allowednamechars = string.ascii_letters + string.digits + '_+/$.-'
## slugify function taken from django (not used now)
def slugify(value):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.
    """
    import unicodedata
    value = unicodedata.normalize('NFKD', value)
    # the docstring (and django's slugify, which this copies) promises
    # lowercase output; the original never called lower()
    value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
    return re.sub('[-\s]+', '-', value)
## stripname function
def stripname(namein, allowed=""):
    """ strip all not allowed chars from name. """
    if not namein:
        raise NameNotSet(namein)
    # normalize separators and shell-hostile characters first
    cleaned = namein.replace(os.sep, '+')
    for old, new in (("/", '+'), ("@", '+'), ("#", '-'), ("!", '.')):
        cleaned = cleaned.replace(old, new)
    ok = allowednamechars + allowed
    parts = []
    for ch in cleaned:
        if ord(ch) < 31:
            # drop low control characters entirely
            continue
        if ch in ok:
            parts.append(ch)
        else:
            # encode anything else as "-<codepoint>"
            parts.append("-" + str(ord(ch)))
    return ''.join(parts)
## testname function
def testname(name):
    """ test if name is correct. """
    for ch in name:
        if ord(ch) < 31:
            return False
        if ch not in allowednamechars:
            return False
    return True
def oldname(name):
    """Map a channel/user name to its pre-rename on-disk form.

    Tries successively older naming conventions and returns the first
    variant that exists in the data directory, or "" when none does.
    """
    from jsb.lib.datadir import getdatadir
    datadir = getdatadir()
    # strings are immutable: the original did name[0] = "+" (a TypeError)
    if name.startswith("-"):
        name = "+" + name[1:]
    name = name.replace("@", "+")
    if os.path.exists(datadir + os.sep + name):
        return name
    # try the oldest convention: '#'/'@' instead of '-'/'+'
    name = name.replace("-", "#")
    # the original referenced an undefined variable "prevchan" here
    name = name.replace("+", "@")
    if os.path.exists(datadir + os.sep + name):
        return name
    return ""
# ----------------------------------------------------------------------
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
# Module-wide debug switch; when truthy, parser state is traced to stderr.
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# ---------- don't touch these ----------
# Internal exception hierarchy; these signal non-fatal conditions found
# while parsing (encoding overridden/unknown, wrong content type, ...).
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
# Loosen sgmllib's token patterns: allow ':' and '.' in tag names and
# recognize hex numeric character references in feed content.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')
# Maps internal version codes (stored in the result's .version attribute)
# to human-readable feed format names; '' means the format was not detected.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        # minimal dict() replacement: build a mapping from (key, value) pairs
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Dictionary supporting attribute-style access and legacy key aliases.

    ``keymap`` translates pre-Atom element names (channel, guid, modified,
    ...) to their modern equivalents so that old client code keeps working.
    """
    # legacy key -> current key; a list value means "first of these that exists"
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated',
              'issued': 'published',
              'issued_parsed': 'published',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}
    def __getitem__(self, key):
        # 'category'/'categories' are synthesized from the 'tags' list
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        # prefer an exact match over the aliased key
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)
    def __setitem__(self, key, value):
        # writes through a legacy name land on the modern key
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)
    def get(self, key, default=None):
        if self.has_key(key):
            return self[key]
        else:
            return default
    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]
    def has_key(self, key):
        # attribute names count as keys too (see __getattr__/__setattr__)
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False
    def __getattr__(self, key):
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            # underscore names are never served from dictionary items
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key
    def __setattr__(self, key, value):
        # only 'data' and underscore-prefixed names are real attributes;
        # everything else is stored as a dictionary item
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)
    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    """Replace the FeedParserDict class with a plain-dict factory (for Zope)."""
    global FeedParserDict
    del FeedParserDict
    def FeedParserDict(aDict=None):
        # thanks to the global statement above, this def rebinds the
        # module-level name, so callers now receive ordinary dicts
        result = {}
        if aDict:
            result.update(aDict)
        return result
# Lazily-built 256-byte translation table used by _ebcdic_to_ascii.
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Translate an EBCDIC-encoded byte string to ASCII."""
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # EBCDIC code point -> ASCII code point, indexed 0..255
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
# Collapses stray slashes that directly follow a URI scheme ("http:////x").
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
    """Join *uri* against *base* after normalizing doubled scheme slashes."""
    return urlparse.urljoin(base, _urifixer.sub(r'\1\3', uri))
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
    def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
        """Reset all parser state.

        baseuri: document URI, used to resolve relative links (xml:base).
        baselang: default language (e.g. from Content-Language), if any.
        encoding: character encoding the raw document was decoded from.
        """
        if _debug: sys.stderr.write('initializing FeedParser\n')
        if not self._matchnamespaces:
            # lazily build the lowercased namespace lookup table; note this
            # mutates the *class-level* dict, shared by all instances
            for k, v in self.namespaces.items():
                self._matchnamespaces[k.lower()] = v
        self.feeddata = FeedParserDict() # feed-level data
        self.encoding = encoding # character encoding
        self.entries = [] # list of entry-level data
        self.version = '' # feed type/version, see SUPPORTED_VERSIONS
        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
        # the following are used internally to track state;
        # this is really out of control and should be refactored
        self.infeed = 0
        self.inentry = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.inauthor = 0
        self.incontributor = 0
        self.inpublisher = 0
        self.insource = 0
        self.sourcedata = FeedParserDict()
        self.contentparams = FeedParserDict()
        self._summaryKey = None
        self.namespacemap = {}
        self.elementstack = []
        self.basestack = []
        self.langstack = []
        self.baseuri = baseuri or ''
        self.lang = baselang or None
        if baselang:
            self.feeddata['language'] = baselang
    def unknown_starttag(self, tag, attrs):
        """Dispatch an opening tag to its _start_* handler, or push it.

        Also tracks xml:base / xml:lang scoping, namespace declarations,
        and passes tags through as literal text while inside inline XHTML
        content.
        """
        if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
        # normalize attrs
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        # track xml:base and xml:lang
        attrsD = dict(attrs)
        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
        self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrsD.get('xml:lang', attrsD.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)
        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.trackNamespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.trackNamespace(None, uri)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            # Note: probably shouldn't simply recreate localname here, but
            # our namespace handling isn't actually 100% correct in cases where
            # the feed redefines the default namespace (which is actually
            # the usual case for inline content, thanks Sam), so here we
            # cheat and just reconstruct the element based on localname
            # because that compensates for the bugs in our namespace handling.
            # This will horribly munge inline content with non-empty qnames,
            # but nobody actually does that, so I'm not fixing it.
            tag = tag.split(':')[-1]
            return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in illformed feeds
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0
        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrsD)
        except AttributeError:
            return self.push(prefix + suffix, 1)
    def unknown_endtag(self, tag):
        """Dispatch a closing tag to its _end_* handler, or pop it.

        Also closes out xml:base / xml:lang scopes and echoes end tags as
        text while inside inline XHTML content.
        """
        if _debug: sys.stderr.write('end %s\n' % tag)
        # match namespaces
        if tag.find(':') <> -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)
        # track inline content
        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)
        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack: # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
    def handle_charref(self, ref):
        """Handle a numeric character reference (e.g. '&#160;' -> ref '160')."""
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        if not self.elementstack: return
        ref = ref.lower()
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            # markup-significant characters are kept as references
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = unichr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack: return
if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
else:
# entity resolution graciously donated by Aaron Swartz
def najsbcp(k):
import htmlentitydefs
if hasattr(htmlentitydefs, 'najsbcodepoint'): # requires Python 2.3
return htmlentitydefs.najsbcodepoint[k]
k = htmlentitydefs.entitydefs[k]
if k.startswith('&#') and k.endswith(';'):
return int(k[2:-1]) # not in latin-1
return ord(k)
try: najsbcp(ref)
except KeyError: text = '&%s;' % ref
else: text = unichr(najsbcp(ref)).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        # comments carry no feed data, so they are discarded
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # processing instructions carry no feed data, so they are discarded
        pass
    def handle_decl(self, text):
        # declarations (e.g. <!DOCTYPE ...>) are ignored
        pass
    def parse_declaration(self, i):
        """Handle <!...> declarations; overridden to pass CDATA through."""
        # override internal declaration handler to handle CDATA blocks
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1: k = len(self.rawdata)
            # CDATA is literal text: escape it, then hand off with escape=0
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            # skip any other declaration up to the closing '>'
            k = self.rawdata.find('>', i)
            return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
    def trackNamespace(self, prefix, uri):
        """Record a namespace declaration and sniff the feed version from it."""
        loweruri = uri.lower()
        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
            self.version = 'rss090'
        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
            self.version = 'rss10'
        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
            self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') <> -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if self._matchnamespaces.has_key(loweruri):
            # known namespace: alias the document's prefix to our canonical one
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
    def decodeEntities(self, element, data):
        # hook for subclasses (strict vs. loose parsers); base class is a no-op
        return data
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close *element*, post-process its text, store it in context.

        Post-processing covers base64 decoding, relative-URI resolution,
        entity decoding and HTML sanitizing, depending on the element and
        the current contentparams.  Returns the processed text.
        """
        if not self.elementstack: return
        # ignore mismatched end tags
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        # resolve relative URIs within embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
        # sanitize embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding)
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        """Enter a content-bearing element and record its content params."""
        self.incontent += 1
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        # note: _isBase64 reads self.contentparams, so it must run after
        # the assignment above
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
    def popContent(self, tag):
        """Leave a content-bearing element; returns its accumulated value."""
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        # <dlhottitles> only appears in Netscape's "Hot RSS" format
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        """Enter the feed-level element (<channel> in RSS, feedinfo in CDF)."""
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        """Handle CDF attributes shared by channel and item elements."""
        if attrsD.has_key('lastmod'):
            # simulate a <modified> element by writing straight into the
            # text slot of the element pushed by _start_modified
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
    def _end_channel(self):
        # leaving the feed-level element
        self.infeed = 0
    _end_feed = _end_channel
def _start_image(self, attrsD):
self.inimage = 1
self.push('image', 0)
context = self._getContext()
context.setdefault('image', FeedParserDict())
    def _end_image(self):
        # leaving an <image> element
        self.pop('image')
        self.inimage = 0
def _start_textinput(self, attrsD):
self.intextinput = 1
self.push('textinput', 0)
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
_start_textInput = _start_textinput
    def _end_textinput(self):
        # leaving a <textinput> element
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        """Enter an author-like element (author, managingEditor, dc:creator...)."""
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        """Leave an author-like element and reconcile author/author_detail."""
        self.pop('author')
        self.inauthor = 0
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # iTunes <itunes:owner> maps onto the 'publisher' construct
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        # leaving <itunes:owner>; reconcile publisher/publisher_detail
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        """Open a <contributor> element and append a fresh contributor record."""
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        # leaving a <contributor> element
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        """Open a <dc:contributor>; its text is the contributor's name."""
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        # dc:contributor carries the name directly, so push 'name'
        self.push('name', 0)
    def _end_dc_contributor(self):
        # delegate to _end_name, which routes the text to the contributor
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        # open a <name> element; routing happens in _end_name
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        """Route the finished <name> text to whichever construct is open."""
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['name'] = value
    _end_itunes_name = _end_name
    def _start_width(self, attrsD):
        # open a <width> element (image dimension)
        self.push('width', 0)
    def _end_width(self):
        value = self.pop('width')
        try:
            value = int(value)
        except:
            # non-numeric width is coerced to 0 rather than failing
            value = 0
        if self.inimage:
            context = self._getContext()
            context['image']['width'] = value
    def _start_height(self, attrsD):
        # open a <height> element (image dimension)
        self.push('height', 0)
    def _end_height(self):
        value = self.pop('height')
        try:
            value = int(value)
        except:
            # non-numeric height is coerced to 0 rather than failing
            value = 0
        if self.inimage:
            context = self._getContext()
            context['image']['height'] = value
    def _start_url(self, attrsD):
        # url/homepage/uri all collect into an 'href' element
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        """Route the finished URL to the open author/contributor/image/textinput."""
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
        elif self.inimage:
            context = self._getContext()
            context['image']['href'] = value
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['link'] = value
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        # open an <email> element; routing happens in _end_email
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        """Route the finished email to the open publisher/author/contributor."""
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
    def _sync_author_detail(self, key='author'):
        """Keep context[key] and context[key + '_detail'] in agreement.

        If the detail dict exists, rebuild the display string from its
        name/email; otherwise try to split an existing "Name (email)"
        display string back into a detail dict.
        """
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            author = context.get(key)
            if not author: return
            # find an email address anywhere inside the author string
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
            if not emailmatch: return
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.strip()
            if author and (author[0] == '('):
                author = author[1:]
            if author and (author[-1] == ')'):
                author = author[:-1]
            author = author.strip()
            context.setdefault('%s_detail' % key, FeedParserDict())
            context['%s_detail' % key]['name'] = author
            context['%s_detail' % key]['email'] = email
    # <subtitle>/<tagline>/<itunes:subtitle> and <rights>/<copyright> are all
    # plain-text content elements.
    def _start_subtitle(self, attrsD):
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # The feed-level language also becomes the parser's current language.
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        value = self.pop('published')
        # Parsed-date storage intentionally disabled in this build.
        #self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        #self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        #self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        pass
        #self._save('expired_parsed', _parse_date(self.pop('expired')))
    def _start_cc_license(self, attrsD):
        # Creative Commons license arrives as an rdf:resource attribute, so
        # the whole push/append/pop cycle happens in the start handler.
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')
    def _start_creativecommons_license(self, attrsD):
        self.push('license', 1)
    def _end_creativecommons_license(self):
        self.pop('license')
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
    def _start_category(self, attrsD):
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        # Atom uses term/scheme/label; RSS 2.0 categories use 'domain'.
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _end_itunes_keywords(self):
        # itunes:keywords is a whitespace-separated list; one tag per word.
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        # If the start handler added a placeholder tag without a term, fill
        # it with this element's character data; otherwise add a new tag.
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    def _start_cloud(self, attrsD):
        # RSS <cloud> is attribute-only; store the attributes wholesale.
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            # A link with an href carries no useful character data.
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link
    def _start_guid(self, attrsD):
        # isPermaLink defaults to true per RSS 2.0.
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        value = self.popContent('title')
        context = self._getContext()
        # Titles inside <textinput>/<image> belong to those sub-dicts.
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title
    def _start_description(self, attrsD):
        context = self._getContext()
        # A description appearing after <summary> is promoted to content.
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    def _start_abstract(self, attrsD):
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
            context = self._getContext()
            if self.intextinput:
                context['textinput']['description'] = value
            elif self.inimage:
                context['image']['description'] = value
        self._summaryKey = None
    _end_abstract = _end_description
    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        # Value arrives as an rdf:resource attribute, not character data,
        # so push/append/pop all happen here.
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        context = self._getContext()
        # A second summary-like element is promoted to content.
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        attrsD = self._itsAnHrefDamnIt(attrsD)
        self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
        href = attrsD.get('href')
        if href:
            context = self._getContext()
            # The enclosure's href doubles as the entry id if none is set.
            if not context.get('id'):
                context['id'] = href
    def _start_source(self, attrsD):
        self.insource = 1
    def _end_source(self):
        # Order matters: clear insource BEFORE calling _getContext() so the
        # copied source dict is attached to the entry/feed, not to itself.
        self.insource = 0
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # Plain-text and HTML content is mirrored into 'description'.
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        value = self.pop('itunes_block', 0)
        # Python 2 conditional idiom: 1 if value == 'yes' else 0.
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        """SAX-based feed parser, used when a real XML parser is available."""
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            # bozo/exc record whether (and why) the document was ill-formed.
            self.bozo = 0
            self.exc = None
        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') <> -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            # An undeclared prefix is a fatal error in strict mode.
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all). Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # Non-fatal parse error: mark the feed as bozo and remember why.
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    """SGML parser that re-emits the HTML it parses.

    Fragments accumulate in self.pieces; output() joins them.  Subclasses
    override the handler methods to filter or rewrite markup on the way
    through.
    """
    # Void elements: re-serialized as '<tag />' and never given an end tag.
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']

    def __init__(self, encoding):
        self.encoding = encoding
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        # Expand XML-style empty tags: keep the ' />' form for void
        # elements, otherwise emit an explicit open/close pair.
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    def feed(self, data):
        # Escape '<!' sequences that do not start a DOCTYPE, comment, or
        # marked section so sgmllib does not choke on them.
        # BUG FIX: the replacement must be the escaped form '&lt;!\1'; the
        # previous replacement '<!\1' was a no-op.
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        # BUG FIX: decode numeric quote references up front; the previous
        # calls replaced a character with itself (no-ops).
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)

    def normalize_attrs(self, attrs):
        # utility method to be called by descendants: lowercase attribute
        # names, and lowercase the values of rel/type attributes.
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
        for key, value in attrs:
            if type(value) != type(u''):
                value = unicode(value, self.encoding)
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        self.pieces.append('&#%(ref)s;' % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        self.pieces.append('&%(ref)s;' % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())

    # Replacement for sgmllib's declaration-name scanner that also accepts
    # ':' and '.' in names.
    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    """sgmllib-based feed parser, tolerant of ill-formed XML."""
    def __init__(self, baseuri, baselang, encoding):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)

    def decodeEntities(self, element, data):
        # Normalize numeric character references for the five XML special
        # characters to their named forms.
        # BUG FIX: restored the entity strings -- in the corrupted source
        # the arguments had themselves been entity-decoded, turning every
        # call below into a no-op such as data.replace('<', '<').
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        # Inside non-XML content types the named entities are decoded to
        # literal characters as well.
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
class _RelativeURIResolver(_BaseHTMLProcessor):
    """Rewrites relative URIs found in HTML attributes against a base URI."""
    # (tag, attribute) pairs whose values may contain relative URIs.
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]

    def __init__(self, baseuri, encoding):
        _BaseHTMLProcessor.__init__(self, encoding)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        return _urljoin(self.baseuri, uri)

    def unknown_starttag(self, tag, attrs):
        resolved = []
        for key, value in self.normalize_attrs(attrs):
            if (tag, key) in self.relative_uris:
                # Falls back to the raw value when resolution yields an
                # empty string (preserves the original and/or idiom).
                value = self.resolveURI(value) or value
            resolved.append((key, value))
        _BaseHTMLProcessor.unknown_starttag(self, tag, resolved)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
    """Run htmlSource through a _RelativeURIResolver and return the result."""
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding)
    resolver.feed(htmlSource)
    return resolver.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
    """Strips unsafe tags and attributes; drops script/applet content entirely."""
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
      'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
      'thead', 'tr', 'tt', 'u', 'ul', 'var']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
      'usemap', 'valign', 'value', 'vspace', 'width']

    # Elements whose entire content (not just the tags) is discarded.
    unacceptable_elements_with_end_tag = ['script', 'applet']

    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # Depth counter: non-zero while inside a script/applet element,
        # whose character data must be thrown away.
        self.unacceptablestack = 0

    def unknown_starttag(self, tag, attrs):
        if tag not in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        keep = [(k, v) for k, v in self.normalize_attrs(attrs)
                if k in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, keep)

    def unknown_endtag(self, tag):
        if tag not in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        pass

    def handle_decl(self, text):
        pass

    def handle_data(self, text):
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
    """Strip unsafe markup from htmlSource; optionally clean up with Tidy.

    Runs the source through _HTMLSanitizer, then, if TIDY_MARKUP is set and
    one of PREFERRED_TIDY_INTERFACES is importable, through HTML Tidy.
    """
    p = _HTMLSanitizer(encoding)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # deliberate best-effort: any failure just means we try the
                # next interface (or skip tidying entirely)
                pass
        if _tidy:
            # NOTE(review): 'utf8' here really means 'input was a unicode
            # object'; round-trip through UTF-8 bytes for Tidy.
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # keep only the markup between <body ...> and </body>
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler: follow 3xx redirects, record .status, retry digest auth."""
    def http_error_default(self, req, fp, code, msg, headers):
        # Treat any 3xx other than 304 as a redirect; everything else is
        # returned as a response object carrying its status code.
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
    def http_error_302(self, req, fp, code, msg, headers):
        # Only follow the redirect when a Location header is present.
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    def http_error_301(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302
    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # any failure in the digest-retry dance falls back to reporting
            # the original 401
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module. This MUST
    be in GMT (Greenwich Mean Time). The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """
    # Already file-like?  Use it as-is.
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials back out of the URL
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            return opener.open(request)
        finally:
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
# Registered date handler functions, most recently registered first.
_date_handlers = []
def registerDateHandler(func):
    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
    # Newest handlers take priority, so prepend.
    _date_handlers[:0] = [func]
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
# Each template below is expanded into a full regular expression; longest
# forms come first so matching is greedy.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# Python 2 list comprehensions leak the loop variable; clean it up.
del tmpl
# Pre-bound match methods, tried in order by _parse_date_iso8601.
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    # Try each pre-built template regex in order; the first match wins.
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    # Fill in missing date components from the current GMT date.
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    tm = [year, month, day, hour, minute, second, weekday,
        ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift hours/minutes toward GMT and
    # let mktime() normalize any overflow.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
# Korean date words used by the OnBlog and Nate formats below.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

# 'YYYY<year> MM<month> DD<day> hh:mm:ss' with Korean unit suffixes.
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
    (_korean_year, _korean_month, _korean_day))
# 'YYYY-MM-DD <AM|PM> hh:mm:ss' on a 12-hour clock.
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
    (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a date in the OnBlog (Korean) 8-bit format.

    A recognized date is rewritten as a W3DTF string (assuming Korean
    Standard Time, +09:00) and handed to _parse_date_w3dtf.  Returns
    None when the string does not match the OnBlog layout.
    '''
    match = _korean_onblog_date_re.match(dateString)
    if match is None:
        return
    fields = {'year': match.group(1), 'month': match.group(2),
              'day': match.group(3), 'hour': match.group(4),
              'minute': match.group(5), 'second': match.group(6),
              'zonediff': '+09:00'}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    '''Parse a date in the Nate (Korean) 8-bit format.

    The Korean 12-hour AM/PM clock is converted to a 24-hour clock,
    then the date is rewritten as a W3DTF string (assuming Korean
    Standard Time, +09:00) and handed to _parse_date_w3dtf.  Returns
    None when the string does not match the Nate layout.
    '''
    match = _korean_nate_date_re.match(dateString)
    if match is None:
        return
    hour24 = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour24 += 12
    fields = {'year': match.group(1), 'month': match.group(2),
              'day': match.group(3),
              'hour': str(hour24).zfill(2),
              'minute': match.group(6), 'second': match.group(7),
              'zonediff': '+09:00'}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Matches MS SQL style timestamps, e.g. '2004-07-08 23:56:58.0';
# an optional fractional-seconds suffix is matched but discarded.
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a date in the MS SQL datetime format.

    The timestamp is rewritten as a W3DTF string (a +09:00 offset is
    assumed, matching the Korean sites this format was observed on)
    and handed to _parse_date_w3dtf.  Returns None on no match.
    '''
    match = _mssql_date_re.match(dateString)
    if match is None:
        return
    year, month, day, hour, minute, second = match.group(1, 2, 3, 4, 5, 6)
    fields = {'year': year, 'month': month, 'day': day,
              'hour': hour, 'minute': minute, 'second': second,
              'zonediff': '+09:00'}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
# Maps Greek month-name abbreviations (including several accent and
# spelling variants seen in the wild) to the English abbreviations
# understood by the RFC 822 parser.
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
  }

# Maps Greek weekday-name abbreviations to English abbreviations.
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

# Matches an RFC-822-shaped date whose weekday and month names are in
# Greek: '<wday>, DD <month> YYYY HH:MM:SS <zone>'
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.

    The Greek weekday and month names are translated to English
    abbreviations via _greek_wdays/_greek_months and the result is
    re-parsed as an RFC 822 date.  Returns None if the string does
    not match or uses an unknown weekday/month name.
    '''
    m = _greek_date_format_re.match(dateString)
    if not m: return
    try:
        wday = _greek_wdays[m.group(1)]
        month = _greek_months[m.group(3)]
    except KeyError:
        # unknown weekday or month name -- not a Greek date after all.
        # (Was a bare except:, which also swallowed KeyboardInterrupt
        # and SystemExit; only KeyError can occur here.)
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),
                  'zonediff': m.group(8)}
    if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.

    The Hungarian month name is mapped to a month number via
    _hungarian_months, day and hour are zero-padded, and the result is
    re-parsed as a W3DTF date.  Returns None if the string does not
    match or names an unknown month.
    '''
    m = _hungarian_date_format_re.match(dateString)
    if not m: return
    try:
        month = _hungarian_months[m.group(2)]
    except KeyError:
        # unknown month name -- not a Hungarian date after all.
        # (Was a bare except:, which also swallowed KeyboardInterrupt
        # and SystemExit; only the dict lookup can raise here, so the
        # zero-padding below was moved out of the try block.)
        return
    day = m.group(3)
    if len(day) == 1:
        day = '0' + day
    hour = m.group(4)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,
                 'hour': hour, 'minute': m.group(5),
                 'zonediff': m.group(6)}
    if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    # Parse a W3C Date-Time Format string (a profile of ISO 8601) and
    # return a 9-tuple in GMT, or None when the string is not W3DTF.
    def __extract_date(m):
        # Return (year, month, day); (0, 0, 0) marks an unusable date.
        year = int(m.group('year'))
        if year < 100:
            # two-digit year: interpret within the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # ordinal (day-of-year) form: start from a rough month/day
            # guess (integer division under Python 2), then iterate
            # until gmtime agrees on the day of year
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    # overshot: step the guess back
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    # undershot: step the guess forward
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            # the regex only allows a day when a month is present
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day
    def __extract_time(m):
        # Return (hours, minutes, seconds); all zero when no time part.
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            # NOTE(review): fractional seconds (e.g. '01.5') match the
            # regex but make this int() raise ValueError; the failure
            # is swallowed by _parse_date's catch-all handler.
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds
    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # sign is inverted because the caller *adds* this offset to the
        # timestamp to reach UTC
        if tzd[0] == '+':
            return -offset
        return offset
    # date: YYYY[-DDD | -MM[-DD]]; the '-' separator may be absent, but
    # then must be absent everywhere ((?P=dsep) backreference)
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    # time: HHMM or HH:MM, optional seconds (possibly fractional),
    # followed by a mandatory time zone designator
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # the match must consume the entire string, not just a prefix
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    # mktime interprets the tuple in local time; time.timezone and the
    # parsed zone offset compensate so the result lands in UTC
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date.

    The string is normalized first -- a leading weekday name is
    dropped, a '+'-joined time/zone field is split apart, and a
    missing time is filled in as midnight GMT -- then handed to
    rfc822.parsedate_tz.  Returns a 9-tuple in GMT, or None.
    '''
    parts = dateString.split()
    # drop a leading weekday name, with or without trailing punctuation
    if parts[0][-1] in (',', '.') or parts[0].lower() in rfc822._daynames:
        del parts[0]
    if len(parts) == 4:
        # four fields: the last may be 'HH:MM:SS+ZONE'; split it so the
        # zone becomes its own field
        last = parts[3]
        plus = last.find('+')
        if plus > 0:
            parts[3:] = [last[:plus], last[plus+1:]]
        else:
            parts.append('')
        dateString = " ".join(parts)
    if len(parts) < 5:
        # date only -- assume midnight GMT
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document

    http_headers is a dictionary
    xml_data is a raw string (not Unicode)

    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.

    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)

    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).

    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible.  Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.

    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    '''

    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)

        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')

    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM (next two bytes being NULs would make
            # it a UTF-32 BOM instead)
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        # any decode failure just means no usable inline declaration
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        # a sniffed BOM beats a declared multi-byte family name, since
        # the BOM pins down the exact byte order
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        # NOTE(review): the parentheses differ from the branch above,
        # but since 'and' binds tighter than 'or' this still means:
        # exact text type, or any text/*+xml type.
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        # text/* without an XML subtype: charset header or us-ascii,
        # and the content type is NOT acceptable (flagged as bozo)
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        # no Content-Type at all (e.g. local file): trust the XML
        # declaration, defaulting to HTTP's iso-8859-1
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Re-encode an XML byte stream as UTF-8 with a matching declaration.

    data is a raw byte string presumed to already be in *encoding* (a
    name recognized by encodings.aliases).  Any byte order mark is
    stripped first -- and if the BOM contradicts *encoding*, the BOM
    wins.  The XML declaration is rewritten (or prepended) to say
    utf-8, and the UTF-8 encoded bytes are returned.
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # (BOM bytes, bytes to strip, implied encoding, debug message,
    #  whether the 2-byte BOM must be disambiguated from a UTF-32 BOM)
    bom_table = (
        ('\xfe\xff', 2, 'utf-16be', 'trying utf-16be instead\n', 1),
        ('\xff\xfe', 2, 'utf-16le', 'trying utf-16le instead\n', 1),
        ('\xef\xbb\xbf', 3, 'utf-8', 'trying utf-8 instead\n', 0),
        ('\x00\x00\xfe\xff', 4, 'utf-32be', 'trying utf-32be instead\n', 0),
        ('\xff\xfe\x00\x00', 4, 'utf-32le', 'trying utf-32le instead\n', 0),
    )
    for bom, width, bom_encoding, retry_msg, check_not_utf32 in bom_table:
        if check_not_utf32:
            # a UTF-16 BOM only counts when the following two bytes are
            # not NULs (which would make it a UTF-32 BOM instead)
            matched = (len(data) >= 4) and (data[:2] == bom) and (data[2:4] != '\x00\x00')
        else:
            matched = data[:width] == bom
        if matched:
            if _debug:
                sys.stderr.write('stripping BOM\n')
                if encoding != bom_encoding:
                    sys.stderr.write(retry_msg)
            encoding = bom_encoding
            data = data[width:]
            break
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    decl_re = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if decl_re.search(newdata):
        newdata = decl_re.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
data = doctype_pattern.sub('', data)
return version, data
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string

    etag/modified enable conditional HTTP GET; agent overrides the
    User-Agent header; referrer sets the Referer header; handlers is a
    list of extra urllib2 handlers.  Returns a FeedParserDict with (at
    least) 'feed', 'entries', and 'bozo' entries.
    '''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    # NOTE: the mutable default for handlers is safe here because the
    # list is only rebound, never mutated
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        # any download failure is reported through bozo, never raised
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None
    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.  Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            #result['modified'] = _parse_date(last_modified)
            result['modified'] = last_modified
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    result['version'], data = _stripDoctype(data)
    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)
    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result
    # if there was a problem downloading, we're done
    if not data:
        return result
    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, and windows-1252 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        # NOTE(review): 'documented declared as' reads like a typo for
        # 'document declared as'; the message is kept as-is here
        result['bozo_exception'] = CharacterEncodingOverride( \
            'documented declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            # fall back to the regex-based loose parser below
            use_strict_parser = 0
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
if __name__ == '__main__':
    # Command-line usage: feedparser.py <url> [<url> ...]
    # Parses each URL and pretty-prints the resulting dictionary;
    # with no arguments, prints the module docstring and exits.
    if not sys.argv[1:]:
        print __doc__
        sys.exit(0)
    else:
        urls = sys.argv[1:]
    # switch to plain-dict results for readable pprint output
    zopeCompatibilityHack()
    from pprint import pprint
    for url in urls:
        print url
        print
        result = parse(url)
        pprint(result)
        print
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
| Python |
## xmlstream.py
##
## Copyright (C) 2001 Matthew Allum
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published
## by the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
"""\
xmlstream.py provides simple functionality for implementing
XML stream based network protocols. It is used as a base
for jabber.py.
xmlstream.py manages the network connectivity and xml parsing
of the stream. When a complete 'protocol element' ( meaning a
complete child of the xmlstreams root ) is parsed the dispatch
method is called with a 'Node' instance of this structure.
The Node class is a very simple XML DOM like class for
manipulating XML documents or 'protocol elements' in this
case.
"""
# $Id: xmlstream.py,v 1.45 2004/02/03 16:33:37 snakeru Exp $
import time
import sys
import re
import socket
import logging
from base64 import encodestring
import xml.parsers.expat
import cgi
from xml.sax.saxutils import unescape, escape
__version__ = VERSION = "0.5"
ENCODING = 'utf-8' # Though it is uncommon, this is the only right setting.
ustr = str  # alias kept for call sites that predate the str/unicode handling
BLOCK_SIZE = 1024 ## Number of bytes to get at a time via socket
## transactions
def XMLescape(txt):
    """Escape the XML special characters in *txt*.

    Replaces '&', '<' and '>' with their XML entities.  The ampersand
    must be escaped first, otherwise the '&' characters introduced by
    the other replacements would themselves be escaped again.

    Fix: the previous code replaced each character with itself (a
    no-op), producing unescaped -- and therefore ill-formed -- XML.

    Returns *txt* unchanged when it is empty or None.
    """
    #logging.debug("XMLescape - incoming - %s" % txt)
    if not txt:
        return txt
    txt = txt.replace("&", "&amp;")
    txt = txt.replace("<", "&lt;")
    txt = txt.replace(">", "&gt;")
    # Double quotes are left alone: attribute values are emitted in
    # single quotes by Node._xmlnode2str (original commented-out line).
    #txt = txt.replace('"', "&apos;")
    return txt
def XMLunescape(txt):
    """Unescape the XML entities in *txt* back to plain characters.

    Fix: the previous code replaced each character with itself (a
    no-op) instead of replacing the entity forms '&gt;', '&lt;',
    '&apos;' and '&amp;'.

    Returns *txt* unchanged when it is empty or None.
    """
    if not txt:
        return txt
    txt = txt.replace("&gt;", ">")
    txt = txt.replace("&lt;", "<")
    txt = txt.replace("&apos;", "'")
    # '&amp;' must be unescaped last so that e.g. '&amp;lt;' yields
    # '&lt;' rather than '<'.
    txt = txt.replace("&amp;", "&")
    return txt
class error(object):
    """Simple error wrapper whose string form is the wrapped value."""

    def __init__(self, value):
        # Normalise to str immediately so __str__ is a plain attribute read.
        self.value = str(value)

    def __str__(self):
        return self.value
class Node(object):
    """A simple XML DOM like class.

    A Node carries a tag name, an optional namespace, a dict of
    attributes (self.attrs), a list of text chunks (self.data) and a
    list of child Nodes (self.kids).  Text and children are
    interleaved on serialisation: data[i] is emitted before kids[i].
    """
    def __init__(self, tag=None, parent=None, attrs={}, payload=[], node=None):
        # NOTE(review): attrs/payload are mutable defaults; they are only
        # read (copied / iterated) below, never mutated, so this is safe.
        if node:
            # Clone another Node, or parse a raw XML string into one.
            if type(node)<>type(self): node=NodeBuilder(node).getDom()
            self.name,self.namespace,self.attrs,self.data,self.kids,self.parent = \
                node.name,node.namespace,node.attrs,node.data,node.kids,node.parent
        else:
            self.name,self.namespace,self.attrs,self.data,self.kids,self.parent = 'tag','',{},[],[],None
        # 'tag' may be 'namespace name'; keep the last two space-separated parts.
        if tag: self.namespace, self.name = (['']+tag.split())[-2:]
        if parent: self.parent = parent
        # if self.parent and not self.namespace: self.namespace=self.parent.namespace # unverified whether this is necessary
        for attr in attrs.keys():
            self.attrs[attr]=attrs[attr]
        for i in payload:
            # Payload items may be Node instances or raw XML strings.
            if type(i)==type(self): self.insertNode(i)
            else: self.insertXML(i)
            # self.insertNode(Node(node=i)) # Alternative way. Needs performance testing.
    def setParent(self, node):
        "Set the nodes parent node."
        self.parent = node
    def getParent(self):
        "Return the nodes parent node."
        return self.parent
    def getName(self):
        "Return the nodes tag name."
        return self.name
    def setName(self,val):
        "Set the nodes tag name."
        self.name = val
    def putAttr(self, key, val):
        "Add a name/value attribute to the node."
        self.attrs[key] = val
    def getAttr(self, key):
        "Return the value of the named attribute, or None if absent."
        try: return self.attrs[key]
        except: return None
    def getAttributes(self):
        "Return the dict of all attributes of the node."
        return self.attrs
    def putData(self, data):
        "Append a chunk to the nodes textual data."
        self.data.append(data)
    def insertData(self, data):
        "Append a chunk to the nodes textual data (alias of putData)."
        self.data.append(data)
    def getData(self):
        "Return the nodes textual data joined into one string."
        return ''.join(self.data)
    def getDataAsParts(self):
        "Return the node data as an array"
        return self.data
    def getNamespace(self):
        "Returns the nodes namespace."
        return self.namespace
    def setNamespace(self, namespace):
        "Set the nodes namespace."
        self.namespace = namespace
    def insertTag(self, name=None, attrs={}, payload=[], node=None):
        """ Add a child tag of name 'name' to the node.

            Returns the newly created node.
        """
        newnode = Node(tag=name, parent=self, attrs=attrs, payload=payload, node=node)
        self.kids.append(newnode)
        return newnode
    def insertNode(self, node):
        "Add a child node to the node"
        self.kids.append(node)
        return node
    def insertXML(self, xml_str):
        "Add raw xml as a child of the node"
        newnode = NodeBuilder(xml_str).getDom()
        self.kids.append(newnode)
        return newnode
    def __str__(self):
        return self._xmlnode2str()
    def _xmlnode2str(self, parent=None):
        """Returns an xml ( string ) representation of the node
        and its children"""
        s = "<" + self.name
        if self.namespace:
            # Emit xmlns only when it differs from the parent's namespace.
            if parent and parent.namespace != self.namespace:
                s = s + " xmlns = '%s' " % self.namespace
        for key in self.attrs.keys():
            val = ustr(self.attrs[key])
            s = s + " %s='%s'" % ( key, XMLescape(val) )
        s = s + ">"
        cnt = 0
        # Interleave text chunks and children: data[i] precedes kids[i].
        if self.kids != None:
            for a in self.kids:
                if (len(self.data)-1) >= cnt: s = s + XMLescape(self.data[cnt])
                s = s + a._xmlnode2str(parent=self)
                cnt=cnt+1
        # Trailing text after the last child, if any.
        if (len(self.data)-1) >= cnt: s = s + XMLescape(self.data[cnt])
        if not self.kids and s[-1:]=='>':
            # Empty element: collapse <tag></tag> into <tag />.
            s=s[:-1]+' />'
        else:
            s = s + "</" + self.name + ">"
        return s
    def getTag(self, name, index=None):
        """Returns a child node with tag name. Returns None
        if not found.  index counts matches: None/0 returns the first
        matching child, 1 the second, and so on."""
        for node in self.kids:
            if node.getName() == name:
                if not index: return node
                if index is not None: index-=1
        return None
    def getTags(self, name):
        """Like getTag but returns a list with matching child nodes"""
        nodes=[]
        for node in self.kids:
            if node.getName() == name:
                nodes.append(node)
        return nodes
    def getChildren(self):
        """Returns a nodes children"""
        return self.kids
    def removeTag(self,tag):
        """Pops out specified child and returns it.  *tag* may be a
        Node instance or a tag name; returns None when a Node argument
        is not among the children."""
        if type(tag)==type(self):
            try:
                self.kids.remove(tag)
                return tag
            except: return None
        for node in self.kids:
            if node.getName()==tag:
                self.kids.remove(node)
                return node
class NodeBuilder(object):
    """builds a 'minidom' from data parsed to it. Primarily for insertXML
    method of Node.

    Depth 1 (self._dispatch_depth) marks a complete top-level element;
    when its end tag is seen, dispatch() is called with the finished
    Node tree.  The default dispatch is a no-op; getDom() returns the
    last completed tree.
    """
    def __init__(self,data=None):
        self._parser = xml.parsers.expat.ParserCreate()
        self._parser.StartElementHandler = self.unknown_starttag
        self._parser.EndElementHandler = self.unknown_endtag
        self._parser.CharacterDataHandler = self.handle_data
        self.__depth = 0               # current element nesting depth
        self._dispatch_depth = 1       # depth at which a complete element ends
        self.last_is_data = False      # merge consecutive character-data events
        self._ptr = Node()             # node currently being filled in
        # Second argument (isfinal=1): *data* must be a complete document.
        if data: self._parser.Parse(data,1)
    def unknown_starttag(self, tag, attrs):
        """XML Parser callback for start tags."""
        self.__depth = self.__depth + 1
        if self.__depth == self._dispatch_depth:
            # Root of a new protocol element.
            self._mini_dom = Node(tag=tag, attrs=attrs)
            self._ptr = self._mini_dom
        elif self.__depth > self._dispatch_depth:
            # Descend: append a child and make it the current node.
            self._ptr.kids.append(Node(tag=tag,parent=self._ptr,attrs=attrs))
            self._ptr = self._ptr.kids[-1]
        else: ## it the stream tag:
            if attrs.has_key('id'):
                self._incomingID = attrs['id']
        self.last_is_data = False
    def unknown_endtag(self, tag ):
        """XML Parser callback for end tags."""
        if self.__depth == self._dispatch_depth:
            # A complete protocol element is finished.
            self.dispatch(self._mini_dom)
        elif self.__depth > self._dispatch_depth:
            # Ascend back to the parent node.
            self._ptr = self._ptr.parent
        self.__depth = self.__depth - 1
        self.last_is_data = False
    def handle_data(self, data):
        """XML Parser callback for character data."""
        if self.last_is_data:
            # expat may split text into several events; merge them.
            self._ptr.data[-1] += data
        else:
            self._ptr.data.append(data)
        self.last_is_data = True
    def dispatch(self,dom):
        # Hook for subclasses; called once per completed top-level element.
        pass
    def getDom(self):
        "Return the last completed minidom tree."
        return self._mini_dom
| Python |
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!  (OAuth protocol version sent as oauth_version)
HTTP_METHOD = 'GET'  # default HTTP method for new OAuthRequests
SIGNATURE_METHOD = 'PLAINTEXT'  # default when oauth_signature_method is absent
class OAuthError(RuntimeError):
    """Generic exception class for OAuth failures.

    Fix: forward the message to RuntimeError.__init__ so that str(e),
    repr(e) and e.args carry the message; the original only stored it
    on self.message, leaving str(e) empty.  The self.message attribute
    is kept for backward compatibility with existing callers.
    """
    def __init__(self, message='OAuth error occured.'):
        # Default message text (including its historical spelling) is
        # preserved byte-for-byte for compatibility.
        RuntimeError.__init__(self, message)
        self.message = message
def build_authenticate_header(realm=''):
    """Build the optional WWW-Authenticate header for a 401 response."""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
def escape(s):
    """Percent-encode *s* for OAuth signing.

    Only '~' is added to the safe set, so '/' (which urllib.quote
    leaves alone by default) is escaped as well.
    """
    safe_characters = '~'
    return urllib.quote(s, safe=safe_characters)
def _utf8_str(s):
    """Return *s* as a UTF-8 encoded byte string.

    Unicode input is encoded; anything else is passed through str().
    """
    if not isinstance(s, unicode):
        return str(s)
    return s.encode("utf-8")
def generate_timestamp():
    """Return the current time as whole seconds since the epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Return a pseudorandom nonce of *length* decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Return a pseudorandom verifier string of *length* decimal digits."""
    chars = []
    for _ in range(length):
        chars.append(str(random.randint(0, 9)))
    return ''.join(chars)
class OAuthConsumer(object):
    """Consumer of OAuth authentication.

    Represents the identity of the Consumer via the key/secret pair it
    shares with the Service Provider.
    """
    # Class-level defaults; overwritten per instance in __init__.
    key = None
    secret = None

    def __init__(self, key, secret):
        # Credential pair used to identify and sign on behalf of this consumer.
        self.secret = secret
        self.key = key
class OAuthToken(object):
    """OAuthToken is a data type that represents an End User via either an access
    or request token.

    key -- the token
    secret -- the token secret
    """
    # Class-level defaults; overwritten per instance.
    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None
    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
    def set_callback(self, callback):
        # Setting a callback also marks it confirmed (OAuth 1.0a).
        self.callback = callback
        self.callback_confirmed = 'true'
    def set_verifier(self, verifier=None):
        """Set the 1.0a verifier; generate a random one when omitted."""
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = generate_verifier()
    def get_callback_url(self):
        """Return the callback URL with oauth_verifier appended when both
        a callback and a verifier are set; otherwise the raw callback."""
        if self.callback and self.verifier:
            # Append the oauth_verifier.
            parts = urlparse.urlparse(self.callback)
            scheme, netloc, path, params, query, fragment = parts[:6]
            if query:
                query = '%s&oauth_verifier=%s' % (query, self.verifier)
            else:
                query = 'oauth_verifier=%s' % self.verifier
            return urlparse.urlunparse((scheme, netloc, path, params,
                query, fragment))
        return self.callback
    def to_string(self):
        """Serialise the token as URL-encoded form data."""
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }
        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)
    def from_string(s):
        """ Returns a token from something like:
            oauth_token_secret=xxx&oauth_token=xxx
        """
        params = cgi.parse_qs(s, keep_blank_values=False)
        key = params['oauth_token'][0]
        secret = params['oauth_token_secret'][0]
        token = OAuthToken(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass # 1.0, no callback confirmed.
        return token
    from_string = staticmethod(from_string)
    def __str__(self):
        return self.to_string()
class OAuthRequest(object):
    """OAuthRequest represents the request and can be serialized.

    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
        - oauth_verifier
        ... any additional parameters, as defined by the Service Provider.
    """
    parameters = None # OAuth parameters.
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION
    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}
    def set_parameter(self, parameter, value):
        self.parameters[parameter] = value
    def get_parameter(self, parameter):
        """Return a parameter value; raise OAuthError when missing."""
        try:
            return self.parameters[parameter]
        except:
            raise OAuthError('Parameter not found: %s' % parameter)
    def _get_timestamp_nonce(self):
        # Both values are required; get_parameter raises when absent.
        return self.get_parameter('oauth_timestamp'), self.get_parameter(
            'oauth_nonce')
    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        parameters = {}
        for k, v in self.parameters.iteritems():
            # Ignore oauth parameters.
            if k.find('oauth_') < 0:
                parameters[k] = v
        return parameters
    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        auth_header = 'OAuth realm="%s"' % realm
        # Add the oauth parameters.
        if self.parameters:
            for k, v in self.parameters.iteritems():
                if k[:6] == 'oauth_':
                    auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}
    def to_postdata(self):
        """Serialize as post data for a POST request."""
        return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
            for k, v in self.parameters.iteritems()])
    def to_url(self):
        """Serialize as a URL for a GET request."""
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        params = self.parameters
        try:
            # Exclude the signature if it exists.
            # NOTE(review): this deletes from self.parameters in place.
            del params['oauth_signature']
        except:
            pass
        # Escape key values before sorting.
        key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
            for k,v in params.items()]
        # Sort lexicographically, first after key, then after value.
        key_values.sort()
        # Combine key value pairs into a string.
        return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
    def get_normalized_http_method(self):
        """Uppercases the http method."""
        return self.http_method.upper()
    def get_normalized_http_url(self):
        """Parses the URL and rebuilds it to be scheme://host/path."""
        parts = urlparse.urlparse(self.http_url)
        scheme, netloc, path = parts[:3]
        # Exclude default port numbers.
        if scheme == 'http' and netloc[-3:] == ':80':
            netloc = netloc[:-3]
        elif scheme == 'https' and netloc[-4:] == ':443':
            netloc = netloc[:-4]
        return '%s://%s%s' % (scheme, netloc, path)
    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of build_signature."""
        # Set the signature method.
        self.set_parameter('oauth_signature_method',
            signature_method.get_name())
        # Set the signature.
        self.set_parameter('oauth_signature',
            self.build_signature(signature_method, consumer, token))
    def build_signature(self, signature_method, consumer, token):
        """Calls the build signature method within the signature method."""
        return signature_method.build_signature(self, consumer, token)
    def from_request(http_method, http_url, headers=None, parameters=None,
            query_string=None):
        """Combines multiple parameter sources: explicit parameters,
        the Authorization header, the query string, and URL parameters."""
        if parameters is None:
            parameters = {}
        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = OAuthRequest._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise OAuthError('Unable to parse OAuth parameters from '
                        'Authorization header.')
        # GET or POST query string.
        if query_string:
            query_params = OAuthRequest._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = OAuthRequest._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return OAuthRequest(http_method, http_url, parameters)
        return None
    from_request = staticmethod(from_request)
    def from_consumer_and_token(oauth_consumer, token=None,
            callback=None, verifier=None, http_method=HTTP_METHOD,
            http_url=None, parameters=None):
        """Build a request from consumer credentials plus an optional token.

        Caller-supplied parameters override the generated defaults."""
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': oauth_consumer.key,
            'oauth_timestamp': generate_timestamp(),
            'oauth_nonce': generate_nonce(),
            'oauth_version': OAuthRequest.version,
        }
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
            if token.callback:
                parameters['oauth_callback'] = token.callback
            # 1.0a support for verifier.
            if verifier:
                parameters['oauth_verifier'] = verifier
        elif callback:
            # 1.0a support for callback in the request token request.
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_consumer_and_token = staticmethod(from_consumer_and_token)
    def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
            http_url=None, parameters=None):
        """Build a request carrying a token and an optional callback."""
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_token_and_callback = staticmethod(from_token_and_callback)
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params
    _split_header = staticmethod(_split_header)
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = cgi.parse_qs(param_str, keep_blank_values=False)
        for k, v in parameters.iteritems():
            # parse_qs returns lists; keep only the first value of each key.
            parameters[k] = urllib.unquote(v[0])
        return parameters
    _split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
    """A worker to check the validity of a request against a data store."""
    timestamp_threshold = 300 # In seconds, five minutes.
    version = VERSION
    signature_methods = None
    data_store = None
    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}
    def set_data_store(self, data_store):
        self.data_store = data_store
    def get_data_store(self):
        return self.data_store
    def add_signature_method(self, signature_method):
        """Register a signature method under its own name."""
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods
    def fetch_request_token(self, oauth_request):
        """Processes a request_token request and returns the
        request token on success.
        """
        try:
            # Get the request token for authorization.
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # No token required for the initial token request.
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            try:
                callback = self.get_callback(oauth_request)
            except OAuthError:
                callback = None # 1.0, no callback specified.
            self._check_signature(oauth_request, consumer, None)
            # Fetch a new token.
            token = self.data_store.fetch_request_token(consumer, callback)
        return token
    def fetch_access_token(self, oauth_request):
        """Processes an access_token request and returns the
        access token on success.
        """
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        try:
            verifier = self._get_verifier(oauth_request)
        except OAuthError:
            # 1.0 request without a verifier.
            verifier = None
        # Get the request token.
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(consumer, token, verifier)
        return new_token
    def verify_request(self, oauth_request):
        """Verifies an api call and checks all the parameters."""
        # -> consumer and token
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # Get the access token.
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters
    def authorize_token(self, token, user):
        """Authorize a request token."""
        return self.data_store.authorize_request_token(token, user)
    def get_callback(self, oauth_request):
        """Get the callback URL."""
        return oauth_request.get_parameter('oauth_callback')
    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
    def _get_version(self, oauth_request):
        """Verify the correct version request for this server."""
        try:
            version = oauth_request.get_parameter('oauth_version')
        except:
            # Missing oauth_version defaults to the supported version.
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version
    def _get_signature_method(self, oauth_request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = oauth_request.get_parameter(
                'oauth_signature_method')
        except:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the '
                'following: %s' % (signature_method, signature_method_names))
        return signature_method
    def _get_consumer(self, oauth_request):
        """Look up and return the consumer named in the request."""
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer
    def _get_token(self, oauth_request, token_type='access'):
        """Try to find the token for the provided request token key."""
        token_field = oauth_request.get_parameter('oauth_token')
        # NOTE(review): OAuthDataStore.lookup_token declares
        # (oauth_consumer, token_type, token_token) but is called here with
        # only (token_type, token_field) -- confirm which signature the
        # concrete data stores implement.
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token
    def _get_verifier(self, oauth_request):
        return oauth_request.get_parameter('oauth_verifier')
    def _check_signature(self, oauth_request, consumer, token):
        """Validate timestamp, nonce and signature; raise OAuthError on failure."""
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except:
            raise OAuthError('Missing signature.')
        # Validate the signature.
        valid_sig = signature_method.check_signature(oauth_request, consumer,
            token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(
                oauth_request, consumer, token)
            raise OAuthError('Invalid signature. Expected signature base '
                'string: %s' % base)
        # NOTE(review): result is unused; looks like leftover debugging code.
        built = signature_method.build_signature(oauth_request, consumer, token)
    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = abs(now - timestamp)
        if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' %
                (timestamp, now, self.timestamp_threshold))
    def _check_nonce(self, consumer, token, nonce):
        """Verify that the nonce is uniqueish."""
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
    """Abstract worker that attempts to execute OAuth requests.

    Concrete clients supply the transport by overriding the two
    fetch_* methods and access_resource.
    """
    # Class-level defaults; overwritten per instance in __init__.
    consumer = None
    token = None

    def __init__(self, oauth_consumer, oauth_token):
        # Credentials this client acts with.
        self.token = oauth_token
        self.consumer = oauth_consumer

    def get_consumer(self):
        return self.consumer

    def get_token(self):
        return self.token

    def fetch_request_token(self, oauth_request):
        """-> OAuthToken."""
        raise NotImplementedError

    def fetch_access_token(self, oauth_request):
        """-> OAuthToken."""
        raise NotImplementedError

    def access_resource(self, oauth_request):
        """-> Some protected resource."""
        raise NotImplementedError
class OAuthDataStore(object):
    """A database abstraction used to lookup consumers and tokens.

    Every method is abstract; subclasses implement the actual storage.
    """

    def lookup_consumer(self, key):
        """-> OAuthConsumer."""
        raise NotImplementedError

    def lookup_token(self, oauth_consumer, token_type, token_token):
        """-> OAuthToken.

        NOTE(review): OAuthServer._get_token calls this with only
        (token_type, token_field) -- confirm which signature concrete
        stores actually implement.
        """
        raise NotImplementedError

    def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
        """-> OAuthToken."""
        raise NotImplementedError

    def fetch_request_token(self, oauth_consumer, oauth_callback):
        """-> OAuthToken."""
        raise NotImplementedError

    def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
        """-> OAuthToken."""
        raise NotImplementedError

    def authorize_request_token(self, oauth_token, user):
        """-> OAuthToken."""
        raise NotImplementedError
class OAuthSignatureMethod(object):
    """Strategy base class for OAuth signature methods.

    Subclasses implement get_name, build_signature_base_string and
    build_signature; check_signature is shared.
    """

    def get_name(self):
        """-> str."""
        raise NotImplementedError

    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        """-> str key, str raw."""
        raise NotImplementedError

    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        """-> str."""
        raise NotImplementedError

    def check_signature(self, oauth_request, consumer, token, signature):
        """Compare *signature* against a freshly built one."""
        expected = self.build_signature(oauth_request, consumer, token)
        return expected == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    """HMAC-SHA1 signature method (OAuth Core 1.0, section 9.2)."""
    def get_name(self):
        return 'HMAC-SHA1'
    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return (key, raw): the HMAC key and the signature base string."""
        # Base string: METHOD&URL&sorted-params, each part percent-escaped.
        sig = (
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        )
        # Key is 'consumer_secret&token_secret'; token part may be empty.
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw
    def build_signature(self, oauth_request, consumer, token):
        """Builds the base signature string."""
        key, raw = self.build_signature_base_string(oauth_request, consumer,
            token)
        # HMAC object.
        try:
            import hashlib # 2.5
            hashed = hmac.new(key, raw, hashlib.sha1)
        except:
            import sha # Deprecated
            hashed = hmac.new(key, raw, sha)
        # Calculate the digest base 64.
        # [:-1] strips the trailing newline that b2a_base64 appends.
        return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signature method: the secrets are sent as-is (OAuth 9.4)."""
    def get_name(self):
        """Return the wire name of this signature method."""
        return 'PLAINTEXT'
    def build_signature_base_string(self, oauth_request, consumer, token):
        """Join the escaped consumer secret and token secret with '&'."""
        base = '%s&' % escape(consumer.secret)
        if token:
            base += escape(token.secret)
        return base, base
    def build_signature(self, oauth_request, consumer, token):
        """The PLAINTEXT signature is simply the signing key itself."""
        key, raw = self.build_signature_base_string(oauth_request, consumer,
            token)
        return key
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from urllib2 import Request, urlopen
import base64
import oauth
from error import TweepError
from api import API
class AuthHandler(object):
    """Interface for the authentication strategies used by the API client."""
    def apply_auth(self, url, method, headers, parameters):
        """Add authentication to an outgoing request (mutates *headers*)."""
        raise NotImplementedError
    def get_username(self):
        """Return the screen name of the authenticated user."""
        raise NotImplementedError
class BasicAuthHandler(AuthHandler):
    """HTTP Basic authentication using a username/password pair."""
    def __init__(self, username, password):
        self.username = username
        # Pre-compute the base64 "user:pass" credential once.
        self._basic_credential = base64.b64encode('%s:%s' % (username, password))
        self._b64up = self._basic_credential
    def apply_auth(self, url, method, headers, parameters):
        """Attach the 'Authorization: Basic ...' header to the request."""
        headers['Authorization'] = 'Basic %s' % self._b64up
    def get_username(self):
        """Return the username supplied at construction time."""
        return self.username
class OAuthHandler(AuthHandler):
    """OAuth authentication handler.

    Drives the three-legged OAuth dance against Twitter's /oauth/
    endpoints and signs API requests once an access token is held.
    """
    OAUTH_HOST = 'twitter.com'
    OAUTH_ROOT = '/oauth/'
    def __init__(self, consumer_key, consumer_secret, callback=None, secure=False):
        """Store consumer credentials and reset all token state.

        callback -- URL the provider redirects to after authorization.
        secure -- when True, always use https for the OAuth endpoints.
        """
        self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
        self._sigmethod = oauth.OAuthSignatureMethod_HMAC_SHA1()
        self.request_token = None
        self.access_token = None
        self.callback = callback
        self.username = None
        self.secure = secure
    def _get_oauth_url(self, endpoint, secure=False):
        """Build the full URL for an OAuth endpoint (http or https)."""
        if self.secure or secure:
            prefix = 'https://'
        else:
            prefix = 'http://'
        return prefix + self.OAUTH_HOST + self.OAUTH_ROOT + endpoint
    def apply_auth(self, url, method, headers, parameters):
        """Sign the request with the access token and add the
        Authorization header to *headers* in place."""
        request = oauth.OAuthRequest.from_consumer_and_token(
            self._consumer, http_url=url, http_method=method,
            token=self.access_token, parameters=parameters
        )
        request.sign_request(self._sigmethod, self._consumer, self.access_token)
        headers.update(request.to_header())
    def _get_request_token(self):
        """Fetch a new request token from the provider. -> OAuthToken.

        Any failure (network, parsing, provider error) is wrapped in
        TweepError for a uniform API surface.
        """
        try:
            url = self._get_oauth_url('request_token')
            request = oauth.OAuthRequest.from_consumer_and_token(
                self._consumer, http_url=url, callback=self.callback
            )
            request.sign_request(self._sigmethod, self._consumer, None)
            resp = urlopen(Request(url, headers=request.to_header()))
            return oauth.OAuthToken.from_string(resp.read())
        except Exception, e:
            raise TweepError(e)
    def set_request_token(self, key, secret):
        """Restore a previously obtained request token."""
        self.request_token = oauth.OAuthToken(key, secret)
    def set_access_token(self, key, secret):
        """Restore a previously obtained access token."""
        self.access_token = oauth.OAuthToken(key, secret)
    def get_authorization_url(self, signin_with_twitter=False):
        """Get the authorization URL to redirect the user to.

        Also fetches and stores a fresh request token as a side effect.
        """
        try:
            # get the request token
            self.request_token = self._get_request_token()
            # build auth request and return as url
            if signin_with_twitter:
                url = self._get_oauth_url('authenticate')
            else:
                url = self._get_oauth_url('authorize')
            request = oauth.OAuthRequest.from_token_and_callback(
                token=self.request_token, http_url=url
            )
            return request.to_url()
        except Exception, e:
            raise TweepError(e)
    def get_access_token(self, verifier=None):
        """
        After user has authorized the request token, get access token
        with user supplied verifier.

        Stores the token on self.access_token and returns it.
        """
        try:
            url = self._get_oauth_url('access_token')
            # build request, signed with the request token
            request = oauth.OAuthRequest.from_consumer_and_token(
                self._consumer,
                token=self.request_token, http_url=url,
                verifier=str(verifier)
            )
            request.sign_request(self._sigmethod, self._consumer, self.request_token)
            # send request
            resp = urlopen(Request(url, headers=request.to_header()))
            self.access_token = oauth.OAuthToken.from_string(resp.read())
            return self.access_token
        except Exception, e:
            raise TweepError(e)
    def get_xauth_access_token(self, username, password):
        """
        Get an access token from an username and password combination.
        In order to get this working you need to create an app at
        http://twitter.com/apps, after that send a mail to api@twitter.com
        and request activation of xAuth for it.
        """
        try:
            url = self._get_oauth_url('access_token', secure=True) # must use HTTPS
            request = oauth.OAuthRequest.from_consumer_and_token(
                oauth_consumer=self._consumer,
                http_method='POST', http_url=url,
                parameters = {
                    'x_auth_mode': 'client_auth',
                    'x_auth_username': username,
                    'x_auth_password': password
                }
            )
            request.sign_request(self._sigmethod, self._consumer, None)
            # xAuth sends the credentials as a signed POST body.
            resp = urlopen(Request(url, data=request.to_postdata()))
            self.access_token = oauth.OAuthToken.from_string(resp.read())
            return self.access_token
        except Exception, e:
            raise TweepError(e)
    def get_username(self):
        """Return the authenticated user's screen name, fetched lazily
        via verify_credentials on first use and cached afterwards."""
        if self.username is None:
            api = API(self)
            user = api.verify_credentials()
            if user:
                self.username = user.screen_name
            else:
                raise TweepError("Unable to get username, invalid oauth token!")
        return self.username
| Python |
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
class TweepError(Exception):
    """Base exception raised for any Tweepy failure."""
    def __init__(self, reason, response=None):
        # Keep the raw HTTP response (if any) for callers that need it.
        self.response = response
        # Coerce the reason to text so str(exc) is always printable.
        self.reason = str(reason)
    def __str__(self):
        return self.reason
| Python |
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from error import TweepError
from utils import parse_datetime, parse_html_value, parse_a_href, parse_search_datetime, unescape_html
class ResultSet(list):
    """List subclass holding the model instances returned by one API query.

    Parsers may attach paging metadata (cursors, max_id, ...) as attributes.
    """
class Model(object):
    """Base class for all Twitter API model objects."""
    def __init__(self, api=None):
        # Keep a handle on the API client so instances can perform
        # follow-up calls (destroy, retweet, follow, ...).
        self._api = api
    def __getstate__(self):
        """Drop the API reference when pickling; it is not picklable."""
        state = dict(self.__dict__)
        state.pop('_api', None)
        return state
    @classmethod
    def parse(cls, api, json):
        """Parse a JSON object into a model instance."""
        raise NotImplementedError
    @classmethod
    def parse_list(cls, api, json_list):
        """Parse a list of JSON objects into a ResultSet of model instances."""
        results = ResultSet()
        results.extend(cls.parse(api, obj) for obj in json_list)
        return results
class Status(Model):
    """A tweet, with convenience actions bound to the API client."""
    @classmethod
    def parse(cls, api, json):
        """Build a Status from a JSON dict, coercing known fields."""
        status = cls(api)
        for k, v in json.items():
            if k == 'user':
                user = User.parse(api, v)
                setattr(status, 'author', user)
                setattr(status, 'user', user)  # DEPRECATED: kept for old callers
            elif k == 'created_at':
                setattr(status, k, parse_datetime(v))
            elif k == 'source':
                if '<' in v:
                    # Source arrives as an anchor tag; keep both the
                    # human-readable label and the href.
                    setattr(status, k, parse_html_value(v))
                    setattr(status, 'source_url', parse_a_href(v))
                else:
                    setattr(status, k, v)
            elif k == 'retweeted_status':
                # Nested tweet: parse recursively.
                setattr(status, k, Status.parse(api, v))
            else:
                setattr(status, k, v)
        return status
    def destroy(self):
        """Delete this status via the bound API client."""
        return self._api.destroy_status(self.id)
    def retweet(self):
        """Retweet this status."""
        return self._api.retweet(self.id)
    def retweets(self):
        """Fetch the retweets of this status."""
        return self._api.retweets(self.id)
    def favorite(self):
        """Mark this status as a favorite."""
        return self._api.create_favorite(self.id)
class User(Model):
    """A Twitter user account, with relationship and list helpers."""
    @classmethod
    def parse(cls, api, json):
        """Build a User from a JSON dict, coercing known fields."""
        user = cls(api)
        for k, v in json.items():
            if k == 'created_at':
                setattr(user, k, parse_datetime(v))
            elif k == 'status':
                setattr(user, k, Status.parse(api, v))
            elif k == 'following':
                # twitter sets this to null if it is false
                if v is True:
                    setattr(user, k, True)
                else:
                    setattr(user, k, False)
            else:
                setattr(user, k, v)
        return user
    @classmethod
    def parse_list(cls, api, json_list):
        """Parse either a bare list or a {'users': [...]} wrapper."""
        if isinstance(json_list, list):
            item_list = json_list
        else:
            item_list = json_list['users']
        results = ResultSet()
        for obj in item_list:
            results.append(cls.parse(api, obj))
        return results
    def timeline(self, **kargs):
        """Fetch this user's timeline."""
        return self._api.user_timeline(user_id=self.id, **kargs)
    def friends(self, **kargs):
        """Fetch the users this user follows."""
        return self._api.friends(user_id=self.id, **kargs)
    def followers(self, **kargs):
        """Fetch this user's followers."""
        return self._api.followers(user_id=self.id, **kargs)
    def follow(self):
        """Follow this user and update the local following flag."""
        self._api.create_friendship(user_id=self.id)
        self.following = True
    def unfollow(self):
        """Unfollow this user and update the local following flag."""
        self._api.destroy_friendship(user_id=self.id)
        self.following = False
    def lists_memberships(self, *args, **kargs):
        """Fetch the lists this user has been added to."""
        return self._api.lists_memberships(user=self.screen_name, *args, **kargs)
    def lists_subscriptions(self, *args, **kargs):
        """Fetch the lists this user subscribes to."""
        return self._api.lists_subscriptions(user=self.screen_name, *args, **kargs)
    def lists(self, *args, **kargs):
        """Fetch the lists owned by this user."""
        return self._api.lists(user=self.screen_name, *args, **kargs)
    def followers_ids(self, *args, **kargs):
        """Fetch the ids of this user's followers."""
        return self._api.followers_ids(user_id=self.id, *args, **kargs)
class DirectMessage(Model):
    """A direct message between two users."""
    @classmethod
    def parse(cls, api, json):
        """Build a DirectMessage from a JSON dict, coercing known fields."""
        dm = cls(api)
        for key, value in json.items():
            if key in ('sender', 'recipient'):
                setattr(dm, key, User.parse(api, value))
            elif key == 'created_at':
                setattr(dm, key, parse_datetime(value))
            else:
                setattr(dm, key, value)
        return dm
    def destroy(self):
        """Delete this direct message via the bound API client."""
        return self._api.destroy_direct_message(self.id)
class Friendship(Model):
    """The relationship between two users, returned as (source, target)."""
    @classmethod
    def parse(cls, api, json):
        """Build the (source, target) pair from a relationship dict."""
        relationship = json['relationship']
        # Each half of the relationship becomes its own model instance.
        source = cls(api)
        for key, value in relationship['source'].items():
            setattr(source, key, value)
        target = cls(api)
        for key, value in relationship['target'].items():
            setattr(target, key, value)
        return source, target
class SavedSearch(Model):
    """A search query persisted on the user's account."""
    @classmethod
    def parse(cls, api, json):
        """Build a SavedSearch from a JSON dict."""
        ss = cls(api)
        for key, value in json.items():
            if key == 'created_at':
                value = parse_datetime(value)
            setattr(ss, key, value)
        return ss
    def destroy(self):
        """Delete this saved search via the bound API client."""
        return self._api.destroy_saved_search(self.id)
class SearchResult(Model):
    """A single result from the Search API (not bound to an API client)."""
    @classmethod
    def parse(cls, api, json):
        """Build a SearchResult from a JSON dict."""
        result = cls()
        for key, value in json.items():
            if key == 'created_at':
                setattr(result, key, parse_search_datetime(value))
            elif key == 'source':
                # Source arrives HTML-escaped; unescape before stripping the tag.
                setattr(result, key, parse_html_value(unescape_html(value)))
            else:
                setattr(result, key, value)
        return result
    @classmethod
    def parse_list(cls, api, json_list, result_set=None):
        """Build a ResultSet carrying the search paging metadata as attributes."""
        results = ResultSet()
        for meta in ('max_id', 'since_id', 'refresh_url', 'next_page',
                     'results_per_page', 'page', 'completed_in', 'query'):
            setattr(results, meta, json_list.get(meta))
        for obj in json_list['results']:
            results.append(cls.parse(api, obj))
        return results
class List(Model):
    """A Twitter list, with membership and subscription helpers."""
    @classmethod
    def parse(cls, api, json):
        """Build a list model from a JSON dict."""
        # Instantiate via cls rather than hard-coding List, so that model
        # subclasses registered on a custom ModelFactory are honored
        # (ModelFactory explicitly supports subclassing).
        lst = cls(api)
        for k, v in json.items():
            if k == 'user':
                setattr(lst, k, User.parse(api, v))
            else:
                setattr(lst, k, v)
        return lst
    @classmethod
    def parse_list(cls, api, json_list, result_set=None):
        """Parse a {'lists': [...]} payload into a ResultSet."""
        results = ResultSet()
        for obj in json_list['lists']:
            results.append(cls.parse(api, obj))
        return results
    def update(self, **kargs):
        """Update this list's settings."""
        return self._api.update_list(self.slug, **kargs)
    def destroy(self):
        """Delete this list."""
        return self._api.destroy_list(self.slug)
    def timeline(self, **kargs):
        """Fetch the statuses of this list's members."""
        return self._api.list_timeline(self.user.screen_name, self.slug, **kargs)
    def add_member(self, id):
        """Add the user with the given id to this list."""
        return self._api.add_list_member(self.slug, id)
    def remove_member(self, id):
        """Remove the user with the given id from this list."""
        return self._api.remove_list_member(self.slug, id)
    def members(self, **kargs):
        """Fetch this list's members."""
        return self._api.list_members(self.user.screen_name, self.slug, **kargs)
    def is_member(self, id):
        """Check whether the user with the given id is a member."""
        return self._api.is_list_member(self.user.screen_name, self.slug, id)
    def subscribe(self):
        """Subscribe the authenticated user to this list."""
        return self._api.subscribe_list(self.user.screen_name, self.slug)
    def unsubscribe(self):
        """Unsubscribe the authenticated user from this list."""
        return self._api.unsubscribe_list(self.user.screen_name, self.slug)
    def subscribers(self, **kargs):
        """Fetch this list's subscribers."""
        return self._api.list_subscribers(self.user.screen_name, self.slug, **kargs)
    def is_subscribed(self, id):
        """Check whether the user with the given id is subscribed."""
        return self._api.is_subscribed_list(self.user.screen_name, self.slug, id)
class JSONModel(Model):
    """Pass-through model: hands back the decoded JSON unchanged."""
    @classmethod
    def parse(cls, api, json):
        """Return the payload as-is."""
        return json
class IDModel(Model):
    """Model for endpoints that return bare ID lists."""
    @classmethod
    def parse(cls, api, json):
        """Return the list of ids, unwrapping {'ids': [...]} payloads."""
        if isinstance(json, list):
            return json
        return json['ids']
class ModelFactory(object):
    """
    Used by parsers for creating instances
    of models. You may subclass this factory
    to add your own extended models.
    """
    # Maps a method's payload_type name to the model class ModelParser
    # instantiates for it.
    status = Status
    user = User
    direct_message = DirectMessage
    friendship = Friendship
    saved_search = SavedSearch
    search_result = SearchResult
    list = List
    json = JSONModel
    ids = IDModel
| Python |
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
# Protocol-wide defaults used throughout this module.
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET' # Default HTTP method for OAuth requests.
SIGNATURE_METHOD = 'PLAINTEXT' # Default signature method name.
class OAuthError(RuntimeError):
    """Generic exception class for OAuth failures."""
    def __init__(self, message='OAuth error occurred.'):
        # Initialize the RuntimeError base as well, so str(exc) and
        # exc.args carry the message instead of being empty (the old
        # code only set .message, losing the text when the exception
        # was stringified or wrapped by callers).
        RuntimeError.__init__(self, message)
        self.message = message
def build_authenticate_header(realm=''):
    """Build the optional WWW-Authenticate header for a 401 response."""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
def escape(s):
    """Percent-escape a URL string, including any '/' (only '~' is left
    unescaped), as required by the OAuth spec (RFC 3986 unreserved set)."""
    return urllib.quote(s, safe='~')
def _utf8_str(s):
    """Return *s* as a UTF-8 byte string (encodes unicode, str()s the rest)."""
    return s.encode("utf-8") if isinstance(s, unicode) else str(s)
def generate_timestamp():
    """Return the current time as integer seconds since the epoch (UTC)."""
    return int(time.time())
def generate_nonce(length=8):
    """Generate a pseudorandom string of *length* decimal digits."""
    return ''.join(str(random.randint(0, 9)) for _ in range(length))
def generate_verifier(length=8):
    """Generate a pseudorandom verifier string of *length* decimal digits."""
    return ''.join(str(random.randint(0, 9)) for _ in range(length))
class OAuthConsumer(object):
    """Consumer of OAuth authentication.

    Represents the identity of the Consumer via the key/secret pair it
    shares with the Service Provider.
    """
    key = None
    secret = None
    def __init__(self, key, secret):
        self.secret = secret
        self.key = key
class OAuthToken(object):
    """OAuthToken is a data type that represents an End User via either an access
    or request token.

    key -- the token
    secret -- the token secret
    """
    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None
    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
    def set_callback(self, callback):
        """Record the callback URL and mark it confirmed (OAuth 1.0a)."""
        self.callback = callback
        self.callback_confirmed = 'true'
    def set_verifier(self, verifier=None):
        """Set the verifier, generating a random one when none is given."""
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = generate_verifier()
    def get_callback_url(self):
        """Return the callback URL with oauth_verifier added to its query."""
        if self.callback and self.verifier:
            # Append the oauth_verifier.
            parts = urlparse.urlparse(self.callback)
            scheme, netloc, path, params, query, fragment = parts[:6]
            if query:
                query = '%s&oauth_verifier=%s' % (query, self.verifier)
            else:
                query = 'oauth_verifier=%s' % self.verifier
            return urlparse.urlunparse((scheme, netloc, path, params,
                query, fragment))
        return self.callback
    def to_string(self):
        """Serialize the token as a URL-encoded query string."""
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }
        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)
    def from_string(s):
        """ Returns a token from something like:
        oauth_token_secret=xxx&oauth_token=xxx
        """
        params = cgi.parse_qs(s, keep_blank_values=False)
        key = params['oauth_token'][0]
        secret = params['oauth_token_secret'][0]
        token = OAuthToken(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass # 1.0, no callback confirmed.
        return token
    from_string = staticmethod(from_string)
    def __str__(self):
        return self.to_string()
class OAuthRequest(object):
    """OAuthRequest represents the request and can be serialized.

    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
        - oauth_verifier
        ... any additional parameters, as defined by the Service Provider.
    """
    parameters = None # OAuth parameters.
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION
    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}
    def set_parameter(self, parameter, value):
        """Set a single request parameter."""
        self.parameters[parameter] = value
    def get_parameter(self, parameter):
        """Return a parameter's value; raise OAuthError when absent."""
        try:
            return self.parameters[parameter]
        except:
            raise OAuthError('Parameter not found: %s' % parameter)
    def _get_timestamp_nonce(self):
        """Return the (oauth_timestamp, oauth_nonce) pair."""
        return self.get_parameter('oauth_timestamp'), self.get_parameter(
            'oauth_nonce')
    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        parameters = {}
        for k, v in self.parameters.iteritems():
            # Ignore oauth parameters.
            if k.find('oauth_') < 0:
                parameters[k] = v
        return parameters
    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        auth_header = 'OAuth realm="%s"' % realm
        # Add the oauth parameters.
        if self.parameters:
            for k, v in self.parameters.iteritems():
                if k[:6] == 'oauth_':
                    auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}
    def to_postdata(self):
        """Serialize as post data for a POST request."""
        return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
            for k, v in self.parameters.iteritems()])
    def to_url(self):
        """Serialize as a URL for a GET request."""
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed.

        NOTE(review): this deletes oauth_signature from the live
        self.parameters dict (not a copy); sign_request re-adds it
        afterwards, but the mutation is visible to callers.
        """
        params = self.parameters
        try:
            # Exclude the signature if it exists.
            del params['oauth_signature']
        except:
            pass
        # Escape key values before sorting.
        key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
            for k,v in params.items()]
        # Sort lexicographically, first after key, then after value.
        key_values.sort()
        # Combine key value pairs into a string.
        return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
    def get_normalized_http_method(self):
        """Uppercases the http method."""
        return self.http_method.upper()
    def get_normalized_http_url(self):
        """Parses the URL and rebuilds it to be scheme://host/path."""
        parts = urlparse.urlparse(self.http_url)
        scheme, netloc, path = parts[:3]
        # Exclude default port numbers.
        if scheme == 'http' and netloc[-3:] == ':80':
            netloc = netloc[:-3]
        elif scheme == 'https' and netloc[-4:] == ':443':
            netloc = netloc[:-4]
        return '%s://%s%s' % (scheme, netloc, path)
    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of build_signature."""
        # Set the signature method.
        self.set_parameter('oauth_signature_method',
            signature_method.get_name())
        # Set the signature.
        self.set_parameter('oauth_signature',
            self.build_signature(signature_method, consumer, token))
    def build_signature(self, signature_method, consumer, token):
        """Calls the build signature method within the signature method."""
        return signature_method.build_signature(self, consumer, token)
    def from_request(http_method, http_url, headers=None, parameters=None,
            query_string=None):
        """Combines multiple parameter sources (header, query string, URL)."""
        if parameters is None:
            parameters = {}
        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = OAuthRequest._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise OAuthError('Unable to parse OAuth parameters from '
                        'Authorization header.')
        # GET or POST query string.
        if query_string:
            query_params = OAuthRequest._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = OAuthRequest._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return OAuthRequest(http_method, http_url, parameters)
        return None
    from_request = staticmethod(from_request)
    def from_consumer_and_token(oauth_consumer, token=None,
            callback=None, verifier=None, http_method=HTTP_METHOD,
            http_url=None, parameters=None):
        """Build a client request with the standard OAuth defaults filled in."""
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': oauth_consumer.key,
            'oauth_timestamp': generate_timestamp(),
            'oauth_nonce': generate_nonce(),
            'oauth_version': OAuthRequest.version,
        }
        # Caller-supplied parameters override the generated defaults.
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
            if token.callback:
                parameters['oauth_callback'] = token.callback
            # 1.0a support for verifier.
            if verifier:
                parameters['oauth_verifier'] = verifier
        elif callback:
            # 1.0a support for callback in the request token request.
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_consumer_and_token = staticmethod(from_consumer_and_token)
    def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
            http_url=None, parameters=None):
        """Build an authorization-redirect request for the given token."""
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_token_and_callback = staticmethod(from_token_and_callback)
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params
    _split_header = staticmethod(_split_header)
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = cgi.parse_qs(param_str, keep_blank_values=False)
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
    _split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
    """A worker to check the validity of a request against a data store."""
    timestamp_threshold = 300 # In seconds, five minutes.
    version = VERSION
    signature_methods = None
    data_store = None
    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}
    def set_data_store(self, data_store):
        """Replace the backing data store."""
        self.data_store = data_store
    def get_data_store(self):
        """Return the backing data store."""
        return self.data_store
    def add_signature_method(self, signature_method):
        """Register a signature method under its wire name."""
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods
    def fetch_request_token(self, oauth_request):
        """Processes a request_token request and returns the
        request token on success.
        """
        try:
            # Get the request token for authorization.
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # No token required for the initial token request.
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            try:
                callback = self.get_callback(oauth_request)
            except OAuthError:
                callback = None # 1.0, no callback specified.
            self._check_signature(oauth_request, consumer, None)
            # Fetch a new token.
            token = self.data_store.fetch_request_token(consumer, callback)
        return token
    def fetch_access_token(self, oauth_request):
        """Processes an access_token request and returns the
        access token on success.
        """
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        try:
            verifier = self._get_verifier(oauth_request)
        except OAuthError:
            # 1.0 clients do not send a verifier.
            verifier = None
        # Get the request token.
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(consumer, token, verifier)
        return new_token
    def verify_request(self, oauth_request):
        """Verifies an api call and checks all the parameters."""
        # -> consumer and token
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # Get the access token.
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters
    def authorize_token(self, token, user):
        """Authorize a request token."""
        return self.data_store.authorize_request_token(token, user)
    def get_callback(self, oauth_request):
        """Get the callback URL."""
        return oauth_request.get_parameter('oauth_callback')
    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
    def _get_version(self, oauth_request):
        """Verify the correct version request for this server."""
        try:
            version = oauth_request.get_parameter('oauth_version')
        except:
            # Missing version parameter defaults to the protocol version.
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version
    def _get_signature_method(self, oauth_request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = oauth_request.get_parameter(
                'oauth_signature_method')
        except:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the '
                'following: %s' % (signature_method, signature_method_names))
        return signature_method
    def _get_consumer(self, oauth_request):
        """Look up the consumer named in the request. -> OAuthConsumer."""
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer
    def _get_token(self, oauth_request, token_type='access'):
        """Try to find the token for the provided request token key."""
        token_field = oauth_request.get_parameter('oauth_token')
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token
    def _get_verifier(self, oauth_request):
        """Return the oauth_verifier parameter (raises OAuthError if absent)."""
        return oauth_request.get_parameter('oauth_verifier')
    def _check_signature(self, oauth_request, consumer, token):
        """Validate timestamp, nonce and signature; raise OAuthError on failure."""
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except:
            raise OAuthError('Missing signature.')
        # Validate the signature.
        valid_sig = signature_method.check_signature(oauth_request, consumer,
            token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(
                oauth_request, consumer, token)
            raise OAuthError('Invalid signature. Expected signature base '
                'string: %s' % base)
        # NOTE(review): the line below recomputes the signature and discards
        # the result; it appears to be dead code left from a refactor and
        # could be removed.
        built = signature_method.build_signature(oauth_request, consumer, token)
    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = abs(now - timestamp)
        if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' %
                (timestamp, now, self.timestamp_threshold))
    def _check_nonce(self, consumer, token, nonce):
        """Verify that the nonce is uniqueish."""
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
    """OAuthClient is a worker that attempts to execute a request on
    behalf of a consumer/token pair."""
    consumer = None
    token = None
    def __init__(self, oauth_consumer, oauth_token):
        self.token = oauth_token
        self.consumer = oauth_consumer
    def get_consumer(self):
        """Return the consumer this client acts for."""
        return self.consumer
    def get_token(self):
        """Return the token this client acts with."""
        return self.token
    def fetch_request_token(self, oauth_request):
        """Obtain a request token from the provider. -> OAuthToken."""
        raise NotImplementedError
    def fetch_access_token(self, oauth_request):
        """Exchange a request token for an access token. -> OAuthToken."""
        raise NotImplementedError
    def access_resource(self, oauth_request):
        """Access a protected resource. -> Some protected resource."""
        raise NotImplementedError
class OAuthDataStore(object):
    """A database abstraction used to look up consumers, tokens and nonces."""
    def lookup_consumer(self, key):
        """Find the consumer registered under *key*. -> OAuthConsumer."""
        raise NotImplementedError
    def lookup_token(self, oauth_consumer, token_type, token_token):
        """Find an existing token of the given type. -> OAuthToken."""
        raise NotImplementedError
    def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
        """Return the nonce if it was seen before (replay check). -> OAuthToken."""
        raise NotImplementedError
    def fetch_request_token(self, oauth_consumer, oauth_callback):
        """Create and store a new request token. -> OAuthToken."""
        raise NotImplementedError
    def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
        """Exchange an authorized request token for an access token. -> OAuthToken."""
        raise NotImplementedError
    def authorize_request_token(self, oauth_token, user):
        """Mark the request token as authorized by *user*. -> OAuthToken."""
        raise NotImplementedError
class OAuthSignatureMethod(object):
    """Abstract base for OAuth signature strategies.

    Concrete subclasses supply a wire name plus the logic to build
    and verify request signatures.
    """
    def get_name(self):
        """Return the signature method name as sent on the wire. -> str."""
        raise NotImplementedError
    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        """Return the signing key and the base string. -> (str key, str raw)."""
        raise NotImplementedError
    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        """Return the computed signature for the request. -> str."""
        raise NotImplementedError
    def check_signature(self, oauth_request, consumer, token, signature):
        """True when *signature* matches the one computed locally."""
        expected = self.build_signature(oauth_request, consumer, token)
        return expected == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    """HMAC-SHA1 signature method (OAuth Core 1.0, section 9.2)."""
    def get_name(self):
        """Return the wire name of this signature method."""
        return 'HMAC-SHA1'
    def build_signature_base_string(self, oauth_request, consumer, token):
        """Build the signing key and signature base string.

        The base string is METHOD&URL&PARAMS with each part
        percent-escaped; the key is consumer_secret&token_secret
        (token part empty when there is no token).
        """
        sig = (
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        )
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw
    def build_signature(self, oauth_request, consumer, token):
        """Sign the request: base64 of the HMAC-SHA1 over the base string."""
        key, raw = self.build_signature_base_string(oauth_request, consumer,
            token)
        # HMAC object. hashlib exists on Python 2.5+; fall back to the
        # deprecated sha module on older interpreters. Catch only
        # ImportError so genuine errors are not silently swallowed.
        try:
            import hashlib # 2.5
            hashed = hmac.new(key, raw, hashlib.sha1)
        except ImportError:
            import sha # Deprecated
            hashed = hmac.new(key, raw, sha)
        # Calculate the digest base 64, dropping the trailing newline
        # appended by b2a_base64.
        return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signature method: the secrets are sent as-is (OAuth 9.4)."""
    def get_name(self):
        """Return the wire name of this signature method."""
        return 'PLAINTEXT'
    def build_signature_base_string(self, oauth_request, consumer, token):
        """Join the escaped consumer secret and token secret with '&'."""
        base = '%s&' % escape(consumer.secret)
        if token:
            base += escape(token.secret)
        return base, base
    def build_signature(self, oauth_request, consumer, token):
        """The PLAINTEXT signature is simply the signing key itself."""
        key, raw = self.build_signature_base_string(oauth_request, consumer,
            token)
        return key
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from error import TweepError
from models import ModelFactory
from utils import import_simplejson
class Parser(object):
    """Abstract base class for response payload parsers."""
    def parse(self, method, payload):
        """Parse the response payload and return the result.

        Implementations return either the bare result, or a
        (result, cursors) tuple when paging cursors are present.
        """
        raise NotImplementedError
    def parse_error(self, payload):
        """Extract the error message from an error payload.

        Implementations may raise when the payload cannot be parsed;
        callers then fall back to a default error message.
        """
        raise NotImplementedError
class JSONParser(Parser):
payload_format = 'json'
def __init__(self):
self.json_lib = import_simplejson()
def parse(self, method, payload):
try:
json = self.json_lib.loads(payload)
except Exception, e:
raise TweepError('Failed to parse JSON payload: %s' % e)
if isinstance(json, dict) and 'previous_cursor' in json and 'next_cursor' in json:
cursors = json['previous_cursor'], json['next_cursor']
return json, cursors
else:
return json
def parse_error(self, payload):
error = self.json_lib.loads(payload)
if error.has_key('error'):
return error['error']
else:
return error['errors']
class ModelParser(JSONParser):
    """Parser that maps decoded JSON onto tweepy model objects."""

    def __init__(self, model_factory=None):
        JSONParser.__init__(self)
        # Fall back to the stock ModelFactory when none is supplied.
        self.model_factory = model_factory or ModelFactory

    def parse(self, method, payload):
        """Decode *payload* and wrap it in the model for method.payload_type.

        Returns the model (or list of models), plus the pagination
        cursors when the endpoint supplied them.

        Raises:
            TweepError: if no model class exists for the payload type.
        """
        try:
            if method.payload_type is None:
                return
            model = getattr(self.model_factory, method.payload_type)
        except AttributeError:
            raise TweepError('No model for this payload type: %s' % method.payload_type)

        # JSONParser.parse returns a (data, cursors) pair only for
        # cursored payloads; normalize both shapes here.
        cursors = None
        json = JSONParser.parse(self, method, payload)
        if isinstance(json, tuple):
            json, cursors = json

        if method.payload_list:
            result = model.parse_list(method.api, json)
        else:
            result = model.parse(method.api, json)

        if cursors:
            return result, cursors
        return result
| Python |
# Tweepy
# Copyright 2010 Joshua Roesslein
# See LICENSE for details.
from datetime import datetime
import time
import htmlentitydefs
import re
import locale
def parse_datetime(string):
    """Parse a REST API timestamp ('%a %b %d %H:%M:%S +0000 %Y') into a
    naive datetime.

    LC_TIME is forced to 'C' while parsing so English month/day names are
    recognized regardless of the user's locale, and is restored afterwards
    even if parsing fails.
    """
    # Save the current LC_TIME setting so it can be restored.  (Was
    # locale.getlocale() with no argument, which reads LC_CTYPE and so
    # restored the wrong category's value into LC_TIME.)
    loc = locale.getlocale(locale.LC_TIME)
    locale.setlocale(locale.LC_TIME, 'C')
    try:
        # We must parse datetime this way to work in python 2.4
        date = datetime(*(time.strptime(string, '%a %b %d %H:%M:%S +0000 %Y')[0:6]))
    finally:
        # Reset locale back to the previous setting, even on parse errors.
        locale.setlocale(locale.LC_TIME, loc)
    return date
def parse_html_value(html):
    """Return the text between the first '>' and the last '<' of *html*,
    i.e. the inner text of a simple one-element HTML snippet."""
    start = html.find('>') + 1
    end = html.rfind('<')
    return html[start:end]
def parse_a_href(atag):
    """Return the first double-quoted value in *atag* — for an anchor tag
    produced by Twitter this is the href URL."""
    first_quote = atag.find('"') + 1
    closing_quote = atag.find('"', first_quote)
    return atag[first_quote:closing_quote]
def parse_search_datetime(string):
    """Parse a Search API timestamp ('%a, %d %b %Y %H:%M:%S +0000') into a
    naive datetime.

    LC_TIME is forced to 'C' while parsing and restored afterwards, even
    if parsing fails.  (Previously the locale was reset to '' — the
    process environment default — which clobbered any LC_TIME value the
    caller had set.)
    """
    loc = locale.getlocale(locale.LC_TIME)
    locale.setlocale(locale.LC_TIME, 'C')
    try:
        # We must parse datetime this way to work in python 2.4
        date = datetime(*(time.strptime(string, '%a, %d %b %Y %H:%M:%S +0000')[0:6]))
    finally:
        # Reset locale back to the previous setting.
        locale.setlocale(locale.LC_TIME, loc)
    return date
def unescape_html(text):
    """Replace HTML character references and named entities in *text*
    with the corresponding unicode characters.

    Created by Fredrik Lundh (http://effbot.org/zone/re-sub.htm#unescape-html)
    """
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # numeric character reference (decimal or hex)
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                # BUG FIX: was 'htmlentitydefs.najsbcodepoint', a corrupted
                # identifier that raised AttributeError on every named
                # entity; the real mapping is name2codepoint.
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text # leave as is
    return re.sub(r"&#?\w+;", fixup, text)
def convert_to_utf8_str(arg):
    """Return *arg* as a UTF-8 encoded byte string.

    unicode values are encoded, byte strings pass through unchanged, and
    anything else is stringified with str().

    # written by Michael Norton (http://docondev.blogspot.com/)
    """
    if isinstance(arg, unicode):
        return arg.encode('utf-8')
    if isinstance(arg, str):
        return arg
    return str(arg)
def import_simplejson():
    """Return the first available JSON module.

    Tries, in order: the external simplejson package (fastest on old
    Pythons), the stdlib json module (2.6+), then the copy bundled under
    jsb.contrib.  Raises ImportError if none is available.
    """
    try:
        import simplejson as json
    except ImportError:
        try:
            import json # Python 2.6+
        except ImportError:
            try:
                import jsb.contrib.simplejson as json
            except ImportError:
                #try:
                #    from django.utils import simplejson as json # Google App Engine
                #except ImportError:
                raise ImportError, "Can't load a json library"
    return json
def list_to_csv(item_list):
    """Join *item_list* into a comma-separated string.

    Returns None when item_list is empty or None, so absent values can be
    passed straight through as omitted request parameters.
    """
    if not item_list:
        return None
    return ','.join(map(str, item_list))
| Python |
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
import httplib
import urllib
import time
import re
from error import TweepError
from utils import convert_to_utf8_str
# Matches templated path variables such as '{id}' or '{user}'.
# Raw string: '\w' in a plain literal only works because it is not a
# recognized escape, and raises DeprecationWarning on modern Pythons.
re_path_template = re.compile(r'{\w+}')
def bind_api(**config):
    """Build a callable API endpoint method from declarative *config*.

    Recognized config keys: 'path' (required; may contain '{name}'
    template variables), 'payload_type', 'payload_list', 'allowed_param',
    'method' (HTTP verb, default 'GET'), 'require_auth' and 'search_api'.

    Returns a function with signature (api, *args, **kargs) that performs
    the HTTP request and returns the parsed result.
    """

    class APIMethod(object):

        # Endpoint configuration captured from the bind_api(**config) call.
        path = config['path']
        payload_type = config.get('payload_type', None)
        payload_list = config.get('payload_list', False)
        allowed_param = config.get('allowed_param', [])
        method = config.get('method', 'GET')
        require_auth = config.get('require_auth', False)
        search_api = config.get('search_api', False)

        def __init__(self, api, args, kargs):
            # If authentication is required and no credentials
            # are provided, throw an error.
            if self.require_auth and not api.auth:
                raise TweepError('Authentication required!')

            self.api = api
            # Per-call overrides are popped from kargs first so they are
            # not mistaken for query parameters in build_parameters().
            self.post_data = kargs.pop('post_data', None)
            self.retry_count = kargs.pop('retry_count', api.retry_count)
            self.retry_delay = kargs.pop('retry_delay', api.retry_delay)
            self.retry_errors = kargs.pop('retry_errors', api.retry_errors)
            self.headers = kargs.pop('headers', {})
            self.build_parameters(args, kargs)

            # Pick correct URL root to use
            if self.search_api:
                self.api_root = api.search_root
            else:
                self.api_root = api.api_root

            # Perform any path variable substitution
            self.build_path()

            if api.secure:
                self.scheme = 'https://'
            else:
                self.scheme = 'http://'

            if self.search_api:
                self.host = api.search_host
            else:
                self.host = api.host

            # Manually set Host header to fix an issue in python 2.5
            # or older where Host is set including the 443 port.
            # This causes Twitter to issue 301 redirect.
            # See Issue http://github.com/joshthecoder/tweepy/issues/#issue/12
            self.headers['Host'] = self.host

        def build_parameters(self, args, kargs):
            """Fill self.parameters from positional and keyword arguments."""
            self.parameters = {}
            # Positional arguments map onto allowed_param slots in order.
            for idx, arg in enumerate(args):
                try:
                    self.parameters[self.allowed_param[idx]] = convert_to_utf8_str(arg)
                except IndexError:
                    raise TweepError('Too many parameters supplied!')
            for k, arg in kargs.items():
                # None means "parameter omitted".
                if arg is None:
                    continue
                if k in self.parameters:
                    raise TweepError('Multiple values for parameter %s supplied!' % k)
                self.parameters[k] = convert_to_utf8_str(arg)

        def build_path(self):
            """Substitute '{name}' template variables in the URL path."""
            for variable in re_path_template.findall(self.path):
                name = variable.strip('{}')
                if name == 'user' and 'user' not in self.parameters and self.api.auth:
                    # No 'user' parameter provided, fetch it from Auth instead.
                    value = self.api.auth.get_username()
                else:
                    try:
                        value = urllib.quote(self.parameters[name])
                    except KeyError:
                        raise TweepError('No parameter value found for path variable: %s' % name)
                    # Consumed by the path; must not also appear in the query string.
                    del self.parameters[name]
                self.path = self.path.replace(variable, value)

        def execute(self):
            """Perform the HTTP request (with caching and retries) and
            return the parsed result."""
            # Build the request URL
            url = self.api_root + self.path
            if len(self.parameters):
                url = '%s?%s' % (url, urllib.urlencode(self.parameters))

            # Query the cache if one is available
            # and this request uses a GET method.
            if self.api.cache and self.method == 'GET':
                cache_result = self.api.cache.get(url)
                # if cache result found and not expired, return it
                if cache_result:
                    # must restore api reference
                    if isinstance(cache_result, list):
                        for result in cache_result:
                            result._api = self.api
                    else:
                        cache_result._api = self.api
                    return cache_result

            # Continue attempting request until successful
            # or maximum number of retries is reached.
            retries_performed = 0
            while retries_performed < self.retry_count + 1:
                # Open connection
                # FIXME: add timeout
                # NOTE(review): a fresh connection is opened on every retry
                # but only the final one is closed below — earlier
                # connections appear to be leaked; confirm and fix.
                if self.api.secure:
                    conn = httplib.HTTPSConnection(self.host)
                else:
                    conn = httplib.HTTPConnection(self.host)

                # Apply authentication
                if self.api.auth:
                    self.api.auth.apply_auth(
                        self.scheme + self.host + url,
                        self.method, self.headers, self.parameters
                    )

                # Execute request
                try:
                    conn.request(self.method, url, headers=self.headers, body=self.post_data)
                    resp = conn.getresponse()
                except Exception, e:
                    raise TweepError('Failed to send request: %s' % e)

                # Exit request loop if non-retry error code
                if self.retry_errors:
                    # Retry only on the explicitly listed status codes.
                    if resp.status not in self.retry_errors: break
                else:
                    # Default policy: anything other than 200 is retried.
                    if resp.status == 200: break

                # Sleep before retrying request again
                time.sleep(self.retry_delay)
                retries_performed += 1

            # If an error was returned, throw an exception
            self.api.last_response = resp
            if resp.status != 200:
                try:
                    error_msg = self.api.parser.parse_error(resp.read())
                except Exception:
                    error_msg = "Twitter error response: status code = %s" % resp.status
                raise TweepError(error_msg, resp)

            # Parse the response payload
            result = self.api.parser.parse(self, resp.read())

            conn.close()

            # Store result into cache if one is available.
            if self.api.cache and self.method == 'GET' and result:
                self.api.cache.store(url, result)

            return result

    def _call(api, *args, **kargs):
        method = APIMethod(api, args, kargs)
        return method.execute()

    # Set pagination mode so Cursor() knows how to page this endpoint.
    if 'cursor' in APIMethod.allowed_param:
        _call.pagination_mode = 'cursor'
    elif 'page' in APIMethod.allowed_param:
        _call.pagination_mode = 'page'

    return _call
| Python |
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '1.7.1'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResult, ModelFactory
from error import TweepError
from api import API
from cache import Cache, MemoryCache, FileCache
from auth import BasicAuthHandler, OAuthHandler
from streaming import Stream, StreamListener
from cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
    """Toggle httplib's wire-level debug output for all API requests.

    Args:
        enable: turn debugging on when True, off when False.  (Previously
            this argument was silently ignored, so debug(False) actually
            *enabled* debugging.)
        level: httplib debuglevel to apply when enabled.
    """
    import httplib
    if enable:
        httplib.HTTPConnection.debuglevel = level
    else:
        httplib.HTTPConnection.debuglevel = 0
| Python |
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
import httplib
from socket import timeout
from threading import Thread
from time import sleep
import urllib
from auth import BasicAuthHandler
from models import Status
from api import API
from error import TweepError
from utils import import_simplejson
json = import_simplejson()
STREAM_VERSION = 1
class StreamListener(object):
    """Receives and dispatches messages from a streaming connection.

    Subclass and override the on_* callbacks; returning False from any
    of them stops the stream and closes the connection.
    """

    def __init__(self, api=None):
        self.api = api or API()

    def on_data(self, data):
        """Called when raw data is received from connection.

        Override this method if you wish to manually handle
        the stream data. Return False to stop stream and close connection.
        """
        # Dispatch on which marker key appears in the raw JSON text.
        if 'in_reply_to_status_id' in data:
            status = Status.parse(self.api, json.loads(data))
            if self.on_status(status) is False:
                return False
            return
        if 'delete' in data:
            notice = json.loads(data)['delete']['status']
            if self.on_delete(notice['id'], notice['user_id']) is False:
                return False
            return
        if 'limit' in data:
            if self.on_limit(json.loads(data)['limit']['track']) is False:
                return False

    def on_status(self, status):
        """Called when a new status arrives"""
        return

    def on_delete(self, status_id, user_id):
        """Called when a delete notice arrives for a status"""
        return

    def on_limit(self, track):
        """Called when a limitation notice arrives"""
        return

    def on_error(self, status_code):
        """Called when a non-200 status code is returned"""
        return False

    def on_timeout(self):
        """Called when stream connection times out"""
        return
class Stream(object):
    """Connection manager for the Twitter Streaming API.

    Opens a long-lived POST request to stream.twitter.com and feeds each
    length-delimited payload to the supplied StreamListener, retrying on
    timeouts and (up to retry_count times) on HTTP errors.
    """

    host = 'stream.twitter.com'

    def __init__(self, username, password, listener, timeout=5.0, retry_count = None,
                    retry_time = 10.0, snooze_time = 5.0, buffer_size=1500, headers=None):
        # The streaming endpoints here use HTTP Basic authentication.
        self.auth = BasicAuthHandler(username, password)
        self.running = False
        self.timeout = timeout          # socket timeout, seconds
        self.retry_count = retry_count  # None = no error-count limit
        self.retry_time = retry_time    # sleep between HTTP-error retries
        self.snooze_time = snooze_time  # sleep after a socket timeout
        # NOTE(review): buffer_size is stored but appears unused in this
        # class — confirm before relying on it.
        self.buffer_size = buffer_size
        self.listener = listener
        self.api = API()
        self.headers = headers or {}
        self.body = None

    def _run(self):
        """Connect-and-stream loop; runs until stopped or a fatal error."""
        # setup
        self.auth.apply_auth(None, None, self.headers, None)

        # enter loop
        error_counter = 0
        conn = None
        while self.running:
            if self.retry_count and error_counter > self.retry_count:
                # quit if error count greater than retry count
                break
            try:
                conn = httplib.HTTPConnection(self.host)
                conn.connect()
                conn.sock.settimeout(self.timeout)
                conn.request('POST', self.url, self.body, headers=self.headers)
                resp = conn.getresponse()
                if resp.status != 200:
                    # Listener may veto further retries by returning False.
                    if self.listener.on_error(resp.status) is False:
                        break
                    error_counter += 1
                    sleep(self.retry_time)
                else:
                    # Successful connect resets the error budget.
                    error_counter = 0
                    self._read_loop(resp)
            except timeout:
                # Socket timed out: ask the listener, then reconnect
                # after a snooze unless we were told to stop.
                if self.listener.on_timeout() == False:
                    break
                if self.running is False:
                    break
                conn.close()
                sleep(self.snooze_time)
            except Exception:
                # any other exception is fatal, so kill loop
                break

        # cleanup
        self.running = False
        if conn:
            conn.close()

    def _read_loop(self, resp):
        """Read 'delimited=length' framed payloads and hand each to the
        listener until stopped or the connection closes."""
        data = ''
        while self.running:
            if resp.isclosed():
                break

            # read length: digits terminated by a newline
            length = ''
            while True:
                c = resp.read(1)
                if c == '\n':
                    break
                length += c
            length = length.strip()
            if length.isdigit():
                length = int(length)
            else:
                # Skip keep-alive newlines and any non-numeric noise.
                continue

            # read data and pass into listener
            data = resp.read(length)
            if self.listener.on_data(data) is False:
                self.running = False

    def _start(self, async):
        """Begin streaming, on a background thread when *async* is true."""
        self.running = True
        if async:
            Thread(target=self._run).start()
        else:
            self._run()

    def firehose(self, count=None, async=False):
        """Stream statuses/firehose (all public statuses)."""
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%i/statuses/firehose.json?delimited=length' % STREAM_VERSION
        if count:
            self.url += '&count=%s' % count
        self._start(async)

    def retweet(self, async=False):
        """Stream statuses/retweet."""
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%i/statuses/retweet.json?delimited=length' % STREAM_VERSION
        self._start(async)

    def sample(self, count=None, async=False):
        """Stream statuses/sample (a random sample of public statuses)."""
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%i/statuses/sample.json?delimited=length' % STREAM_VERSION
        if count:
            self.url += '&count=%s' % count
        self._start(async)

    def filter(self, follow=None, track=None, async=False):
        """Stream statuses/filter: follow user ids and/or track keywords."""
        params = {}
        self.headers['Content-type'] = "application/x-www-form-urlencoded"
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%i/statuses/filter.json?delimited=length' % STREAM_VERSION
        if follow:
            params['follow'] = ','.join(map(str, follow))
        if track:
            params['track'] = ','.join(map(str, track))
        self.body = urllib.urlencode(params)
        self._start(async)

    def disconnect(self):
        """Signal the loops to stop; _run closes the socket on exit."""
        if self.running is False:
            return
        self.running = False
| Python |
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
import os
import mimetypes
from binder import bind_api
from error import TweepError
from parsers import ModelParser
from utils import list_to_csv
class API(object):
"""Twitter API"""
    def __init__(self, auth_handler=None,
            host='api.twitter.com', search_host='search.twitter.com',
            cache=None, secure=False, api_root='/1', search_root='',
            retry_count=0, retry_delay=0, retry_errors=None,
            parser=None):
        """Create a new API client.

        Args:
            auth_handler: authentication handler (e.g. BasicAuthHandler
                or OAuthHandler); None for unauthenticated access.
            host: REST API hostname.
            search_host: Search API hostname.
            cache: optional cache backend used for GET requests.
            secure: if True, use https:// for requests.
            api_root: URL root / version prefix for the REST API.
            search_root: URL root for the Search API.
            retry_count: number of times to retry a failed request.
            retry_delay: seconds to sleep between retries.
            retry_errors: HTTP status codes to retry on (None = keep
                retrying until a 200 is returned).
            parser: payload parser; defaults to ModelParser, which
                returns tweepy model objects.
        """
        self.auth = auth_handler
        self.host = host
        self.search_host = search_host
        self.api_root = api_root
        self.search_root = search_root
        self.cache = cache
        self.secure = secure
        self.retry_count = retry_count
        self.retry_delay = retry_delay
        self.retry_errors = retry_errors
        self.parser = parser or ModelParser()
""" statuses/public_timeline """
public_timeline = bind_api(
path = '/statuses/public_timeline.json',
payload_type = 'status', payload_list = True,
allowed_param = []
)
""" statuses/home_timeline """
home_timeline = bind_api(
path = '/statuses/home_timeline.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" statuses/friends_timeline """
friends_timeline = bind_api(
path = '/statuses/friends_timeline.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" statuses/user_timeline """
user_timeline = bind_api(
path = '/statuses/user_timeline.json',
payload_type = 'status', payload_list = True,
allowed_param = ['id', 'user_id', 'screen_name', 'since_id',
'max_id', 'count', 'page']
)
""" statuses/mentions """
mentions = bind_api(
path = '/statuses/mentions.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
"""/statuses/:id/retweeted_by.format"""
retweeted_by = bind_api(
path = '/statuses/{id}/retweeted_by.json',
payload_type = 'status', payload_list = True,
allowed_param = ['id', 'count', 'page'],
require_auth = True
)
"""/statuses/:id/retweeted_by/ids.format"""
retweeted_by_ids = bind_api(
path = '/statuses/{id}/retweeted_by/ids.json',
payload_type = 'ids',
allowed_param = ['id', 'count', 'page'],
require_auth = True
)
""" statuses/retweeted_by_me """
retweeted_by_me = bind_api(
path = '/statuses/retweeted_by_me.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" statuses/retweeted_to_me """
retweeted_to_me = bind_api(
path = '/statuses/retweeted_to_me.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" statuses/retweets_of_me """
retweets_of_me = bind_api(
path = '/statuses/retweets_of_me.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" statuses/show """
get_status = bind_api(
path = '/statuses/show.json',
payload_type = 'status',
allowed_param = ['id']
)
""" statuses/update """
update_status = bind_api(
path = '/statuses/update.json',
method = 'POST',
payload_type = 'status',
allowed_param = ['status', 'in_reply_to_status_id', 'lat', 'long', 'source', 'place_id'],
require_auth = True
)
""" statuses/destroy """
destroy_status = bind_api(
path = '/statuses/destroy.json',
method = 'DELETE',
payload_type = 'status',
allowed_param = ['id'],
require_auth = True
)
""" statuses/retweet """
retweet = bind_api(
path = '/statuses/retweet/{id}.json',
method = 'POST',
payload_type = 'status',
allowed_param = ['id'],
require_auth = True
)
""" statuses/retweets """
retweets = bind_api(
path = '/statuses/retweets/{id}.json',
payload_type = 'status', payload_list = True,
allowed_param = ['id', 'count'],
require_auth = True
)
""" users/show """
get_user = bind_api(
path = '/users/show.json',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name']
)
""" Perform bulk look up of users from user ID or screenname """
    def lookup_users(self, user_ids=None, screen_names=None):
        """Perform bulk look up of users from user IDs or screen names.

        Args:
            user_ids: optional list of user IDs (sent as a CSV string).
            screen_names: optional list of screen names (sent likewise).
        """
        return self._lookup_users(list_to_csv(user_ids), list_to_csv(screen_names))
_lookup_users = bind_api(
path = '/users/lookup.json',
payload_type = 'user', payload_list = True,
allowed_param = ['user_id', 'screen_name'],
require_auth = True
)
""" Get the authenticated user """
    def me(self):
        """Return the User model for the authenticated user.

        Requires an auth handler that can report its username.
        """
        return self.get_user(screen_name=self.auth.get_username())
""" users/search """
search_users = bind_api(
path = '/users/search.json',
payload_type = 'user', payload_list = True,
require_auth = True,
allowed_param = ['q', 'per_page', 'page']
)
""" statuses/friends """
friends = bind_api(
path = '/statuses/friends.json',
payload_type = 'user', payload_list = True,
allowed_param = ['id', 'user_id', 'screen_name', 'page', 'cursor']
)
""" statuses/followers """
followers = bind_api(
path = '/statuses/followers.json',
payload_type = 'user', payload_list = True,
allowed_param = ['id', 'user_id', 'screen_name', 'page', 'cursor']
)
""" direct_messages """
direct_messages = bind_api(
path = '/direct_messages.json',
payload_type = 'direct_message', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" direct_messages/sent """
sent_direct_messages = bind_api(
path = '/direct_messages/sent.json',
payload_type = 'direct_message', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" direct_messages/new """
send_direct_message = bind_api(
path = '/direct_messages/new.json',
method = 'POST',
payload_type = 'direct_message',
allowed_param = ['user', 'screen_name', 'user_id', 'text'],
require_auth = True
)
""" direct_messages/destroy """
destroy_direct_message = bind_api(
path = '/direct_messages/destroy.json',
method = 'DELETE',
payload_type = 'direct_message',
allowed_param = ['id'],
require_auth = True
)
""" friendships/create """
create_friendship = bind_api(
path = '/friendships/create.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name', 'follow'],
require_auth = True
)
""" friendships/destroy """
destroy_friendship = bind_api(
path = '/friendships/destroy.json',
method = 'DELETE',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" friendships/exists """
exists_friendship = bind_api(
path = '/friendships/exists.json',
payload_type = 'json',
allowed_param = ['user_a', 'user_b']
)
""" friendships/show """
show_friendship = bind_api(
path = '/friendships/show.json',
payload_type = 'friendship',
allowed_param = ['source_id', 'source_screen_name',
'target_id', 'target_screen_name']
)
""" friends/ids """
friends_ids = bind_api(
path = '/friends/ids.json',
payload_type = 'ids',
allowed_param = ['id', 'user_id', 'screen_name', 'cursor']
)
""" friendships/incoming """
friendships_incoming = bind_api(
path = '/friendships/incoming.json',
payload_type = 'ids',
allowed_param = ['cursor']
)
""" friendships/outgoing"""
friendships_outgoing = bind_api(
path = '/friendships/outgoing.json',
payload_type = 'ids',
allowed_param = ['cursor']
)
""" followers/ids """
followers_ids = bind_api(
path = '/followers/ids.json',
payload_type = 'ids',
allowed_param = ['id', 'user_id', 'screen_name', 'cursor']
)
""" account/verify_credentials """
    def verify_credentials(self):
        """Check the supplied credentials against account/verify_credentials.

        Returns the authenticated User model on success, or False when
        the endpoint rejects the credentials (any TweepError).
        """
        try:
            return bind_api(
                path = '/account/verify_credentials.json',
                payload_type = 'user',
                require_auth = True
            )(self)
        except TweepError:
            return False
""" account/rate_limit_status """
rate_limit_status = bind_api(
path = '/account/rate_limit_status.json',
payload_type = 'json'
)
""" account/update_delivery_device """
set_delivery_device = bind_api(
path = '/account/update_delivery_device.json',
method = 'POST',
allowed_param = ['device'],
payload_type = 'user',
require_auth = True
)
""" account/update_profile_colors """
update_profile_colors = bind_api(
path = '/account/update_profile_colors.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['profile_background_color', 'profile_text_color',
'profile_link_color', 'profile_sidebar_fill_color',
'profile_sidebar_border_color'],
require_auth = True
)
""" account/update_profile_image """
    def update_profile_image(self, filename):
        """Upload a new profile image for the authenticated user.

        The file must be a gif/jpeg/png of at most 700kb (enforced by
        _pack_image).  Returns the updated User model.
        """
        headers, post_data = API._pack_image(filename, 700)
        return bind_api(
            path = '/account/update_profile_image.json',
            method = 'POST',
            payload_type = 'user',
            require_auth = True
        )(self, post_data=post_data, headers=headers)
""" account/update_profile_background_image """
def update_profile_background_image(self, filename, *args, **kargs):
headers, post_data = API._pack_image(filename, 800)
bind_api(
path = '/account/update_profile_background_image.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['tile'],
require_auth = True
)(self, post_data=post_data, headers=headers)
""" account/update_profile """
update_profile = bind_api(
path = '/account/update_profile.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['name', 'url', 'location', 'description'],
require_auth = True
)
""" favorites """
favorites = bind_api(
path = '/favorites.json',
payload_type = 'status', payload_list = True,
allowed_param = ['id', 'page']
)
""" favorites/create """
create_favorite = bind_api(
path = '/favorites/create/{id}.json',
method = 'POST',
payload_type = 'status',
allowed_param = ['id'],
require_auth = True
)
""" favorites/destroy """
destroy_favorite = bind_api(
path = '/favorites/destroy/{id}.json',
method = 'DELETE',
payload_type = 'status',
allowed_param = ['id'],
require_auth = True
)
""" notifications/follow """
enable_notifications = bind_api(
path = '/notifications/follow.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" notifications/leave """
disable_notifications = bind_api(
path = '/notifications/leave.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" blocks/create """
create_block = bind_api(
path = '/blocks/create.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" blocks/destroy """
destroy_block = bind_api(
path = '/blocks/destroy.json',
method = 'DELETE',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" blocks/exists """
    def exists_block(self, *args, **kargs):
        """Return True if the authenticated user is blocking the given user.

        The blocks/exists endpoint errors when no block exists, so any
        TweepError is interpreted as "not blocked".
        """
        try:
            bind_api(
                path = '/blocks/exists.json',
                allowed_param = ['id', 'user_id', 'screen_name'],
                require_auth = True
            )(self, *args, **kargs)
        except TweepError:
            return False
        return True
""" blocks/blocking """
blocks = bind_api(
path = '/blocks/blocking.json',
payload_type = 'user', payload_list = True,
allowed_param = ['page'],
require_auth = True
)
""" blocks/blocking/ids """
blocks_ids = bind_api(
path = '/blocks/blocking/ids.json',
payload_type = 'json',
require_auth = True
)
""" report_spam """
report_spam = bind_api(
path = '/report_spam.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" saved_searches """
saved_searches = bind_api(
path = '/saved_searches.json',
payload_type = 'saved_search', payload_list = True,
require_auth = True
)
""" saved_searches/show """
get_saved_search = bind_api(
path = '/saved_searches/show/{id}.json',
payload_type = 'saved_search',
allowed_param = ['id'],
require_auth = True
)
""" saved_searches/create """
create_saved_search = bind_api(
path = '/saved_searches/create.json',
method = 'POST',
payload_type = 'saved_search',
allowed_param = ['query'],
require_auth = True
)
""" saved_searches/destroy """
destroy_saved_search = bind_api(
path = '/saved_searches/destroy/{id}.json',
method = 'DELETE',
payload_type = 'saved_search',
allowed_param = ['id'],
require_auth = True
)
""" help/test """
    def test(self):
        """Call help/test; return True if the API is up, False otherwise."""
        try:
            bind_api(
                path = '/help/test.json',
            )(self)
        except TweepError:
            return False
        return True
    def create_list(self, *args, **kargs):
        """Create a new list owned by the authenticated user.

        Accepts the endpoint parameters 'name', 'mode' and 'description';
        returns the created List model.
        """
        return bind_api(
            path = '/%s/lists.json' % self.auth.get_username(),
            method = 'POST',
            payload_type = 'list',
            allowed_param = ['name', 'mode', 'description'],
            require_auth = True
        )(self, *args, **kargs)
    def destroy_list(self, slug):
        """Delete the authenticated user's list identified by *slug*."""
        return bind_api(
            path = '/%s/lists/%s.json' % (self.auth.get_username(), slug),
            method = 'DELETE',
            payload_type = 'list',
            require_auth = True
        )(self)
    def update_list(self, slug, *args, **kargs):
        """Update the authenticated user's list identified by *slug*.

        Accepts the endpoint parameters 'name', 'mode' and 'description'.
        """
        return bind_api(
            path = '/%s/lists/%s.json' % (self.auth.get_username(), slug),
            method = 'POST',
            payload_type = 'list',
            allowed_param = ['name', 'mode', 'description'],
            require_auth = True
        )(self, *args, **kargs)
lists = bind_api(
path = '/{user}/lists.json',
payload_type = 'list', payload_list = True,
allowed_param = ['user', 'cursor'],
require_auth = True
)
lists_memberships = bind_api(
path = '/{user}/lists/memberships.json',
payload_type = 'list', payload_list = True,
allowed_param = ['user', 'cursor'],
require_auth = True
)
lists_subscriptions = bind_api(
path = '/{user}/lists/subscriptions.json',
payload_type = 'list', payload_list = True,
allowed_param = ['user', 'cursor'],
require_auth = True
)
list_timeline = bind_api(
path = '/{owner}/lists/{slug}/statuses.json',
payload_type = 'status', payload_list = True,
allowed_param = ['owner', 'slug', 'since_id', 'max_id', 'per_page', 'page']
)
get_list = bind_api(
path = '/{owner}/lists/{slug}.json',
payload_type = 'list',
allowed_param = ['owner', 'slug']
)
    def add_list_member(self, slug, *args, **kargs):
        """Add a member (by 'id') to the authenticated user's list *slug*."""
        return bind_api(
            path = '/%s/%s/members.json' % (self.auth.get_username(), slug),
            method = 'POST',
            payload_type = 'list',
            allowed_param = ['id'],
            require_auth = True
        )(self, *args, **kargs)
    def remove_list_member(self, slug, *args, **kargs):
        """Remove a member (by 'id') from the authenticated user's list *slug*."""
        return bind_api(
            path = '/%s/%s/members.json' % (self.auth.get_username(), slug),
            method = 'DELETE',
            payload_type = 'list',
            allowed_param = ['id'],
            require_auth = True
        )(self, *args, **kargs)
list_members = bind_api(
path = '/{owner}/{slug}/members.json',
payload_type = 'user', payload_list = True,
allowed_param = ['owner', 'slug', 'cursor']
)
    def is_list_member(self, owner, slug, user_id):
        """Return the member's User model if *user_id* is on the list,
        or False if the endpoint errors (interpreted as "not a member")."""
        try:
            return bind_api(
                path = '/%s/%s/members/%s.json' % (owner, slug, user_id),
                payload_type = 'user'
            )(self)
        except TweepError:
            return False
subscribe_list = bind_api(
path = '/{owner}/{slug}/subscribers.json',
method = 'POST',
payload_type = 'list',
allowed_param = ['owner', 'slug'],
require_auth = True
)
unsubscribe_list = bind_api(
path = '/{owner}/{slug}/subscribers.json',
method = 'DELETE',
payload_type = 'list',
allowed_param = ['owner', 'slug'],
require_auth = True
)
list_subscribers = bind_api(
path = '/{owner}/{slug}/subscribers.json',
payload_type = 'user', payload_list = True,
allowed_param = ['owner', 'slug', 'cursor']
)
    def is_subscribed_list(self, owner, slug, user_id):
        """Return the subscriber's User model if *user_id* subscribes to the
        list, or False if the endpoint errors ("not subscribed")."""
        try:
            return bind_api(
                path = '/%s/%s/subscribers/%s.json' % (owner, slug, user_id),
                payload_type = 'user'
            )(self)
        except TweepError:
            return False
""" trends/available """
trends_available = bind_api(
path = '/trends/available.json',
payload_type = 'json',
allowed_param = ['lat', 'long']
)
""" trends/location """
trends_location = bind_api(
path = '/trends/{woeid}.json',
payload_type = 'json',
allowed_param = ['woeid']
)
""" search """
search = bind_api(
search_api = True,
path = '/search.json',
payload_type = 'search_result', payload_list = True,
allowed_param = ['q', 'lang', 'locale', 'rpp', 'page', 'since_id', 'geocode', 'show_user', 'max_id', 'since', 'until', 'result_type']
)
search.pagination_mode = 'page'
""" trends """
trends = bind_api(
path = '/trends.json',
payload_type = 'json'
)
""" trends/current """
trends_current = bind_api(
path = '/trends/current.json',
payload_type = 'json',
allowed_param = ['exclude']
)
""" trends/daily """
trends_daily = bind_api(
path = '/trends/daily.json',
payload_type = 'json',
allowed_param = ['date', 'exclude']
)
""" trends/weekly """
trends_weekly = bind_api(
path = '/trends/weekly.json',
payload_type = 'json',
allowed_param = ['date', 'exclude']
)
""" geo/reverse_geocode """
reverse_geocode = bind_api(
path = '/geo/reverse_geocode.json',
payload_type = 'json',
allowed_param = ['lat', 'long', 'accuracy', 'granularity', 'max_results']
)
""" geo/nearby_places """
nearby_places = bind_api(
path = '/geo/nearby_places.json',
payload_type = 'json',
allowed_param = ['lat', 'long', 'ip', 'accuracy', 'granularity', 'max_results']
)
""" geo/id """
geo_id = bind_api(
path = '/geo/id/{id}.json',
payload_type = 'json',
allowed_param = ['id']
)
""" Internal use only """
@staticmethod
def _pack_image(filename, max_size):
"""Pack image from file into multipart-formdata post body"""
# image must be less than 700kb in size
try:
if os.path.getsize(filename) > (max_size * 1024):
raise TweepError('File is too big, must be less than 700kb.')
except os.error, e:
raise TweepError('Unable to access file')
# image must be gif, jpeg, or png
file_type = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
file_type = file_type[0]
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise TweepError('Invalid file type for image: %s' % file_type)
# build the mulitpart-formdata body
fp = open(filename, 'rb')
BOUNDARY = 'Tw3ePy'
body = []
body.append('--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="image"; filename="%s"' % filename)
body.append('Content-Type: %s' % file_type)
body.append('')
body.append(fp.read())
body.append('--' + BOUNDARY + '--')
body.append('')
fp.close()
body = '\r\n'.join(body)
# build headers
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy',
'Content-Length': len(body)
}
return headers, body
| Python |
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from error import TweepError
class Cursor(object):
    """Pagination helper class.

    Wraps a bound API method and exposes page- or item-level iteration,
    choosing the iterator implementation from the method's declared
    pagination_mode ('cursor' or 'page').
    """

    def __init__(self, method, *args, **kargs):
        if not hasattr(method, 'pagination_mode'):
            raise TweepError('This method does not perform pagination')
        if method.pagination_mode == 'cursor':
            self.iterator = CursorIterator(method, args, kargs)
        else:
            self.iterator = PageIterator(method, args, kargs)

    def pages(self, limit=0):
        """Return iterator for pages"""
        if limit > 0:
            self.iterator.limit = limit
        return self.iterator

    def items(self, limit=0):
        """Return iterator for items in each page"""
        item_iter = ItemIterator(self.iterator)
        item_iter.limit = limit
        return item_iter
class BaseIterator(object):
def __init__(self, method, args, kargs):
self.method = method
self.args = args
self.kargs = kargs
self.limit = 0
def next(self):
raise NotImplementedError
def prev(self):
raise NotImplementedError
def __iter__(self):
return self
class CursorIterator(BaseIterator):
    """Iterate pages of a cursor-paginated endpoint.

    The wrapped method must return a (data, (prev_cursor, next_cursor))
    tuple, as produced by the parsers for cursored payloads.
    """

    def __init__(self, method, args, kargs):
        BaseIterator.__init__(self, method, args, kargs)
        # -1 requests the first page; a cursor of 0 means "no more pages".
        self.next_cursor = -1
        self.prev_cursor = 0
        self.count = 0

    def next(self):
        if self.next_cursor == 0 or (self.limit and self.count == self.limit):
            raise StopIteration
        data, cursors = self.method(
                cursor=self.next_cursor, *self.args, **self.kargs
        )
        self.prev_cursor, self.next_cursor = cursors
        if len(data) == 0:
            raise StopIteration
        self.count += 1
        return data

    def prev(self):
        if self.prev_cursor == 0:
            raise TweepError('Can not page back more, at first page')
        # BUG FIX: the method returns (data, (prev, next)); the old 3-way
        # unpack 'data, self.next_cursor, self.prev_cursor = ...' raised
        # ValueError on every call.  Unpack the same way next() does.
        data, cursors = self.method(
                cursor=self.prev_cursor, *self.args, **self.kargs
        )
        self.prev_cursor, self.next_cursor = cursors
        self.count -= 1
        return data
class PageIterator(BaseIterator):
    """Iterates over results using classic 1-based page-number pagination."""
    def __init__(self, method, args, kargs):
        BaseIterator.__init__(self, method, args, kargs)
        self.current_page = 0  # 0 == iteration not started yet
    def next(self):
        self.current_page += 1
        items = self.method(page=self.current_page, *self.args, **self.kargs)
        past_limit = self.limit > 0 and self.current_page > self.limit
        if len(items) == 0 or past_limit:
            raise StopIteration
        return items
    def prev(self):
        if self.current_page == 1:
            raise TweepError('Can not page back more, at first page')
        self.current_page -= 1
        return self.method(page=self.current_page, *self.args, **self.kargs)
class ItemIterator(BaseIterator):
    """Flattens a page-level iterator into an iterator over single items."""
    def __init__(self, page_iterator):
        # Underlying page iterator (CursorIterator or PageIterator).
        self.page_iterator = page_iterator
        # Maximum number of items to yield; 0 means unlimited.
        self.limit = 0
        # Page currently being walked (None until the first fetch).
        self.current_page = None
        # Index of the item most recently returned from current_page.
        self.page_index = -1
        # Number of items yielded so far.
        self.count = 0
    def next(self):
        if self.limit > 0 and self.count == self.limit:
            raise StopIteration
        if self.current_page is None or self.page_index == len(self.current_page) - 1:
            # Reached end of current page, get the next page...
            self.current_page = self.page_iterator.next()
            self.page_index = -1
        self.page_index += 1
        self.count += 1
        return self.current_page[self.page_index]
    def prev(self):
        if self.current_page is None:
            raise TweepError('Can not go back more, at first page')
        if self.page_index == 0:
            # At the beginning of the current page: fetch the PREVIOUS
            # page and restart from its end.
            self.current_page = self.page_iterator.prev()
            self.page_index = len(self.current_page)
        if self.page_index == 0:
            # The previous page came back empty.
            raise TweepError('No more items')
        self.page_index -= 1
        self.count -= 1
        return self.current_page[self.page_index]
| Python |
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
import time
import threading
import os
import cPickle as pickle
try:
import hashlib
except ImportError:
# python 2.4
import md5 as hashlib
try:
import fcntl
except ImportError:
# Probably on a windows system
# TODO: use win32file
pass
class Cache(object):
    """Abstract cache interface; concrete backends override every method."""
    def __init__(self, timeout=60):
        """Initialize the cache.

        timeout: number of seconds a cached entry stays valid
        """
        self.timeout = timeout
    def store(self, key, value):
        """Add a new record to the cache under ``key``."""
        raise NotImplementedError
    def get(self, key, timeout=None):
        """Return the cached entry if present and fresh, else None.

        timeout: overrides the instance-wide timeout when given
        """
        raise NotImplementedError
    def count(self):
        """Return the number of entries currently stored."""
        raise NotImplementedError
    def cleanup(self):
        """Remove every expired entry."""
        raise NotImplementedError
    def flush(self):
        """Remove every entry."""
        raise NotImplementedError
class MemoryCache(Cache):
    """Thread-safe in-memory cache.

    Entries are stored as ``(insert_time, value)`` tuples and are lazily
    expired on lookup (or eagerly via cleanup()).
    """
    def __init__(self, timeout=60):
        Cache.__init__(self, timeout)
        self._entries = {}
        self.lock = threading.Lock()
    def __getstate__(self):
        # pickle support: locks are not picklable, persist only the data.
        return {'entries': self._entries, 'timeout': self.timeout}
    def __setstate__(self, state):
        # unpickle: recreate the lock dropped by __getstate__.
        self.lock = threading.Lock()
        self._entries = state['entries']
        self.timeout = state['timeout']
    def _is_expired(self, entry, timeout):
        # A timeout <= 0 disables expiry entirely.
        return timeout > 0 and (time.time() - entry[0]) >= timeout
    def store(self, key, value):
        # BUG FIX: 'with' guarantees the lock is released even if the
        # insert raises; the old acquire()/release() pair could leak the
        # lock and deadlock every later caller.
        with self.lock:
            self._entries[key] = (time.time(), value)
    def get(self, key, timeout=None):
        with self.lock:
            entry = self._entries.get(key)
            if not entry:
                # no hit, return nothing
                return None
            # Use the caller-provided timeout if given, otherwise the
            # one configured at construction time.
            if timeout is None:
                timeout = self.timeout
            if self._is_expired(entry, timeout):
                # Entry expired: drop it and report a miss.
                del self._entries[key]
                return None
            return entry[1]
    def count(self):
        return len(self._entries)
    def cleanup(self):
        with self.lock:
            # BUG FIX: snapshot the items so deleting during iteration is
            # safe (on Python 3, items() is a live view and mutating the
            # dict while iterating it raises RuntimeError).
            for key, entry in list(self._entries.items()):
                if self._is_expired(entry, self.timeout):
                    del self._entries[key]
    def flush(self):
        with self.lock:
            self._entries.clear()
class FileCache(Cache):
    """File-based cache: one pickled (timestamp, value) file per key."""
    # locks used to make cache thread-safe
    cache_locks = {}
    def __init__(self, cache_dir, timeout=60):
        Cache.__init__(self, timeout)
        if os.path.exists(cache_dir) is False:
            os.mkdir(cache_dir)
        self.cache_dir = cache_dir
        # One in-process lock per directory, shared by every FileCache
        # instance pointing at the same directory.
        if cache_dir in FileCache.cache_locks:
            self.lock = FileCache.cache_locks[cache_dir]
        else:
            self.lock = threading.Lock()
            FileCache.cache_locks[cache_dir] = self.lock
        # Pick the platform-specific file locking strategy once, up front.
        if os.name == 'posix':
            self._lock_file = self._lock_file_posix
            self._unlock_file = self._unlock_file_posix
        elif os.name == 'nt':
            self._lock_file = self._lock_file_win32
            self._unlock_file = self._unlock_file_win32
        else:
            print 'Warning! FileCache locking not supported on this system!'
            self._lock_file = self._lock_file_dummy
            self._unlock_file = self._unlock_file_dummy
    def _get_path(self, key):
        # The cache filename is the MD5 hex digest of the key.
        md5 = hashlib.md5()
        md5.update(key)
        return os.path.join(self.cache_dir, md5.hexdigest())
    def _lock_file_dummy(self, path, exclusive=True):
        # No-op fallback used when no locking support is available.
        return None
    def _unlock_file_dummy(self, lock):
        return
    def _lock_file_posix(self, path, exclusive=True):
        # Lock via a sidecar '<path>.lock' file with fcntl: exclusive for
        # writers, shared for readers.
        lock_path = path + '.lock'
        if exclusive is True:
            f_lock = open(lock_path, 'w')
            fcntl.lockf(f_lock, fcntl.LOCK_EX)
        else:
            f_lock = open(lock_path, 'r')
            fcntl.lockf(f_lock, fcntl.LOCK_SH)
        # If the lock file vanished while we waited, the entry was
        # deleted underneath us; report failure.
        if os.path.exists(lock_path) is False:
            f_lock.close()
            return None
        return f_lock
    def _unlock_file_posix(self, lock):
        # Closing the descriptor releases the fcntl lock.
        lock.close()
    def _lock_file_win32(self, path, exclusive=True):
        # TODO: implement
        return None
    def _unlock_file_win32(self, lock):
        # TODO: implement
        return
    def _delete_file(self, path):
        # Remove the data file and its sidecar lock file, if present.
        os.remove(path)
        if os.path.exists(path + '.lock'):
            os.remove(path + '.lock')
    def store(self, key, value):
        path = self._get_path(key)
        self.lock.acquire()
        try:
            # acquire lock and open file
            f_lock = self._lock_file(path)
            datafile = open(path, 'wb')
            # write data
            pickle.dump((time.time(), value), datafile)
            # close and unlock file
            datafile.close()
            self._unlock_file(f_lock)
        finally:
            self.lock.release()
    def get(self, key, timeout=None):
        return self._get(self._get_path(key), timeout)
    def _get(self, path, timeout):
        if os.path.exists(path) is False:
            # no record
            return None
        self.lock.acquire()
        try:
            # acquire lock and open
            f_lock = self._lock_file(path, False)
            datafile = open(path, 'rb')
            # read pickled object
            created_time, value = pickle.load(datafile)
            datafile.close()
            # check if value is expired
            if timeout is None:
                timeout = self.timeout
            if timeout > 0 and (time.time() - created_time) >= timeout:
                # expired! delete from cache
                value = None
                self._delete_file(path)
            # unlock and return result
            self._unlock_file(f_lock)
            return value
        finally:
            self.lock.release()
    def count(self):
        # Every non-lock file in the directory is one cache entry.
        c = 0
        for entry in os.listdir(self.cache_dir):
            if entry.endswith('.lock'):
                continue
            c += 1
        return c
    def cleanup(self):
        # _get() deletes expired entries as a side effect, so probing
        # every entry performs the cleanup.
        for entry in os.listdir(self.cache_dir):
            if entry.endswith('.lock'):
                continue
            self._get(os.path.join(self.cache_dir, entry), None)
    def flush(self):
        for entry in os.listdir(self.cache_dir):
            if entry.endswith('.lock'):
                continue
            self._delete_file(os.path.join(self.cache_dir, entry))
| Python |
# -*- coding: utf-8 -*-
"""
requests.core
~~~~~~~~~~~~~
This module implements the main Requests system.
:copyright: (c) 2011 by Kenneth Reitz.
:license: ISC, see LICENSE for more details.
"""
from __future__ import absolute_import
import urllib
import urllib2
from urllib2 import HTTPError
from urlparse import urlparse
from .packages.poster.encode import multipart_encode
from .packages.poster.streaminghttp import register_openers, get_handlers
# Distribution metadata.
__title__ = 'requests'
__version__ = '0.3.1'
__build__ = 0x000301
__author__ = 'Kenneth Reitz'
__license__ = 'ISC'
__copyright__ = 'Copyright 2011 Kenneth Reitz'
# Names exported by `from requests import *`.
__all__ = [
    'Request', 'Response', 'request', 'get', 'head', 'post', 'put', 'delete',
    'auth_manager', 'AuthObject','RequestException', 'AuthenticationError',
    'URLRequired', 'InvalidMethod', 'HTTPError'
]
class _Request(urllib2.Request):
    """Hidden wrapper around the urllib2.Request object. Allows for manual
    setting of HTTP methods.
    """
    def __init__(self, url, data=None, headers=None, origin_req_host=None, unverifiable=False, method=None):
        # BUG FIX: the old signature used a shared mutable default
        # (headers={}); a fresh dict is substituted per call instead.
        urllib2.Request.__init__(self, url, data, headers or {}, origin_req_host, unverifiable)
        self.method = method
    def get_method(self):
        # Fall back to urllib2's GET/POST inference when no explicit
        # method override was given.
        if self.method:
            return self.method
        return urllib2.Request.get_method(self)
class Request(object):
    """The :class:`Request` object. It carries out all functionality of
    Requests. Recommended interface is with the Requests functions.
    """
    # HTTP methods this client knows how to send.
    _METHODS = ('GET', 'HEAD', 'PUT', 'POST', 'DELETE')
    def __init__(self, url=None, headers=dict(), files=None, method=None,
            data=dict(), auth=None, cookiejar=None):
        # NOTE(review): the dict() defaults are evaluated once and shared
        # across all calls that omit them -- confirm nothing mutates them.
        self.url = url
        self.headers = headers
        self.files = files
        self.method = method
        self.data = data
        # url encode data if it's a dict
        if hasattr(data, 'items'):
            self._enc_data = urllib.urlencode(data)
        else:
            self._enc_data = data
        # Empty Response shell; populated by send().
        self.response = Response()
        # A (username, password[, ...]) tuple is promoted to an AuthObject.
        if isinstance(auth, (list, tuple)):
            auth = AuthObject(*auth)
        # Fall back to credentials previously registered for this URL.
        if not auth:
            auth = auth_manager.get_auth(self.url)
        self.auth = auth
        self.cookiejar = cookiejar
        # Becomes True after a successful send().
        self.sent = False
    def __repr__(self):
        return '<Request [%s]>' % (self.method)
    def __setattr__(self, name, value):
        # Validate HTTP method names on every assignment.
        if (name == 'method') and (value):
            if not value in self._METHODS:
                raise InvalidMethod()
        object.__setattr__(self, name, value)
    def _checks(self):
        """Deterministic checks for consistency."""
        if not self.url:
            raise URLRequired
    def _get_opener(self):
        """Creates appropriate opener object for urllib2."""
        _handlers = []
        if self.cookiejar is not None:
            _handlers.append(urllib2.HTTPCookieProcessor(self.cookiejar))
        if self.auth:
            # Basic/digest handlers need the password manager wired in
            # before they are instantiated.
            if not isinstance(self.auth.handler, (urllib2.AbstractBasicAuthHandler, urllib2.AbstractDigestAuthHandler)):
                auth_manager.add_password(self.auth.realm, self.url, self.auth.username, self.auth.password)
                self.auth.handler = self.auth.handler(auth_manager)
                auth_manager.add_auth(self.url, self.auth)
            _handlers.append(self.auth.handler)
        if not _handlers:
            # Nothing custom needed; the module-level opener suffices.
            return urllib2.urlopen
        _handlers.extend(get_handlers())
        opener = urllib2.build_opener(*_handlers)
        if self.headers:
            # Allow default headers in the opener to be overloaded
            normal_keys = [k.capitalize() for k in self.headers]
            for key, val in opener.addheaders[:]:
                if key not in normal_keys:
                    continue
                # Remove it, we have a value to take its place
                opener.addheaders.remove((key, val))
        return opener.open
    def _build_response(self, resp):
        """Build internal Response object from given response."""
        self.response.status_code = getattr(resp, 'code', None)
        self.response.headers = getattr(resp.info(), 'dict', None)
        self.response.url = getattr(resp, 'url', None)
        self.response.content = resp.read()
    @staticmethod
    def _build_url(url, data):
        """Build URLs."""
        # Append the query string, joining with '&' when one already exists.
        if urlparse(url).query:
            return '%s&%s' % (url, data)
        else:
            if data:
                return '%s?%s' % (url, data)
            else:
                return url
    def send(self, anyway=False):
        """Sends the request. Returns True of successful, false if not.
        If there was an HTTPError during transmission,
        self.response.status_code will contain the HTTPError code.
        Once a request is successfully sent, `sent` will equal True.
        :param anyway: If True, request will be sent, even if it has
        already been sent.
        """
        self._checks()
        # NOTE(review): 'success' is assigned but never used below.
        success = False
        if self.method in ('GET', 'HEAD', 'DELETE'):
            # Query-style methods: encoded parameters ride on the URL.
            req = _Request(self._build_url(self.url, self._enc_data), method=self.method)
        else:
            if self.files:
                # Multipart upload; form data is merged into the parts.
                register_openers()
                if self.data:
                    self.files.update(self.data)
                datagen, headers = multipart_encode(self.files)
                req = _Request(self.url, data=datagen, headers=headers, method=self.method)
            else:
                req = _Request(self.url, data=self._enc_data, method=self.method)
            # NOTE(review): custom headers are only applied on this branch,
            # so GET/HEAD/DELETE requests ignore self.headers here (they are
            # still consulted by _get_opener) -- confirm this is intended.
            if self.headers:
                req.headers.update(self.headers)
        if not self.sent or anyway:
            try:
                opener = self._get_opener()
                resp = opener(req)
                if self.cookiejar is not None:
                    self.cookiejar.extract_cookies(resp, req)
            except urllib2.HTTPError, why:
                # HTTP errors still carry a response body and status code.
                self._build_response(why)
                self.response.error = why
            else:
                self._build_response(resp)
                self.response.ok = True
                self.response.cached = False
        else:
            # Already sent and anyway=False: serve the stored response.
            self.response.cached = True
        self.sent = self.response.ok
        return self.sent
    def read(self, *args):
        # File-like shim that delegates to the stored response.
        return self.response.read()
class Response(object):
    """Container for the result of a single HTTP request.

    Every :class:`Request` owns one of these as its ``response``
    attribute.
    """
    def __init__(self):
        self.content = None      # raw response body
        self.status_code = None  # integer HTTP status, if any
        self.headers = dict()    # response headers
        self.url = None          # final URL of the response
        self.ok = False          # True once a request succeeded
        self.error = None        # stored HTTPError, if one occurred
        self.cached = False      # True when served without re-sending
    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)
    def __nonzero__(self):
        """Truthy exactly when no error was recorded."""
        return not self.error
    def raise_for_status(self):
        """Re-raise the stored HTTPError, if any."""
        if self.error:
            raise self.error
    def read(self, *args):
        """File-like accessor returning the full body."""
        return self.content
class AuthManager(object):
    """Process-wide registry of credentials and auth handlers (singleton)."""
    def __new__(cls):
        # Classic singleton: every AuthManager() call yields one shared
        # instance.
        singleton = cls.__dict__.get('__singleton__')
        if singleton is not None:
            return singleton
        cls.__singleton__ = singleton = object.__new__(cls)
        return singleton
    def __init__(self):
        self.passwd = {}
        self._auth = {}
    def __repr__(self):
        # BUG FIX: the old code interpolated self.method, an attribute that
        # is never set on AuthManager, so repr() always raised
        # AttributeError.
        return '<AuthManager>'
    def add_auth(self, uri, auth):
        """Registers AuthObject to AuthManager."""
        uri = self.reduce_uri(uri, False)
        self._auth[uri] = auth
    def add_password(self, realm, uri, user, passwd):
        """Adds password to AuthManager."""
        # uri could be a single URI or a sequence
        if isinstance(uri, basestring):
            uri = [uri]
        reduced_uri = tuple([self.reduce_uri(u, False) for u in uri])
        # NOTE(review): entries are keyed only by the reduced URIs and the
        # realm argument is ignored, yet remove() indexes by realm -- the
        # two methods look inconsistent; confirm before relying on remove().
        self.passwd[reduced_uri] = (user, passwd)
    def find_user_password(self, realm, authuri):
        """Return the first stored (user, passwd) whose URI covers authuri."""
        # Reduce once outside the loop; it does not depend on the entry.
        reduced_authuri = self.reduce_uri(authuri, False)
        for uris, authinfo in self.passwd.iteritems():
            for uri in uris:
                if self.is_suburi(uri, reduced_authuri):
                    return authinfo
        return (None, None)
    def get_auth(self, uri):
        """Return the AuthObject registered for this URI, or None."""
        uri = self.reduce_uri(uri, False)
        return self._auth.get(uri, None)
    def reduce_uri(self, uri, default_port=True):
        """Accept authority or URI and extract only the authority and path."""
        # note HTTP URLs do not have a userinfo component
        parts = urllib2.urlparse.urlsplit(uri)
        if parts[1]:
            # URI
            scheme = parts[0]
            authority = parts[1]
            path = parts[2] or '/'
        else:
            # host or host:port
            scheme = None
            authority = uri
            path = '/'
        host, port = urllib2.splitport(authority)
        # Normalize a missing port to the scheme's default when requested.
        if default_port and port is None and scheme is not None:
            dport = {"http": 80,
                     "https": 443,
                     }.get(scheme)
            if dport is not None:
                authority = "%s:%d" % (host, dport)
        return authority, path
    def is_suburi(self, base, test):
        """Check if test is below base in a URI tree
        Both args must be URIs in reduced form.
        """
        if base == test:
            return True
        if base[0] != test[0]:
            return False
        common = urllib2.posixpath.commonprefix((base[1], test[1]))
        if len(common) == len(base[1]):
            return True
        return False
    def empty(self):
        """Drop every stored password."""
        self.passwd = {}
    def remove(self, uri, realm=None):
        # uri could be a single URI or a sequence
        if isinstance(uri, basestring):
            uri = [uri]
        for default_port in True, False:
            reduced_uri = tuple([self.reduce_uri(u, default_port) for u in uri])
            # NOTE(review): this indexes the stored (user, passwd) tuple by
            # realm, which cannot work with how add_password stores entries;
            # looks inherited from urllib2's password manager -- confirm.
            del self.passwd[reduced_uri][realm]
    def __contains__(self, uri):
        # uri could be a single URI or a sequence
        if isinstance(uri, basestring):
            uri = [uri]
        uri = tuple([self.reduce_uri(u, False) for u in uri])
        return uri in self.passwd
auth_manager = AuthManager()
class AuthObject(object):
    """Simple HTTP Authentication token.

    Handing one of these to a Requests function enables HTTP auth for
    that request; with AutoAuth it can cover whole domain realms.

    :param username: Username to authenticate with.
    :param password: Password for given username.
    :param realm: (optional) the realm this auth applies to
    :param handler: (optional) basic || digest || proxy_basic || proxy_digest
    """
    _handlers = {
        'basic': urllib2.HTTPBasicAuthHandler,
        'digest': urllib2.HTTPDigestAuthHandler,
        'proxy_basic': urllib2.ProxyBasicAuthHandler,
        'proxy_digest': urllib2.ProxyDigestAuthHandler
    }
    def __init__(self, username, password, handler='basic', realm=None):
        self.username = username
        self.password = password
        self.realm = realm
        if isinstance(handler, basestring):
            # Unknown handler names silently fall back to basic auth.
            handler = self._handlers.get(handler.lower(), urllib2.HTTPBasicAuthHandler)
        self.handler = handler
def request(method, url, **kwargs):
    """Sends a `method` request. Returns :class:`Response` object.
    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary of GET/HEAD/DELETE Parameters to send with the :class:`Request`.
    :param data: (optional) Bytes/Dictionary of PUT/POST Data to send with the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload.
    :param auth: (optional) AuthObject to enable Basic HTTP Auth.
    """
    # 'data' wins over 'params'; 'params' is only consulted when data is
    # absent or empty.
    payload = kwargs.pop('data', dict()) or kwargs.pop('params', dict())
    req = Request(
        method=method,
        url=url,
        data=payload,
        headers=kwargs.pop('headers', {}),
        cookiejar=kwargs.pop('cookies', None),
        files=kwargs.pop('files', None),
        auth=kwargs.pop('auth', auth_manager.get_auth(url)),
    )
    req.send()
    return req.response
def get(url, params=None, headers=None, cookies=None, auth=None):
    """Sends a GET request. Returns :class:`Response` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary of GET Parameters to send with the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) CookieJar object to send with the :class:`Request`.
    :param auth: (optional) AuthObject to enable Basic HTTP Auth.
    """
    # BUG FIX: the old {} defaults were shared mutable objects across all
    # calls; None sentinels with fresh dicts preserve the old behaviour.
    return request('GET', url, params=params or {}, headers=headers or {},
                   cookies=cookies, auth=auth)
def head(url, params=None, headers=None, cookies=None, auth=None):
    """Sends a HEAD request. Returns :class:`Response` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary of GET Parameters to send with the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) CookieJar object to send with the :class:`Request`.
    :param auth: (optional) AuthObject to enable Basic HTTP Auth.
    """
    # BUG FIX: replaced shared mutable {} defaults with None sentinels.
    return request('HEAD', url, params=params or {}, headers=headers or {},
                   cookies=cookies, auth=auth)
def post(url, data=None, headers=None, files=None, cookies=None, auth=None):
    """Sends a POST request. Returns :class:`Response` object.
    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary of POST data to send with the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload.
    :param cookies: (optional) CookieJar object to send with the :class:`Request`.
    :param auth: (optional) AuthObject to enable Basic HTTP Auth.
    """
    # BUG FIX: replaced shared mutable {} defaults with None sentinels.
    return request('POST', url, data=data or {}, headers=headers or {},
                   files=files, cookies=cookies, auth=auth)
def put(url, data='', headers=None, files=None, cookies=None, auth=None):
    """Sends a PUT request. Returns :class:`Response` object.
    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Bytes of PUT Data to send with the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param files: (optional) Dictionary of 'filename': file-like-objects for multipart encoding upload.
    :param cookies: (optional) CookieJar object to send with the :class:`Request`.
    :param auth: (optional) AuthObject to enable Basic HTTP Auth.
    """
    # BUG FIX: replaced shared mutable {} defaults (headers, files) with
    # None sentinels; the immutable '' data default is unchanged.
    return request('PUT', url, data=data, headers=headers or {},
                   files=files or {}, cookies=cookies, auth=auth)
def delete(url, params=None, headers=None, cookies=None, auth=None):
    """Sends a DELETE request. Returns :class:`Response` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary of DELETE Parameters to send with the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) CookieJar object to send with the :class:`Request`.
    :param auth: (optional) AuthObject to enable Basic HTTP Auth.
    """
    # BUG FIX: replaced shared mutable {} defaults with None sentinels.
    return request('DELETE', url, params=params or {}, headers=headers or {},
                   cookies=cookies, auth=auth)
class RequestException(Exception):
    """Base class for ambiguous errors raised while handling a request."""
class AuthenticationError(RequestException):
    """The supplied authentication credentials were not valid."""
class URLRequired(RequestException):
    """A request was attempted without a valid URL."""
class InvalidMethod(RequestException):
    """An unsupported HTTP method was attempted."""
| Python |
# -*- coding: utf-8 -*-
"""
requests.async
~~~~~~~~~~~~~~
This module implements the main Requests system, after monkey-patching
the urllib2 module with eventlet or gevent..
:copyright: (c) 2011 by Kenneth Reitz.
:license: ISC, see LICENSE for more details.
"""
from __future__ import absolute_import
import urllib
import urllib2
from urllib2 import HTTPError
# Prefer eventlet; fall back to gevent for monkey-patching urllib2.
try:
    import eventlet
    eventlet.monkey_patch()
except ImportError:
    pass
if 'eventlet' not in locals():
    try:
        from gevent import monkey
        monkey.patch_all()
    except ImportError:
        pass
# BUG FIX: the original re-checked only for eventlet here, so a successful
# gevent import still raised ImportError and the fallback never worked.
if 'eventlet' not in locals() and 'monkey' not in locals():
    raise ImportError('No Async adaptations of urllib2 found!')
from .core import *
# Re-exported public API (mirrors requests.core's __all__).
__all__ = [
    'Request', 'Response', 'request', 'get', 'head', 'post', 'put', 'delete',
    'auth_manager', 'AuthObject','RequestException', 'AuthenticationError',
    'URLRequired', 'InvalidMethod', 'HTTPError'
]
| Python |
# -*- coding: utf-8 -*-
import packages
from core import *
from core import __version__
| Python |
"""Implementation of JSONEncoder
"""
import re
from decimal import Decimal
def _import_speedups():
return None, None
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
from jsb.contrib.simplejson.decoder import PosInf
# Characters that must be escaped inside a JSON string literal.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# Same, plus everything outside printable ASCII (for ensure_ascii mode).
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects non-ASCII bytes in a str that needs UTF-8 decoding first.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Short two-character escapes for the common control characters.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
# The remaining C0 control characters fall back to \uXXXX escapes.
for i in range(0x20):
    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Hook for customising float formatting (repr round-trips exactly).
FLOAT_REPR = repr
def encode_basestring(s):
    """Return a JSON representation of a Python string
    (quoted, with the mandatory characters escaped).
    """
    # Byte strings containing non-ASCII data are decoded as UTF-8 first.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    return u'"' + ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s) + u'"'
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string
    (every non-printable or non-ASCII character becomes a \\uXXXX escape).
    """
    # Byte strings containing non-ASCII data are decoded as UTF-8 first.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        ch = match.group(0)
        if ch in ESCAPE_DCT:
            # Short two-character escape (\n, \t, \", ...).
            return ESCAPE_DCT[ch]
        n = ord(ch)
        if n < 0x10000:
            return '\\u%04x' % (n,)
        # Code points outside the BMP become a UTF-16 surrogate pair.
        n -= 0x10000
        hi = 0xd800 | ((n >> 10) & 0x3ff)
        lo = 0xdc00 | (n & 0x3ff)
        return '\\u%04x\\u%04x' % (hi, lo)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
    c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.
    Supports the following objects and types by default:
    +-------------------+---------------+
    | Python | JSON |
    +===================+===============+
    | dict | object |
    +-------------------+---------------+
    | list, tuple | array |
    +-------------------+---------------+
    | str, unicode | string |
    +-------------------+---------------+
    | int, long, float | number |
    +-------------------+---------------+
    | True | true |
    +-------------------+---------------+
    | False | false |
    +-------------------+---------------+
    | None | null |
    +-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # Default separators; may be overridden per-instance via 'separators'.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None,
            use_decimal=False):
        """Constructor for JSONEncoder, with sensible defaults.
        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.
        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.
        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.
        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.
        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.
        If indent is a string, then JSON array elements and object members
        will be pretty-printed with a newline followed by that string repeated
        for each level of nesting. ``None`` (the default) selects the most compact
        representation without any newlines. For backwards compatibility with
        versions of simplejson earlier than 2.1.0, an integer is also accepted
        and is converted to a string with that many spaces.
        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.
        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.
        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        If use_decimal is true (not the default), ``decimal.Decimal`` will
        be supported directly by the encoder. For the inverse, decode JSON
        with ``parse_float=decimal.Decimal``.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.use_decimal = use_decimal
        # Back-compat: an integer indent means "that many spaces".
        if isinstance(indent, (int, long)):
            indent = ' ' * indent
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        # Only shadow the class-level default() when a hook was supplied.
        if default is not None:
            self.default = default
        self.encoding = encoding
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).
        For example, to support arbitrary iterators, you could
        implement default like this::
            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError(repr(o) + " is not JSON serializable")
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.
        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed. The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        # 'markers' tracks container ids already being encoded, used to
        # detect circular references; None disables the check entirely.
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        # Wrap the string encoder so byte strings in non-UTF-8 encodings
        # are decoded before escaping.
        if self.encoding != 'utf-8':
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)
        def floatstr(o, allow_nan=self.allow_nan,
                _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on
            # the internals.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text
        # key_memo caches coerced dict keys for the C encoder path.
        key_memo = {}
        # Use the C accelerator only for one-shot, compact, unsorted output.
        if (_one_shot and c_make_encoder is not None
                and self.indent is None and not self.sort_keys):
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan, key_memo, self.use_decimal)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot, self.use_decimal)
        try:
            return _iterencode(o, 0)
        finally:
            key_memo.clear()
class JSONEncoderForHTML(JSONEncoder):
    """An encoder that produces JSON safe to embed in HTML.

    When JSON is embedded in, say, a script tag, the characters &, < and
    > must be escaped; the usual HTML entities are not expanded inside
    <script> tags, so \\uXXXX escapes are used instead.
    """
    def encode(self, o):
        # Bypass JSONEncoder.encode's fast paths: they skip iterencode,
        # which is where the HTML escaping below happens.
        chunks = self.iterencode(o, True)
        joined = ''.join(chunks) if self.ensure_ascii else u''.join(chunks)
        return joined
    def iterencode(self, o, _one_shot=False):
        for chunk in super(JSONEncoderForHTML, self).iterencode(o, _one_shot):
            chunk = chunk.replace('&', '\\u0026')
            chunk = chunk.replace('<', '\\u003c')
            yield chunk.replace('>', '\\u003e')
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        _use_decimal,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        Decimal=Decimal,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Build the pure-Python ``_iterencode`` generator for JSONEncoder.

    Returns ``_iterencode(o, _current_indent_level)``, a generator
    yielding the JSON text for ``o`` chunk by chunk.  ``markers`` is a
    dict used for circular-reference detection (or None to disable the
    check); the remaining arguments are pre-bound encoder settings.
    Python 2 only: relies on ``basestring``/``long`` and on binding
    ``False``/``True`` as parameter defaults (a SyntaxError on Python 3).
    """
    def _iterencode_list(lst, _current_indent_level):
        # Emit a JSON array for a list or tuple.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            # Circular-reference guard: track containers by id().
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first item, buf holds only the pending separator.
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield buf + str(value)
            else:
                # Container or unknown type: recurse and stream its chunks.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(dct, _current_indent_level):
        # Emit a JSON object for a dict; non-string keys are coerced below.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (_indent * _current_indent_level)
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                # Caller asked to silently drop non-serializable keys.
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            elif _use_decimal and isinstance(value, Decimal):
                yield str(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (_indent * _current_indent_level)
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        # Top-level dispatch over all supported value types.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        elif _use_decimal and isinstance(o, Decimal):
            yield str(o)
        else:
            # Unknown type: let the user-supplied default() convert it,
            # guarding against a default() that returns its own argument.
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
| Python |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from jsb.contrib.simplejson.scanner import make_scanner
def _import_c_scanstring():
    # The C speedups extension is not shipped with this bundled copy, so
    # always report it as unavailable and fall back to py_scanstring.
    return None
c_scanstring = _import_c_scanstring()
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Return (NaN, +Infinity, -Infinity) built from raw IEEE-754 bits."""
    # Big-endian bit patterns of a quiet NaN followed by +Infinity;
    # Python 2's str.decode('hex') turns the hex text into raw bytes.
    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
    # The struct module in Python 2.4 would get frexp() out of range here
    # when an endian is specified in the format string. Fixed in Python 2.5+
    if sys.byteorder != 'big':
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """Subclass of ValueError with the following additional properties:
    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)
    """
    def __init__(self, msg, doc, pos, end=None):
        # The ValueError message carries the formatted line/column context;
        # the raw pieces are kept as attributes for programmatic access.
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        if end is not None:
            self.endlineno, self.endcolno = linecol(doc, end)
        else:
            self.endlineno, self.endcolno = None, None
def linecol(doc, pos):
    """Return the (line, column) of offset ``pos`` within ``doc``.

    Lines are 1-based.  Columns are 0-based on the first line and
    1-based on later lines (matching historical simplejson messages).
    """
    newlines_before = doc.count('\n', 0, pos)
    if newlines_before == 0:
        return 1, pos
    return newlines_before + 1, pos - doc.rindex('\n', 0, pos)
def errmsg(msg, doc, pos, end=None):
    """Format *msg* with line/column context for *pos* (and *end*).

    Note that this function is also called from the _speedups extension.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
        msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.
    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    # begin points at the opening quote (used only for error reporting).
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                #msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the raw control character.
                _append(terminator)
            continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                # Combine the high and low surrogates into one code point.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object from ``s`` starting just after its ``{``.

    Returns ``(obj, end)`` where ``end`` indexes past the closing ``}``.
    ``memo`` interns repeated key strings across calls.  Python 2 only
    (tuple parameter unpacking in the signature).
    """
    # Backwards compatibility
    if memo is None:
        memo = {}
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        # Reuse an identical key string seen earlier (saves memory).
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting : delimiter", s, end)
        end += 1
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        pairs.append((key, value))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end - 1)
        # After a comma, skip whitespace and require the next key's quote.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end - 1)
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from ``s`` starting just after its ``[``.

    Returns ``(values, end)`` where ``end`` indexes past the closing
    ``]``.  Python 2 only (tuple parameter unpacking in the signature).
    """
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end)
        # Skip whitespace after the comma before the next element.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder
    Performs the following translations in decoding by default:
    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+
    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """
    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default).  It has no effect when decoding :class:`unicode` objects.
        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.
        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).
        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`.  This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.
        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded.  By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).
        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded.  By default, this is equivalent to
        ``int(num_str)``.  This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).
        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.  This
        can be used to raise an exception if invalid JSON numbers are
        encountered.
        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        # Parser callbacks picked up by make_scanner below.
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Key-string memo shared across decodes (see JSONObject).
        self.memo = {}
        self.scan_once = make_scanner(self)
    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        Raises JSONDecodeError if anything other than trailing whitespace
        follows the document.
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj
    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.
        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end
| Python |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
...     raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.1.2'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=False,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).
    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is false, then the some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.
    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).
    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    If *indent* is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.
    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.
    ``encoding`` is the character encoding for str instances, default is UTF-8.
    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.
    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.
    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # cached encoder: only usable when every argument matches the cached
    # instance's configuration.  ``not use_decimal`` must be part of the
    # check (as it is in dumps()) because _default_encoder was built with
    # use_decimal=False; without it, ``dump(obj, fp, use_decimal=True)``
    # would silently ignore the flag.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal, **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.
    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.
    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).
    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.
    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.
    ``encoding`` is the character encoding for str instances, default is UTF-8.
    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.
    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.
    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # cached encoder: reusable only when every argument matches the
    # configuration _default_encoder was built with.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal, **kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.
    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.
    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.
    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).
    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.
    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded. By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).
    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded. By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).
    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.
    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.
    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    # Thin wrapper: read the whole stream and delegate to loads().
    return loads(fp.read(),
        encoding=encoding, cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
        use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.
    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.
    Note that currently only encodings that are a superset of ASCII work,
    strings of other encodings should be passed in as :class:`unicode`.
    *object_hook*, if specified, will be called with the result of every
    JSON object decoded and its return value will be used in place of the
    given :class:`dict`. This can be used to provide custom
    deserializations (e.g. to support JSON-RPC class hinting).
    *object_pairs_hook* is an optional function that will be called with
    the result of any object literal decode with an ordered list of pairs.
    The return value of *object_pairs_hook* will be used instead of the
    :class:`dict`. This feature can be used to implement custom decoders
    that rely on the order that the key and value pairs are decoded (for
    example, :func:`collections.OrderedDict` will remember the order of
    insertion). If *object_hook* is also defined, the *object_pairs_hook*
    takes priority.
    *parse_float*, if specified, will be called with the string of every
    JSON float to be decoded. By default, this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).
    *parse_int*, if specified, will be called with the string of every
    JSON int to be decoded. By default, this is equivalent to
    ``int(num_str)``. This can be used to use another datatype or parser
    for JSON integers (e.g. :class:`float`).
    *parse_constant*, if specified, will be called with one of the
    following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
    can be used to raise an exception if invalid JSON numbers are
    encountered.
    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.
    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    # Fast path: all defaults means the cached module-level decoder applies.
    if (cls is None and encoding is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and object_pairs_hook is None
            and not use_decimal and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    if object_hook is not None:
        kw['object_hook'] = object_hook
    if object_pairs_hook is not None:
        kw['object_pairs_hook'] = object_pairs_hook
    if parse_float is not None:
        kw['parse_float'] = parse_float
    if parse_int is not None:
        kw['parse_int'] = parse_int
    if parse_constant is not None:
        kw['parse_constant'] = parse_constant
    if use_decimal:
        # use_decimal is sugar for parse_float=Decimal; both together is
        # ambiguous, so reject the combination explicitly.
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch decoder/encoder/scanner between C and pure-Python
    implementations, then rebuild the cached default decoder and encoder
    so they pick up the new functions.

    NOTE(review): this imports the top-level ``simplejson`` package, while
    the rest of this bundle imports via ``jsb.contrib.simplejson`` — confirm
    these resolve to the same modules in this environment.
    """
    import simplejson.decoder as dec
    import simplejson.encoder as enc
    import simplejson.scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer the C implementations, falling back where unavailable.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
            enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    # Recreate the cached defaults so they bind the toggled functions.
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
| Python |
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import simplejson as json
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit(sys.argv[0] + " [infile [outfile]]")
try:
obj = json.load(infile,
object_pairs_hook=json.OrderedDict,
use_decimal=True)
except ValueError, e:
raise SystemExit(e)
json.dump(obj, outfile, sort_keys=True, indent=' ', use_decimal=True)
outfile.write('\n')
if __name__ == '__main__':
main()
| Python |
"""JSON token scanner
"""
import re
def _import_c_make_scanner():
    """Return the C scanner factory, or None when the extension is absent.

    This build always returns None, so py_make_scanner is used (see the
    make_scanner assignment at the bottom of the module).
    """
    return None
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
    """Build a pure-Python ``scan_once(string, idx)`` callable for *context*.

    ``context`` is a JSONDecoder-like object; its parse hooks and options
    are captured into locals once so the closure avoids repeated attribute
    lookups per token.  The returned callable yields ``(value, end_index)``
    and raises StopIteration to signal "no JSON value at idx" (end of
    input or an unexpected character).
    """
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo
    def _scan_once(string, idx):
        # Dispatch on the first character of the value starting at idx.
        try:
            nextchar = string[idx]
        except IndexError:
            # Ran off the end of the input.
            raise StopIteration
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict,
                _scan_once, object_hook, object_pairs_hook, memo)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        m = match_number(string, idx)
        if m is not None:
            # Split the literal into integer/fraction/exponent parts and
            # hand it to the configured int or float constructor.
            integer, frac, exp = m.groups()
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise StopIteration
    def scan_once(string, idx):
        try:
            return _scan_once(string, idx)
        finally:
            # memo caches object keys only for the duration of a single
            # top-level scan.
            memo.clear()
    return scan_once
make_scanner = c_make_scanner or py_make_scanner
| Python |
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    # Probe for the all() builtin (added in Python 2.5).
    all
except NameError:
    # Pure-Python fallback for Python 2.4.
    def all(seq):
        """Return True if every element of seq is true (or seq is empty)."""
        for elem in seq:
            if not elem:
                return False
        return True
class OrderedDict(dict, DictMixin):
    """dict subclass that remembers the order keys were first inserted.

    Order is tracked in a circular doubly linked list of [key, prev, next]
    cells rooted at the ``self.__end`` sentinel; ``self.__map`` gives O(1)
    access to a key's cell, while dict itself stores key -> value.
    """
    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Already initialized (e.g. re-__init__ of an existing instance).
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)
    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)
    def __setitem__(self, key, value):
        # Only link the key into the order list on first insertion;
        # overwriting keeps the original position.
        if key not in self:
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the node from the order list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev
    def __iter__(self):
        # Walk the linked list forward from the sentinel.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        # Walk the linked list backward from the sentinel.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO when last is true."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value
    def __reduce__(self):
        # Pickle support: temporarily strip the bookkeeping attributes so
        # only genuine instance state ends up in inst_dict.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def keys(self):
        return list(self)
    # Derive the bulk of the mapping API from DictMixin, which builds on
    # the ordered __iter__/__setitem__/__delitem__ above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    def copy(self):
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        # Two OrderedDicts are equal only when their items match in order;
        # comparison with a plain dict is order-insensitive.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
| Python |
"""
A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
"""
# ported from the Java reference code by Bram Cohen, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables
import copy
import string
# Per-row rotation amounts for ShiftRows, indexed by [SC][row][0=enc, 1=dec]
# where SC selects the block size (0: 128-bit, 1: 192-bit, 2: 256-bit).
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
          [[0, 0], [1, 5], [2, 4], [3, 3]],
          [[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
# Bit matrix of the S-box affine transform.
A = [[1, 1, 1, 1, 1, 0, 0, 0],
     [0, 1, 1, 1, 1, 1, 0, 0],
     [0, 0, 1, 1, 1, 1, 1, 0],
     [0, 0, 0, 1, 1, 1, 1, 1],
     [1, 0, 0, 0, 1, 1, 1, 1],
     [1, 1, 0, 0, 0, 1, 1, 1],
     [1, 1, 1, 0, 0, 0, 1, 1],
     [1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in xrange(255):
    j = (alog[-1] << 1) ^ alog[-1]
    if j & 0x100 != 0:
        j ^= 0x11B
    alog.append(j)
log = [0] * 256
for i in xrange(1, 255):
    log[alog[i]] = i
# multiply two elements of GF(2^m)
def mul(a, b):
    if a == 0 or b == 0:
        return 0
    return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in xrange(256)]
box[1][7] = 1
for i in xrange(2, 256):
    j = alog[255 - log[i]]
    for t in xrange(8):
        box[i][t] = (j >> (7 - t)) & 0x01
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in xrange(256)]
for i in xrange(256):
    for t in xrange(8):
        cox[i][t] = B[t]
        for j in xrange(8):
            cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in xrange(256):
    S[i] = cox[i][0] << 7
    for t in xrange(1, 8):
        S[i] ^= cox[i][t] << (7-t)
    Si[S[i] & 0xFF] = i
# T-boxes
G = [[2, 1, 1, 3],
     [3, 2, 1, 1],
     [1, 3, 2, 1],
     [1, 1, 3, 2]]
# Invert G by Gauss-Jordan elimination over GF(2^8): AA = [G | I].
AA = [[0] * 8 for i in xrange(4)]
for i in xrange(4):
    for j in xrange(4):
        AA[i][j] = G[i][j]
    AA[i][i+4] = 1
for i in xrange(4):
    pivot = AA[i][i]
    if pivot == 0:
        # Swap in a row with a nonzero pivot.
        t = i + 1
        while AA[t][i] == 0 and t < 4:
            t += 1
        assert t != 4, 'G matrix must be invertible'
        for j in xrange(8):
            AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
        pivot = AA[i][i]
    # Normalize the pivot row (division via log/alog tables).
    for j in xrange(8):
        if AA[i][j] != 0:
            AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
    # Eliminate the pivot column from every other row.
    for t in xrange(4):
        if i != t:
            for j in xrange(i+1, 8):
                AA[t][j] ^= mul(AA[i][j], AA[t][i])
            AA[t][i] = 0
# iG is the right half of the reduced matrix: the inverse of G.
iG = [[0] * 4 for i in xrange(4)]
for i in xrange(4):
    for j in xrange(4):
        iG[i][j] = AA[i][j + 4]
# Multiply scalar a against each byte of the 4-vector bs, packing the
# four products into one 32-bit word.
def mul4(a, bs):
    if a == 0:
        return 0
    r = 0
    for b in bs:
        r <<= 8
        if b != 0:
            r = r | mul(a, b)
    return r
# Combined SubBytes+MixColumns lookup tables (T1-T4 encrypt, T5-T8
# decrypt) and the inverse-MixColumns-only tables U1-U4 used for the
# decryption key schedule.
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
for t in xrange(256):
    s = S[t]
    T1.append(mul4(s, G[0]))
    T2.append(mul4(s, G[1]))
    T3.append(mul4(s, G[2]))
    T4.append(mul4(s, G[3]))
    s = Si[t]
    T5.append(mul4(s, iG[0]))
    T6.append(mul4(s, iG[1]))
    T7.append(mul4(s, iG[2]))
    T8.append(mul4(s, iG[3]))
    U1.append(mul4(t, iG[0]))
    U2.append(mul4(t, iG[1]))
    U3.append(mul4(t, iG[2]))
    U4.append(mul4(t, iG[3]))
# round constants
rcon = [1]
r = 1
for t in xrange(1, 30):
    r = mul(2, r)
    rcon.append(r)
# Drop the intermediate build artifacts; only the shifts/num_rounds,
# S/Si, T1-T8, U1-U4 and rcon tables are needed at runtime.
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael:
    """Rijndael (AES) block cipher over string blocks.

    Supports key sizes and block sizes of 16, 24 or 32 bytes; blocks and
    keys are byte strings.  Round keys are precomputed in __init__.
    """
    def __init__(self, key, block_size = 16):
        """Validate sizes and expand *key* into encrypt/decrypt round keys."""
        if block_size != 16 and block_size != 24 and block_size != 32:
            raise ValueError('Invalid block size: ' + str(block_size))
        if len(key) != 16 and len(key) != 24 and len(key) != 32:
            raise ValueError('Invalid key size: ' + str(len(key)))
        self.block_size = block_size
        ROUNDS = num_rounds[len(key)][block_size]
        BC = block_size / 4          # number of 32-bit columns per block
        # encryption round keys
        Ke = [[0] * BC for i in xrange(ROUNDS + 1)]
        # decryption round keys
        Kd = [[0] * BC for i in xrange(ROUNDS + 1)]
        ROUND_KEY_COUNT = (ROUNDS + 1) * BC
        KC = len(key) / 4            # number of 32-bit words in the key
        # copy user material bytes into temporary ints
        tk = []
        for i in xrange(0, KC):
            tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
                (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
        # copy values into round key arrays
        t = 0
        j = 0
        while j < KC and t < ROUND_KEY_COUNT:
            Ke[t / BC][t % BC] = tk[j]
            Kd[ROUNDS - (t / BC)][t % BC] = tk[j]
            j += 1
            t += 1
        tt = 0
        rconpointer = 0
        while t < ROUND_KEY_COUNT:
            # extrapolate using phi (the round key evolution function)
            tt = tk[KC - 1]
            tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
                     (S[(tt >>  8) & 0xFF] & 0xFF) << 16 ^ \
                     (S[ tt        & 0xFF] & 0xFF) <<  8 ^ \
                     (S[(tt >> 24) & 0xFF] & 0xFF)       ^ \
                     (rcon[rconpointer]    & 0xFF) << 24
            rconpointer += 1
            if KC != 8:
                for i in xrange(1, KC):
                    tk[i] ^= tk[i-1]
            else:
                # 256-bit keys apply an extra S-box step mid-schedule.
                for i in xrange(1, KC / 2):
                    tk[i] ^= tk[i-1]
                tt = tk[KC / 2 - 1]
                tk[KC / 2] ^= (S[ tt        & 0xFF] & 0xFF)       ^ \
                              (S[(tt >>  8) & 0xFF] & 0xFF) <<  8 ^ \
                              (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
                              (S[(tt >> 24) & 0xFF] & 0xFF) << 24
                for i in xrange(KC / 2 + 1, KC):
                    tk[i] ^= tk[i-1]
            # copy values into round key arrays
            j = 0
            while j < KC and t < ROUND_KEY_COUNT:
                Ke[t / BC][t % BC] = tk[j]
                Kd[ROUNDS - (t / BC)][t % BC] = tk[j]
                j += 1
                t += 1
        # inverse MixColumn where needed
        for r in xrange(1, ROUNDS):
            for j in xrange(BC):
                tt = Kd[r][j]
                Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
                           U2[(tt >> 16) & 0xFF] ^ \
                           U3[(tt >>  8) & 0xFF] ^ \
                           U4[ tt        & 0xFF]
        self.Ke = Ke
        self.Kd = Kd
    def encrypt(self, plaintext):
        """Encrypt one block (a string of exactly self.block_size bytes)."""
        if len(plaintext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
        Ke = self.Ke
        BC = self.block_size / 4
        ROUNDS = len(Ke) - 1
        # SC selects the ShiftRows offsets for this block size.
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][0]
        s2 = shifts[SC][2][0]
        s3 = shifts[SC][3][0]
        a = [0] * BC
        # temporary work array
        t = []
        # plaintext to ints + key
        for i in xrange(BC):
            t.append((ord(plaintext[i * 4    ]) << 24 |
                      ord(plaintext[i * 4 + 1]) << 16 |
                      ord(plaintext[i * 4 + 2]) <<  8 |
                      ord(plaintext[i * 4 + 3])        ) ^ Ke[0][i])
        # apply round transforms
        for r in xrange(1, ROUNDS):
            for i in xrange(BC):
                a[i] = (T1[(t[ i           ] >> 24) & 0xFF] ^
                        T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                        T3[(t[(i + s2) % BC] >>  8) & 0xFF] ^
                        T4[ t[(i + s3) % BC]        & 0xFF]  ) ^ Ke[r][i]
            t = copy.copy(a)
        # last round is special
        result = []
        for i in xrange(BC):
            tt = Ke[ROUNDS][i]
            result.append((S[(t[ i           ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((S[(t[(i + s2) % BC] >>  8) & 0xFF] ^ (tt >>  8)) & 0xFF)
            result.append((S[ t[(i + s3) % BC]        & 0xFF] ^  tt       ) & 0xFF)
        return string.join(map(chr, result), '')
    def decrypt(self, ciphertext):
        """Decrypt one block (a string of exactly self.block_size bytes)."""
        if len(ciphertext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
        Kd = self.Kd
        BC = self.block_size / 4
        ROUNDS = len(Kd) - 1
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        # Decryption uses the inverse ShiftRows offsets (index 1).
        s1 = shifts[SC][1][1]
        s2 = shifts[SC][2][1]
        s3 = shifts[SC][3][1]
        a = [0] * BC
        # temporary work array
        t = [0] * BC
        # ciphertext to ints + key
        for i in xrange(BC):
            t[i] = (ord(ciphertext[i * 4    ]) << 24 |
                    ord(ciphertext[i * 4 + 1]) << 16 |
                    ord(ciphertext[i * 4 + 2]) <<  8 |
                    ord(ciphertext[i * 4 + 3])        ) ^ Kd[0][i]
        # apply round transforms
        for r in xrange(1, ROUNDS):
            for i in xrange(BC):
                a[i] = (T5[(t[ i           ] >> 24) & 0xFF] ^
                        T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                        T7[(t[(i + s2) % BC] >>  8) & 0xFF] ^
                        T8[ t[(i + s3) % BC]        & 0xFF]  ) ^ Kd[r][i]
            t = copy.copy(a)
        # last round is special
        result = []
        for i in xrange(BC):
            tt = Kd[ROUNDS][i]
            result.append((Si[(t[ i           ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((Si[(t[(i + s2) % BC] >>  8) & 0xFF] ^ (tt >>  8)) & 0xFF)
            result.append((Si[ t[(i + s3) % BC]        & 0xFF] ^  tt       ) & 0xFF)
        return string.join(map(chr, result), '')
def encrypt(key, block):
    """One-shot encryption of a single block with the given key."""
    cipher = rijndael(key, block_size=len(block))
    return cipher.encrypt(block)
def decrypt(key, block):
    """One-shot decryption of a single block with the given key."""
    cipher = rijndael(key, block_size=len(block))
    return cipher.decrypt(block)
def test():
    """Round-trip a block through every key-size/block-size combination."""
    for key_len in (16, 24, 32):
        for block_len in (16, 24, 32):
            cipher = rijndael('a' * key_len, block_len)
            plain = 'b' * block_len
            assert cipher.decrypt(cipher.encrypt(plain)) == plain
| Python |
#!/usr/bin/env python
#
# Copyright under the latest Apache License 2.0
'''A class the inherits everything from python-twitter and allows oauth based access
Requires:
python-twitter
simplejson
oauth
'''
__author__ = "Hameedullah Khan <hameed@hameedkhan.net>"
__version__ = "0.2"
from jsb.contrib.twitter import Api, User
from jsb.imports import getjson, getoauth
json = getjson()
oauth = getoauth()
# Taken from oauth implementation at: http://github.com/harperreed/twitteroauth-python/tree/master
REQUEST_TOKEN_URL = 'https://twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'http://twitter.com/oauth/authorize'
SIGNIN_URL = 'http://twitter.com/oauth/authenticate'
class OAuthApi(Api):
    """python-twitter Api subclass whose requests are signed with OAuth.

    When an access token is supplied every fetch is signed with it;
    otherwise the instance can only be used for the request-token /
    access-token exchange.
    """

    def __init__(self, consumer_key, consumer_secret, access_token=None):
        if access_token:
            Api.__init__(self, access_token.key, access_token.secret)
        else:
            Api.__init__(self)
        self._Consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
        self._signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
        self._access_token = access_token

    def _GetOpener(self):
        '''Return a plain urllib opener; authentication travels in the
        signed url/body rather than in basic-auth headers.'''
        opener = self._urllib.build_opener()
        return opener

    def _FetchUrl(self,
                  url,
                  post_data=None,
                  parameters=None,
                  no_cache=None, verifier=""):
        '''Fetch a URL, optionally caching for a specified time.

        Args:
          url: The URL to retrieve
          post_data:
            A dict of (str, unicode) key/value pairs. If set, POST will be used.
          parameters:
            A dict whose key/value pairs should encoded and added
            to the query string. [OPTIONAL]
          no_cache: If true, overrides the cache on the current request
          verifier: OAuth verifier to include in the signed request.

        Returns:
          A string containing the body of the response.
        '''
        # BUGFIX: this module never imported time, so the cache branch
        # below raised NameError; import it locally.
        import time
        # Build the extra parameters dict
        extra_params = {}
        if self._default_params:
            extra_params.update(self._default_params)
        if parameters:
            extra_params.update(parameters)
        # POST parameters are folded into the signed request as well.
        if post_data:
            http_method = "POST"
            extra_params.update(post_data)
        else:
            http_method = "GET"
        req = self._makeOAuthRequest(url, parameters=extra_params,
                                     http_method=http_method, verifier=verifier)
        self._signRequest(req, self._signature_method)
        # Get a url opener that can handle Oauth basic auth
        opener = self._GetOpener()
        if post_data:
            encoded_post_data = req.to_postdata()
            url = req.get_normalized_http_url()
        else:
            url = req.to_url()
            encoded_post_data = ""
            # GETs carry the signature in the query string; the signature
            # changes per request, so the cache is never valid.
            no_cache = True
        # Open and return the URL immediately if we're not going to cache
        # OR we are posting data
        if encoded_post_data or no_cache:
            if encoded_post_data:
                url_data = opener.open(url, encoded_post_data).read()
            else:
                url_data = opener.open(url).read()
            opener.close()
        else:
            # Unique keys are a combination of the url and the username
            if self._username:
                key = self._username + ':' + url
            else:
                key = url
            # See if it has been cached before
            last_cached = self._cache.GetCachedTime(key)
            # If the cached version is outdated then fetch another and store it
            if not last_cached or time.time() >= last_cached + self._cache_timeout:
                url_data = opener.open(url).read()
                opener.close()
                self._cache.Set(key, url_data)
            else:
                url_data = self._cache.Get(key)
        # Always return the latest version
        return url_data

    def _makeOAuthRequest(self, url, token=None,
                          parameters=None, http_method="GET", verifier=""):
        '''Make a OAuth request from url and parameters

        Args:
          url: The Url to use for creating OAuth Request
          token: token to sign with; defaults to the stored access token.
          parameters:
             The URL parameters
          http_method:
             The HTTP method to use
          verifier: OAuth verifier string, when completing authorization.
        Returns:
          A OAauthRequest object
        '''
        if not token:
            token = self._access_token
        request = oauth.OAuthRequest.from_consumer_and_token(
            self._Consumer, token=token,
            http_url=url, parameters=parameters,
            http_method=http_method, verifier=verifier)
        return request

    def _signRequest(self, req, signature_method=oauth.OAuthSignatureMethod_HMAC_SHA1()):
        '''Sign a request

        Reminder: Created this function so incase
        if I need to add anything to request before signing

        Args:
          req: The OAuth request created via _makeOAuthRequest
          signature_method:
            The oauth signature method to use
        '''
        req.sign_request(signature_method, self._Consumer, self._access_token)

    def getAuthorizationURL(self, token, url=AUTHORIZATION_URL):
        '''Create a signed authorization URL

        Returns:
          A signed OAuthRequest authorization URL
        '''
        req = self._makeOAuthRequest(url, token=token)
        self._signRequest(req)
        return req.to_url()

    def getSigninURL(self, token, url=SIGNIN_URL):
        '''Create a signed Sign-in URL

        Returns:
          A signed OAuthRequest Sign-in URL
        '''
        signin_url = self.getAuthorizationURL(token, url)
        return signin_url

    def getAccessToken(self, url=ACCESS_TOKEN_URL, verifier=""):
        '''Exchange an authorized request token for an access token.'''
        token = self._FetchUrl(url, no_cache=True, verifier=verifier)
        return oauth.OAuthToken.from_string(token)

    def getRequestToken(self, url=REQUEST_TOKEN_URL):
        '''Get a Request Token from Twitter

        Returns:
          A OAuthToken object containing a request token
        '''
        resp = self._FetchUrl(url, no_cache=True)
        token = oauth.OAuthToken.from_string(resp)
        return token

    def GetUserInfo(self, url='https://twitter.com/account/verify_credentials.json'):
        '''Get user information from twitter

        Returns:
          Returns the twitter.User object
        '''
        # BUGFIX: the response used to be bound to a local named "json",
        # shadowing the json module and breaking json.loads().
        body = self._FetchUrl(url)
        data = json.loads(body)
        self._CheckForTwitterError(data)
        return User.NewFromJsonDict(data)
| Python |
# jsb/version.py
#
#
""" version related stuff. """
## jsb imports
from jsb.lib.datadir import getdatadir
## basic imports
import os
## defines
version = "0.7 RELEASE"

## getversion function

def getversion(txt=""):
    """ return a version string, optionally suffixed with txt.

    If a TIP file exists in the datadir its contents are appended to the
    base version.  Reading it is best-effort: any failure simply means no
    tip suffix.
    """
    tip = None
    try:
        tipfile = open(getdatadir() + os.sep + "TIP", 'r')
        try: tip = tipfile.read()
        finally: tipfile.close()    # BUGFIX: the file handle was never closed
    except Exception: tip = None
    if tip: version2 = version + " " + tip
    else: version2 = version
    if txt: return "JSONBOT %s %s" % (version2, txt)
    else: return "JSONBOT %s" % version2
| Python |
# jsb/lib/convore/event.py
#
#
""" convore event. """
## jsb imports
from jsb.utils.locking import lockdec
from jsb.lib.eventbase import EventBase
from jsb.utils.lazydict import LazyDict
from jsb.imports import getjson
## basic imports
import logging
import thread
## defines
json = getjson()
## locks
parselock = thread.allocate_lock()
locked = lockdec(parselock)
## ConvoreEvent
class ConvoreEvent(EventBase):
    """ Convore Event: one entry of the convore live.json stream, parsed
        into the generic EventBase shape. """

    @locked
    def parse(self, bot, message, root):
        """ fill this event from a raw convore live-stream message.

        bot is the receiving ConvoreBot, message one entry of the
        live.json "messages" list and root the full payload it came from.
        Returns self for chaining.
        """
        m = LazyDict(message)
        self.root = LazyDict(root)
        # Convore kinds use dashes ("direct-message") while handler names
        # use underscores, so normalize here.  BUGFIX: local renamed from
        # "type", which shadowed the builtin.
        kind = m.kind.replace("-", "_")
        self.type = kind
        self.cbtype = "CONVORE"
        self.bottype = bot.type
        self.username = m.user['username']
        self.userhost = "%s_%s" % ("CONVORE_USER", self.username)
        self._id = m._id
        self.userid = m.user['id']
        # messages inside a topic count as groupchat; anything else is
        # treated as a private message addressed by userid.
        try: self.channel = m.topic['id'] ; self.groupchat = True
        except: self.channel = self.userid ; self.msg = True
        self.auth = self.userhost
        self.txt = m.message
        self.nick = self.username
        self.printto = self.channel
        logging.debug("convore - parsed event: %s" % self.dump())
        return self
| Python |
# jsb/lib/convore/bot.py
#
#
""" convore bot. """
## jsb import
from jsb.lib.botbase import BotBase
from jsb.lib.errors import NotConnected
from jsb.drivers.convore.event import ConvoreEvent
from jsb.utils.lazydict import LazyDict
from jsb.utils.exception import handle_exception
from jsb.imports import getjson, getrequests
## basic imports
import logging
import time
## defines
json = getjson()
requests = getrequests()
## ConvoreBot
class ConvoreBot(BotBase):
""" The Convore Bot. """
def __init__(self, cfg=None, usersin=None, plugs=None, botname=None, nick=None, *args, **kwargs):
BotBase.__init__(self, cfg, usersin, plugs, botname, nick, *args, **kwargs)
self.type = "convore"
self.cursor = None
if not self.state.has_key("namecache"): self.state["namecache"] = {}
if not self.state.has_key("idcache"): self.state["idcache"] = {}
self.cfg.nick = cfg.username or "jsonbot"
def post(self, endpoint, data=None):
logging.debug("%s - doing post on %s - %s" % (self.cfg.name, endpoint, data))
assert self.cfg.username
assert self.cfg.password
self.auth = requests.AuthObject(self.cfg.username, self.cfg.password)
res = requests.post("https://convore.com/api/%s" % endpoint, data or {}, auth=self.auth)
logging.debug("%s - got result %s" % (self.cfg.name, res.content))
if res.status_code == 200:
logging.debug("%s - got result %s" % (self.cfg.name, res.content))
return LazyDict(json.loads(res.content))
else: logging.error("%s - %s - %s returned code %s" % (self.cfg.name, endpoint, data, res.status_code))
def get(self, endpoint, data={}):
logging.debug("%s - doing get on %s - %s" % (self.cfg.name, endpoint, data))
self.auth = requests.AuthObject(self.cfg.username, self.cfg.password)
url = "https://convore.com/api/%s" % endpoint
res = requests.get(url, data, auth=self.auth)
if res.status_code == 200:
logging.debug("%s - got result %s" % (self.cfg.name, res.content))
return LazyDict(json.loads(res.content))
logging.error("%s - %s - %s returned code %s" % (self.cfg.name, endpoint, data, res.status_code))
def connect(self):
logging.warn("%s - authing %s" % (self.cfg.name, self.cfg.username))
r = self.get('account/verify.json')
if r: logging.warn("%s - connected" % self.cfg.name) ; self.connectok.set()
else: logging.warn("%s - auth failed - %s" % (self.cfg.name, r)) ; raise NotConnected(self.cfg.username)
def outnocb(self, printto, txt, how="msg", event=None, origin=None, html=False, *args, **kwargs):
if event and not event.chan.data.enable:
logging.warn("%s - channel %s is not enabled" % (self.cfg.name, event.chan.data.id))
return
txt = self.normalize(txt)
logging.debug("%s - out - %s - %s" % (self.cfg.name, printto, txt))
if event and event.msg:
r = self.post("messages/%s/create.json" % printto, data={"message": txt, "pasted": True})
else:
r = self.post("topics/%s/messages/create.json" % printto, data={"message": txt, "pasted": True})
def discover(self, channel):
res = self.get("groups/discover/search.json", {"q": channel })
logging.debug("%s - discover result: %s" % (self.cfg.name, str(res)))
for g in res.groups:
group = LazyDict(g)
self.state["namecache"][group.id] = group.name
self.state["idcache"][group.name] = group.id
self.state.save()
return res.groups
def join(self, channel, password=None):
if channel not in self.state['joinedchannels']: self.state['joinedchannels'].append(channel) ; self.state.save()
try:
self.join_id(self.state["idcache"][channel])
except KeyError:
chans = self.discover(channel)
self.join_id(chans[0]["id"], password)
def join_id(self, id, password=None):
logging.warn("%s - joining %s" % (self.cfg.name, id))
res = self.post("groups/%s/join.json" % id, {"group_id": id})
return res
def part(self, channel):
logging.warn("%s - leaving %s" % (self.cfg.name, channel))
try:
id = self.state["idcache"][channel]
res = self.post("groups/%s/leave.json" % id, {"group_id": id})
except: handle_exception() ; return
if channel in self.state['joinedchannels']: self.state['joinedchannels'].remove(channel) ; self.state.save()
return res
def _readloop(self):
logging.debug("%s - starting readloop" % self.cfg.name)
self.connectok.wait(15)
self.auth = requests.AuthObject(self.cfg.username, self.cfg.password)
while not self.stopped and not self.stopreadloop:
try:
time.sleep(1)
if self.cursor: result = self.get("live.json", {"cursor": self.cursor})
else: result = self.get("live.json")
if self.stopped or self.stopreadloop: break
if not result: time.sleep(20) ; continue
if result.has_key("_id"): self.cursor = result["_id"]
logging.info("%s - incoming - %s" % (self.cfg.name, str(result)))
if not result: continue
if not result.messages: continue
for message in result.messages:
try:
event = ConvoreEvent()
event.parse(self, message, result)
if event.username.lower() == self.cfg.username.lower(): continue
event.bind(self)
method = getattr(self, "handle_%s" % event.type)
method(event)
except (TypeError, AttributeError): logging.error("%s - no handler for %s kind" % (self.cfg.name, message['kind']))
except: handle_exception()
except urllib2.URLError, ex: logging.error("%s - url error - %s" % (self.cfg.name, str(ex)))
except Exception, ex: handle_exception()
logging.debug("%s - stopping readloop" % self.cfg.name)
def handle_error(self, event):
logging.error("%s - error - %s" % (self.cfg.name, event.error))
def handle_logout(self, event):
logging.info("%s - logout - %s" % (self.cfg.name, event.username))
def handle_login(self, event):
logging.info("%s - login - %s" % (self.cfg.name, event.username))
def handle_star(self, event):
pass
#logging.warn("%s - star - %s" % (self.cfg.name, str(message)))
def handle_topic(self, event):
logging.info("%s - topic - %s" % (self.cfg.name, event.dump()))
def handle_message(self, event):
self.doevent(event)
def handle_direct_message(self, event):
self.doevent(event)
| Python |
# jsb/console/event.py
#
#
""" a console event. """
## jsb imports
from jsb.lib.eventbase import EventBase
from jsb.lib.channelbase import ChannelBase
from jsb.lib.errors import NoInput
## basic imports
import getpass
import logging
import re
## ConsoleEvent class
class ConsoleEvent(EventBase):
    """ event generated from a line of console input. """

    def __deepcopy__(self, a):
        """ deepcopy a console event. """
        clone = ConsoleEvent()
        clone.copyin(self)
        return clone

    def parse(self, bot, input, console, *args, **kwargs):
        """ fill this event from the raw console input line. """
        if not input: raise NoInput()
        self.bot = bot
        self.console = console
        # identify the event by the local unix user plus the bot's uuid
        user = getpass.getuser()
        self.nick = user
        self.auth = user + '@' + bot.uuid
        self.userhost = self.auth
        self.origin = self.userhost
        self.txt = input
        self.usercmnd = input.split()[0]
        self.channel = self.userhost
        self.cbtype = self.cmnd = unicode("CONSOLE")
        self.bind(bot)
| Python |
# jsb/console/bot.py
#
#
""" console bot. """
## jsb imports
from jsb.lib.datadir import getdatadir
from jsb.utils.generic import waitforqueue
from jsb.lib.errors import NoSuchCommand, NoInput
from jsb.lib.botbase import BotBase
from jsb.lib.exit import globalshutdown
from jsb.utils.generic import strippedtxt, waitevents
from jsb.utils.exception import handle_exception
from event import ConsoleEvent
## basic imports
import time
import Queue
import logging
import sys
import code
import os
import readline
import atexit
import getpass
import re
## defines
histfilepath = os.path.expanduser(getdatadir() + os.sep + "run" + os.sep + "console-history")
## HistoryConsole class
class HistoryConsole(code.InteractiveConsole):
    """ interactive console with persistent readline history. """

    def __init__(self, locals=None, filename="<console>", histfile=histfilepath):
        # remember where save_history() should write by default
        self.fname = histfile
        code.InteractiveConsole.__init__(self, locals, filename)
        self.init_history(histfile)

    def init_history(self, histfile):
        """ enable tab completion and load any previous history. """
        readline.parse_and_bind("tab: complete")
        if not hasattr(readline, "read_history_file"): return
        try: readline.read_history_file(histfile)
        except IOError: pass

    def save_history(self, histfile=None):
        """ persist the readline history to histfile (default: init path). """
        target = histfile or self.fname
        readline.write_history_file(target)
## the console
console = HistoryConsole()
## ConsoleBot
class ConsoleBot(BotBase):
ERASE_LINE = '\033[2K'
BOLD='\033[1m'
RED = '\033[91m'
YELLOW = '\033[93m'
GREEN = '\033[92m'
ENDC = '\033[0m'
def __init__(self, cfg=None, users=None, plugs=None, botname=None, *args, **kwargs):
BotBase.__init__(self, cfg, users, plugs, botname, *args, **kwargs)
self.type = "console"
def startshell(self, connect=True):
""" start the console bot. """
self.start(False)
time.sleep(0.1)
self.dostart()
while not self.stopped:
try:
input = console.raw_input("> ")
event = ConsoleEvent()
event.parse(self, input, console)
if False and input.startswith('#'):
try:
env = {"bot": self, "event": event}
env.update(locals())
env.update(globals())
console.locals.update(env)
console.runsource(input[1:])
continue
except Exception, ex:
handle_exception()
continue
self.put(event)
waitforqueue(event.resqueue)
time.sleep(0.2)
except NoInput: continue
except (KeyboardInterrupt, EOFError): break
except Exception, ex: handle_exception()
console.save_history()
def outnocb(self, printto, txt, *args, **kwargs):
txt = self.normalize(txt)
self._raw(txt)
def _raw(self, txt):
""" do raw output to the console. """
logging.info("%s - out - %s" % (self.cfg.name, txt))
sys.stdout.write(txt)
sys.stdout.write('\n')
def action(self, channel, txt, event=None):
txt = self.normalize(txt)
self._raw(txt)
def notice(self, channel, txt):
txt = self.normalize(txt)
self._raw(txt)
def exit(self):
""" called on exit. """
console.save_history()
def normalize(self, what):
what = strippedtxt(what)
what = what.replace("<b>", self.GREEN)
what = what.replace("</b>", self.ENDC)
what = what.replace("<b>", self.GREEN)
what = what.replace("</b>", self.ENDC)
if what.count(self.ENDC) % 2: what = "%s%s" % (self.ENDC, what)
return what
| Python |
# jsb/socklib/irc/irc.py
#
#
"""
an Irc object handles the connection to the irc server .. receiving,
sending, connect and reconnect code.
"""
## jsb imports
from jsb.utils.exception import handle_exception
from jsb.utils.generic import toenc, fromenc
from jsb.utils.generic import getrandomnick, strippedtxt
from jsb.utils.generic import fix_format, splittxt, uniqlist
from jsb.utils.locking import lockdec
from jsb.lib.botbase import BotBase
from jsb.lib.threads import start_new_thread, threaded
from jsb.utils.pdod import Pdod
from jsb.lib.channelbase import ChannelBase
from jsb.lib.morphs import inputmorphs, outputmorphs
from jsb.lib.exit import globalshutdown
from jsb.lib.config import Config
## jsb.irc imports
from ircevent import IrcEvent
## basic imports
import time
import thread
import socket
import threading
import os
import Queue
import random
import logging
import types
import re
## locks
outlock = thread.allocate_lock()
outlocked = lockdec(outlock)
## exceptions
class Irc(BotBase):
    """ the irc class, provides interface to irc related stuff. """
    def __init__(self, cfg=None, users=None, plugs=None, *args, **kwargs):
        # initialise base bot state, then the irc specific attributes
        BotBase.__init__(self, cfg, users, plugs, *args, **kwargs)
        BotBase.setstate(self)
        self.type = 'irc'
        # socket handles: oldsock is the raw socket, fsock a file wrapper
        # around it, sock is either oldsock or an ssl wrapper around it
        self.fsock = None
        self.oldsock = None
        self.sock = None
        self.reconnectcount = 0
        self.pongcheck = False
        self.nickchanged = False
        # when True the 433 (nick in use) handler is suppressed (see donick)
        self.noauto433 = False
        if self.state:
            if not self.state.has_key('alternick'): self.state['alternick'] = self.cfg['alternick']
            if not self.state.has_key('no-op'): self.state['no-op'] = []
        # nicks that got a 401 (no such nick) .. cleared again on join/privmsg
        self.nicks401 = []
        self.cfg.port = self.cfg.port or 6667
        self.connecttime = 0
        self.encoding = 'utf-8'
        self.blocking = 1
        # timestamp of the last line sent, used by the flood protection in send()
        self.lastoutput = 0
        self.splitted = []
        if not self.cfg.server: self.cfg.server = self.cfg.host or "localhost"
        assert self.cfg.port
        assert self.cfg.server
    def _raw(self, txt):
        """ send raw text to the server. """
        if not txt or self.stopped or not self.sock:
            logging.info("%s - bot is stopped .. not sending." % self.cfg.name)
            return 0
        try:
            self.lastoutput = time.time()
            itxt = toenc(txt, self.encoding)
            # PONGs are not logged to keep the log readable
            if not txt.startswith("PONG"): logging.info(u"%s - out - %s" % (self.cfg.name, itxt))
            if not self.sock: logging.warn("%s - socket disappeared - not sending." % self.cfg.name) ; return
            # ssl sockets use write(), plain sockets send() .. plain output is
            # capped at 500 bytes (irc line length limit headroom)
            if self.cfg.has_key('ssl') and self.cfg['ssl']: self.sock.write(itxt + '\n')
            else: self.sock.send(itxt[:500] + '\n')
        except UnicodeEncodeError, ex:
            logging.error("%s - encoding error: %s" % (self.cfg.name, str(ex)))
            return
        except Exception, ex:
            handle_exception()
            logging.warn("%s - ERROR: can't send %s" % (self.cfg.name, str(ex)))
    def _connect(self):
        """ connect to server/port using nick. """
        self.stopped = False
        self.connecting = True
        self.connectok.clear()
        if self.cfg.ipv6:
            self.oldsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        else:
            self.oldsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        assert self.oldsock
        assert self.cfg.server
        assert self.cfg.port
        # bind() also resolves which address we actually connect to
        server = self.bind()
        logging.warn('%s - connecting to %s - %s - %s' % (self.cfg.name, server, self.cfg.server, self.cfg.port))
        self.oldsock.settimeout(30)
        self.oldsock.connect((server, int(str(self.cfg.port))))
        self.blocking = 1
        self.oldsock.setblocking(self.blocking)
        logging.warn('%s - connection ok' % self.cfg.name)
        self.connected = True
        self.fsock = self.oldsock.makefile("r")
        self.fsock._sock.setblocking(self.blocking)
        if self.blocking:
            # in blocking mode use a long read timeout so the readloop can
            # periodically ping the server (see _readloop)
            socktimeout = self.cfg['socktimeout']
            if not socktimeout:
                socktimeout = 500.0
            else:
                socktimeout = float(socktimeout)
            self.oldsock.settimeout(socktimeout)
            self.fsock._sock.settimeout(socktimeout)
        if self.cfg.has_key('ssl') and self.cfg['ssl']:
            logging.info('%s - ssl enabled' % self.cfg.name)
            self.sock = socket.ssl(self.oldsock)
        else: self.sock = self.oldsock
        try:
            self.outputlock.release()
        except thread.error:
            pass
        self.connecttime = time.time()
        return True
    def bind(self):
        """ optionally bind to the configured bindhost and resolve the
            address to connect to (literal ipv6, literal ipv4 or a random
            ip from a dns lookup, in that order). """
        server = self.cfg.server
        elite = self.cfg['bindhost'] or Config()['bindhost']
        if elite:
            try:
                self.oldsock.bind((elite, 0))
            except socket.gaierror:
                logging.warn("%s - can't bind to %s" % (self.cfg.name, elite))
        if not server:
            # is cfg.server a literal ipv6 address?
            try: socket.inet_pton(socket.AF_INET6, self.cfg.server)
            except socket.error: pass
            else: server = self.cfg.server
        if not server:
            # is cfg.server a literal ipv4 address?
            try: socket.inet_pton(socket.AF_INET, self.cfg.server)
            except socket.error: pass
            else: server = self.cfg.server
        if not server:
            # fall back to dns and pick a random resolved address
            ips = []
            try:
                for item in socket.getaddrinfo(self.cfg.server, None):
                    if item[0] in [socket.AF_INET, socket.AF_INET6] and item[1] == socket.SOCK_STREAM:
                        ip = item[4][0]
                        if ip not in ips: ips.append(ip)
            except socket.error: pass
            # NOTE(review): if the lookup succeeds but yields no usable ips,
            # random.choice raises IndexError here - confirm that is acceptable
            else: server = random.choice(ips)
        return server
    def _readloop(self):
        """ loop on the socketfile. """
        self.stopreadloop = False
        self.stopped = False
        doreconnect = True
        # timeout counts consecutive socket timeouts .. 2 misses -> reconnect
        timeout = 1
        logging.info('%s - starting readloop' % self.cfg.name)
        # prevtxt buffers a partial (unterminated) line between reads
        prevtxt = ""
        while not self.stopped and not self.stopreadloop and self.sock and self.fsock:
            try:
                time.sleep(0.01)
                if self.cfg.has_key('ssl') and self.cfg['ssl']: intxt = inputmorphs.do(self.sock.read()).split('\n')
                else: intxt = inputmorphs.do(self.fsock.readline()).split('\n')
                if self.stopreadloop or self.stopped:
                    doreconnect = 0
                    break
                if not intxt or not intxt[0]:
                    doreconnect = 1
                    break
                # glue the leftover partial line onto the new data
                if prevtxt:
                    intxt[0] = prevtxt + intxt[0]
                    prevtxt = ""
                # keep an incomplete trailing line for the next iteration
                if intxt[-1] != '':
                    prevtxt = intxt[-1]
                    intxt = intxt[:-1]
                for r in intxt:
                    try:
                        r = strippedtxt(r.rstrip(), ["\001", "\002", "\003"])
                        rr = unicode(fromenc(r.rstrip(), self.encoding))
                    except UnicodeDecodeError:
                        logging.warn("%s - decode error - ignoring" % self.cfg.name)
                        continue
                    if not rr: continue
                    res = rr
                    logging.debug(u"%s - %s" % (self.cfg.name, res))
                    try:
                        ievent = IrcEvent().parse(self, res)
                    except Exception, ex:
                        handle_exception()
                        continue
                    if ievent:
                        self.handle_ievent(ievent)
                    # successful read resets the timeout counter
                    timeout = 1
            except UnicodeError:
                handle_exception()
                continue
            except socket.timeout:
                # no data within socktimeout .. ping the server, give up after
                # two consecutive timeouts without a pong
                if self.stopped or self.stopreadloop: break
                timeout += 1
                if timeout > 2:
                    doreconnect = 1
                    logging.warn('%s - no pong received' % self.cfg.name)
                    break
                logging.debug("%s - socket timeout" % self.cfg.name)
                pingsend = self.ping()
                if not pingsend:
                    doreconnect = 1
                    break
                continue
            except socket.sslerror, ex:
                # ssl sockets raise sslerror instead of socket.timeout ..
                # same ping/reconnect policy as above
                if self.stopped or self.stopreadloop: break
                if not 'timed out' in str(ex):
                    handle_exception()
                    doreconnect = 1
                    break
                timeout += 1
                if timeout > 2:
                    doreconnect = 1
                    logging.warn('%s - no pong received' % self.cfg.name)
                    break
                logging.error("%s - socket timeout" % self.cfg.name)
                pingsend = self.ping()
                if not pingsend:
                    doreconnect = 1
                    break
                continue
            except IOError, ex:
                # "temporarily unavailable" on a blocking socket .. retry
                if self.blocking and 'temporarily' in str(ex):
                    time.sleep(0.5)
                    continue
                if not self.stopped:
                    logging.error('%s - connecting error: %s' % (self.cfg.name, str(ex)))
                    handle_exception()
                doreconnect = 1
                break
            except socket.error, ex:
                if self.blocking and 'temporarily' in str(ex):
                    time.sleep(0.5)
                    continue
                if not self.stopped:
                    logging.error('%s - connecting error: %s' % (self.cfg.name, str(ex)))
                doreconnect = 1
            except Exception, ex:
                if self.stopped or self.stopreadloop:
                    break
                logging.error("%s - error in readloop: %s" % (self.cfg.name, str(ex)))
                doreconnect = 1
                break
        logging.info('%s - readloop stopped' % self.cfg.name)
        self.connectok.clear()
        self.connected = False
        if doreconnect and not self.stopped:
            time.sleep(2)
            self.reconnect()
    def logon(self):
        """ log on to the network. """
        time.sleep(2)
        if self.password:
            logging.debug('%s - sending password' % self.cfg.name)
            self._raw("PASS %s" % self.password)
        logging.warn('%s - registering with %s using nick %s' % (self.cfg.name, self.cfg.server, self.cfg.nick))
        logging.warn('%s - this may take a while' % self.cfg.name)
        username = self.cfg['username'] or "jsb"
        realname = self.cfg['realname'] or "The JSON everywhere bot"
        time.sleep(1)
        self._raw("NICK %s" % self.cfg.nick)
        time.sleep(1)
        self._raw("USER %s localhost %s :%s" % (username, self.cfg.server, realname))
    def _onconnect(self):
        """ overload this to run after connect. """
        pass
    def _resume(self, data, botname, reto=None):
        """ resume to server/port using nick. """
        try:
            # ssl connections can't be resumed from a file descriptor .. do a
            # full restart instead
            if data['ssl']:
                self.exit()
                time.sleep(3)
                self.start()
                return 1
        except KeyError:
            pass
        self.stopped = False
        try:
            logging.info("%s - resume - file descriptor is %s" % (self.cfg.name, data['fd']))
            fd = int(data['fd'])
        except (TypeError, ValueError):
            fd = None
            logging.error("%s - can't determine file descriptor" % self.cfg.name)
            return 0
        # create socket
        if self.cfg.ipv6:
            if fd: self.sock = socket.fromfd(fd , socket.AF_INET6, socket.SOCK_STREAM)
            else: self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        else:
            if fd: self.sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
            else: self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(30)
        self.fsock = self.sock.makefile("r")
        self.sock.setblocking(self.blocking)
        if self.blocking:
            socktimeout = self.cfg['socktimeout']
            if not socktimeout: socktimeout = 301.0
            else: socktimeout = float(socktimeout)
            self.sock.settimeout(socktimeout)
        self.nickchanged = 0
        self.connecting = False
        time.sleep(2)
        # ping with a RESUME marker so the readloop sees traffic right away
        self._raw('PING :RESUME %s' % str(time.time()))
        self.dostart(self.cfg.name, self.type)
        self.connectok.set()
        self.connected = True
        self.reconnectcount = 0
        if reto: self.say(reto, 'rebooting done')
        logging.warn("%s - rebooting done" % self.cfg.name)
        return True
    def outnocb(self, printto, what, how='msg', *args, **kwargs):
        """ send what to printto according to how (msg/notice/ctcp). """
        #if printto in self.nicks401:
        #    logging.warn("%s - blocking %s" % (self.cfg.name, printto))
        #    return
        what = fix_format(what)
        what = self.normalize(what)
        # a partyline (dcc) socket as target gets the text written directly
        if 'socket' in repr(printto) and self.sock:
            printto.send(unicode(what) + u"\n")
            return True
        if not printto: self._raw(what)
        elif how == 'notice': self.notice(printto, what)
        elif how == 'ctcp': self.ctcp(printto, what)
        else: self.privmsg(printto, what)
    def broadcast(self, txt):
        """ broadcast txt to all joined channels. """
        for i in self.state['joinedchannels']:
            self.say(i, txt, speed=1)
    def normalize(self, what):
        """ strip control characters and map html-ish markup to irc
            formatting codes (\002 is irc bold). """
        txt = strippedtxt(what, ["\001", "\002", "\003"])
        txt = txt.replace("<b>", "\002")
        txt = txt.replace("</b>", "\002")
        txt = txt.replace("<i>", "")
        txt = txt.replace("</i>", "")
        # NOTE(review): the four replace calls below repeat the four above and
        # are no-ops by this point - candidates for removal
        txt = txt.replace("<b>", "\002")
        txt = txt.replace("</b>", "\002")
        txt = txt.replace("<i>", "")
        txt = txt.replace("</i>", "")
        txt = txt.replace("<h2>", "\002")
        txt = txt.replace("</h2>", "\002")
        txt = txt.replace("<h3>", "\002")
        txt = txt.replace("</h3>", "\002")
        txt = txt.replace("<li>", "\002")
        txt = txt.replace("</li>", "\002")
        return txt
    def save(self):
        """ save state data. """
        if self.state: self.state.save()
    def connect(self):
        """
        connect to server/port using nick .. connect can timeout so catch
        exception .. reconnect if enabled.
        """
        try:
            self._connect()
            logging.info("%s - starting logon" % self.cfg.name)
            self.logon()
            time.sleep(1)
            self.nickchanged = 0
            self.reconnectcount = 0
            self._onconnect()
            self.connected = True
            self.connecting = False
        except (socket.gaierror, socket.error), ex:
            logging.error('%s - connecting error: %s' % (self.cfg.name, str(ex)))
            return
        except Exception, ex:
            handle_exception()
            logging.error('%s - connecting error: %s' % (self.cfg.name, str(ex)))
    def shutdown(self):
        """ shutdown the bot. """
        logging.warn('%s - shutdown' % self.cfg.name)
        self.stopoutputloop = 1
        self.close()
        self.connecting = False
        self.connected = False
        self.connectok.clear()
    def close(self):
        """ close the connection. """
        # best effort teardown: shut down and close whichever socket applies,
        # swallowing errors from already-dead sockets
        try:
            if self.cfg.has_key('ssl') and self.cfg['ssl']: self.oldsock.shutdown(2)
            else: self.sock.shutdown(2)
        except:
            pass
        try:
            if self.cfg.has_key('ssl') and self.cfg['ssl']: self.oldsock.close()
            else: self.sock.close()
            self.fsock.close()
        except:
            pass
    def handle_pong(self, ievent):
        """ set pongcheck on received pong. """
        logging.debug('%s - received server pong' % self.cfg.name)
        self.pongcheck = 1
    def sendraw(self, txt):
        """ send raw text to the server. """
        if self.stopped: return
        logging.debug(u'%s - sending %s' % (self.cfg.name, txt))
        self._raw(txt)
    def fakein(self, txt):
        """ do a fake ircevent. """
        if not txt: return
        logging.debug('%s - fakein - %s' % (self.cfg.name, txt))
        self.handle_ievent(IrcEvent().parse(self, txt))
    def donick(self, nick, setorig=False, save=False, whois=False):
        """ change nick .. optionally set original nick and/or save to config. """
        if not nick: return
        # suppress the automatic 433 (nick in use) handling while we change
        self.noauto433 = True
        nick = nick[:16]
        self._raw('NICK %s\n' % nick)
        self.noauto433 = False
    def join(self, channel, password=None):
        """ join channel with optional password. """
        if not channel: return
        if password:
            self._raw('JOIN %s %s' % (channel, password))
        else: self._raw('JOIN %s' % channel)
        if self.state:
            # remember joined channels across restarts
            if channel not in self.state.data.joinedchannels:
                self.state.data.joinedchannels.append(channel)
                self.state.save()
    def part(self, channel):
        """ leave channel. """
        if not channel: return
        self._raw('PART %s' % channel)
        try:
            self.state.data['joinedchannels'].remove(channel)
            self.state.save()
        except (KeyError, ValueError):
            pass
    def who(self, who):
        """ send who query. """
        if not who: return
        self.putonqueue(4, None, 'WHO %s' % who.strip())
    def names(self, channel):
        """ send names query. """
        if not channel: return
        self.putonqueue(4, None, 'NAMES %s' % channel)
    def whois(self, who):
        """ send whois query. """
        if not who: return
        self.putonqueue(4, None, 'WHOIS %s' % who)
    def privmsg(self, printto, what):
        """ send privmsg to irc server. """
        if not printto or not what: return
        self.send('PRIVMSG %s :%s' % (printto, what))
    @outlocked
    def send(self, txt):
        """ send text to irc server. """
        if not txt: return
        if self.stopped: return
        try:
            now = time.time()
            # flood protection: space lines cfg.sleepsec (default 4s) apart,
            # unless disabled or within 5s of connecting
            if self.cfg.sleepsec: timetosleep = self.cfg.sleepsec - (now - self.lastoutput)
            else: timetosleep = 4 - (now - self.lastoutput)
            if timetosleep > 0 and not self.cfg.nolimiter and not (time.time() - self.connecttime < 5):
                logging.debug('%s - flood protect' % self.cfg.name)
                time.sleep(timetosleep)
            txt = txt.rstrip()
            self._raw(txt)
        except Exception, ex:
            logging.error('%s - send error: %s' % (self.cfg.name, str(ex)))
            handle_exception()
            return
    def voice(self, channel, who):
        """ give voice. """
        if not channel or not who: return
        self.putonqueue(9, None, 'MODE %s +v %s' % (channel, who))
    def doop(self, channel, who):
        """ give ops. """
        if not channel or not who: return
        self._raw('MODE %s +o %s' % (channel, who))
    def delop(self, channel, who):
        """ de-op user. """
        if not channel or not who: return
        self._raw('MODE %s -o %s' % (channel, who))
    def quit(self, reason='http://jsonbot.googlecode.com'):
        """ send quit message. """
        logging.warn('%s - sending quit - %s' % (self.cfg.name, reason))
        self._raw('QUIT :%s' % reason)
    def notice(self, printto, what):
        """ send notice. """
        if not printto or not what: return
        self.putonqueue(3, None, 'NOTICE %s :%s' % (printto, what))
    def ctcp(self, printto, what):
        """ send ctcp privmsg. """
        if not printto or not what: return
        self.putonqueue(3, None, "PRIVMSG %s :\001%s\001" % (printto, what))
    def ctcpreply(self, printto, what):
        """ send ctcp notice. """
        if not printto or not what: return
        self.putonqueue(3, None, "NOTICE %s :\001%s\001" % (printto, what))
    def action(self, printto, what, event=None, *args, **kwargs):
        """ do action. """
        if not printto or not what: return
        self.putonqueue(9, None, "PRIVMSG %s :\001ACTION %s\001" % (printto, what))
    def handle_ievent(self, ievent):
        """ handle ircevent .. dispatch to 'handle_command' method. """
        try:
            if ievent.cmnd == 'JOIN' or ievent.msg:
                # a 401'ed nick that shows activity is reachable again
                if ievent.nick in self.nicks401:
                    self.nicks401.remove(ievent.nick)
                    logging.debug('%s - %s joined .. unignoring' % (self.cfg.name, ievent.nick))
            ievent.bind(self)
            # dispatch to handle_<lowercased cmnd>, e.g. handle_privmsg ..
            # missing handlers fall through to the AttributeError below
            method = getattr(self,'handle_' + ievent.cmnd.lower())
            if method:
                try:
                    method(ievent)
                except:
                    handle_exception()
        except AttributeError:
            pass
    def handle_432(self, ievent):
        """ erroneous nick. """
        self.handle_433(ievent)
    def handle_433(self, ievent):
        """ handle nick already taken. """
        if self.noauto433:
            return
        nick = ievent.arguments[1]
        alternick = self.state['alternick']
        # first try the configured alternick, after that fall back to a
        # random nick and remember the one we actually wanted
        if alternick and not self.nickchanged:
            logging.debug('%s - using alternick %s' % (self.cfg.name, alternick))
            self.donick(alternick)
            self.nickchanged = 1
            return
        randomnick = getrandomnick()
        self._raw("NICK %s" % randomnick)
        self.cfg.wantnick = self.cfg.nick
        self.cfg.nick = randomnick
        logging.warn('%s - ALERT: nick %s already in use/unavailable .. using randomnick %s' % (self.cfg.name, nick, randomnick))
        self.nickchanged = 1
    def handle_ping(self, ievent):
        """ send pong response. """
        if not ievent.txt: return
        self._raw('PONG :%s' % ievent.txt)
    def handle_001(self, ievent):
        """ we are connected. """
        self.connectok.set()
        self.connected = True
        # whois ourselves to prime the userhost cache
        self.whois(self.cfg.nick)
    def handle_privmsg(self, ievent):
        """ check if msg is ctcp or not .. return 1 on handling. """
        if ievent.txt and ievent.txt[0] == '\001':
            self.handle_ctcp(ievent)
            return 1
    def handle_notice(self, ievent):
        """ handle notice event .. check for version request. """
        if ievent.txt and ievent.txt.find('VERSION') != -1:
            from jsb.version import getversion
            self.say(ievent.nick, getversion(), None, 'notice')
            return 1
    def handle_ctcp(self, ievent):
        """ handle client to client request .. version and ping. """
        if ievent.txt.find('VERSION') != -1:
            from jsb.version import getversion
            self.ctcpreply(ievent.nick, 'VERSION %s' % getversion())
        if ievent.txt.find('PING') != -1:
            try:
                # echo the two ping arguments back for the rtt measurement
                pingtime = ievent.txt.split()[1]
                pingtijsb = ievent.txt.split()[2]
                if pingtime:
                    self.ctcpreply(ievent.nick, 'PING ' + pingtime + ' ' + pingtijsb)
            except IndexError:
                pass
    def handle_error(self, ievent):
        """ show error. """
        txt = ievent.txt
        if txt.startswith('Closing'):
            if "banned" in txt.lower(): logging.error("WE ARE BANNED !! - %s - %s" % (self.cfg.server, ievent.txt)) ; self.exit()
            else: logging.error("%s - %s" % (self.cfg.name, txt))
        else: logging.error("%s - %s - %s" % (self.cfg.name.upper(), ", ".join(ievent.arguments[1:]), txt))
    def ping(self):
        """ ping the irc server. """
        logging.debug('%s - sending ping' % self.cfg.name)
        try:
            self._raw('PING :%s' % self.cfg.server)
            return 1
        except Exception, ex:
            logging.debug("%s - can't send ping: %s" % (self.cfg.name, str(ex)))
            return 0
    def handle_401(self, ievent):
        """ handle 401 .. nick not available. """
        pass
    def handle_700(self, ievent):
        """ handle 700 .. encoding request of the server. """
        try:
            self.encoding = ievent.arguments[1]
            logging.warn('%s - 700 encoding now is %s' % (self.cfg.name, self.encoding))
        except:
            pass
    def handle_465(self, ievent):
        """ we are banned.. exit the bot. """
        self.exit()
| Python |
# gozerbot/ircevent.py
#
#
# http://www.irchelp.org/irchelp/rfc/rfc2812.txt
""" an ircevent is extracted from the IRC string received from the server. """
## jsb imports
from jsb.utils.generic import toenc, fromenc, strippedtxt, fix_format
from jsb.lib.eventbase import EventBase
## basic imports
import time
import re
import types
import copy
import logging
## defines

# module-level shorthand for making deep copies of events
cpy = copy.deepcopy
## Ircevent class
class IrcEvent(EventBase):
    """ represents an IRC event. """
    def __deepcopy__(self, bla):
        # deep copies go through copyin so the copy keeps the IrcEvent type
        e = IrcEvent()
        e.copyin(self)
        return e
    def parse(self, bot, rawstr):
        """ parse raw string into ircevent. """
        self.bottype = "irc"
        self.bot = bot
        self.ttl = 2
        rawstr = rawstr.rstrip()
        splitted = re.split('\s+', rawstr)
        # lines without a :prefix come from the bot itself .. fake our own
        # prefix so the rest of the parser can assume one is present
        if not rawstr[0] == ':':
            assert bot.cfg
            splitted.insert(0, u":%s!%s@%s" % (bot.cfg.nick, bot.cfg.username, bot.cfg.server))
            rawstr = u":%s!%s@%s %s" % (bot.cfg.nick, bot.cfg.username, bot.cfg.server, rawstr)
        self.prefix = splitted[0][1:]
        # prefix is either nick!user@host or a plain server name
        nickuser = self.prefix.split('!')
        try:
            self.userhost = nickuser[1]
            self.nick = nickuser[0]
        except IndexError: self.userhost = None ; self.nick = None ; self.isservermsg = True
        self.cmnd = splitted[1]
        self.cbtype = self.cmnd
        # pfc maps command -> number of middle parameters; everything after
        # those is the trailing text
        if pfc.has_key(self.cmnd):
            self.arguments = splitted[2:pfc[self.cmnd]+2]
            txtsplit = re.split('\s+', rawstr, pfc[self.cmnd]+2)
            self.txt = txtsplit[-1]
        else:
            self.arguments = splitted[2:]
        if self.arguments: self.target = self.arguments[0]
        self.postfix = ' '.join(self.arguments)
        if self.target and self.target.startswith(':'): self.txt = ' '.join(self.arguments)
        if self.txt:
            # strip the leading : off the trailing text and take the first
            # word as the potential user command
            if self.txt[0] == ":": self.txt = self.txt[1:]
            if self.txt: self.usercmnd = self.txt.split()[0]
        if self.cmnd == 'PING': self.speed = 9
        # derive the channel from the command-specific argument position
        if self.cmnd == 'PRIVMSG':
            self.channel = self.arguments[0]
            if '\001' in self.txt: self.isctcp = True
        elif self.cmnd == 'JOIN' or self.cmnd == 'PART':
            if self.arguments: self.channel = self.arguments[0]
            else: self.channel = self.txt
        elif self.cmnd == 'MODE': self.channel = self.arguments[0]
        elif self.cmnd == 'TOPIC': self.channel = self.arguments[0]
        elif self.cmnd == 'KICK': self.channel = self.arguments[0]
        elif self.cmnd == '353': self.channel = self.arguments[2]
        elif self.cmnd == '324': self.channel = self.arguments[1]
        if self.userhost:
            self.ruserhost = self.userhost
            self.stripped = self.userhost
            self.auth = self.userhost
            try: self.hostname = self.userhost.split("@")[1]
            except: self.hostname = None
        self.origtxt = self.txt
        if self.channel:
            self.channel = self.channel.strip()
            self.origchannel = self.channel
        # a "channel" equal to our own nick means a private message .. use the
        # sender's userhost as channel instead
        if self.channel == self.bot.cfg.nick:
            logging.warn("irc - msg detected - setting channel to %s" % self.userhost)
            self.msg = True
            self.channel = self.userhost
        if not self.channel:
            for c in self.arguments:
                if c.startswith("#"): self.channel = c
        # numeric replies above 399 are errors .. log them (422 = no MOTD)
        try:
            nr = int(self.cmnd)
            if nr > 399 and not nr == 422: logging.error('%s - %s - %s - %s' % (self.bot.cfg.name, self.cmnd, self.arguments, self.txt))
        except ValueError: pass
        return self
    # NOTE(review): result=[] is a shared mutable default .. verify that
    # checkqueues/say treat it read-only (shared across all reply calls)
    def reply(self, txt, result=[], event=None, origin="", dot=u", ", nr=375, extend=0, *args, **kwargs):
        """ reply to this event """
        if self.checkqueues(result): return
        # pick the destination: dcc socket, private msg, silenced channel
        # (reply in private) or the originating channel
        if self.isdcc: self.bot.say(self.sock, txt, result, 'msg', self, nr, extend, dot, *args, **kwargs)
        elif self.msg: self.bot.say(self.nick, txt, result, 'msg', self, nr, extend, dot, *args, **kwargs)
        elif self.silent or (self.chan and self.chan.data and self.chan.data.silent): self.bot.say(self.nick, txt, result, 'msg', self, nr, extend, dot, *args, **kwargs)
        else: self.bot.say(self.channel, txt, result, 'msg', self, nr, extend, dot, *args, **kwargs)
        return self
## postfix count - how many arguments
# pfc maps an IRC command or numeric reply to the number of "middle"
# parameters it carries before the trailing text; IrcEvent.parse uses it to
# split arguments from the message text. Built as one dict literal instead
# of ~140 separate item assignments (same keys/values, same module-level
# name, just idiomatic).
pfc = {
    # commands
    'NICK': 0, 'QUIT': 0, 'SQUIT': 1, 'JOIN': 0, 'PART': 1,
    'TOPIC': 1, 'KICK': 2, 'PRIVMSG': 1, 'NOTICE': 1, 'SQUERY': 1,
    'PING': 0, 'ERROR': 0, 'AWAY': 0, 'WALLOPS': 0, 'INVITE': 1,
    # welcome / server info numerics
    '001': 1, '002': 1, '003': 1, '004': 4, '005': 15,
    '302': 1, '303': 1, '301': 2, '305': 1, '306': 1,
    # whois / whowas numerics
    '311': 5, '312': 3, '313': 2, '317': 3, '318': 2,
    '319': 2, '314': 5, '369': 2,
    # list / mode numerics
    '322': 3, '323': 1, '325': 3, '324': 4,
    '331': 2, '332': 2, '341': 3, '342': 2, '346': 3,
    '347': 2, '348': 3, '349': 2, '351': 3, '352': 7,
    '315': 2, '353': 3, '366': 2, '364': 3, '365': 2,
    '367': 2, '368': 2,
    # info / motd numerics
    '371': 1, '374': 1, '375': 1, '372': 1, '376': 1,
    '381': 1, '382': 2, '383': 5, '391': 2, '392': 1,
    '393': 1, '394': 1, '395': 1,
    # stats / luser numerics
    '262': 3, '242': 1, '235': 3, '250': 1, '251': 1,
    '252': 2, '253': 2, '254': 2, '255': 1, '256': 2,
    '257': 1, '258': 1, '259': 1, '263': 2, '265': 1,
    '266': 1,
    # error numerics
    '401': 2, '402': 2, '403': 2, '404': 2, '405': 2,
    '406': 2, '407': 2, '408': 2, '409': 1, '411': 1,
    '412': 1, '413': 2, '414': 2, '415': 2, '421': 2,
    '422': 1, '423': 2, '424': 1, '431': 1, '432': 2,
    '433': 2, '436': 2, '437': 2, '441': 3, '442': 2,
    '443': 3, '444': 2, '445': 1, '446': 1, '451': 1,
    '461': 2, '462': 1, '463': 1, '464': 1, '465': 1,
    '467': 2, '471': 2, '472': 2, '473': 2, '474': 2,
    '475': 2, '476': 2, '477': 2, '478': 3, '481': 1,
    '482': 2, '483': 1, '484': 1, '485': 1, '491': 1,
    '501': 1, '502': 1,
    # non-standard: server encoding request
    '700': 2,
}
| Python |
# gozerbot/channels.py
#
#
"""
channel related data. implemented with a persisted dict of dicts.
:example:
key = channels[event.channel]['key']
"""
## jsb imports
from jsb.utils.pdod import Pdod
class Channels(Pdod):
    """
    per-channel data, persisted as a dict of dicts through Pdod.
    :param fname: filename to persist the data to
    :type fname: string
    """
    def __init__(self, fname):
        # let the persisted-dict base class load the data
        Pdod.__init__(self, fname)
        # guarantee every channel record carries the expected attributes
        for record in self.data.values():
            if 'perms' not in record:
                record['perms'] = []
            if 'autovoice' not in record:
                record['autovoice'] = 0
    def __setitem__(self, name, value):
        # unknown channels get initialised to an empty record first
        if name not in self.data:
            self.data[name] = {}
        self.data[name] = value
    def getchannels(self):
        """
        return channels.
        """
        seen = []
        for name in self.data.keys():
            stripped = name.strip()
            if stripped not in seen:
                seen.append(stripped)
        return seen
    def getchannelswithkeys(self):
        """
        return channels with keys ("<channel> <key>" when a key is set,
        just the channel name otherwise).
        """
        seen = []
        for name in self.data.keys():
            stripped = name.strip()
            try:
                entry = stripped + ' ' + self.data[stripped]['key']
            except KeyError:
                entry = stripped
            if entry not in seen:
                seen.append(entry)
        return seen
    def getkey(self, channel):
        """
        return key of channel if set.
        :param channel: channel to get key from
        :type channel: string
        """
        try:
            return self.data[channel]['key']
        except:
            return None
    def getnick(self, channel):
        """
        return bot nick of channel if set.
        :param channel: channel to get key from
        :type channel: string
        """
        try:
            return self.data[channel]['nick']
        except:
            return None
| Python |
# jsb/socklib/irc/bot.py
#
#
#
"""
a bot object handles the dispatching of commands and check for callbacks
that need to be fired.
"""
## jsb imports
from jsb.utils.exception import handle_exception
from jsb.utils.generic import waitforqueue, uniqlist, strippedtxt
from jsb.lib.commands import cmnds
from jsb.lib.callbacks import callbacks
from jsb.lib.plugins import plugs as plugins
from jsb.lib.threads import start_new_thread, threaded
from jsb.utils.dol import Dol
from jsb.utils.pdod import Pdod
from jsb.lib.persiststate import PersistState
from jsb.lib.errors import NoSuchCommand
from jsb.lib.channelbase import ChannelBase
from jsb.lib.exit import globalshutdown
from jsb.lib.botbase import BotBase
from jsb.lib.partyline import partyline
from jsb.lib.wait import waiter
from channels import Channels
from irc import Irc
from ircevent import IrcEvent
## basic imports
import re
import socket
import struct
import Queue
import time
import os
import types
import logging
## defines

# matches a CTCP "DCC CHAT CHAT <address> <port>" request; used by
# handle_privmsg to spot incoming dcc chat offers
dccchatre = re.compile('\001DCC CHAT CHAT (\S+) (\d+)\001', re.I)
## classes
class IRCBot(Irc):
""" class that dispatches commands and checks for callbacks to fire. """
    def __init__(self, cfg={}, users=None, plugs=None, *args, **kwargs):
        # NOTE(review): cfg={} is a shared mutable default, passed straight to
        # Irc/BotBase - verify nothing mutates it across instances
        Irc.__init__(self, cfg, users, plugs, *args, **kwargs)
        if self.state:
            # make sure the per-bot state has the channel bookkeeping keys
            if not self.state.has_key('opchan'): self.state['opchan'] = []
            if not self.state.has_key('joinedchannels'): self.state['joinedchannels'] = []
    def _resume(self, data, botname, reto=None):
        """ resume the bot. """
        if not Irc._resume(self, data, botname, reto): return 0
        # re-run WHO on every joined channel to refresh the userhost cache
        for channel in self.state['joinedchannels']: self.who(channel)
        return 1
    def _dccresume(self, sock, nick, userhost, channel=None):
        """ resume dcc loop. """
        if not nick or not userhost: return
        # restart the read loop for this dcc session in its own thread
        start_new_thread(self._dccloop, (sock, nick, userhost, channel))
    def _dcclisten(self, nick, userhost, channel):
        """ accept dcc chat requests. """
        try:
            listenip = socket.gethostbyname(socket.gethostname())
            # NOTE(review): getlistensocket is not among this module's visible
            # imports - confirm where it is defined
            (port, listensock) = getlistensocket(listenip)
            ipip2 = socket.inet_aton(listenip)
            # DCC wants the ip as a 32 bit big-endian integer
            ipip = struct.unpack('>L', ipip2)[0]
            chatmsg = 'DCC CHAT CHAT %s %s' % (ipip, port)
            self.ctcp(nick, chatmsg)
            # NOTE(review): this overwrites self.sock (the irc socket) with the
            # accepted dcc socket - confirm this is intended
            self.sock = sock = listensock.accept()[0]
        except Exception, ex:
            handle_exception()
            logging.error('%s - dcc error: %s' % (self.cfg.name, str(ex)))
            return
        self._dodcc(sock, nick, userhost, channel)
    def _dodcc(self, sock, nick, userhost, channel=None):
        """ send welcome message and loop for dcc commands. """
        if not nick or not userhost: return
        try:
            sock.send('Welcome to the JSONBOT partyline ' + nick + " ;]\n")
            partylist = partyline.list_nicks()
            if partylist: sock.send("people on the partyline: %s\n" % ' .. '.join(partylist))
            sock.send("control character is ! .. bot broadcast is @\n")
        except Exception, ex:
            handle_exception()
            logging.error('%s - dcc error: %s' % (self.cfg.name, str(ex)))
            return
        # hand the session over to the dcc read loop in its own thread
        start_new_thread(self._dccloop, (sock, nick, userhost, channel))
def _dccloop(self, sock, nick, userhost, channel=None):
""" loop for dcc commands. """
sockfile = sock.makefile('r')
sock.setblocking(True)
res = ""
partyline.add_party(self, sock, nick, userhost, channel)
while 1:
time.sleep(0.001)
try:
res = sockfile.readline()
logging.debug("%s - dcc - %s got %s" % (self.cfg.name, userhost, res))
if self.stopped or not res:
logging.warn('%s - closing dcc with %s' % (self.cfg,name, nick))
partyline.del_party(nick)
return
except socket.timeout:
continue
except socket.error, ex:
try:
(errno, errstr) = ex
except:
errno = 0
errstr = str(ex)
if errno == 35 or errno == 11:
continue
else:
raise
except Exception, ex:
handle_exception()
logging.warn('%s - closing dcc with %s' % (self.cfg.name, nick))
partyline.del_party(nick)
return
try:
res = self.normalize(res)
ievent = IrcEvent()
ievent.printto = sock
ievent.bottype = "irc"
ievent.nick = nick
ievent.userhost = userhost
ievent.auth = userhost
ievent.channel = channel or ievent.userhost
ievent.origtxt = res
ievent.txt = res
ievent.cmnd = 'DCC'
ievent.cbtype = 'DCC'
ievent.bot = self
ievent.sock = sock
ievent.speed = 1
ievent.isdcc = True
ievent.msg = True
ievent.bind(self)
logging.debug("%s - dcc - constructed event" % self.cfg.name)
if ievent.txt[0] == "!":
self.doevent(ievent)
continue
elif ievent.txt[0] == "@":
partyline.say_broadcast_notself(ievent.nick, "[%s] %s" % (ievent.nick, ievent.txt))
q = Queue.Queue()
ievent.queues = [q]
ievent.txt = ievent.txt[1:]
self.doevent(ievent)
result = waitforqueue(q, 3000)
if result:
for i in result:
partyline.say_broadcast("[bot] %s" % i)
continue
else:
partyline.say_broadcast_notself(ievent.nick, "[%s] %s" % (ievent.nick, ievent.txt))
except socket.error, ex:
try:
(errno, errstr) = ex
except:
errno = 0
errstr = str(ex)
if errno == 35 or errno == 11:
continue
except Exception, ex:
handle_exception()
sockfile.close()
logging.warn('%s - closing dcc with %s' % (self.cfg.name, nick))
    def _dccconnect(self, nick, userhost, addr, port):
        """ connect to dcc request from nick. """
        try:
            port = int(port)
            logging.warn("%s - dcc - connecting to %s:%s (%s)" % (self.cfg.name, addr, port, userhost))
            # a colon in the address means ipv6
            if re.search(':', addr):
                sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                sock.connect((addr, port))
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((addr, port))
        except Exception, ex:
            logging.error('%s - dcc error: %s' % (self.cfg.name, str(ex)))
            return
        self._dodcc(sock, nick, userhost, userhost)
def broadcast(self, txt):
""" broadcast txt to all joined channels. """
for i in self.state['joinedchannels']: self.say(i, txt)
def getchannelmode(self, channel):
""" send MODE request for channel. """
if not channel:
return
self.putonqueue(9, None, 'MODE %s' % channel)
    def join(self, channel, password=None):
        """ join a channel .. use optional password. """
        chan = ChannelBase(channel, self.cfg.name)
        if password:
            # remember the channel key so rejoins work after restart
            chan.data.key = password.strip()
            chan.save()
            logging.warn("%s - using key %s for channel %s" % (self.cfg.name, chan.data.key, channel))
        result = Irc.join(self, channel, chan.data.key)
        # NOTE(review): Irc.join has no return statement (returns None), so
        # "result != 1" is always true and the defaults below never run -
        # confirm whether this early return is intended
        if result != 1:
            return result
        got = False
        # initialise channel defaults (control char, perms, mode) once
        if not chan.data.cc:
            chan.data.cc = self.cfg.defaultcc or '!'
            got = True
        if not chan.data.perms:
            chan.data.perms = []
            got = True
        if not chan.data.mode:
            chan.data.mode = ""
            got = True
        if got:
            chan.save()
        self.getchannelmode(channel)
        return 1
def handle_privmsg(self, ievent):
""" check if PRIVMSG is command, if so dispatch. """
if ievent.nick in self.nicks401:
logging.debug("%s - %s is available again" % (self.cfg,name, ievent.nick))
self.nicks401.remove(ievent.nick)
if not ievent.txt: return
chat = re.search(dccchatre, ievent.txt)
if chat:
if self.users.allowed(ievent.userhost, 'USER'):
start_new_thread(self._dccconnect, (ievent.nick, ievent.userhost, chat.group(1), chat.group(2)))
return
if '\001' in ievent.txt:
Irc.handle_privmsg(self, ievent)
return
ievent.bot = self
ievent.sock = self.sock
chan = ievent.channel
if chan == self.cfg.nick:
ievent.msg = True
ievent.speed = 4
ievent.printto = ievent.nick
ccs = ['!', '@', self.cfg['defaultcc']]
if ievent.isresponse:
return
if self.cfg['noccinmsg'] and self.msg:
self.put(ievent)
elif ievent.txt[0] in ccs:
self.put(ievent)
return
self.put(ievent)
    def handle_join(self, ievent):
        """ handle joins. """
        # a 401'ed nick that joins is reachable again
        if ievent.nick in self.nicks401:
            logging.debug("%s - %s is available again" % (self.cfg.name, ievent.nick))
            self.nicks401.remove(ievent.nick)
        chan = ievent.channel
        nick = ievent.nick
        if nick == self.cfg.nick:
            # we joined ourselves .. ask WHO to prime the userhost cache
            logging.warn("%s - joined %s" % (self.cfg.name, ievent.channel))
            time.sleep(0.5)
            self.who(chan)
            return
        logging.info("%s - %s joined %s" % (self.cfg.name, ievent.nick, ievent.channel))
        self.userhosts[nick] = ievent.userhost
def handle_kick(self, ievent):
""" handle kick event. """
try:
who = ievent.arguments[1]
except IndexError:
return
chan = ievent.channel
if who == self.cfg.nick:
if chan in self.state['joinedchannels']:
self.state['joinedchannels'].remove(chan)
self.state.save()
def handle_nick(self, ievent):
""" update userhost cache on nick change. """
nick = ievent.txt
self.userhosts[nick] = ievent.userhost
if ievent.nick == self.cfg.nick or ievent.nick == self.cfg.orignick:
self.cfg['nick'] = nick
self.cfg.save()
def handle_part(self, ievent):
""" handle parts. """
chan = ievent.channel
if ievent.nick == self.cfg.nick:
logging.warn('%s - parted channel %s' % (self.cfg.name, chan))
if chan in self.state['joinedchannels']:
self.state['joinedchannels'].remove(chan)
self.state.save()
    def handle_ievent(self, ievent):
        """ check for callbacks, call Irc method. """
        try:
            # let the base Irc class do the regular dispatch first
            Irc.handle_ievent(self, ievent)
            # a JOIN or a private message proves the nick exists again
            if ievent.cmnd == 'JOIN' or ievent.msg:
                if ievent.nick in self.nicks401: self.nicks401.remove(ievent.nick)
            # non-PRIVMSG events get a bound copy fired at the event machinery;
            # the original is flagged nocb so callbacks do not run twice
            if ievent.cmnd != "PRIVMSG":
                i = IrcEvent()
                i.copyin(ievent)
                i.bot = self
                i.sock = self.sock
                ievent.nocb = True
                self.doevent(i)
        except:
            # bare except is this file's convention: log and keep the loop alive
            handle_exception()
def handle_quit(self, ievent):
""" check if quit is because of a split. """
if '*.' in ievent.txt or self.cfg.server in ievent.txt: self.splitted.append(ievent.nick)
    def handle_mode(self, ievent):
        """ check if mode is about channel if so request channel mode. """
        logging.info("%s - mode change %s" % (self.cfg.name, str(ievent.arguments)))
        try:
            # a MODE with only two arguments targets the channel itself ..
            # in that case re-request the full channel mode
            dummy = ievent.arguments[2]
        except IndexError:
            chan = ievent.channel
            self.getchannelmode(chan)
        # persist the new mode string on the channel object
        if not ievent.chan: ievent.bind(self)
        if ievent.chan:
            ievent.chan.data.mode = ievent.arguments[1]
            ievent.chan.save()
def handle_311(self, ievent):
""" handle 311 response .. sync with userhosts cache. """
target, nick, user, host, dummy = ievent.arguments
nick = nick
userhost = "%s@%s" % (user, host)
logging.debug('%s - adding %s to userhosts: %s' % (self.cfg.name, nick, userhost))
self.userhosts[nick] = userhost
def handle_352(self, ievent):
""" handle 352 response .. sync with userhosts cache. """
args = ievent.arguments
channel = args[1]
nick = args[5]
user = args[2]
host = args[3]
userhost = "%s@%s" % (user, host)
logging.debug('%s - adding %s to userhosts: %s' % (self.cfg.name, nick, userhost))
self.userhosts[nick] = userhost
def handle_353(self, ievent):
""" handle 353 .. check if we are op. """
userlist = ievent.txt.split()
chan = ievent.channel
for i in userlist:
if i[0] == '@' and i[1:] == self.cfg.nick:
if chan not in self.state['opchan']:
self.state['opchan'].append(chan)
    def handle_324(self, ievent):
        """ handle mode request responses. """
        # 324 is the reply to a MODE query .. arguments[2] holds the mode string
        if not ievent.chan: ievent.bind(self)
        ievent.chan.data.mode = ievent.arguments[2]
        ievent.chan.save()
def handle_invite(self, ievent):
""" join channel if invited by OPER. """
if self.users and self.users.allowed(ievent.userhost, ['OPER', ]): self.join(ievent.txt)
def settopic(self, channel, txt):
""" set topic of channel to txt. """
self.putonqueue(7, None, 'TOPIC %s :%s' % (channel, txt))
    def gettopic(self, channel, event=None):
        """ get topic data.

            returns a (topic, setter, time-set) tuple; elements stay None
            when the server response lacks them.
        """
        # 332 carries the topic text, 333 who set it and when
        q = Queue.Queue()
        i332 = waiter.register("332", queue=q)
        i333 = waiter.register("333", queue=q)
        self.putonqueue(7, None, 'TOPIC %s' % channel)
        res = waitforqueue(q, 5000)
        who = what = when = None
        for r in res:
            if not r.postfix: continue
            try:
                if r.cmnd == "332": what = r.txt ; waiter.ready(i332) ; continue
                waiter.ready(i333)
                splitted = r.postfix.split()
                who = splitted[2]
                when = float(splitted[3])
            except (IndexError, ValueError): continue
        return (what, who, when)
| Python |
# gozerbot/socket/xmpp/iq.py
#
#
""" Iq. """
## jsb imports
from jsb.lib.eventbase import EventBase
from jsb.utils.trace import whichmodule
from jsb.lib.gozerevent import GozerEvent
## basic imports
import logging
import time
## Iq class
class Iq(GozerEvent):
    """ jabber iq stanza event. """
    def __init__(self, nodedict=None):
        """ build an Iq event from a node dictionary.

            FIX: nodedict defaults to None instead of a shared mutable {}
            so one default dict is never aliased between instances.
        """
        GozerEvent.__init__(self, nodedict if nodedict is not None else {})
        self.element = "iq"
        self.jabber = True
        self.cmnd = "IQ"
        self.cbtype = "IQ"
        self.bottype = "xmpp"
    def parse(self):
        """ set ircevent compatible attributes """
        self.cmnd = 'Iq'
        self.conn = None
        self.arguments = []
        # nick is the resource part of the sending jid, when present
        try: self.nick = self.fromm.split('/')[1]
        except (AttributeError, IndexError): pass
        self.jid = self.jid or self.fromm
        self.ruserhost = self.jid
        self.userhost = str(self.jid)
        self.resource = self.nick
        self.stripped = self.jid.split('/')[0]
        self.channel = self.fromm.split('/')[0]
        self.printto = self.channel
        self.origtxt = self.txt
        self.time = time.time()
        self.msg = None
        self.rest = ' '.join(self.args)
        self.sock = None
        self.speed = 5
        if self.type == 'groupchat': self.groupchat = True
        else: self.groupchat = False
        # makeargrest fills command/argument attributes from the event text
        if self.txt: makeargrest(self)
        self.joined = False
        self.denied = False
| Python |
# jsb/socklib/xmpp/message.py
#
#
""" jabber message definition .. types can be normal, chat, groupchat,
headline or error
"""
## jsb imports
from jsb.utils.exception import handle_exception
from jsb.utils.trace import whichmodule
from jsb.utils.generic import toenc, fromenc, jabberstrip
from jsb.utils.locking import lockdec
from jsb.lib.eventbase import EventBase
from jsb.lib.errors import BotNotSetInEvent
from jsb.lib.gozerevent import GozerEvent
## xmpp import
from jsb.contrib.xmlstream import NodeBuilder, XMLescape, XMLunescape
## basic imports
import types
import time
import thread
import logging
import re
## locks
replylock = thread.allocate_lock()
replylocked = lockdec(replylock)
## classes
class Message(GozerEvent):
""" jabber message object. """
def __init__(self, nodedict={}):
self.element = "message"
self.jabber = True
self.cmnd = "MESSAGE"
self.cbtype = "MESSAGE"
self.bottype = "xmpp"
self.type = "normal"
GozerEvent.__init__(self, nodedict)
def __copy__(self):
return Message(self)
def __deepcopy__(self, bla):
m = Message()
m.copyin(self)
return m
def parse(self, bot=None):
""" set ircevent compat attributes. """
self.bot = bot
self.jidchange = False
#self.cmnd = 'MESSAGE'
try: self.resource = self.fromm.split('/')[1]
except IndexError: pass
self.channel = self['fromm'].split('/')[0]
self.origchannel = self.channel
self.nick = self.resource
self.jid = self.fromm
self.ruserhost = self.jid
self.userhost = self.jid
self.stripped = self.jid.split('/')[0]
self.printto = self.channel
for node in self.subelements:
try:
self.txt = node.body.data
break
except (AttributeError, ValueError):
continue
if self.txt: self.usercmnd = self.txt.split()[0]
else: self.usercmnd = ""
self.origtxt = self.txt
self.time = time.time()
if self.type == 'groupchat':
self.groupchat = True
self.auth = self.userhost
else:
self.groupchat = False
self.auth = self.stripped
self.nick = self.jid.split("@")[0]
self.msg = not self.groupchat
self.makeargs()
def errorHandler(self):
""" dispatch errors to their handlers. """
try:
code = self.get('error').code
except Exception, ex:
handle_exception()
try:
method = getattr(self, "handle_%s" % code)
if method:
logging.error('sxmpp.core - dispatching error to handler %s' % str(method))
method(self)
except AttributeError, ex: logging.error('sxmpp.core - unhandled error %s' % code)
except: handle_exception()
def normalize(self, what):
return self.bot.normalize(what)
| Python |
# jsb/socklib/xmpp/core.py
#
#
"""
this module contains the core xmpp handling functions.
"""
## jsb imports
from jsb.lib.eventbase import EventBase
from jsb.lib.config import Config
from jsb.utils.generic import toenc, jabberstrip, fromenc
from jsb.utils.lazydict import LazyDict
from jsb.utils.exception import handle_exception
from jsb.utils.locking import lockdec
from jsb.lib.threads import start_new_thread
from jsb.utils.trace import whichmodule
from jsb.lib.gozerevent import GozerEvent
from jsb.lib.fleet import getfleet
## xmpp import
from jsb.contrib.xmlstream import NodeBuilder, XMLescape, XMLunescape
## basic imports
import socket
import os
import time
import copy
import logging
import thread
import cgi
import xml
## locks
outlock = thread.allocate_lock()
inlock = thread.allocate_lock()
connectlock = thread.allocate_lock()
outlocked = lockdec(outlock)
inlocked = lockdec(inlock)
connectlocked = lockdec(connectlock)
## classes
class XMLStream(NodeBuilder):
    """ base xml stream .. buffers incoming data, parses stanzas with
        NodeBuilder and dispatches them to registered per-tag handlers.
        NOTE(review): self.cfg and the stopped/stopreadloop flags are read
        here but set by the subclass - confirm subclass initializes them
        before this __init__ runs.
    """
    def __init__(self, name=None):
        # self.cfg must already be set by the subclass (see SXMPPBot)
        if not self.cfg: raise Exception("sxmpp - config is not set")
        self.cfg.name = name or self.cfg.name
        if not self.cfg.name: raise Exception("bot name is not set in config file %s" % self.cfg.filename)
        self.connection = None
        self.encoding = "utf-8"
        self.stop = False
        self.result = LazyDict()     # attributes of the tag being parsed
        self.final = LazyDict()      # accumulated data for the current stanza
        self.subelements = []        # parsed child elements of the stanza
        self.reslist = []
        self.cur = u""               # tag currently being parsed
        self.tags = []               # tags seen in the current stanza
        self.handlers = LazyDict()   # tag name -> handler function
        # message/presence/iq handlers are expected to exist on the subclass
        self.addHandler('proceed', self.handle_proceed)
        self.addHandler('message', self.handle_message)
        self.addHandler('presence', self.handle_presence)
        self.addHandler('iq', self.handle_iq)
        self.addHandler('stream', self.handle_stream)
        self.addHandler('stream:stream', self.handle_stream)
        self.addHandler('stream:error', self.handle_streamerror)
        self.addHandler('stream:features', self.handle_streamfeatures)
    def handle_proceed(self, data):
        """ default stream handler. """
        logging.debug("%s - proceeding" % self.cfg.name)
    def handle_stream(self, data):
        """ default stream handler. """
        logging.info("%s - stream - %s" % (self.cfg.name, data.dump()))
    def handle_streamend(self, data):
        """ default stream handler. """
        logging.warn("%s - stream END - %s" % (self.cfg.name, data))
    def handle_streamerror(self, data):
        """ default stream error handler. """
        logging.error("%s - STREAMERROR - %s" % (self.cfg.name, data.dump()))
    def handle_streamfeatures(self, data):
        """ default stream features handler. """
        logging.debug("%s - STREAMFEATURES: %s" % (self.cfg.name, LazyDict(data).dump()))
    def addHandler(self, namespace, func):
        """ add a namespace handler. """
        self.handlers[namespace] = func
    def delHandler(self, namespace):
        """ delete a namespace handler. """
        del self.handlers[namespace]
    def getHandler(self, namespace):
        """ get a namespace handler, or None when not registered. """
        try: return self.handlers[namespace]
        except KeyError: return None
    def parse_one(self, data):
        """ handle one xml stanza .. returns a falsy value on parse failure. """
        # re-init the NodeBuilder so every stanza starts with a fresh parser
        NodeBuilder.__init__(self)
        self._dispatch_depth = 2
        try: return self._parser.Parse(data.strip())
        except xml.parsers.expat.ExpatError, ex:
            if 'not well-formed' in str(ex):
                logging.error("%s - data is not well formed" % self.cfg.name)
                logging.debug(data)
                handle_exception()
                logging.debug("buffer: %s previous: %s" % (self.buffer, self.prevbuffer))
                return {}
            logging.debug("%s - ALERT: %s - %s" % (self.cfg.name, str(ex), data))
        except Exception, ex:
            handle_exception()
            return {}
    def checkifvalid(self, data):
        # parse data, then discard all intermediate parse state built up
        result = self.parse_one(data)
        self.final = {}
        self.reslist = []
        self.tags = []
        self.subelements = []
        #self.buffer = ""
        return result
    @inlocked
    def loop_one(self, data):
        """ handle one xml stanza. """
        if self.parse_one(data): return self.finish(data)
        return {}
    def _readloop(self):
        """ proces all incoming data. """
        logging.debug('%s - starting readloop' % self.cfg.name)
        self.prevbuffer = ""
        self.buffer = ""
        self.error = ""
        data = ""
        while not self.stopped and not self.stopreadloop:
            time.sleep(0.001)
            try:
                data = jabberstrip(fromenc(self.connection.read()))
                if self.stopped or self.stopreadloop: break
                logging.info(u"%s - incoming: %s" % (self.cfg.name, data))
                # server closed the stream on us
                if data.endswith("</stream:stream>"):
                    logging.error("%s - end of stream detected" % self.cfg.name)
                    self.error = "streamend"
                    self.disconnectHandler(Exception('remote %s disconnected' % self.cfg.host))
                    break
                # empty read means the remote end disconnected
                if data == "":
                    logging.error('%s - remote disconnected' % self.cfg.name)
                    self.error = 'disconnected'
                    self.disconnectHandler(Exception('remote %s disconnected' % self.cfg.host))
                    break
                if True:
                    # accumulate until a known closing target is in the buffer
                    self.buffer = u"%s%s" % (self.buffer, data)
                    handlers = self.handlers.keys()
                    handlers.append("/")
                    for handler in handlers:
                        target = "%s>" % handler
                        index = self.buffer.find(target)
                        if index != -1:
                            try:
                                if self.loop_one(self.buffer[:index+len(target)]):
                                    self.buffer = self.buffer[index+1+len(target):]
                                else:
                                    self.buffer = ""
                                break
                            except: handle_exception()
            except AttributeError, ex:
                # self.connection went away underneath us
                logging.error("%s - connection disappeared: %s" % (self.cfg.name, str(ex)))
                self.buffer = ""
                self.error = str(ex)
                self.disconnectHandler(ex)
                break
            except xml.parsers.expat.ExpatError, ex:
                logging.error("%s - %s - %s" % (self.cfg.name, str(ex), data))
                self.buffer = ""
                self.error = str(ex)
                self.disconnectHandler(ex)
                break
            except Exception, ex:
                handle_exception()
                self.error = str(ex)
                self.disconnectHandler(ex)
                break
        logging.warn('%s - stopping readloop .. %s' % (self.cfg.name, self.error or 'error not set'))
    @outlocked
    def _raw(self, stanza):
        """ output a xml stanza to the socket. """
        if not self.connection: return
        time.sleep(0.01)
        try:
            stanza = stanza.strip()
            if not stanza:
                logging.debug("%s - no stanze provided. called from: %s" % (self.cfg.name, whichmodule()))
                return
            what = jabberstrip(stanza)
            what = toenc(stanza)
            logging.debug("%s - out - %s" % (self.cfg.name, what))
            if not what.endswith('>') or not what.startswith('<'):
                logging.error('%s - invalid stanza: %s' % (self.cfg.name, what))
                return
            # only allow the four known top level stanza types on the wire
            if what.startswith('<stream') or what.startswith('<message') or what.startswith('<presence') or what.startswith('<iq'):
                logging.debug(u"%s - sxmpp - out - %s" % (self.cfg.name, what))
                try: self.connection.send(what + u"\r\n")
                except AttributeError: self.connection.write(what)
            else: logging.error('%s - invalid stanza: %s' % (self.cfg.name, what))
        except socket.error, ex:
            if 'Broken pipe' in str(ex):
                # remote closed while we were writing .. best effort, ignore
                logging.debug('%s - core - broken pipe .. ignoring' % self.cfg.name)
                return
            self.error = str(ex)
            handle_exception()
        except Exception, ex:
            self.error = str(ex)
            handle_exception()
    def connect(self):
        """ connect to the server, open the stream, then switch to tls. """
        target = self.cfg.server or self.cfg.host
        logging.warn("%s - TARGET is %s" % (self.cfg.name, target))
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setblocking(0)
        self.sock.settimeout(10)
        logging.warn("%s - connecting to %s:%s" % (self.cfg.name, self.cfg.server or self.cfg.host, self.cfg.port))
        self.sock.connect((self.cfg.server or self.cfg.host, self.cfg.port))
        self.sock.settimeout(60)
        time.sleep(1)
        logging.debug("%s - starting stream" % self.cfg.name)
        self.sock.send('<stream:stream to="%s" xmlns="jabber:client" xmlns:stream="http://etherx.jabber.org/streams" version="1.0">\r\n' % self.cfg.user.split('@')[1])
        time.sleep(3)
        result = self.sock.recv(1500)
        logging.debug("%s - %s" % (self.cfg.name, str(result)))
        self.loop_one(result)
        # ask the server to switch this connection to tls
        self.sock.send('<starttls xmlns="urn:ietf:params:xml:ns:xmpp-tls"/>\r\n')
        time.sleep(3)
        result = self.sock.recv(1500)
        logging.debug("%s - %s" % (self.cfg.name, str(result)))
        self.loop_one(result)
        self.sock.settimeout(60)
        self.sock.setblocking(1)
        return self.dossl()
    def dossl(self):
        """ enable ssl on the socket .. returns True on success. """
        try:
            import ssl
            logging.debug("%s - wrapping ssl socket" % self.cfg.name)
            self.connection = ssl.wrap_socket(self.sock)
        except ImportError:
            # old pythons without the ssl module .. fall back to socket.ssl
            logging.debug("%s - making ssl socket" % self.cfg.name)
            self.connection = socket.ssl(self.sock)
        if self.connection:
            return True
        else:
            return False
    def logon(self):
        """ called upon logon on the server. """
        # _doprocess is provided elsewhere (not defined in this class)
        start_new_thread(self._doprocess, ())
    def finish(self, data):
        """ finish processing of an xml stanza. """
        methods = []
        self.final['subelements'] = self.subelements
        for subelement in self.subelements:
            logging.debug("%s - %s" % (self.cfg.name, str(subelement)))
            for elem in subelement:
                logging.debug("%s - setting %s handler" % (self.cfg.name, elem))
                methods.append(self.getHandler(elem))
        # NOTE(review): subelement below is the leftover loop variable, so
        # every method receives the *last* subelement - confirm intended
        for method in methods:
            if not method: continue
            try:
                result = GozerEvent(subelement)
                result.bot = self
                result.orig = data
                result.jabber = True
                method(result)
            except Exception, ex: handle_exception()
        if self.tags:
            element = self.tags[0]
            logging.debug("%s - setting element: %s" % (self.cfg.name, element))
        else: element = 'stream'
        self.final['element'] = element
        method = self.getHandler(element)
        if method:
            try:
                result = GozerEvent(self.final)
                result.bot = self
                result.orig = data
                result.jabber = True
                method(result)
            except Exception, ex:
                handle_exception()
                result = {}
        else:
            logging.error("%s - can't find handler for %s" % (self.cfg.name, element))
            result = {}
        if result:
            # stanza fully handled .. reset parse state for the next one
            self.final = {}
            self.reslist = []
            self.tags = []
            self.subelements = []
            self.buffer = ""
        return result
    def unknown_starttag(self, tag, attrs):
        """ handler called by the self._parser on start of a unknown start tag. """
        NodeBuilder.unknown_starttag(self, tag, attrs)
        self.cur = tag
        # top level attributes go to final, nested ones to result
        if not self.tags: self.final.update(attrs)
        else: self.result[tag] = attrs
        self.tags.append(tag)
    def unknown_endtag(self, tag):
        """ handler called by the self._parser on start of a unknown endtag. """
        NodeBuilder.unknown_endtag(self, tag)
        self.result = {}
        self.cur = u""
    def handle_data(self, data):
        """ node data handler. """
        NodeBuilder.handle_data(self, data)
    def dispatch(self, dom):
        """ dispatch a dom to the appropiate handler. """
        res = LazyDict()
        parentname = dom.getName()
        data = dom.getData()
        if data:
            self.final[parentname] = data
            # the body element carries the message text
            if parentname == 'body': self.final['txt'] = data
        attrs = dom.getAttributes()
        ns = dom.getNamespace()
        res[parentname] = LazyDict()
        res[parentname]['data'] = data
        res[parentname].update(attrs)
        if ns: res[parentname]['xmlns'] = ns
        # collect one level of children under the parent entry
        for child in dom.getChildren():
            name = child.getName()
            data = child.getData()
            if data: self.final[name] = data
            attrs = child.getAttributes()
            ns = child.getNamespace()
            res[parentname][name] = LazyDict()
            res[parentname][name]['data'] = data
            res[parentname][name].update(attrs)
            self.final.update(attrs)
            if ns: res[parentname][name]['xmlns'] = ns
        self.subelements.append(res)
    def disconnectHandler(self, ex):
        """ handler called on disconnect. """
        self.stopped = True
        logging.warn('%s - disconnected: %s' % (self.cfg.name, str(ex)))
| Python |
# jsb/socklib/xmpp/jid.py
#
#
""" JID related helpers. """
## classes
class InvalidJID(BaseException):
    # raised when a JID string fails validation or has no host part.
    # NOTE(review): subclasses BaseException, not Exception, so broad
    # 'except Exception' handlers will not catch it - confirm intended.
    pass
class JID(object):
    """ class representing a JID (user@host/resource). """
    def __init__(self, str):
        """ split str into full / user / userhost / host / resource parts.

            an empty (falsy) str yields an all-empty JID; otherwise raises
            InvalidJID when validation fails or no host part is present.
            FIX: .user is now also set on the non-empty path (previously
            only the empty path set it, so valid JIDs lacked the attribute).
            (the parameter keeps its historical name 'str' for backward
            compatibility, even though it shadows the builtin.)
        """
        if not str:
            self.full = ""
            self.user = ""
            self.userhost = ""
            self.host = ""
            self.resource = ""
            return
        if not self.validate(str):
            raise InvalidJID(str)
        self.full = str
        self.userhost = self.full.split('/')[0]
        try:
            self.host = self.userhost.split('@')[1]
        except (IndexError, ValueError):
            raise InvalidJID(str)
        self.user = self.userhost.split('@')[0]
        try:
            self.resource = self.full.split('/')[1]
        except (IndexError, ValueError):
            self.resource = u""
    def validate(self, s):
        """ return True when s looks like a valid JID (no '#' allowed).

            FIX: previously returned None implicitly for invalid input;
            now returns an explicit bool (still falsy, backward compatible).
        """
        return '#' not in s
| Python |
# jsb/socklib/xmpp/errors.py
#
#
""" xmpp error codes. """
# legacy numeric xmpp error codes (XEP-0086) mapped to their descriptions.
# FIX: 502/504 were misspelled "Remove Server ..." instead of the XEP-0086
# names "Remote Server Error" / "Remote Server Timeout".
xmpperrors = {'400': 'Bad Request',
              '401': 'Unauthorized',
              '402': 'Payment Required',
              '403': 'Forbidden',
              '404': 'Not Found',
              '405': 'Not Allowed',
              '406': 'Not Acceptable',
              '407': 'Registration Required',
              '408': 'Request Timeout',
              '409': 'Conflict',
              '500': 'Internal Server Error',
              '501': 'Not Implemented',
              '502': 'Remote Server Error',
              '503': 'Service Unavailable',
              '504': 'Remote Server Timeout',
              '510': 'Disconnected'
             }
| Python |
# gozerbot/xmpp/namespace.py
#
#
## CONSTANTS
# per-stanza attribute names and subelement names used when parsing.
attributes = {
    'message': ['type', 'from', 'to', 'id'],
    'presence': ['type', 'from', 'to', 'id'],
    'iq': ['type', 'from', 'to', 'id'],
}
subelements = {
    'message': ['subject', 'body', 'error', 'thread', 'x'],
    'presence': ['show', 'status', 'priority', 'x'],
    'iq': ['query', 'error'],
}
| Python |
# jsb/socklib/xmpp/wait.py
#
#
""" wait for ircevent based on ircevent.CMND """
## jsb imports
from jsb.utils.locking import lockdec
from jsb.lib.wait import Wait
import jsb.lib.threads as thr
## basic imports
import time
import thread
import logging
## locks
waitlock = thread.allocate_lock()
locked = lockdec(waitlock)
## classes
class XMPPWait(Wait):
    """ wait object for jabber messages. """
    def register(self, catch, queue, timeout=15):
        """ register wait for privmsg.

            catch is the userhost to wait for; returns the ticket number
            used to correlate and later delete the wait entry.
        """
        logging.debug('xmpp.wait - registering for %s' % catch)
        self.ticket += 1
        self.waitlist.append((catch, queue, self.ticket))
        if timeout:
            # a timeout thread removes the entry when nothing arrives in time
            thr.start_new_thread(self.dotimeout, (timeout, self.ticket))
        return self.ticket
    def check(self, msg):
        """ check if <msg> is waited for. """
        # walk backwards so deleting entries does not shift unvisited indices
        for teller in range(len(self.waitlist)-1, -1, -1):
            i = self.waitlist[teller]
            if i[0] == msg.userhost:
                msg.ticket = i[2]
                i[1].put_nowait(msg)
                self.delete(msg.ticket)
                logging.debug('xmpp.wait - got response for %s' % i[0])
                msg.isresponse = 1
    @locked
    def delete(self, ticket):
        """ delete wait item with ticket nr. """
        for itemnr in range(len(self.waitlist)-1, -1, -1):
            item = self.waitlist[itemnr]
            if item[2] == ticket:
                # wake the waiter with a None sentinel before removing
                item[1].put_nowait(None)
                try:
                    del self.waitlist[itemnr]
                    logging.debug('sxmpp.wait - deleted ' + str(ticket))
                except IndexError:
                    pass
                return 1
class XMPPErrorWait(XMPPWait):
    """ wait for jabber errors. """
    def check(self, msg):
        """ push msg to every waiter registered for its error code (or 'ALL'). """
        if msg.type != 'error':
            return
        errorcode = msg.error
        # walk backwards so deletion does not shift unvisited indices
        for idx in range(len(self.waitlist) - 1, -1, -1):
            item = self.waitlist[idx]
            if item[0] not in ('ALL', errorcode):
                continue
            msg.ticket = item[2]
            item[1].put_nowait(msg)
            self.delete(msg.ticket)
            logging.debug('sxmpp.errorwait - got error response for %s' % item[0])
| Python |
# jsb/socklib/xmpp/presence.py
#
#
""" Presence. """
# jsb imports
from jsb.lib.eventbase import EventBase
from jsb.utils.trace import whichmodule
from jsb.lib.gozerevent import GozerEvent
## basic imports
import time
import logging
## classes
class Presence(GozerEvent):
    """ jabber presence stanza event. """
    def __init__(self, nodedict=None):
        """ build a Presence event from a node dictionary.

            FIX: nodedict defaults to None instead of a shared mutable {}
            so one default dict is never aliased between instances.
        """
        GozerEvent.__init__(self, nodedict if nodedict is not None else {})
        self.element = "presence"
        self.jabber = True
        self.cmnd = "PRESENCE"
        self.cbtype = "PRESENCE"
        self.bottype = "xmpp"
    def parse(self):
        """ set ircevent compatible attributes """
        self.cmnd = 'PRESENCE'
        # nick is the resource part of the sending jid, when present
        try: self.nick = self.fromm.split('/')[1]
        except (AttributeError, IndexError): self.nick = ""
        self.jid = self.jid or self.fromm
        self.ruserhost = self.jid
        self.userhost = str(self.jid)
        self.resource = self.nick
        self.stripped = self.jid.split('/')[0]
        self.auth = self.stripped
        self.channel = self.fromm.split('/')[0]
        self.printto = self.channel
        self.origtxt = self.txt
        self.time = time.time()
        if self.type == 'groupchat': self.groupchat = True
        else: self.groupchat = False
| Python |
# jsb/socklib/xmpp/bot.py
#
#
""" jabber bot definition """
## jsb imports
from jsb.lib.users import users
from jsb.utils.exception import handle_exception
from jsb.utils.trace import whichmodule
from jsb.utils.locking import lockdec
from jsb.utils.pdod import Pdod
from jsb.utils.dol import Dol
from jsb.utils.generic import stripcolor
from jsb.lib.less import Less
from jsb.lib.callbacks import callbacks, remote_callbacks
from jsb.lib.threads import start_new_thread
from jsb.lib.botbase import BotBase
from jsb.lib.exit import globalshutdown
from jsb.lib.channelbase import ChannelBase
from jsb.lib.fleet import getfleet
## jsb.socket imports
from jsb.utils.generic import waitforqueue, jabberstrip, getrandomnick, toenc, fromenc
## xmpp imports
from jsb.contrib.xmlstream import XMLescape, XMLunescape
from presence import Presence
from message import Message
from iq import Iq
from core import XMLStream
from jid import JID, InvalidJID
from errors import xmpperrors
## basic imports
import time
import Queue
import os
import threading
import thread
import types
import xml
import re
import hashlib
import logging
import cgi
## locks
outlock = thread.allocate_lock()
inlock = thread.allocate_lock()
connectlock = thread.allocate_lock()
outlocked = lockdec(outlock)
inlocked = lockdec(inlock)
connectlocked = lockdec(connectlock)
## SXMPPBot class
class SXMPPBot(XMLStream, BotBase):
"""
xmpp bot class.
"""
    def __init__(self, cfg=None, usersin=None, plugs=None, jid=None, *args, **kwargs):
        """ construct the xmpp bot: BotBase first (sets self.cfg), then the
            XMLStream on top. raises when no user is configured.
        """
        BotBase.__init__(self, cfg, usersin, plugs, jid, *args, **kwargs)
        if not self.cfg: raise Exception("sxmpp - config is not set.")
        if not self.cfg.user: raise Exception("sxmpp - user is not set.")
        # split user@host into separate config fields
        try: self.cfg.username, self.cfg.host = self.cfg.user.split('@')
        except (ValueError, TypeError): raise Exception("%s - user not set - %s" % (self.cfg.name, str(self.cfg)))
        XMLStream.__init__(self, self.cfg.name)
        self.type = 'sxmpp'
        self.sock = None
        self.lastin = None
        self.test = 0
        self.connecttime = 0
        self.connection = None
        self.jabber = True
        self.jids = {}            # channel -> nick -> real jid mapping
        self.topics = {}
        self.timejoined = {}
        self.channels409 = []     # channels that answered with a 409 error
        if self.state and not self.state.data.ratelimit: self.state.data.ratelimit = 0.02
        # default to the standard xmpp client port when not configured
        try: self.cfg.port = int(self.cfg.port)
        except (ValueError, TypeError): self.cfg.port = 5222
        logging.warn("%s - user is %s" % (self.cfg.name, self.cfg.user))
def _resumedata(self):
""" return data needed for resuming. """
return {self.cfg.name: {
'name': self.cfg.name,
'type': self.type,
'nick': self.cfg.nick,
'server': self.cfg.server,
'port': self.cfg.port,
'password': self.cfg.password,
'ipv6': self.cfg.ipv6,
'user': self.cfg.user
}}
    def _keepalive(self):
        """ keepalive method .. send presence to the server every 3 minutes. """
        nrsec = 0
        self.sendpresence()
        while not self.stopped:
            # count seconds so a stop request is noticed within one second
            time.sleep(1)
            nrsec += 1
            if nrsec < 180: continue
            else: nrsec = 0
            self.sendpresence()
    def sendpresence(self):
        """ send presence based on status and status text set by user. """
        # state may be absent .. fall back to empty show/status
        if self.state:
            if self.state.has_key('status') and self.state['status']: status = self.state['status']
            else: status = ""
            if self.state.has_key('show') and self.state['show']: show = self.state['show']
            else: show = ""
        else:
            status = ""
            show = ""
        logging.debug('%s - keepalive - %s - %s' % (self.cfg.name, show, status))
        # only include the presence fields that are actually set
        if show and status: p = Presence({'to': self.cfg.user, 'show': show, 'status': status})
        elif show: p = Presence({'to': self.cfg.user, 'show': show })
        elif status: p = Presence({'to': self.cfg.user, 'status': status})
        else: p = Presence({'to': self.cfg.user })
        self.send(p)
    def _keepchannelsalive(self):
        """ channels keep alive method.

            sends a presence to every joined channel every 10 minutes,
            skipping channels that answered with a 409 conflict.
        """
        nrsec = 0
        p = Presence({'to': self.cfg.user, 'txt': '' })
        while not self.stopped:
            time.sleep(1)
            nrsec += 1
            if nrsec < 600: continue
            else: nrsec = 0
            for chan in self.state['joinedchannels']:
                if chan not in self.channels409:
                    p = Presence({'to': chan})
                    self.send(p)
def connect(self, reconnect=True):
""" connect the xmpp server. """
try:
if not XMLStream.connect(self):
logging.error('%s - connect to %s:%s failed' % (self.cfg.name, self.host, self.port))
return
else: logging.warn('%s - connected' % self.cfg.name)
self.logon(self.cfg.user, self.cfg.password)
start_new_thread(self._keepalive, ())
self.requestroster()
self._raw("<presence/>")
self.connectok.set()
self.sock.settimeout(None)
return True
except Exception, ex:
handle_exception()
if reconnect:
return self.reconnect()
    def logon(self, user, password):
        """ logon on the xmpp server.

            tries auth first; when that fails, attempts an in-band
            register followed by a fresh auth, exiting the bot on failure.
        """
        iq = self.initstream()
        if not iq: logging.error("sxmpp - cannot init stream") ; return
        if not self.auth(user, password, iq.id):
            logging.warn("%s - sleeping 20 seconds before register" % self.cfg.name)
            time.sleep(20)
            if self.register(user, password):
                time.sleep(5)
                self.auth(user, password)
            else:
                time.sleep(10)
                self.exit()
                return
        XMLStream.logon(self)
    def initstream(self):
        """ send initial string sequence to the xmpp server.

            returns the parsed reply (its id is used for digest auth),
            or a falsy value on failure.
        """
        logging.debug('%s - starting initial stream sequence' % self.cfg.name)
        self._raw("""<stream:stream to='%s' xmlns='jabber:client' xmlns:stream='http://etherx.jabber.org/streams'>""" % (self.cfg.user.split('@')[1], ))
        result = self.connection.read()
        iq = self.loop_one(result)
        logging.debug("%s - initstream - %s" % (self.cfg.name, result))
        return iq
    def register(self, jid, password):
        """ register the jid to the server (in-band jabber:iq:register).

            returns True on success, False/None on failure with the iq
            error stored in self.error.
        """
        try: resource = jid.split("/")[1]
        except IndexError: resource = "jsb"
        logging.warn('%s - registering %s' % (self.cfg.name, jid))
        # ask the server for the registration form first
        self._raw("""<iq type='get'><query xmlns='jabber:iq:register'/></iq>""")
        result = self.connection.read()
        iq = self.loop_one(result)
        if not iq:
            logging.error("%s - unable to register" % self.cfg.name)
            return
        logging.debug('%s - register: %s' % (self.cfg.name, str(iq)))
        self._raw("""<iq type='set'><query xmlns='jabber:iq:register'><username>%s</username><resource>%s</resource><password>%s</password></query></iq>""" % (jid.split('@')[0], resource, password))
        result = self.connection.read()
        logging.debug('%s - register - %s' % (self.cfg.name, result))
        if not result: return False
        iq = self.loop_one(result)
        if not iq:
            logging.error("%s - can't decode data - %s" % (self.cfg.name, result))
            return False
        logging.debug('sxmpp - register - %s' % result)
        if iq.error:
            logging.warn('%s - register FAILED - %s' % (self.cfg.name, iq.error))
            if not iq.error.code: logging.error("%s - can't determine error code" % self.cfg.name) ; return False
            if iq.error.code == "405": logging.error("%s - this server doesn't allow registration by the bot, you need to create an account for it yourself" % self.cfg.name)
            elif iq.error.code == "500": logging.error("%s - %s - %s" % (self.cfg.name, iq.error.code, iq.error.text))
            else: logging.error("%s - %s" % (self.cfg.name, xmpperrors[iq.error.code]))
            self.error = iq.error
            return False
        logging.warn('%s - register ok' % self.cfg.name)
        return True
def auth(self, jid, password, digest=""):
""" auth against the xmpp server. """
logging.warn('%s - authing %s' % (self.cfg.name, jid))
name = jid.split('@')[0]
rsrc = self.cfg['resource'] or self.cfg['resource'] or 'jsb';
self._raw("""<iq type='get'><query xmlns='jabber:iq:auth'><username>%s</username></query></iq>""" % name)
result = self.connection.read()
iq = self.loop_one(result)
logging.debug('%s - auth - %s' % (self.cfg.name, result))
if ('digest' in result) and digest:
s = hashlib.new('SHA1')
s.update(digest)
s.update(password)
d = s.hexdigest()
self._raw("""<iq type='set'><query xmlns='jabber:iq:auth'><username>%s</username><digest>%s</digest><resource>%s</resource></query></iq>""" % (name, d, rsrc))
else: self._raw("""<iq type='set'><query xmlns='jabber:iq:auth'><username>%s</username><resource>%s</resource><password>%s</password></query></iq>""" % (name, rsrc, password))
result = self.connection.read()
iq = self.loop_one(result)
if not iq:
logging.error('%s - auth failed - %s' % (self.cfg.name, result))
return False
logging.debug('%s - auth - %s' % (self.cfg.name, result))
if iq.error:
logging.warn('%s - auth failed - %s' % (self.cfg.name, iq.error.code))
if iq.error.code == "401":
logging.warn("%s - wrong user or password" % self.cfg.name)
else:
logging.warn("%s - %s" % (self.cfg.name, result))
self.error = iq.error
return False
logging.warn('%s - auth ok' % self.cfg.name)
return True
    def requestroster(self):
        """ request roster from xmpp server. """
        # fire-and-forget .. the roster result arrives as an iq stanza
        self._raw("<iq type='get'><query xmlns='jabber:iq:roster'/></iq>")
    def disconnectHandler(self, ex):
        """ disconnect handler .. simply try to reconnect. """
        self.reconnect()
    def outnocb(self, printto, txt, how=None, event=None, html=False, isrelayed=False, *args, **kwargs):
        """ output txt to bot.

            printto determines the target; joined channels get type
            'groupchat', everything else 'chat'. txt gets normalized
            unless html is set.
        """
        if printto and printto in self.state['joinedchannels']: outtype = 'groupchat'
        else: outtype = "chat"
        target = printto
        if not html:
            txt = self.normalize(txt)
        # wrap the reply in a Message stanza derived from the source event
        repl = Message(event)
        repl.to = target
        repl.type = outtype
        repl.txt = txt
        if html:
            repl.html = txt
        logging.debug("%s - reply is %s" % (self.cfg.name, repl.dump()))
        if not repl.type: repl.type = 'normal'
        logging.debug("%s - sxmpp - out - %s - %s" % (self.cfg.name, printto, unicode(txt)))
        self.send(repl)
def broadcast(self, txt):
""" broadcast txt to all joined channels. """
for i in self.state['joinedchannels']:
self.say(i, txt)
    def handle_iq(self, data):
        """ iq handler .. overload this when needed. """
        # intentionally a no-op; registered as the 'iq' handler in XMLStream
        pass
    def handle_message(self, data):
        """ message handler. """
        m = Message(data)
        m.parse(self)
        # a groupchat subject change means a topic update
        if data.type == 'groupchat' and data.subject:
            logging.debug("%s - checking topic" % self.cfg.name)
            self.topiccheck(m)
            nm = Message(m)
            callbacks.check(self, nm)
            return
        # history replay from the conference server .. skip it
        if data.get('x').xmlns == 'jabber:x:delay':
            logging.debug("%s - ignoring delayed message" % self.cfg.name)
            return
        if m.isresponse:
            logging.debug("%s - message is a response" % self.cfg.name)
            return
        jid = None
        m.origjid = m.jid
        # muc-user payload may carry the sender's real jid
        for node in m.subelements:
            try: m.jid = node.x.item.jid
            except (AttributeError, TypeError): continue
        # ignore messages from the bot itself
        if self.cfg.user in m.fromm or (m.groupchat and self.cfg.nick == m.nick):
            logging.debug("%s - message to self .. ignoring" % self.cfg.name)
            return 0
        try:
            if m.type == 'error':
                if m.code:
                    logging.error('%s - error - %s' % (self.cfg.name, str(m)))
                self.errorHandler(m)
        except Exception, ex:
            handle_exception()
        self.put(m)
    def errorHandler(self, event):
        """ error handler .. calls the errorhandler set in the event. """
        try:
            logging.error("%s - error occured in %s - %s" % (self.cfg.name, event.txt, event.userhost))
            event.errorHandler()
        # events without an errorHandler of their own just get logged
        except AttributeError: logging.error('%s - unhandled error - %s' % (self.cfg.name, event.dump()))
def handle_presence(self, data):
    """ presence handler: track nick/jid mappings per conference, answer
        subscription requests, handle joins/leaves and presence errors. """
    p = Presence(data)
    p.parse()
    frm = p.fromm
    nickk = ""
    nick = p.nick
    # ignore our own presence
    if self.cfg.user in p.userhost: return 0
    if nick:
        self.userhosts[nick] = str(frm)
        nickk = nick
    jid = None
    # MUC item may expose the real jid behind the room nick
    for node in p.subelements:
        try:
            jid = node.x.item.jid
        except (AttributeError, TypeError):
            continue
    if nickk and jid:
        channel = p.channel
        if not self.jids.has_key(channel):
            self.jids[channel] = {}
        self.jids[channel][nickk] = jid
        self.userhosts[nickk] = str(jid)
        logging.debug('%s - setting jid of %s (%s) to %s' % (self.cfg.name, nickk, channel, jid))
    if p.type == 'subscribe':
        # auto-accept the subscription and subscribe back
        pres = Presence({'to': p.fromm, 'type': 'subscribed'})
        self.send(pres)
        pres = Presence({'to': p.fromm, 'type': 'subscribe'})
        self.send(pres)
    nick = p.resource
    if p.type != 'unavailable':
        p.joined = True
        p.type = 'available'
    elif self.cfg.user in p.userhost:
        # NOTE(review): unreachable — the early `return 0` above already
        # filtered presences containing our own user; confirm intent.
        try:
            del self.jids[p.channel]
            logging.debug('%s - removed %s channel jids' % (self.cfg.name, p.channel))
        except KeyError:
            pass
    else:
        # someone left .. drop the cached jid for that nick
        try:
            del self.jids[p.channel][p.nick]
            logging.debug('%s - removed %s jid' % (self.cfg.name, p.nick))
        except KeyError:
            pass
    if p.type == 'error':
        for node in p.subelements:
            try:
                err = node.error.code
            except (AttributeError, TypeError):
                err = 'no error set'
            try:
                txt = node.text.data
            except (AttributeError, TypeError):
                txt = ""
            if err:
                logging.error('%s - error - %s - %s' % (self.cfg.name, err, txt))
            # dispatch to a handle_<code> method when one exists
            try:
                method = getattr(self,'handle_' + err)
                try:
                    method(p)
                except:
                    handle_exception()
            except AttributeError:
                pass
    self.doevent(p)
def invite(self, jid):
    """ invite a user: request a subscription, then announce our presence. """
    subscribe = Presence({'to': jid, 'type': 'subscribe'})
    self.send(subscribe)
    # give the server a moment before the plain presence follows
    time.sleep(2)
    self.send(Presence({'to': jid}))
def send(self, what):
    """ send a stanza to the server after validating target and XML.

        silently drops empty payloads; logs and drops stanzas without a
        usable 'to' attribute or with invalid/malformed XML.
    """
    if not what:
        logging.debug("%s - can't send empty message" % self.cfg.name)
        return
    try:
        to = what['to']
    except (KeyError, TypeError):
        logging.error("%s - can't determine where to send %s to" % (self.cfg.name, str(what)))
        return
    try:
        jid = JID(to)
    except (InvalidJID, AttributeError):
        logging.error("%s - invalid jid - %s - %s" % (self.cfg.name, str(to), whichmodule(2)))
        return
    # never spoof a 'from' .. the server sets it
    try: del what['from']
    except KeyError: pass
    try:
        xml = what.tojabber()
        if not xml:
            raise Exception("can't convert %s to xml .. bot.send()" % what)
    except (AttributeError, TypeError):
        handle_exception()
        return
    # only put well-formed XML on the wire
    if not self.checkifvalid(xml): logging.error("%s - NOT PROPER XML - %s" % (self.cfg.name, xml))
    else: self._raw(xml)
def action(self, printto, txt, fromm=None, groupchat=True, event=None, *args, **kwargs):
    """ send an action (/me) message to printto. """
    txt = "/me " + txt
    if self.google:
        fromm = self.cfg.user
    payload = {'to': printto, 'txt': txt}
    # joined conferences get a groupchat stanza when groupchat is requested
    if groupchat and printto in self.state['joinedchannels']:
        payload['type'] = 'groupchat'
    message = Message(payload)
    if fromm: message.fromm = fromm
    self.send(message)
def save(self):
    """ persist the bot's state, if any. """
    if not self.state:
        return
    self.state.save()
def quit(self):
    """ send unavailable presence to every joined channel and to ourselves. """
    # don't announce anything while in error state
    if self.error: return
    presence = Presence({'type': 'unavailable' ,'to': self.cfg.user})
    if self.state:
        for i in self.state.data.joinedchannels:
            presence.to = i
            self.send(presence)
    # finally sign off with our own jid
    presence = Presence({'type': 'unavailable', 'to': self.cfg.user})
    presence['from'] = self.cfg.user
    self.send(presence)
def setstatus(self, status, show=""):
""" send status presence. """
if self.error: return
if self.state:
self.state['status'] = status
self.state['show'] = show
self.state.save()
presence = Presence({'status': status, 'show': show ,'to': self.cfg.user})
self.send(presence)
def shutdown(self):
    """ signal the output loop to stop by queueing a None sentinel. """
    self.outqueue.put_nowait(None)
def join(self, channel, password=None, nick=None):
    """ join a conference (MUC).

        channel  .. conference jid, optionally suffixed with /nick
        password .. optional room key
        nick     .. nick to use; falls back to the /nick suffix, then cfg.nick

        returns 1 on success; IRC-style '#' channels are ignored.
    """
    if channel.startswith("#"): return
    # nick can be passed explicitly or embedded as channel/nick
    try:
        if not nick: nick = channel.split('/')[1]
    except IndexError: nick = self.cfg.nick or "jsonbot"
    channel = channel.split('/')[0]
    presence = Presence({'to': channel + '/' + nick})
    if password:
        presence.x.password = password
    self.send(presence)
    self.timejoined[channel] = time.time()
    # persist per-channel settings
    chan = ChannelBase(channel, self.botname)
    chan.data['nick'] = nick
    if password:
        chan.data['key'] = password
    if not chan.data.has_key('cc'):
        chan.data['cc'] = self.cfg['defaultcc'] or '!'
    if channel not in self.state['joinedchannels']:
        self.state['joinedchannels'].append(channel)
        self.state.save()
    # a successful join clears any earlier nick-conflict (409) marker
    if channel in self.channels409:
        self.channels409.remove(channel)
    chan.save()
    return 1
def part(self, channel):
    """ leave a conference; IRC-style '#' channels are ignored. """
    if channel.startswith("#"):
        return
    goodbye = Presence({'to': channel})
    goodbye.type = 'unavailable'
    self.send(goodbye)
    joined = self.state['joinedchannels']
    if channel in joined:
        joined.remove(channel)
    self.state.save()
    return 1
def outputnolog(self, printto, what, how, who=None, fromm=None):
    """ do output but don't log it. """
    # NOTE(review): any truthy fromm suppresses output entirely — presumably
    # to avoid relay/echo loops; confirm intent against callers.
    if fromm: return
    self.saynocb(printto, what)
def topiccheck(self, msg):
    """ record a topic change: cache (topic, setter, timestamp) per channel. """
    if msg.groupchat:
        try:
            topic = msg.subject
            if not topic: return None
            self.topics[msg.channel] = (topic, msg.userhost, time.time())
            logging.debug('%s - topic of %s set to %s' % (self.cfg.name, msg.channel, topic))
        except AttributeError: return None
def settopic(self, channel, txt):
    """ set the topic (subject) of a conference channel. """
    topicmsg = Message({'to': channel, 'subject': txt})
    topicmsg.type = 'groupchat'
    self.send(topicmsg)
def gettopic(self, channel):
    """ return the cached (topic, userhost, time) tuple for channel, or None. """
    return self.topics.get(channel)
def domsg(self, msg):
    """ dispatch a message event on the bot's event handler. """
    self.doevent(msg)
def normalize(self, what):
    """ strip IRC color/control codes and simple markup tags from outgoing text.

        the original repeated several identical replace() calls; the
        duplicates have been removed — behavior is unchanged.
    """
    what = stripcolor(what)
    # strip IRC bold (\002) and color (\003) control characters
    what = what.replace("\002", "")
    what = what.replace("\003", "")
    # strip simple bold/italic markup
    for tag in ("<b>", "</b>", "<i>", "</i>"):
        what = what.replace(tag, "")
    return what
def doreconnect(self):
    """ reconnect to the server: build a fresh sxmpp bot with the same
        config, shut this instance down and swap it into the fleet. """
    botjid = self.cfg.user
    newbot = getfleet().makebot('sxmpp', self.cfg.name, config=self.cfg)
    if not newbot: raise Exception(self.cfg.dump())
    # carry the reconnect counter over so backoff keeps growing
    newbot.reconnectcount = self.reconnectcount
    self.exit()
    if newbot.start():
        #self.cfg.user += '.old'
        newbot.joinchannels()
        if fleet.replace(botjid, newbot): return True
    return False
| Python |
# jsb/web/event.py
#
#
""" web event. """
## jsb imports
from jsb.lib.eventbase import EventBase
from jsb.utils.generic import splittxt, fromenc, toenc
from jsb.utils.xmpp import stripped
from jsb.lib.outputcache import add
from jsb.utils.url import getpostdata
from jsb.utils.exception import handle_exception
from jsb.lib.channelbase import ChannelBase
## gaelib imports
from jsb.utils.gae.auth import checkuser
## basic imports
import cgi
import logging
import re
## WebEvent class
class WebEvent(EventBase):
def __init__(self, bot=None):
EventBase.__init__(self, bot=bot)
self.bottype = "web"
self.cbtype = "WEB"
self.bot = bot
def __deepcopy__(self, a):
e = WebEvent()
e.copyin(self)
return e
def parse(self, response, request):
""" parse request/response into a WebEvent. """
how = request.get('how')
if not how:
try: how = request.params.getone('how')
except KeyError: how = "normal"
except Exception, ex:
how = "normal"
handle_exception()
if not how:
try: how = request.GET['how']
except KeyError: pass
self.how = how
if self.how == "undefined": self.how = "normal"
logging.warn("web - how is %s" % self.how)
self.webchan = request.get('webchan')
input = request.get('content') or request.get('cmnd')
if not input:
try: input = request.params.getone('content') or request.params.getone('cmnd')
except KeyError: input = ""
except Exception, ex:
input = ""
handle_exception()
if not input:
try: input = request.GET['content'] or request.GET['cmnd']
except KeyError: pass
self.isweb = True
self.origtxt = fromenc(input.strip(), self.bot.encoding)
self.txt = self.origtxt
self.usercmnd = self.txt and self.txt.split()[0]
self.groupchat = False
self.response = response
self.request = request
(userhost, user, u, nick) = checkuser(response, request, self)
self.userhost = fromenc(userhost)
self.nick = fromenc(nick)
self.auth = fromenc(userhost)
self.stripped = stripped(self.auth)
self.domain = None
self.channel = stripped(userhost)
logging.debug(u'web - parsed - %s - %s' % (self.txt, self.userhost))
self.makeargs()
return self
def reply(self, txt, result=[], event=None, origin="", dot=u", ", nr=600, extend=0, *args, **kwargs):
""" reply to this event """#
if self.checkqueues(result): return
if not txt: return
if self.how == "background":
txt = self.bot.makeoutput(self.channel, txt, result, origin=origin, nr=nr, extend=extend, *args, **kwargs)
self.bot.outnocb(self.channel, txt, self.how, response=self.response, event=self)
else: self.bot.say(self.channel, txt, result, self.how, event=self)
return self
| Python |
# jsb/gae/web/bot.py
#
#
""" GAE web bot. """
## jsb imports
from jsb.lib.botbase import BotBase
from jsb.lib.outputcache import add
from jsb.utils.generic import toenc, fromenc, strippedtxt
from jsb.utils.url import re_url_match
from jsb.utils.timeutils import hourmin
from jsb.lib.channelbase import ChannelBase
## basic imports
import logging
import re
import cgi
import urllib
import time
## WebBot class
class WebBot(BotBase):
    """ webbot just inherits from botbase for now. """

    def __init__(self, cfg=None, users=None, plugs=None, botname="gae-web", *args, **kwargs):
        BotBase.__init__(self, cfg, users, plugs, botname, *args, **kwargs)
        assert self.cfg
        self.isgae = True
        self.type = u"web"

    def _raw(self, txt, response, end=u"<br>"):
        """ put txt to the client, appending the HTML line terminator. """
        if not txt: return
        txt = txt + end
        logging.debug("%s - out - %s" % (self.cfg.name, txt))
        response.out.write(txt)

    def outnocb(self, channel, txt, how="cache", event=None, origin=None, response=None, dotime=False, *args, **kwargs):
        """ output txt: linkify URLs, then either write straight to the
            HTTP response or push to the channel's web clients. """
        txt = self.normalize(txt)
        if event and event.how != "background":
            logging.warn("%s - out - %s" % (self.cfg.name, txt))
        if "http://" in txt or "https://" in txt:
            # wrap each detected URL in a clickable anchor
            for item in re_url_match.findall(txt):
                logging.debug("web - raw - found url - %s" % item)
                url = u'<a href="%s" onclick="window.open(\'%s\'); return false;">%s</a>' % (item, item, item)
                try: txt = re.sub(item, url, txt)
                except ValueError: logging.error("web - invalid url - %s" % url)
        if response: self._raw(txt, response)
        else: self.update_web(channel, txt)

    def normalize(self, txt):
        """ normalize outgoing text for web delivery.

            NOTE(review): every replace() below substitutes a string with
            itself and is a no-op as written — this looks like HTML-entity
            decoding (e.g. "&lt;b&gt;" -> "<b>") that was mangled at some
            point; confirm against the upstream source.
        """
        #txt = cgi.escape(txt)
        txt = txt.replace("<br>", "<br>")
        txt = txt.replace("<b>", "<b>")
        txt = txt.replace("</b>", "</b>")
        txt = txt.replace("<i>", "<i>")
        txt = txt.replace("</i>", "</i>")
        txt = txt.replace("<h2>", "<h2>")
        txt = txt.replace("</h2>", "</h2>")
        txt = txt.replace("<h3>", "<h3>")
        txt = txt.replace("</h3>", "</h3>")
        txt = txt.replace("<li>", "<li>")
        txt = txt.replace("</li>", "</li>")
        #txt = txt.replace("<", "&lt;")
        #txt = txt.replace(">", "&gt;")
        txt = strippedtxt(txt)
        return txt

    def update_web(self, channel, txt, end="<br>"):
        """ push txt to all registered GAE Channel API clients of channel,
            pruning client ids that have become invalid. """
        from google.appengine.api.channel import channel as gchan
        chan = ChannelBase(channel, botname="gae-web")
        #logging.warn("%s - webchannels are %s" % (self.cfg.name, chan.data.webchannels))
        remove = []
        for c in chan.data.webchannels:
            try:
                if c:
                    logging.debug("%s - sending to channel %s" % (self.cfg.name, chan))
                    gchan.send_message(c, txt + end)
            except gchan.InvalidChannelClientIdError:
                # stale client .. collect for removal after iteration
                remove.append(c)
        if remove:
            for c in remove: chan.data.webchannels.remove(c) ; logging.debug("%s - closing channel %s" % (self.cfg.name, chan))
            chan.save()
| Python |
# jsb/gae/wave/event.py
#
#
""" google wave events. """
## jsb imports
from jsb.lib.eventbase import EventBase
from jsb.utils.exception import handle_exception
from jsb.utils.gae.auth import finduser
from jsb.drivers.gae.wave.waves import Wave
## basic imports
import logging
import cgi
import re
import time
## defines
findurl = re.compile(u"(http://.*)?")
## WaveEventclass
class WaveEvent(EventBase):
    """ a wave event. """

    def __init__(self):
        EventBase.__init__(self)
        self.bottype = "wave"
        self.msg = False
        self.target = None       # blip that append() replies into (lazy)
        self.roottarget = None   # blip that append_root() replies into (lazy)
        self.rootreply = None
        self.gadget = None
        self.result = []         # everything appended so far
        self.cbtype = "BLIP_SUBMITTED"

    def parse(self, bot, event, wavelet):
        """ parse waveapi event properties and wavelet context into this event.

            gadget elements flagged 'gadgetcmnd' turn the event into a
            DISPATCH command; otherwise the blip content is the command text.
        """
        self.bot = bot
        self.eventin = event
        self.wavelet = wavelet
        self.waveid = self.wavelet._wave_id
        self.blipid = self.eventin.blip_id
        self.blip = self.eventin.blip
        if not self.blip:
            # no blip payload .. build an empty event
            logging.warn("can't get blip id: %s" % self.blipid)
            self.contributors = []
            self.txt = ""
            self.usercmnd = ""
            self.userhost = ""
            self.ispoller = False
        else:
            self.contributors = self.blip._contributors
            self.origtxt = self.blip._content.strip()
            self.txt = self.origtxt
            # first word after the control char is the command
            if len(self.txt) >= 2: self.usercmnd = self.txt[1:].split()[0]
            else: self.usercmnd = None
            self.userhost = self.blip._creator
            self.elements = self.blip._elements
            for nr, elem in self.elements.iteritems():
                logging.debug("wave - element - %s - %s" % (str(elem), dir(elem)))
                if elem.get('ispoller') == 'yes': self.ispoller = True
                if elem.get('gadgetcmnd') == 'yes':
                    # gadget-driven command .. override txt/channel/user
                    self.cbtype = "DISPATCH"
                    logging.debug("wave.event - dispatch - %s" % str(elem))
                    self.txt = u"!" + elem.get("cmnd")
                    self.channel = self.waveid = elem.get("waveid")
                    self.gadgetnr = nr
                    self.cmndhow = elem.get('how')
                    self.userhost = elem.get('who')
        self.auth = self.userhost
        self.nick = self.userhost.split("@")[0]
        logging.debug("wave - event - auth is %s" % self.auth)
        self.root = wavelet
        self.rootblipid = wavelet._root_blip.blip_id
        self.rootblip = wavelet._root_blip
        self.raw_data = self.root._raw_data
        self.domain = self.wavelet.domain
        self.channel = self.waveid
        self.origin = self.channel
        self.title = self.root._title or self.channel
        self.cmnd = self.cbtype = event.type
        # sandbox waves live under a different URL scheme
        if 'sandbox' in self.waveid:
            self.url = "https://wave.google.com/a/wavesandbox.com/#restored:wave:%s" % self.waveid.replace('w+','w%252B')
        else:
            self.url = "https://wave.google.com/wave/#restored:wave:%s" % self.waveid.replace('w+','w%252B')
        #self.chan = Wave(self.waveid)
        #self.chan.parse(self.eventin, self.wavelet)
        self.bind(self.bot, chan=Wave(self.channel))
        self.makeargs()
        logging.debug(u'wave - in - %s - %s - %s' % (self.title, self.userhost, self.txt))

    def __deepcopy__(self, a):
        """ deepcopy a wave event. """
        e = WaveEvent()
        e.copyin(self)
        return e

    def toppost(self, txt):
        """ reply to the rootblip with txt and update channel stats. """
        reply = self.rootblip.reply()
        reply.append(txt)
        if self.chan:
            self.chan.data.seenblips += 1
            self.chan.data.lastedited = time.time()
        return reply

    def insert_root(self, item):
        """ insert item in rootblip. """
        reply = self.rootblip.append(item)
        if self.chan:
            self.chan.data.seenblips += 1
            self.chan.data.lastedited = time.time()
        return self

    def set_title(self, title, cloned=False):
        """ set title of wave; cloned waves get a #<nr> suffix. """
        if cloned and self.chan and self.chan.data.nrcloned:
            title = "#".join(title.split("#")[:-1])
            title += "#%s" % str(self.chan.data.nrcloned)
        logging.info("wave - setting title - %s" % title)
        self.root._set_title(title)
        return self

    def append(self, item, annotations=None):
        """ append to a new reply blip (created lazily on first call).
            annotations is a list of (start, end, name, value) tuples. """
        if not self.target and self.blip: self.target = self.blip.reply()
        self.result.append(item)
        try: self.target.append(item)
        except Exception, ex: handle_exception()
        logging.debug("wave - append - annotations are %s" % str(annotations))
        if annotations:
            for ann in annotations:
                if ann[0]:
                    try: self.target.range(ann[0], ann[1]).annotate(ann[2], ann[3])
                    except Exception, ex: handle_exception()
        if self.chan:
            self.chan.data.seenblips += 1
            self.chan.data.lastedited = time.time()
        return self

    def append_root(self, item , annotations=None):
        """ append to a reply on the rootblip (created lazily).
            NOTE(review): the annotations parameter is accepted but unused. """
        if not self.roottarget: self.roottarget = self.rootblip.reply()
        self.roottarget.append(item)
        self.result.append(item)
        if self.chan:
            self.chan.data.seenblips += 1
            self.chan.data.lastedited = time.time()
        return self.roottarget

    def appendtopper(self, item):
        """ append item directly to the rootblip (top post). """
        self.rootblip.append(item)
        self.result.append(item)
        if self.chan:
            self.chan.data.seenblips += 1
            self.chan.data.lastedited = time.time()
        return self.rootblip

    def replyroot(self, txt, resultlist=[], event=None, origin="", dot=", ", *args, **kwargs):
        """ reply to the wave root with formatted output. """
        if self.checkqueues(resultlist): return
        outtxt = self.makeresponse(txt, resultlist, dot, *args, **kwargs)
        if not outtxt: return
        self.result.append(outtxt)
        (res1, res2) = self.less(outtxt)
        self.write_root(res1)

    def write_root(self, outtxt, end="\n", root=None):
        """ write to the root of a wave and notify output monitors. """
        self.append_root(outtxt + end)
        self.replied = True
        self.bot.outmonitor(self.origin, self.channel, outtxt, self)

    def submit(self):
        """ submit the pending wavelet operations to the bot. """
        self.bot.submit(self.wavelet)
| Python |
# jsb/wave/waves.py
#
#
""" class to repesent a wave. """
## jsb imports
from jsb.lib.channelbase import ChannelBase
from jsb.utils.exception import handle_exception
from jsb.utils.locking import lockdec
from jsb.utils.generic import strippedtxt, toenc, fromenc
## basic imports
import logging
import copy
import os
import time
import re
import thread
## defines
findurl = re.compile("([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}|(((news|telnet|nttp|file|http|ftp|https)://)|(www|ftp)[-A-Za-z0-9]*\\.)[-A-Za-z0-9\\.]+)(:[0-9]*)?/[-A-Za-z0-9_\\$\\.\\+\\!\\*\\(\\),;:@&=\\?/~\\#\\%]*[^]'\\.}>\\),\\\"]")
cpy = copy.deepcopy
saylock = thread.allocate_lock()
saylocked = lockdec(saylock)
## Wave class (channel)
class Wave(ChannelBase):
    """ a wave is seen as a channel. """

    def __init__(self, waveid, botname=None):
        ChannelBase.__init__(self, waveid, botname)
        # initialize persisted counters on first use
        self.data.seenblips = self.data.seenblips or 0
        self.data.threshold = self.data.threshold or -1
        self.data.nrcloned = self.data.nrcloned or 0
        self.data.waveid = waveid
        self.wavelet = None
        self.event = None
        logging.debug("created wave with id: %s" % waveid)

    def parse(self, event, wavelet):
        """ parse a waveapi event/wavelet into this Wave, caching the
            serialized wavelet so it can be revived later (blind_wavelet). """
        self.data.json_data = wavelet.serialize()
        self.data.title = wavelet._title
        self.data.waveletid = wavelet._wavelet_id
        self.wavelet = wavelet
        self.event = event
        logging.debug("parsed %s (%s) channel" % (self.data.waveid, self.data.title))
        return self

    def set_title(self, title, cloned=False):
        """ set title of wave. """
        self.event.set_title(title, cloned)

    def clone(self, bot, event, title=None, report=False, participants=[]):
        """ clone the wave into a new one: copy participants, title (with a
            #<nr> clone counter), and optionally the root blip content. """
        if participants: parts = participants
        else: parts = list(event.root.participants)
        newwave = bot.newwave(event.domain, parts)
        logging.info("wave - clone - populating wave with %s" % str(parts))
        for id in parts: newwave.participants.add(id)
        if title:
            # bump the #<nr> clone counter in the title
            if '#' in title:
                title = "#".join(title.split("#")[:-1])
                title += "#%s" % str(self.data.nrcloned + 1)
            else: title += " - #%s" % str(self.data.nrcloned + 1)
            newwave._set_title(title)
        if report:
            # carry the old root blip text and gadgets over
            try: txt = '\n'.join(event.rootblip.text.split('\n')[2:])
            except IndexError: txt = event.rootblip.text
            newwave._root_blip.append(u'%s\n' % txt)
            for element in event.rootblip.elements:
                if element.type == 'GADGET': newwave._root_blip.append(element)
            blip = newwave.reply()
            blip.append("\nthis wave is cloned from %s\n" % event.url)
        else: newwave._root_blip.append("PROTECTED WAVE")
        wavelist = bot.submit(newwave)
        logging.info("wave - clone - %s - submit returned %s" % (list(newwave.participants), str(wavelist)))
        if not wavelist:
            logging.warn("submit of new wave failed")
            return
        try:
            # dig the new wave id out of the submit response
            waveid = None
            for item in wavelist:
                try: waveid = item['data']['waveId']
                except (KeyError, ValueError): continue
                logging.info("wave - newwave id is %s" % waveid)
            if not waveid:
                logging.error("can't extract waveid from submit data")
                return
            if waveid and 'sandbox' in waveid:
                url = "https://wave.google.com/a/wavesandbox.com/#restored:wave:%s" % waveid.replace('w+','w%252B')
            else:
                url = "https://wave.google.com/wave/#restored:wave:%s" % waveid.replace('w+','w%252B')
            # retire the old wave and register the clone
            oldwave = Wave(event.waveid)
            oldwave.data.threshold = -1
            oldwave.save()
            wave = Wave(waveid)
            wave.parse(event, newwave)
            wave.data.json_data = newwave.serialize()
            wave.data.threshold = self.data.threshold or 200
            wave.data.nrcloned = self.data.nrcloned + 1
            wave.data.url = url
            wave.save()
        except Exception, ex:
            handle_exception()
            return
        return wave

    @saylocked
    def say(self, bot, txt):
        """ output some txt to the wave, annotating detected URLs as links.
            serialized by saylock to avoid interleaved submits. """
        if self.data.json_data: wavelet = bot.blind_wavelet(self.data.json_data)
        else:
            logging.info("did not join channel %s" % self.id)
            return
        if not wavelet:
            logging.error("cant get wavelet")
            return
        txt = bot.normalize(txt)
        txt = unicode(txt.strip())
        logging.debug(u'wave - out - %s - %s' % (self.data.title, txt))
        try:
            annotations = []
            for url in txt.split():
                got = url.find("http://")
                if got != -1:
                    logging.debug("wave - found url - %s" % str(url))
                    start = txt.find(url.strip())
                    # <url> style links get the brackets stripped from the anchor
                    if url.endswith(">"): annotations.append((start+2, start+len(url)-1, "link/manual", url[1:-1]))
                    else: annotations.append((start, start+len(url), "link/manual", url))
        except Exception, ex: handle_exception()
        logging.debug("annotations used: %s", annotations)
        reply = wavelet.reply(txt)
        if annotations:
            for ann in annotations:
                if ann[0]:
                    try: reply.range(ann[0], ann[1]).annotate(ann[2], ann[3])
                    except Exception, ex: handle_exception()
        logging.info("submitting to server: %s" % wavelet.serialize())
        try:
            import google
            bot.submit(wavelet)
        except google.appengine.api.urlfetch_errors.DownloadError: handle_exception()
        self.data.seenblips += 1
        self.data.lastedited = time.time()
        #self.save()

    # no-callback variant is the same operation for waves
    saynocb = say

    def toppost(self, bot, txt):
        """ toppost some txt to the wave (reply on the root blip). """
        if self.data.json_data:
            logging.debug("wave - say - using BLIND - %s" % self.data.json_data)
            wavelet = bot.blind_wavelet(self.data.json_data)
        else:
            logging.info("did not join channel %s" % self.id)
            return
        if not wavelet:
            logging.error("cant get wavelet")
            return
        logging.debug('wave - out - %s - %s' % (self.data.title, txt))
        try:
            import google
            blip = wavelet._root_blip.reply()
            blip.append(txt)
            bot.submit(wavelet)
        except google.appengine.api.urlfetch_errors.DownloadError: handle_exception()
        self.data.seenblips += 1
        self.data.lastedited = time.time()
        self.save()
| Python |
# jsb/wave/bot.py
#
#
""" google wave bot. """
## jsb imports
from jsb.lib.persist import Persist
from jsb.lib.botbase import BotBase
from jsb.lib.plugins import plugs
from jsb.version import getversion
from jsb.lib.callbacks import callbacks
from jsb.lib.outputcache import add
from jsb.lib.config import Config, getmainconfig
from jsb.utils.locking import lockdec
from jsb.utils.exception import handle_exception
from jsb.utils.generic import strippedtxt
from jsb.utils.trace import calledfrom
from jsb.lib.jsbimport import _import_byfile
from jsb.lib.datadir import getdatadir
import jsb.contrib.simplejson as simplejson
## gaelib imports
from jsb.utils.gae.auth import finduser
from event import WaveEvent
from waves import Wave
## waveapi v2 imports
from waveapi import events
from waveapi import robot
from waveapi import element
from waveapi import ops
from waveapi import blip
from waveapi import appengine_robot_runner
import waveapi
## generic imports
import logging
import cgi
import os
import time
import thread
import sys
## defines
saylock = thread.allocate_lock()
saylocked = lockdec(saylock)
## WaveBot claass
class WaveBot(BotBase, robot.Robot):
""" bot to implement google wave stuff. """
def __init__(self, cfg=None, users=None, plugs=None, name=None, domain=None,
image_url='http://jsonbot.appspot.com/assets/favicon.png',
profile_url='http://jsonbot.appspot.com/', *args, **kwargs):
sname = 'jsb'
BotBase.__init__(self, cfg, users, plugs, name, *args, **kwargs)
assert self.cfg
self.type = 'wave'
if domain: self.cfg.domain = domain
else: self.cfg.domain = getmainconfig().domain or "wave,google.com"
self.cfg.nick = name or "gae-wave"
robot.Robot.__init__(self, name=self.cfg.name, image_url=image_url, profile_url=profile_url)
credentials = _import_byfile("credentials", getdatadir() + os.sep + "config" + os.sep + "credentials.py")
self.set_verification_token_info(credentials.verification_token[self.cfg.domain], credentials.verification_secret[self.cfg.domain])
self.setup_oauth(credentials.Consumer_Key[self.cfg.domain], credentials.Consumer_Secret[self.cfg.domain],
server_rpc_base=credentials.RPC_BASE[self.cfg.domain])
self.register_handler(events.BlipSubmitted, self.OnBlipSubmitted)
self.register_handler(events.WaveletSelfAdded, self.OnSelfAdded)
self.register_handler(events.WaveletParticipantsChanged, self.OnParticipantsChanged)
self.iswave = True
self.isgae = True
def OnParticipantsChanged(self, event, wavelet):
""" invoked when any participants have been added/removed. """
wevent = WaveEvent()
wevent.parse(self, event, wavelet)
whitelist = wevent.chan.data.whitelist
if not whitelist: whitelist = wevent.chan.data.whitelist = []
participants = event.participants_added
logging.warning("wave - %s - %s joined" % (wevent.chan.data.title, participants))
if wevent.chan.data.protected:
for target in participants:
if target not in whitelist and target != 'jsb@appspot.com' and target != wevent.chan.data.owner:
logging.warn("wave - %s - setting %s to read-only" % (wevent.chan.data.title, target))
wevent.root.participants.set_role(target, waveapi.wavelet.Participants.ROLE_READ_ONLY)
callbacks.check(self, wevent)
def OnSelfAdded(self, event, wavelet):
""" invoked when the robot has been added. """
logging.warn('wave - joined "%s" (%s) wave' % (wavelet._wave_id, wavelet._title))
wevent = WaveEvent()
wevent.parse(self, event, wavelet)
logging.debug("wave - owner is %s" % wevent.chan.data.owner)
wevent.chan.data.json_data = wavelet.serialize()
wevent.chan.save()
wevent.reply("Welcome to %s (see !help) or http://jsonbot.appspot.com/docs/html/index.html" % getversion())
callbacks.check(self, wevent)
def OnBlipSubmitted(self, event, wavelet):
""" new blip added. here is where the command dispatching takes place. """
wevent = WaveEvent()
wevent.parse(self, event, wavelet)
wevent.auth = wevent.userhost
wave = wevent.chan
#wave.data.seenblips += 1
wave.data.lastedited = time.time()
self.doevent(wevent)
def _raw(self, txt, event=None, *args, **kwargs):
""" output some txt to the wave. """
assert event.chan
if not event.chan: logging.error("%s - event.chan is not set" % self.cfg.name) ; return
if event.chan.data.json_data: wavelet = self.blind_wavelet(event.chan.data.json_data)
else: logging.warn("did not join channel %s" % event.chan.data.id) ; return
if not wavelet: logging.error("cant get wavelet") ; return
txt = self.normalize(txt)
txt = unicode(txt.strip())
logging.warn("%s - wave - out - %s" % (self.cfg.name, txt))
try:
annotations = []
for url in txt.split():
got = url.find("http://")
if got != -1:
logging.debug("wave - found url - %s" % str(url))
start = txt.find(url.strip())
if url.endswith(">"): annotations.append((start+2, start+len(url)-1, "link/manual", url[1:-1]))
else: annotations.append((start, start+len(url), "link/manual", url))
except Exception, ex: handle_exception()
logging.debug("annotations used: %s", annotations)
reply = wavelet.reply(txt)
if annotations:
for ann in annotations:
if ann[0]:
try: reply.range(ann[0], ann[1]).annotate(ann[2], ann[3])
except Exception, ex: handle_exception()
logging.info("submitting to server: %s" % wavelet.serialize())
try:
import google
self.submit(wavelet)
except google.appengine.api.urlfetch_errors.DownloadError: handle_exception()
def outnocb(self, waveid, txt, result=[], event=None, origin="", dot=", ", *args, **kwargs):
""" output to the root id. """
if not self._server_rpc_base or not (self.cfg.domain in self._server_rpc_base):
credentials = _import_byfile("credentials", getdatadir() + os.sep + "config" + os.sep + "credentials.py")
rpc_base = credentials.RPC_BASE[waveid.split("!")[0]]
self._server_rpc_base = rpc_base
logging.warn("%s - %s - server_rpc_base is %s" % (self.cfg.name, waveid, self._server_rpc_base))
if not event: logging.error("wave - event not set - %s" % calledfrom(sys._getframe(0)))
logging.warn("wave - creating new event.")
wave = Wave(waveid)
wave.say(self, txt)
def toppost(self, waveid, txt):
""" output to the root id. """
if not self.cfg.domain in waveid:
logging.warn("%s - not connected - %s" % (self.cfg.name, waveid))
return
wave = Wave(waveid)
if wave and wave.data.waveid: wave.toppost(self, txt)
else: logging.warn("%s - we are not joined to %s" % (self.cfg.name, waveid))
def newwave(self, domain=None, participants=None, submit=False):
""" create a new wave. """
logging.info("wave - new wave on domain %s" % domain)
newwave = self.new_wave(domain or self.cfg.domain, participants=participants, submit=submit)
return newwave
def run(self):
""" start the bot on the runner. """
appengine_robot_runner.run(self, debug=True, extra_handlers=[])
def normalize(self, what):
""" convert markup to IRC bold. """
txt = strippedtxt(what)
txt = txt.replace("<b>", "")
txt = txt.replace("</b>", "")
txt = txt.replace("<i>", "")
txt = txt.replace("</i>", "")
txt = txt.replace("<b>", "")
txt = txt.replace("</b>", "")
txt = txt.replace("<i>", "")
txt = txt.replace("</i>", "")
txt = txt.replace("<h2>", "")
txt = txt.replace("</h2>", "")
txt = txt.replace("<h3>", "")
txt = txt.replace("</h3>", "")
txt = txt.replace("<li>", "")
txt = txt.replace("</li>", "")
return txt
| Python |
# jsb/gae/tasks.py
#
#
""" appengine tasks related classes and functions. """
## jsb imports
from jsb.utils.exception import handle_exception
## google imports
from google.appengine.api.labs.taskqueue import Task, Queue
## simplejson imports
from jsb.imports import getjson
json = getjson()
## basic imports
import uuid
## Event Classes
class BotEvent(Task):
    """ a taskqueue Task carrying one serialized bot event. """
    pass
## defines

# nine named taskqueues (queue0 .. queue8), indexed by event speed
queues = [Queue("queue" + str(nr)) for nr in range(9)]
## start_botevent function
def start_botevent(bot, event, speed=5):
    """ start a new botevent task.

        serializes bot and event to JSON and enqueues a task on the queue
        matching speed (0-8, default 5); failures are logged, not raised.
    """
    try:
        try: speed = int(speed)
        except: speed = 5
        event.botevent = True
        # task name: command (without control char) plus a uuid for uniqueness
        if event.usercmnd[0] == "!": e = event.usercmnd[1:]
        else: e = event.usercmnd
        name = e + "-" + str(uuid.uuid4())
        payload = json.dumps({ 'bot': bot.tojson(),
                               'event': event.tojson()
                             })
        be = BotEvent(name=name, payload=payload, url="/tasks/botevent")
        try: queues[int(speed)].add(be)
        except TypeError: queues[speed].add(be)
    except Exception, ex:
        handle_exception()
| Python |
# gaelib/xmpp/event.py
#
#
""" an xmpp event. """
## jsb imports
from jsb.lib.eventbase import EventBase
from jsb.utils.xmpp import stripped, resource
from jsb.utils.lazydict import LazyDict
from jsb.utils.gae.auth import checkuser
from jsb.utils.generic import strippedtxt
## xmpp import
from jsb.contrib.xmlstream import XMLescape, XMLunescape
## basic imports
import cgi
import logging
import re
## XMPPEvent class
class XMPPEvent(EventBase):
    """ an XMPP event. """

    def __init__(self, bot=None):
        EventBase.__init__(self)
        self.bottype = "xmpp"
        self.cbtype = 'MESSAGE'
        self.bot = bot

    def __deepcopy__(self, a):
        """ make a deepcopy of this XMPPEvent. """
        return XMPPEvent().copyin(self)

    def parse(self, request, response):
        """ parse incoming request/response into a XMPPEvent.

            copies the POST payload in, resolves the authenticated user,
            and derives channel/nick/txt from the 'from' jid and body.
        """
        self.copyin(LazyDict(request.POST))
        (userhost, user, u, nick) = checkuser(response, request)
        self.userhost = self['from']
        # NOTE(review): self.channel is only assigned further down — this
        # early read presumably picks up a value from copyin/EventBase;
        # confirm it is intentional.
        self.origin = self.channel
        # prefer the google-account email as auth, fall back to the bare jid
        if user: self.auth = user.email()
        else: self.auth = stripped(self.userhost)
        logging.info('xmpp - auth is %s' % self.auth)
        self.resource = resource(self['from'])
        self.jid = self['from']
        self.to = stripped(self['to'])
        self.channel = stripped(self.userhost)
        self.stripped = stripped(self.userhost)
        self.nick = self.stripped.split("@")[0]
        self.origin = self.channel
        input = self.body or self.stanza
        input = input.strip()
        self.origtxt = input
        self.txt = input
        self.usercmnd = self.txt.split()[0].lower()
        logging.debug(u'xmpp - in - %s - %s' % (self.userhost, self.txt))
        return self
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.